diff --git a/.github/workflows/claude-ci-failure-auto-fix.yml b/.github/workflows/claude-ci-failure-auto-fix.yml index 070a4acd14..ab07c8ae10 100644 --- a/.github/workflows/claude-ci-failure-auto-fix.yml +++ b/.github/workflows/claude-ci-failure-auto-fix.yml @@ -22,7 +22,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ github.event.workflow_run.head_branch }} fetch-depth: 0 diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml index 6dbe068c3d..da37df6de7 100644 --- a/.github/workflows/claude-dependabot.yml +++ b/.github/workflows/claude-dependabot.yml @@ -30,7 +30,7 @@ jobs: actions: read # Required for CI access steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml index 8e165b823e..ee901fe5d4 100644 --- a/.github/workflows/claude.yml +++ b/.github/workflows/claude.yml @@ -40,7 +40,7 @@ jobs: actions: read # Required for CI access steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index a6c36ed86c..966243323c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -58,7 +58,7 @@ jobs: # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml index eae6eea5d2..dad99cb8d9 100644 --- a/.github/workflows/copilot-setup-steps.yml +++ b/.github/workflows/copilot-setup-steps.yml @@ -27,7 +27,7 @@ jobs: # If you do not check out your code, Copilot will do this for you. 
steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 submodules: true diff --git a/.github/workflows/docs-block-sync.yml b/.github/workflows/docs-block-sync.yml index 4977877b19..32f205019f 100644 --- a/.github/workflows/docs-block-sync.yml +++ b/.github/workflows/docs-block-sync.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 diff --git a/.github/workflows/docs-claude-review.yml b/.github/workflows/docs-claude-review.yml index 1643fe1c49..ca2788b387 100644 --- a/.github/workflows/docs-claude-review.yml +++ b/.github/workflows/docs-claude-review.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 diff --git a/.github/workflows/docs-enhance.yml b/.github/workflows/docs-enhance.yml index 4baa882cd1..52607fa5df 100644 --- a/.github/workflows/docs-enhance.yml +++ b/.github/workflows/docs-enhance.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 diff --git a/.github/workflows/platform-autogpt-deploy-dev.yaml b/.github/workflows/platform-autogpt-deploy-dev.yaml index 6e1e23d3eb..b415fb1b7b 100644 --- a/.github/workflows/platform-autogpt-deploy-dev.yaml +++ b/.github/workflows/platform-autogpt-deploy-dev.yaml @@ -25,7 +25,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ github.event.inputs.git_ref || github.ref_name }} @@ -52,7 +52,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Trigger deploy workflow - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.DEPLOY_TOKEN }} repository: Significant-Gravitas/AutoGPT_cloud_infrastructure diff --git a/.github/workflows/platform-autogpt-deploy-prod.yml b/.github/workflows/platform-autogpt-deploy-prod.yml index 4d7c16d710..e0c524d8d2 100644 --- a/.github/workflows/platform-autogpt-deploy-prod.yml +++ b/.github/workflows/platform-autogpt-deploy-prod.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ github.ref_name || 'master' }} @@ -45,7 +45,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Trigger deploy workflow - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.DEPLOY_TOKEN }} repository: Significant-Gravitas/AutoGPT_cloud_infrastructure diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml index a301477ecf..1f0c6da3dd 100644 --- a/.github/workflows/platform-backend-ci.yml +++ b/.github/workflows/platform-backend-ci.yml @@ -68,7 +68,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 submodules: true diff --git a/.github/workflows/platform-dev-deploy-event-dispatcher.yml b/.github/workflows/platform-dev-deploy-event-dispatcher.yml index b5324b7c2c..1a581c55c2 100644 --- a/.github/workflows/platform-dev-deploy-event-dispatcher.yml +++ b/.github/workflows/platform-dev-deploy-event-dispatcher.yml @@ -82,7 +82,7 @@ jobs: - name: Dispatch Deploy Event if: steps.check_status.outputs.should_deploy == 'true' - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.DISPATCH_TOKEN }} repository: Significant-Gravitas/AutoGPT_cloud_infrastructure 
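A note on the repository-dispatch bumps above: the major-version change does not alter the mechanism. The action simply POSTs to GitHub's repository-dispatch REST endpoint; a minimal Python sketch of the equivalent call follows. The "deploy" event_type and empty client_payload are illustrative assumptions, since the real values live in the elided `with:` blocks.

    import json
    import urllib.request

    # Rough equivalent of peter-evans/repository-dispatch: POST to the target
    # repository's /dispatches endpoint. A PAT (DEPLOY_TOKEN / DISPATCH_TOKEN
    # above) is required because the default GITHUB_TOKEN cannot trigger
    # workflows in another repository.
    body = json.dumps({"event_type": "deploy", "client_payload": {}}).encode()
    req = urllib.request.Request(
        "https://api.github.com/repos/Significant-Gravitas/AutoGPT_cloud_infrastructure/dispatches",
        data=body,
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": "Bearer <DEPLOY_TOKEN>",
        },
        method="POST",
    )
    # urllib.request.urlopen(req)  # 204 No Content on success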
@@ -110,7 +110,7 @@ jobs: - name: Dispatch Undeploy Event (from comment) if: steps.check_status.outputs.should_undeploy == 'true' - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.DISPATCH_TOKEN }} repository: Significant-Gravitas/AutoGPT_cloud_infrastructure @@ -168,7 +168,7 @@ jobs: github.event_name == 'pull_request' && github.event.action == 'closed' && steps.check_pr_close.outputs.should_undeploy == 'true' - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.DISPATCH_TOKEN }} repository: Significant-Gravitas/AutoGPT_cloud_infrastructure diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 669a775934..6410daae9f 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Check for component changes uses: dorny/paths-filter@v3 @@ -71,7 +71,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Node.js uses: actions/setup-node@v6 @@ -107,7 +107,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -148,7 +148,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: recursive @@ -277,7 +277,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: recursive diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml index 67be0ae939..b4724245dc 100644 --- a/.github/workflows/platform-fullstack-ci.yml +++ b/.github/workflows/platform-fullstack-ci.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Node.js uses: actions/setup-node@v6 @@ -56,14 +56,14 @@ jobs: run: pnpm install --frozen-lockfile types: - runs-on: ubuntu-latest + runs-on: big-boi needs: setup strategy: fail-fast: false steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: recursive @@ -85,7 +85,7 @@ jobs: - name: Run docker compose run: | - docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d + docker compose -f ../docker-compose.yml --profile local up -d deps_backend - name: Restore dependencies cache uses: actions/cache@v5 diff --git a/.github/workflows/repo-workflow-checker.yml b/.github/workflows/repo-workflow-checker.yml index 35536ba922..aa94622d31 100644 --- a/.github/workflows/repo-workflow-checker.yml +++ b/.github/workflows/repo-workflow-checker.yml @@ -11,7 +11,7 @@ jobs: steps: # - name: Wait some time for all actions to start # run: sleep 30 - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 # with: # fetch-depth: 0 - name: Set up Python diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock index f1d1e932fe..0a421dda31 100644 --- a/autogpt_platform/autogpt_libs/poetry.lock +++ b/autogpt_platform/autogpt_libs/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
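Two things worth flagging in the hunks just above. First, in platform-fullstack-ci.yml, `docker compose --profile local --profile deps_backend up -d` enables both profiles and starts every service attached to either of them, whereas `docker compose --profile local up -d deps_backend` starts only the service named `deps_backend` (plus its `depends_on` chain) with the `local` profile enabled; this assumes docker-compose.yml defines `deps_backend` as a service and not just a profile name, otherwise compose fails with "no such service". Likewise, `runs-on: big-boi` assumes a self-hosted runner registered under that label. Second, the poetry.lock header moves backwards from Poetry 2.2.1 to 2.1.1, which typically means the file was regenerated with an older local Poetry install; worth confirming that is intentional.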
[[package]] name = "annotated-doc" @@ -67,7 +67,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event optional = false python-versions = "<3.11,>=3.8" groups = ["dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"}, {file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"}, @@ -326,100 +326,118 @@ files = [ [[package]] name = "coverage" -version = "7.10.5" +version = "7.13.4" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "coverage-7.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c6a5c3414bfc7451b879141ce772c546985163cf553f08e0f135f0699a911801"}, - {file = "coverage-7.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc8e4d99ce82f1710cc3c125adc30fd1487d3cf6c2cd4994d78d68a47b16989a"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:02252dc1216e512a9311f596b3169fad54abcb13827a8d76d5630c798a50a754"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73269df37883e02d460bee0cc16be90509faea1e3bd105d77360b512d5bb9c33"}, - {file = "coverage-7.10.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f8a81b0614642f91c9effd53eec284f965577591f51f547a1cbeb32035b4c2f"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6a29f8e0adb7f8c2b95fa2d4566a1d6e6722e0a637634c6563cb1ab844427dd9"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fcf6ab569436b4a647d4e91accba12509ad9f2554bc93d3aee23cc596e7f99c3"}, - {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:90dc3d6fb222b194a5de60af8d190bedeeddcbc7add317e4a3cd333ee6b7c879"}, - {file = "coverage-7.10.5-cp310-cp310-win32.whl", hash = "sha256:414a568cd545f9dc75f0686a0049393de8098414b58ea071e03395505b73d7a8"}, - {file = "coverage-7.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:e551f9d03347196271935fd3c0c165f0e8c049220280c1120de0084d65e9c7ff"}, - {file = "coverage-7.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c177e6ffe2ebc7c410785307758ee21258aa8e8092b44d09a2da767834f075f2"}, - {file = "coverage-7.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:14d6071c51ad0f703d6440827eaa46386169b5fdced42631d5a5ac419616046f"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:61f78c7c3bc272a410c5ae3fde7792b4ffb4acc03d35a7df73ca8978826bb7ab"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f39071caa126f69d63f99b324fb08c7b1da2ec28cbb1fe7b5b1799926492f65c"}, - {file = "coverage-7.10.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343a023193f04d46edc46b2616cdbee68c94dd10208ecd3adc56fcc54ef2baa1"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:585ffe93ae5894d1ebdee69fc0b0d4b7c75d8007983692fb300ac98eed146f78"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:b0ef4e66f006ed181df29b59921bd8fc7ed7cd6a9289295cd8b2824b49b570df"}, - {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eb7b0bbf7cc1d0453b843eca7b5fa017874735bef9bfdfa4121373d2cc885ed6"}, - {file = "coverage-7.10.5-cp311-cp311-win32.whl", hash = "sha256:1d043a8a06987cc0c98516e57c4d3fc2c1591364831e9deb59c9e1b4937e8caf"}, - {file = "coverage-7.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:fefafcca09c3ac56372ef64a40f5fe17c5592fab906e0fdffd09543f3012ba50"}, - {file = "coverage-7.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:7e78b767da8b5fc5b2faa69bb001edafcd6f3995b42a331c53ef9572c55ceb82"}, - {file = "coverage-7.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2d05c7e73c60a4cecc7d9b60dbfd603b4ebc0adafaef371445b47d0f805c8a9"}, - {file = "coverage-7.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32ddaa3b2c509778ed5373b177eb2bf5662405493baeff52278a0b4f9415188b"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dd382410039fe062097aa0292ab6335a3f1e7af7bba2ef8d27dcda484918f20c"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7fa22800f3908df31cea6fb230f20ac49e343515d968cc3a42b30d5c3ebf9b5a"}, - {file = "coverage-7.10.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f366a57ac81f5e12797136552f5b7502fa053c861a009b91b80ed51f2ce651c6"}, - {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1dc8f1980a272ad4a6c84cba7981792344dad33bf5869361576b7aef42733a"}, - {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2285c04ee8676f7938b02b4936d9b9b672064daab3187c20f73a55f3d70e6b4a"}, - {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2492e4dd9daab63f5f56286f8a04c51323d237631eb98505d87e4c4ff19ec34"}, - {file = "coverage-7.10.5-cp312-cp312-win32.whl", hash = "sha256:38a9109c4ee8135d5df5505384fc2f20287a47ccbe0b3f04c53c9a1989c2bbaf"}, - {file = "coverage-7.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:6b87f1ad60b30bc3c43c66afa7db6b22a3109902e28c5094957626a0143a001f"}, - {file = "coverage-7.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:672a6c1da5aea6c629819a0e1461e89d244f78d7b60c424ecf4f1f2556c041d8"}, - {file = "coverage-7.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ef3b83594d933020f54cf65ea1f4405d1f4e41a009c46df629dd964fcb6e907c"}, - {file = "coverage-7.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b96bfdf7c0ea9faebce088a3ecb2382819da4fbc05c7b80040dbc428df6af44"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:63df1fdaffa42d914d5c4d293e838937638bf75c794cf20bee12978fc8c4e3bc"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8002dc6a049aac0e81ecec97abfb08c01ef0c1fbf962d0c98da3950ace89b869"}, - {file = "coverage-7.10.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63d4bb2966d6f5f705a6b0c6784c8969c468dbc4bcf9d9ded8bff1c7e092451f"}, - {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1f672efc0731a6846b157389b6e6d5d5e9e59d1d1a23a5c66a99fd58339914d5"}, - {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3f39cef43d08049e8afc1fde4a5da8510fc6be843f8dea350ee46e2a26b2f54c"}, - {file = 
"coverage-7.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2968647e3ed5a6c019a419264386b013979ff1fb67dd11f5c9886c43d6a31fc2"}, - {file = "coverage-7.10.5-cp313-cp313-win32.whl", hash = "sha256:0d511dda38595b2b6934c2b730a1fd57a3635c6aa2a04cb74714cdfdd53846f4"}, - {file = "coverage-7.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:9a86281794a393513cf117177fd39c796b3f8e3759bb2764259a2abba5cce54b"}, - {file = "coverage-7.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:cebd8e906eb98bb09c10d1feed16096700b1198d482267f8bf0474e63a7b8d84"}, - {file = "coverage-7.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0520dff502da5e09d0d20781df74d8189ab334a1e40d5bafe2efaa4158e2d9e7"}, - {file = "coverage-7.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d9cd64aca68f503ed3f1f18c7c9174cbb797baba02ca8ab5112f9d1c0328cd4b"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0913dd1613a33b13c4f84aa6e3f4198c1a21ee28ccb4f674985c1f22109f0aae"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1b7181c0feeb06ed8a02da02792f42f829a7b29990fef52eff257fef0885d760"}, - {file = "coverage-7.10.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36d42b7396b605f774d4372dd9c49bed71cbabce4ae1ccd074d155709dd8f235"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b4fdc777e05c4940b297bf47bf7eedd56a39a61dc23ba798e4b830d585486ca5"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:42144e8e346de44a6f1dbd0a56575dd8ab8dfa7e9007da02ea5b1c30ab33a7db"}, - {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:66c644cbd7aed8fe266d5917e2c9f65458a51cfe5eeff9c05f15b335f697066e"}, - {file = "coverage-7.10.5-cp313-cp313t-win32.whl", hash = "sha256:2d1b73023854068c44b0c554578a4e1ef1b050ed07cf8b431549e624a29a66ee"}, - {file = "coverage-7.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:54a1532c8a642d8cc0bd5a9a51f5a9dcc440294fd06e9dda55e743c5ec1a8f14"}, - {file = "coverage-7.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:74d5b63fe3f5f5d372253a4ef92492c11a4305f3550631beaa432fc9df16fcff"}, - {file = "coverage-7.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:68c5e0bc5f44f68053369fa0d94459c84548a77660a5f2561c5e5f1e3bed7031"}, - {file = "coverage-7.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cf33134ffae93865e32e1e37df043bef15a5e857d8caebc0099d225c579b0fa3"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad8fa9d5193bafcf668231294241302b5e683a0518bf1e33a9a0dfb142ec3031"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:146fa1531973d38ab4b689bc764592fe6c2f913e7e80a39e7eeafd11f0ef6db2"}, - {file = "coverage-7.10.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6013a37b8a4854c478d3219ee8bc2392dea51602dd0803a12d6f6182a0061762"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:eb90fe20db9c3d930fa2ad7a308207ab5b86bf6a76f54ab6a40be4012d88fcae"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:384b34482272e960c438703cafe63316dfbea124ac62006a455c8410bf2a2262"}, - {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:467dc74bd0a1a7de2bedf8deaf6811f43602cb532bd34d81ffd6038d6d8abe99"}, - {file = "coverage-7.10.5-cp314-cp314-win32.whl", hash = "sha256:556d23d4e6393ca898b2e63a5bca91e9ac2d5fb13299ec286cd69a09a7187fde"}, - {file = "coverage-7.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:f4446a9547681533c8fa3e3c6cf62121eeee616e6a92bd9201c6edd91beffe13"}, - {file = "coverage-7.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:5e78bd9cf65da4c303bf663de0d73bf69f81e878bf72a94e9af67137c69b9fe9"}, - {file = "coverage-7.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5661bf987d91ec756a47c7e5df4fbcb949f39e32f9334ccd3f43233bbb65e508"}, - {file = "coverage-7.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a46473129244db42a720439a26984f8c6f834762fc4573616c1f37f13994b357"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1f64b8d3415d60f24b058b58d859e9512624bdfa57a2d1f8aff93c1ec45c429b"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:44d43de99a9d90b20e0163f9770542357f58860a26e24dc1d924643bd6aa7cb4"}, - {file = "coverage-7.10.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a931a87e5ddb6b6404e65443b742cb1c14959622777f2a4efd81fba84f5d91ba"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9559b906a100029274448f4c8b8b0a127daa4dade5661dfd821b8c188058842"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b08801e25e3b4526ef9ced1aa29344131a8f5213c60c03c18fe4c6170ffa2874"}, - {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ed9749bb8eda35f8b636fb7632f1c62f735a236a5d4edadd8bbcc5ea0542e732"}, - {file = "coverage-7.10.5-cp314-cp314t-win32.whl", hash = "sha256:609b60d123fc2cc63ccee6d17e4676699075db72d14ac3c107cc4976d516f2df"}, - {file = "coverage-7.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:0666cf3d2c1626b5a3463fd5b05f5e21f99e6aec40a3192eee4d07a15970b07f"}, - {file = "coverage-7.10.5-cp314-cp314t-win_arm64.whl", hash = "sha256:bc85eb2d35e760120540afddd3044a5bf69118a91a296a8b3940dfc4fdcfe1e2"}, - {file = "coverage-7.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:62835c1b00c4a4ace24c1a88561a5a59b612fbb83a525d1c70ff5720c97c0610"}, - {file = "coverage-7.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5255b3bbcc1d32a4069d6403820ac8e6dbcc1d68cb28a60a1ebf17e47028e898"}, - {file = "coverage-7.10.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3876385722e335d6e991c430302c24251ef9c2a9701b2b390f5473199b1b8ebf"}, - {file = "coverage-7.10.5-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8048ce4b149c93447a55d279078c8ae98b08a6951a3c4d2d7e87f4efc7bfe100"}, - {file = "coverage-7.10.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4028e7558e268dd8bcf4d9484aad393cafa654c24b4885f6f9474bf53183a82a"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03f47dc870eec0367fcdd603ca6a01517d2504e83dc18dbfafae37faec66129a"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2d488d7d42b6ded7ea0704884f89dcabd2619505457de8fc9a6011c62106f6e5"}, - {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3dcf2ead47fa8be14224ee817dfc1df98043af568fe120a22f81c0eb3c34ad2"}, - {file = "coverage-7.10.5-cp39-cp39-win32.whl", 
hash = "sha256:02650a11324b80057b8c9c29487020073d5e98a498f1857f37e3f9b6ea1b2426"}, - {file = "coverage-7.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:b45264dd450a10f9e03237b41a9a24e85cbb1e278e5a32adb1a303f58f0017f3"}, - {file = "coverage-7.10.5-py3-none-any.whl", hash = "sha256:0be24d35e4db1d23d0db5c0f6a74a962e2ec83c426b5cac09f4234aadef38e4a"}, - {file = "coverage-7.10.5.tar.gz", hash = "sha256:f2e57716a78bc3ae80b2207be0709a3b2b63b9f2dcf9740ee6ac03588a2015b6"}, + {file = "coverage-7.13.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fc31c787a84f8cd6027eba44010517020e0d18487064cd3d8968941856d1415"}, + {file = "coverage-7.13.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a32ebc02a1805adf637fc8dec324b5cdacd2e493515424f70ee33799573d661b"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e24f9156097ff9dc286f2f913df3a7f63c0e333dcafa3c196f2c18b4175ca09a"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8041b6c5bfdc03257666e9881d33b1abc88daccaf73f7b6340fb7946655cd10f"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a09cfa6a5862bc2fc6ca7c3def5b2926194a56b8ab78ffcf617d28911123012"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:296f8b0af861d3970c2a4d8c91d48eb4dd4771bcef9baedec6a9b515d7de3def"}, + {file = "coverage-7.13.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e101609bcbbfb04605ea1027b10dc3735c094d12d40826a60f897b98b1c30256"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aa3feb8db2e87ff5e6d00d7e1480ae241876286691265657b500886c98f38bda"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4fc7fa81bbaf5a02801b65346c8b3e657f1d93763e58c0abdf7c992addd81a92"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:33901f604424145c6e9c2398684b92e176c0b12df77d52db81c20abd48c3794c"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:bb28c0f2cf2782508a40cec377935829d5fcc3ad9a3681375af4e84eb34b6b58"}, + {file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9d107aff57a83222ddbd8d9ee705ede2af2cc926608b57abed8ef96b50b7e8f9"}, + {file = "coverage-7.13.4-cp310-cp310-win32.whl", hash = "sha256:a6f94a7d00eb18f1b6d403c91a88fd58cfc92d4b16080dfdb774afc8294469bf"}, + {file = "coverage-7.13.4-cp310-cp310-win_amd64.whl", hash = "sha256:2cb0f1e000ebc419632bbe04366a8990b6e32c4e0b51543a6484ffe15eaeda95"}, + {file = "coverage-7.13.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d490ba50c3f35dd7c17953c68f3270e7ccd1c6642e2d2afe2d8e720b98f5a053"}, + {file = "coverage-7.13.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19bc3c88078789f8ef36acb014d7241961dbf883fd2533d18cb1e7a5b4e28b11"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3998e5a32e62fdf410c0dbd3115df86297995d6e3429af80b8798aad894ca7aa"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8e264226ec98e01a8e1054314af91ee6cde0eacac4f465cc93b03dbe0bce2fd7"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:a3aa4e7b9e416774b21797365b358a6e827ffadaaca81b69ee02946852449f00"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:71ca20079dd8f27fcf808817e281e90220475cd75115162218d0e27549f95fef"}, + {file = "coverage-7.13.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e2f25215f1a359ab17320b47bcdaca3e6e6356652e8256f2441e4ef972052903"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d65b2d373032411e86960604dc4edac91fdfb5dca539461cf2cbe78327d1e64f"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94eb63f9b363180aff17de3e7c8760c3ba94664ea2695c52f10111244d16a299"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e856bf6616714c3a9fbc270ab54103f4e685ba236fa98c054e8f87f266c93505"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:65dfcbe305c3dfe658492df2d85259e0d79ead4177f9ae724b6fb245198f55d6"}, + {file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b507778ae8a4c915436ed5c2e05b4a6cecfa70f734e19c22a005152a11c7b6a9"}, + {file = "coverage-7.13.4-cp311-cp311-win32.whl", hash = "sha256:784fc3cf8be001197b652d51d3fd259b1e2262888693a4636e18879f613a62a9"}, + {file = "coverage-7.13.4-cp311-cp311-win_amd64.whl", hash = "sha256:2421d591f8ca05b308cf0092807308b2facbefe54af7c02ac22548b88b95c98f"}, + {file = "coverage-7.13.4-cp311-cp311-win_arm64.whl", hash = "sha256:79e73a76b854d9c6088fe5d8b2ebe745f8681c55f7397c3c0a016192d681045f"}, + {file = "coverage-7.13.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02231499b08dabbe2b96612993e5fc34217cdae907a51b906ac7fca8027a4459"}, + {file = "coverage-7.13.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40aa8808140e55dc022b15d8aa7f651b6b3d68b365ea0398f1441e0b04d859c3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5b856a8ccf749480024ff3bd7310adaef57bf31fd17e1bfc404b7940b6986634"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c048ea43875fbf8b45d476ad79f179809c590ec7b79e2035c662e7afa3192e3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7b38448866e83176e28086674fe7368ab8590e4610fb662b44e345b86d63ffa"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:de6defc1c9badbf8b9e67ae90fd00519186d6ab64e5cc5f3d21359c2a9b2c1d3"}, + {file = "coverage-7.13.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7eda778067ad7ffccd23ecffce537dface96212576a07924cbf0d8799d2ded5a"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e87f6c587c3f34356c3759f0420693e35e7eb0e2e41e4c011cb6ec6ecbbf1db7"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8248977c2e33aecb2ced42fef99f2d319e9904a36e55a8a68b69207fb7e43edc"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:25381386e80ae727608e662474db537d4df1ecd42379b5ba33c84633a2b36d47"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:ee756f00726693e5ba94d6df2bdfd64d4852d23b09bb0bc700e3b30e6f333985"}, + {file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:fdfc1e28e7c7cdce44985b3043bc13bbd9c747520f94a4d7164af8260b3d91f0"}, + {file = "coverage-7.13.4-cp312-cp312-win32.whl", hash = "sha256:01d4cbc3c283a17fc1e42d614a119f7f438eabb593391283adca8dc86eff1246"}, + {file = "coverage-7.13.4-cp312-cp312-win_amd64.whl", hash = "sha256:9401ebc7ef522f01d01d45532c68c5ac40fb27113019b6b7d8b208f6e9baa126"}, + {file = "coverage-7.13.4-cp312-cp312-win_arm64.whl", hash = "sha256:b1ec7b6b6e93255f952e27ab58fbc68dcc468844b16ecbee881aeb29b6ab4d8d"}, + {file = "coverage-7.13.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b66a2da594b6068b48b2692f043f35d4d3693fb639d5ea8b39533c2ad9ac3ab9"}, + {file = "coverage-7.13.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3599eb3992d814d23b35c536c28df1a882caa950f8f507cef23d1cbf334995ac"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:93550784d9281e374fb5a12bf1324cc8a963fd63b2d2f223503ef0fd4aa339ea"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b720ce6a88a2755f7c697c23268ddc47a571b88052e6b155224347389fdf6a3b"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b322db1284a2ed3aa28ffd8ebe3db91c929b7a333c0820abec3d838ef5b3525"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4594c67d8a7c89cf922d9df0438c7c7bb022ad506eddb0fdb2863359ff78242"}, + {file = "coverage-7.13.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:53d133df809c743eb8bce33b24bcababb371f4441340578cd406e084d94a6148"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76451d1978b95ba6507a039090ba076105c87cc76fc3efd5d35d72093964d49a"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7f57b33491e281e962021de110b451ab8a24182589be17e12a22c79047935e23"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1731dc33dc276dafc410a885cbf5992f1ff171393e48a21453b78727d090de80"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:bd60d4fe2f6fa7dff9223ca1bbc9f05d2b6697bc5961072e5d3b952d46e1b1ea"}, + {file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9181a3ccead280b828fae232df12b16652702b49d41e99d657f46cc7b1f6ec7a"}, + {file = "coverage-7.13.4-cp313-cp313-win32.whl", hash = "sha256:f53d492307962561ac7de4cd1de3e363589b000ab69617c6156a16ba7237998d"}, + {file = "coverage-7.13.4-cp313-cp313-win_amd64.whl", hash = "sha256:e6f70dec1cc557e52df5306d051ef56003f74d56e9c4dd7ddb07e07ef32a84dd"}, + {file = "coverage-7.13.4-cp313-cp313-win_arm64.whl", hash = "sha256:fb07dc5da7e849e2ad31a5d74e9bece81f30ecf5a42909d0a695f8bd1874d6af"}, + {file = "coverage-7.13.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40d74da8e6c4b9ac18b15331c4b5ebc35a17069410cad462ad4f40dcd2d50c0d"}, + {file = "coverage-7.13.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4223b4230a376138939a9173f1bdd6521994f2aff8047fae100d6d94d50c5a12"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1d4be36a5114c499f9f1f9195e95ebf979460dbe2d88e6816ea202010ba1c34b"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:200dea7d1e8095cc6e98cdabe3fd1d21ab17d3cee6dab00cadbb2fe35d9c15b9"}, + {file = 
"coverage-7.13.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8eb931ee8e6d8243e253e5ed7336deea6904369d2fd8ae6e43f68abbf167092"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:75eab1ebe4f2f64d9509b984f9314d4aa788540368218b858dad56dc8f3e5eb9"}, + {file = "coverage-7.13.4-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c35eb28c1d085eb7d8c9b3296567a1bebe03ce72962e932431b9a61f28facf26"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb88b316ec33760714a4720feb2816a3a59180fd58c1985012054fa7aebee4c2"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7d41eead3cc673cbd38a4417deb7fd0b4ca26954ff7dc6078e33f6ff97bed940"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:fb26a934946a6afe0e326aebe0730cdff393a8bc0bbb65a2f41e30feddca399c"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:dae88bc0fc77edaa65c14be099bd57ee140cf507e6bfdeea7938457ab387efb0"}, + {file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:845f352911777a8e722bfce168958214951e07e47e5d5d9744109fa5fe77f79b"}, + {file = "coverage-7.13.4-cp313-cp313t-win32.whl", hash = "sha256:2fa8d5f8de70688a28240de9e139fa16b153cc3cbb01c5f16d88d6505ebdadf9"}, + {file = "coverage-7.13.4-cp313-cp313t-win_amd64.whl", hash = "sha256:9351229c8c8407645840edcc277f4a2d44814d1bc34a2128c11c2a031d45a5dd"}, + {file = "coverage-7.13.4-cp313-cp313t-win_arm64.whl", hash = "sha256:30b8d0512f2dc8c8747557e8fb459d6176a2c9e5731e2b74d311c03b78451997"}, + {file = "coverage-7.13.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:300deaee342f90696ed186e3a00c71b5b3d27bffe9e827677954f4ee56969601"}, + {file = "coverage-7.13.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29e3220258d682b6226a9b0925bc563ed9a1ebcff3cad30f043eceea7eaf2689"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:391ee8f19bef69210978363ca930f7328081c6a0152f1166c91f0b5fdd2a773c"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0dd7ab8278f0d58a0128ba2fca25824321f05d059c1441800e934ff2efa52129"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78cdf0d578b15148b009ccf18c686aa4f719d887e76e6b40c38ffb61d264a552"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:48685fee12c2eb3b27c62f2658e7ea21e9c3239cba5a8a242801a0a3f6a8c62a"}, + {file = "coverage-7.13.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4e83efc079eb39480e6346a15a1bcb3e9b04759c5202d157e1dd4303cd619356"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecae9737b72408d6a950f7e525f30aca12d4bd8dd95e37342e5beb3a2a8c4f71"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ae4578f8528569d3cf303fef2ea569c7f4c4059a38c8667ccef15c6e1f118aa5"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6fdef321fdfbb30a197efa02d48fcd9981f0d8ad2ae8903ac318adc653f5df98"}, + {file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b0f6ccf3dbe577170bebfce1318707d0e8c3650003cb4b3a9dd744575daa8b5"}, + 
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75fcd519f2a5765db3f0e391eb3b7d150cce1a771bf4c9f861aeab86c767a3c0"}, + {file = "coverage-7.13.4-cp314-cp314-win32.whl", hash = "sha256:8e798c266c378da2bd819b0677df41ab46d78065fb2a399558f3f6cae78b2fbb"}, + {file = "coverage-7.13.4-cp314-cp314-win_amd64.whl", hash = "sha256:245e37f664d89861cf2329c9afa2c1fe9e6d4e1a09d872c947e70718aeeac505"}, + {file = "coverage-7.13.4-cp314-cp314-win_arm64.whl", hash = "sha256:ad27098a189e5838900ce4c2a99f2fe42a0bf0c2093c17c69b45a71579e8d4a2"}, + {file = "coverage-7.13.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:85480adfb35ffc32d40918aad81b89c69c9cc5661a9b8a81476d3e645321a056"}, + {file = "coverage-7.13.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:79be69cf7f3bf9b0deeeb062eab7ac7f36cd4cc4c4dd694bd28921ba4d8596cc"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:caa421e2684e382c5d8973ac55e4f36bed6821a9bad5c953494de960c74595c9"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14375934243ee05f56c45393fe2ce81fe5cc503c07cee2bdf1725fb8bef3ffaf"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:25a41c3104d08edb094d9db0d905ca54d0cd41c928bb6be3c4c799a54753af55"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6f01afcff62bf9a08fb32b2c1d6e924236c0383c02c790732b6537269e466a72"}, + {file = "coverage-7.13.4-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eb9078108fbf0bcdde37c3f4779303673c2fa1fe8f7956e68d447d0dd426d38a"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e086334e8537ddd17e5f16a344777c1ab8194986ec533711cbe6c41cde841b6"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:725d985c5ab621268b2edb8e50dfe57633dc69bda071abc470fed55a14935fd3"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3c06f0f1337c667b971ca2f975523347e63ec5e500b9aa5882d91931cd3ef750"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:590c0ed4bf8e85f745e6b805b2e1c457b2e33d5255dd9729743165253bc9ad39"}, + {file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:eb30bf180de3f632cd043322dad5751390e5385108b2807368997d1a92a509d0"}, + {file = "coverage-7.13.4-cp314-cp314t-win32.whl", hash = "sha256:c4240e7eded42d131a2d2c4dec70374b781b043ddc79a9de4d55ca71f8e98aea"}, + {file = "coverage-7.13.4-cp314-cp314t-win_amd64.whl", hash = "sha256:4c7d3cc01e7350f2f0f6f7036caaf5673fb56b6998889ccfe9e1c1fe75a9c932"}, + {file = "coverage-7.13.4-cp314-cp314t-win_arm64.whl", hash = "sha256:23e3f687cf945070d1c90f85db66d11e3025665d8dafa831301a0e0038f3db9b"}, + {file = "coverage-7.13.4-py3-none-any.whl", hash = "sha256:1af1641e57cf7ba1bd67d677c9abdbcd6cc2ab7da3bca7fa1e2b7e50e65f2ad0"}, + {file = "coverage-7.13.4.tar.gz", hash = "sha256:e5c8f6ed1e61a8b2dcdf31eb0b9bbf0130750ca79c1c49eb898e2ad86f5ccc91"}, ] [package.dependencies] @@ -523,7 +541,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", "dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = 
"sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -2162,23 +2180,23 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "pytest-cov" -version = "6.2.1" +version = "7.0.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"}, - {file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"}, + {file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"}, + {file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"}, ] [package.dependencies] -coverage = {version = ">=7.5", extras = ["toml"]} +coverage = {version = ">=7.10.6", extras = ["toml"]} pluggy = ">=1.2" -pytest = ">=6.2.5" +pytest = ">=7" [package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +testing = ["process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-mock" @@ -2545,7 +2563,7 @@ description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "python_version == \"3.10\"" +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2893,4 +2911,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<4.0" -content-hash = "b7ac335a86aa44c3d7d2802298818b389a6f1286e3e9b7b0edb2ff06377cecaf" +content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d" diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml index 9b2bcb8fbe..8deb4d2169 100644 --- a/autogpt_platform/autogpt_libs/pyproject.toml +++ b/autogpt_platform/autogpt_libs/pyproject.toml @@ -26,7 +26,7 @@ pyright = "^1.1.408" pytest = "^8.4.1" pytest-asyncio = "^1.3.0" pytest-mock = "^3.15.1" -pytest-cov = "^6.2.1" +pytest-cov = "^7.0.0" ruff = "^0.15.0" [build-system] diff --git a/autogpt_platform/backend/backend/api/external/v1/routes.py b/autogpt_platform/backend/backend/api/external/v1/routes.py index 00933c1899..69a0c36637 100644 --- a/autogpt_platform/backend/backend/api/external/v1/routes.py +++ b/autogpt_platform/backend/backend/api/external/v1/routes.py @@ -10,7 +10,7 @@ from typing_extensions import TypedDict import backend.api.features.store.cache as store_cache import backend.api.features.store.model as store_model -import backend.data.block +import backend.blocks from backend.api.external.middleware import require_permission from backend.data import execution as execution_db from backend.data import graph as graph_db @@ -67,7 +67,7 @@ async def get_user_info( dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))], ) async def get_graph_blocks() -> Sequence[dict[Any, Any]]: - blocks = [block() for block in backend.data.block.get_blocks().values()] + blocks = [block() for block in backend.blocks.get_blocks().values()] return [b.to_dict() for b 
in blocks if not b.disabled] @@ -83,7 +83,7 @@ async def execute_graph_block( require_permission(APIKeyPermission.EXECUTE_BLOCK) ), ) -> CompletedBlockOutput: - obj = backend.data.block.get_block(block_id) + obj = backend.blocks.get_block(block_id) if not obj: raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") if obj.disabled: diff --git a/autogpt_platform/backend/backend/api/features/builder/db.py b/autogpt_platform/backend/backend/api/features/builder/db.py index 7177fa4dc6..e8d35b0bb5 100644 --- a/autogpt_platform/backend/backend/api/features/builder/db.py +++ b/autogpt_platform/backend/backend/api/features/builder/db.py @@ -10,10 +10,15 @@ import backend.api.features.library.db as library_db import backend.api.features.library.model as library_model import backend.api.features.store.db as store_db import backend.api.features.store.model as store_model -import backend.data.block from backend.blocks import load_all_blocks +from backend.blocks._base import ( + AnyBlockSchema, + BlockCategory, + BlockInfo, + BlockSchema, + BlockType, +) from backend.blocks.llm import LlmModel -from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema from backend.data.db import query_raw_with_schema from backend.integrations.providers import ProviderName from backend.util.cache import cached @@ -22,7 +27,7 @@ from backend.util.models import Pagination from .model import ( BlockCategoryResponse, BlockResponse, - BlockType, + BlockTypeFilter, CountResponse, FilterType, Provider, @@ -88,7 +93,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse def get_blocks( *, category: str | None = None, - type: BlockType | None = None, + type: BlockTypeFilter | None = None, provider: ProviderName | None = None, page: int = 1, page_size: int = 50, @@ -669,9 +674,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: for block_type in load_all_blocks().values(): block: AnyBlockSchema = block_type() if block.disabled or block.block_type in ( - backend.data.block.BlockType.INPUT, - backend.data.block.BlockType.OUTPUT, - backend.data.block.BlockType.AGENT, + BlockType.INPUT, + BlockType.OUTPUT, + BlockType.AGENT, ): continue # Find the execution count for this block diff --git a/autogpt_platform/backend/backend/api/features/builder/model.py b/autogpt_platform/backend/backend/api/features/builder/model.py index fcd19dba94..8aa8ed06ed 100644 --- a/autogpt_platform/backend/backend/api/features/builder/model.py +++ b/autogpt_platform/backend/backend/api/features/builder/model.py @@ -4,7 +4,7 @@ from pydantic import BaseModel import backend.api.features.library.model as library_model import backend.api.features.store.model as store_model -from backend.data.block import BlockInfo +from backend.blocks._base import BlockInfo from backend.integrations.providers import ProviderName from backend.util.models import Pagination @@ -15,7 +15,7 @@ FilterType = Literal[ "my_agents", ] -BlockType = Literal["all", "input", "action", "output"] +BlockTypeFilter = Literal["all", "input", "action", "output"] class SearchEntry(BaseModel): diff --git a/autogpt_platform/backend/backend/api/features/builder/routes.py b/autogpt_platform/backend/backend/api/features/builder/routes.py index 15b922178d..091f477178 100644 --- a/autogpt_platform/backend/backend/api/features/builder/routes.py +++ b/autogpt_platform/backend/backend/api/features/builder/routes.py @@ -88,7 +88,7 @@ async def get_block_categories( ) async def get_blocks( category: Annotated[str 
| None, fastapi.Query()] = None, - type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None, + type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None, provider: Annotated[ProviderName | None, fastapi.Query()] = None, page: Annotated[int, fastapi.Query()] = 1, page_size: Annotated[int, fastapi.Query()] = 50, diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py index 0b37e42df8..808692f97f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/config.py +++ b/autogpt_platform/backend/backend/api/features/chat/config.py @@ -93,6 +93,12 @@ class ChatConfig(BaseSettings): description="Name of the prompt in Langfuse to fetch", ) + # Extended thinking configuration for Claude models + thinking_enabled: bool = Field( + default=True, + description="Enable adaptive thinking for Claude models via OpenRouter", + ) + @field_validator("api_key", mode="before") @classmethod def get_api_key(cls, v): diff --git a/autogpt_platform/backend/backend/api/features/chat/model.py b/autogpt_platform/backend/backend/api/features/chat/model.py index 7318ef88d7..35418f174f 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model.py +++ b/autogpt_platform/backend/backend/api/features/chat/model.py @@ -2,7 +2,7 @@ import asyncio import logging import uuid from datetime import UTC, datetime -from typing import Any +from typing import Any, cast from weakref import WeakValueDictionary from openai.types.chat import ( @@ -104,6 +104,26 @@ class ChatSession(BaseModel): successful_agent_runs: dict[str, int] = {} successful_agent_schedules: dict[str, int] = {} + def add_tool_call_to_current_turn(self, tool_call: dict) -> None: + """Attach a tool_call to the current turn's assistant message. + + Searches backwards for the most recent assistant message (stopping at + any user message boundary). If found, appends the tool_call to it. + Otherwise creates a new assistant message with the tool_call. + """ + for msg in reversed(self.messages): + if msg.role == "user": + break + if msg.role == "assistant": + if not msg.tool_calls: + msg.tool_calls = [] + msg.tool_calls.append(tool_call) + return + + self.messages.append( + ChatMessage(role="assistant", content="", tool_calls=[tool_call]) + ) + @staticmethod def new(user_id: str) -> "ChatSession": return ChatSession( @@ -172,6 +192,47 @@ class ChatSession(BaseModel): successful_agent_schedules=successful_agent_schedules, ) + @staticmethod + def _merge_consecutive_assistant_messages( + messages: list[ChatCompletionMessageParam], + ) -> list[ChatCompletionMessageParam]: + """Merge consecutive assistant messages into single messages. + + Long-running tool flows can create split assistant messages: one with + text content and another with tool_calls. Anthropic's API requires + tool_result blocks to reference a tool_use in the immediately preceding + assistant message, so these splits cause 400 errors via OpenRouter. 
+ """ + if len(messages) < 2: + return messages + + result: list[ChatCompletionMessageParam] = [messages[0]] + for msg in messages[1:]: + prev = result[-1] + if prev.get("role") != "assistant" or msg.get("role") != "assistant": + result.append(msg) + continue + + prev = cast(ChatCompletionAssistantMessageParam, prev) + curr = cast(ChatCompletionAssistantMessageParam, msg) + + curr_content = curr.get("content") or "" + if curr_content: + prev_content = prev.get("content") or "" + prev["content"] = ( + f"{prev_content}\n{curr_content}" if prev_content else curr_content + ) + + curr_tool_calls = curr.get("tool_calls") + if curr_tool_calls: + prev_tool_calls = prev.get("tool_calls") + prev["tool_calls"] = ( + list(prev_tool_calls) + list(curr_tool_calls) + if prev_tool_calls + else list(curr_tool_calls) + ) + return result + def to_openai_messages(self) -> list[ChatCompletionMessageParam]: messages = [] for message in self.messages: @@ -258,7 +319,7 @@ class ChatSession(BaseModel): name=message.name or "", ) ) - return messages + return self._merge_consecutive_assistant_messages(messages) async def _get_session_from_cache(session_id: str) -> ChatSession | None: diff --git a/autogpt_platform/backend/backend/api/features/chat/model_test.py b/autogpt_platform/backend/backend/api/features/chat/model_test.py index c230b00f9c..239137844d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/model_test.py @@ -1,4 +1,16 @@ +from typing import cast + import pytest +from openai.types.chat import ( + ChatCompletionAssistantMessageParam, + ChatCompletionMessageParam, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, +) +from openai.types.chat.chat_completion_message_tool_call_param import ( + ChatCompletionMessageToolCallParam, + Function, +) from .model import ( ChatMessage, @@ -117,3 +129,205 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id): loaded.tool_calls is not None ), f"Tool calls missing for {orig.role} message" assert len(orig.tool_calls) == len(loaded.tool_calls) + + +# --------------------------------------------------------------------------- # +# _merge_consecutive_assistant_messages # +# --------------------------------------------------------------------------- # + +_tc = ChatCompletionMessageToolCallParam( + id="tc1", type="function", function=Function(name="do_stuff", arguments="{}") +) +_tc2 = ChatCompletionMessageToolCallParam( + id="tc2", type="function", function=Function(name="other", arguments="{}") +) + + +def test_merge_noop_when_no_consecutive_assistants(): + """Messages without consecutive assistants are returned unchanged.""" + msgs = [ + ChatCompletionUserMessageParam(role="user", content="hi"), + ChatCompletionAssistantMessageParam(role="assistant", content="hello"), + ChatCompletionUserMessageParam(role="user", content="bye"), + ] + merged = ChatSession._merge_consecutive_assistant_messages(msgs) + assert len(merged) == 3 + assert [m["role"] for m in merged] == ["user", "assistant", "user"] + + +def test_merge_splits_text_and_tool_calls(): + """The exact bug scenario: text-only assistant followed by tool_calls-only assistant.""" + msgs = [ + ChatCompletionUserMessageParam(role="user", content="build agent"), + ChatCompletionAssistantMessageParam( + role="assistant", content="Let me build that" + ), + ChatCompletionAssistantMessageParam( + role="assistant", content="", tool_calls=[_tc] + ), + ChatCompletionToolMessageParam(role="tool", content="ok", 
tool_call_id="tc1"), + ] + merged = ChatSession._merge_consecutive_assistant_messages(msgs) + + assert len(merged) == 3 + assert merged[0]["role"] == "user" + assert merged[2]["role"] == "tool" + a = cast(ChatCompletionAssistantMessageParam, merged[1]) + assert a["role"] == "assistant" + assert a.get("content") == "Let me build that" + assert a.get("tool_calls") == [_tc] + + +def test_merge_combines_tool_calls_from_both(): + """Both consecutive assistants have tool_calls — they get merged.""" + msgs: list[ChatCompletionAssistantMessageParam] = [ + ChatCompletionAssistantMessageParam( + role="assistant", content="text", tool_calls=[_tc] + ), + ChatCompletionAssistantMessageParam( + role="assistant", content="", tool_calls=[_tc2] + ), + ] + merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type] + + assert len(merged) == 1 + a = cast(ChatCompletionAssistantMessageParam, merged[0]) + assert a.get("tool_calls") == [_tc, _tc2] + assert a.get("content") == "text" + + +def test_merge_three_consecutive_assistants(): + """Three consecutive assistants collapse into one.""" + msgs: list[ChatCompletionAssistantMessageParam] = [ + ChatCompletionAssistantMessageParam(role="assistant", content="a"), + ChatCompletionAssistantMessageParam(role="assistant", content="b"), + ChatCompletionAssistantMessageParam( + role="assistant", content="", tool_calls=[_tc] + ), + ] + merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type] + + assert len(merged) == 1 + a = cast(ChatCompletionAssistantMessageParam, merged[0]) + assert a.get("content") == "a\nb" + assert a.get("tool_calls") == [_tc] + + +def test_merge_empty_and_single_message(): + """Edge cases: empty list and single message.""" + assert ChatSession._merge_consecutive_assistant_messages([]) == [] + + single: list[ChatCompletionMessageParam] = [ + ChatCompletionUserMessageParam(role="user", content="hi") + ] + assert ChatSession._merge_consecutive_assistant_messages(single) == single + + +# --------------------------------------------------------------------------- # +# add_tool_call_to_current_turn # +# --------------------------------------------------------------------------- # + +_raw_tc = { + "id": "tc1", + "type": "function", + "function": {"name": "f", "arguments": "{}"}, +} +_raw_tc2 = { + "id": "tc2", + "type": "function", + "function": {"name": "g", "arguments": "{}"}, +} + + +def test_add_tool_call_appends_to_existing_assistant(): + """When the last assistant is from the current turn, tool_call is added to it.""" + session = ChatSession.new(user_id="u") + session.messages = [ + ChatMessage(role="user", content="hi"), + ChatMessage(role="assistant", content="working on it"), + ] + session.add_tool_call_to_current_turn(_raw_tc) + + assert len(session.messages) == 2 # no new message created + assert session.messages[1].tool_calls == [_raw_tc] + + +def test_add_tool_call_creates_assistant_when_none_exists(): + """When there's no current-turn assistant, a new one is created.""" + session = ChatSession.new(user_id="u") + session.messages = [ + ChatMessage(role="user", content="hi"), + ] + session.add_tool_call_to_current_turn(_raw_tc) + + assert len(session.messages) == 2 + assert session.messages[1].role == "assistant" + assert session.messages[1].tool_calls == [_raw_tc] + + +def test_add_tool_call_does_not_cross_user_boundary(): + """A user message acts as a boundary — previous assistant is not modified.""" + session = ChatSession.new(user_id="u") + session.messages = [ + 
ChatMessage(role="assistant", content="old turn"), + ChatMessage(role="user", content="new message"), + ] + session.add_tool_call_to_current_turn(_raw_tc) + + assert len(session.messages) == 3 # new assistant was created + assert session.messages[0].tool_calls is None # old assistant untouched + assert session.messages[2].role == "assistant" + assert session.messages[2].tool_calls == [_raw_tc] + + +def test_add_tool_call_multiple_times(): + """Multiple long-running tool calls accumulate on the same assistant.""" + session = ChatSession.new(user_id="u") + session.messages = [ + ChatMessage(role="user", content="hi"), + ChatMessage(role="assistant", content="doing stuff"), + ] + session.add_tool_call_to_current_turn(_raw_tc) + # Simulate a pending tool result in between (like _yield_tool_call does) + session.messages.append( + ChatMessage(role="tool", content="pending", tool_call_id="tc1") + ) + session.add_tool_call_to_current_turn(_raw_tc2) + + assert len(session.messages) == 3 # user, assistant, tool — no extra assistant + assert session.messages[1].tool_calls == [_raw_tc, _raw_tc2] + + +def test_to_openai_messages_merges_split_assistants(): + """End-to-end: session with split assistants produces valid OpenAI messages.""" + session = ChatSession.new(user_id="u") + session.messages = [ + ChatMessage(role="user", content="build agent"), + ChatMessage(role="assistant", content="Let me build that"), + ChatMessage( + role="assistant", + content="", + tool_calls=[ + { + "id": "tc1", + "type": "function", + "function": {"name": "create_agent", "arguments": "{}"}, + } + ], + ), + ChatMessage(role="tool", content="done", tool_call_id="tc1"), + ChatMessage(role="assistant", content="Saved!"), + ChatMessage(role="user", content="show me an example run"), + ] + openai_msgs = session.to_openai_messages() + + # The two consecutive assistants at index 1,2 should be merged + roles = [m["role"] for m in openai_msgs] + assert roles == ["user", "assistant", "tool", "assistant", "user"] + + # The merged assistant should have both content and tool_calls + merged = cast(ChatCompletionAssistantMessageParam, openai_msgs[1]) + assert merged.get("content") == "Let me build that" + tc_list = merged.get("tool_calls") + assert tc_list is not None and len(list(tc_list)) == 1 + assert list(tc_list)[0]["id"] == "tc1" diff --git a/autogpt_platform/backend/backend/api/features/chat/response_model.py b/autogpt_platform/backend/backend/api/features/chat/response_model.py index f627a42fcc..8ea0c1f97a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/response_model.py +++ b/autogpt_platform/backend/backend/api/features/chat/response_model.py @@ -10,6 +10,8 @@ from typing import Any from pydantic import BaseModel, Field +from backend.util.json import dumps as json_dumps + class ResponseType(str, Enum): """Types of streaming responses following AI SDK protocol.""" @@ -18,6 +20,10 @@ class ResponseType(str, Enum): START = "start" FINISH = "finish" + # Step lifecycle (one LLM API call within a message) + START_STEP = "start-step" + FINISH_STEP = "finish-step" + # Text streaming TEXT_START = "text-start" TEXT_DELTA = "text-delta" @@ -57,6 +63,16 @@ class StreamStart(StreamBaseResponse): description="Task ID for SSE reconnection. 
Clients can reconnect using GET /tasks/{taskId}/stream", ) + def to_sse(self) -> str: + """Convert to SSE format, excluding non-protocol fields like taskId.""" + import json + + data: dict[str, Any] = { + "type": self.type.value, + "messageId": self.messageId, + } + return f"data: {json.dumps(data)}\n\n" + class StreamFinish(StreamBaseResponse): """End of message/stream.""" @@ -64,6 +80,26 @@ class StreamFinish(StreamBaseResponse): type: ResponseType = ResponseType.FINISH +class StreamStartStep(StreamBaseResponse): + """Start of a step (one LLM API call within a message). + + The AI SDK uses this to add a step-start boundary to message.parts, + enabling visual separation between multiple LLM calls in a single message. + """ + + type: ResponseType = ResponseType.START_STEP + + +class StreamFinishStep(StreamBaseResponse): + """End of a step (one LLM API call within a message). + + The AI SDK uses this to reset activeTextParts and activeReasoningParts, + so the next LLM call in a tool-call continuation starts with clean state. + """ + + type: ResponseType = ResponseType.FINISH_STEP + + # ========== Text Streaming ========== @@ -117,7 +153,7 @@ class StreamToolOutputAvailable(StreamBaseResponse): type: ResponseType = ResponseType.TOOL_OUTPUT_AVAILABLE toolCallId: str = Field(..., description="Tool call ID this responds to") output: str | dict[str, Any] = Field(..., description="Tool execution output") - # Additional fields for internal use (not part of AI SDK spec but useful) + # Keep these for internal backend use toolName: str | None = Field( default=None, description="Name of the tool that was executed" ) @@ -125,6 +161,17 @@ class StreamToolOutputAvailable(StreamBaseResponse): default=True, description="Whether the tool execution succeeded" ) + def to_sse(self) -> str: + """Convert to SSE format, excluding non-spec fields.""" + import json + + data = { + "type": self.type.value, + "toolCallId": self.toolCallId, + "output": self.output, + } + return f"data: {json.dumps(data)}\n\n" + # ========== Other ========== @@ -148,6 +195,18 @@ class StreamError(StreamBaseResponse): default=None, description="Additional error details" ) + def to_sse(self) -> str: + """Convert to SSE format, only emitting fields required by AI SDK protocol. + + The AI SDK uses z.strictObject({type, errorText}) which rejects + any extra fields like `code` or `details`. + """ + data = { + "type": self.type.value, + "errorText": self.errorText, + } + return f"data: {json_dumps(data)}\n\n" + class StreamHeartbeat(StreamBaseResponse): """Heartbeat to keep SSE connection alive during long-running operations. diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py index 74e6e8ba1e..c6f37569b7 100644 --- a/autogpt_platform/backend/backend/api/features/chat/routes.py +++ b/autogpt_platform/backend/backend/api/features/chat/routes.py @@ -6,7 +6,7 @@ from collections.abc import AsyncGenerator from typing import Annotated from autogpt_libs import auth -from fastapi import APIRouter, Depends, Header, HTTPException, Query, Security +from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response, Security from fastapi.responses import StreamingResponse from pydantic import BaseModel @@ -17,7 +17,29 @@ from . 
import stream_registry from .completion_handler import process_operation_failure, process_operation_success from .config import ChatConfig from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions -from .response_model import StreamFinish, StreamHeartbeat, StreamStart +from .response_model import StreamFinish, StreamHeartbeat +from .tools.models import ( + AgentDetailsResponse, + AgentOutputResponse, + AgentPreviewResponse, + AgentSavedResponse, + AgentsFoundResponse, + BlockListResponse, + BlockOutputResponse, + ClarificationNeededResponse, + DocPageResponse, + DocSearchResultsResponse, + ErrorResponse, + ExecutionStartedResponse, + InputValidationErrorResponse, + NeedLoginResponse, + NoResultsResponse, + OperationInProgressResponse, + OperationPendingResponse, + OperationStartedResponse, + SetupRequirementsResponse, + UnderstandingUpdatedResponse, +) config = ChatConfig() @@ -269,8 +291,6 @@ async def stream_chat_post( import time stream_start_time = time.perf_counter() - - # Base log metadata (task_id added after creation) log_meta = {"component": "ChatStream", "session_id": session_id} if user_id: log_meta["user_id"] = user_id @@ -328,24 +348,6 @@ async def stream_chat_post( first_chunk_time, ttfc = None, None chunk_count = 0 try: - # Emit a start event with task_id for reconnection - start_chunk = StreamStart(messageId=task_id, taskId=task_id) - await stream_registry.publish_chunk(task_id, start_chunk) - logger.info( - f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time)*1000:.1f}ms", - extra={ - "json_fields": { - **log_meta, - "elapsed_ms": (time_module.perf_counter() - gen_start_time) - * 1000, - } - }, - ) - - logger.info( - "[TIMING] Calling stream_chat_completion", - extra={"json_fields": log_meta}, - ) async for chunk in chat_service.stream_chat_completion( session_id, request.message, @@ -353,6 +355,7 @@ async def stream_chat_post( user_id=user_id, session=session, # Pass pre-fetched session to avoid double-fetch context=request.context, + _task_id=task_id, # Pass task_id so service emits start with taskId for reconnection ): chunk_count += 1 if first_chunk_time is None: @@ -388,7 +391,6 @@ async def stream_chat_post( } }, ) - await stream_registry.mark_task_completed(task_id, "completed") except Exception as e: elapsed = time_module.perf_counter() - gen_start_time @@ -428,34 +430,13 @@ async def stream_chat_post( chunks_yielded = 0 try: # Subscribe to the task stream (this replays existing messages + live updates) - subscribe_start = time_module.perf_counter() - logger.info( - "[TIMING] Calling subscribe_to_task", - extra={"json_fields": log_meta}, - ) subscriber_queue = await stream_registry.subscribe_to_task( task_id=task_id, user_id=user_id, last_message_id="0-0", # Get all messages from the beginning ) - subscribe_time = (time_module.perf_counter() - subscribe_start) * 1000 - logger.info( - f"[TIMING] subscribe_to_task completed in {subscribe_time:.1f}ms, " - f"queue_ok={subscriber_queue is not None}", - extra={ - "json_fields": { - **log_meta, - "duration_ms": subscribe_time, - "queue_obtained": subscriber_queue is not None, - } - }, - ) if subscriber_queue is None: - logger.info( - "[TIMING] subscriber_queue is None, yielding finish", - extra={"json_fields": log_meta}, - ) yield StreamFinish().to_sse() yield "data: [DONE]\n\n" return @@ -467,11 +448,7 @@ async def stream_chat_post( ) while True: try: - queue_wait_start = time_module.perf_counter() chunk = await asyncio.wait_for(subscriber_queue.get(), 
timeout=30.0) - queue_wait_time = ( - time_module.perf_counter() - queue_wait_start - ) * 1000 chunks_yielded += 1 if not first_chunk_yielded: @@ -479,26 +456,12 @@ async def stream_chat_post( elapsed = time_module.perf_counter() - event_gen_start logger.info( f"[TIMING] FIRST CHUNK from queue at {elapsed:.2f}s, " - f"type={type(chunk).__name__}, " - f"wait={queue_wait_time:.1f}ms", + f"type={type(chunk).__name__}", extra={ "json_fields": { **log_meta, "chunk_type": type(chunk).__name__, "elapsed_ms": elapsed * 1000, - "queue_wait_ms": queue_wait_time, - } - }, - ) - elif chunks_yielded % 50 == 0: - logger.info( - f"[TIMING] Chunk #{chunks_yielded}, " - f"type={type(chunk).__name__}", - extra={ - "json_fields": { - **log_meta, - "chunk_number": chunks_yielded, - "chunk_type": type(chunk).__name__, } }, ) @@ -521,13 +484,6 @@ async def stream_chat_post( ) break except asyncio.TimeoutError: - # Send heartbeat to keep connection alive - logger.info( - f"[TIMING] Heartbeat timeout, chunks_so_far={chunks_yielded}", - extra={ - "json_fields": {**log_meta, "chunks_so_far": chunks_yielded} - }, - ) yield StreamHeartbeat().to_sse() except GeneratorExit: @@ -592,63 +548,90 @@ async def stream_chat_post( @router.get( "/sessions/{session_id}/stream", ) -async def stream_chat_get( +async def resume_session_stream( session_id: str, - message: Annotated[str, Query(min_length=1, max_length=10000)], user_id: str | None = Depends(auth.get_user_id), - is_user_message: bool = Query(default=True), ): """ - Stream chat responses for a session (GET - legacy endpoint). + Resume an active stream for a session. - Streams the AI/completion responses in real time over Server-Sent Events (SSE), including: - - Text fragments as they are generated - - Tool call UI elements (if invoked) - - Tool execution results + Called by the AI SDK's ``useChat(resume: true)`` on page load. + Checks for an active (in-progress) task on the session and either replays + the full SSE stream or returns 204 No Content if nothing is running. Args: - session_id: The chat session identifier to associate with the streamed messages. - message: The user's new message to process. + session_id: The chat session identifier. user_id: Optional authenticated user ID. - is_user_message: Whether the message is a user message. - Returns: - StreamingResponse: SSE-formatted response chunks. + Returns: + StreamingResponse (SSE) when an active stream exists, + or 204 No Content when there is nothing to resume. 
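+
+    The replay is driven by the stream registry subscriber queue: chunks are
+    yielded as SSE, a heartbeat is sent on each 30s idle timeout, and the
+    stream terminates with ``data: [DONE]`` after a finish event.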
""" - session = await _validate_and_get_session(session_id, user_id) + import asyncio + + active_task, _last_id = await stream_registry.get_active_task_for_session( + session_id, user_id + ) + + if not active_task: + return Response(status_code=204) + + subscriber_queue = await stream_registry.subscribe_to_task( + task_id=active_task.task_id, + user_id=user_id, + last_message_id="0-0", # Full replay so useChat rebuilds the message + ) + + if subscriber_queue is None: + return Response(status_code=204) async def event_generator() -> AsyncGenerator[str, None]: chunk_count = 0 first_chunk_type: str | None = None - async for chunk in chat_service.stream_chat_completion( - session_id, - message, - is_user_message=is_user_message, - user_id=user_id, - session=session, # Pass pre-fetched session to avoid double-fetch - ): - if chunk_count < 3: - logger.info( - "Chat stream chunk", - extra={ - "session_id": session_id, - "chunk_type": str(chunk.type), - }, + try: + while True: + try: + chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0) + if chunk_count < 3: + logger.info( + "Resume stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 + yield chunk.to_sse() + + if isinstance(chunk, StreamFinish): + break + except asyncio.TimeoutError: + yield StreamHeartbeat().to_sse() + except GeneratorExit: + pass + except Exception as e: + logger.error(f"Error in resume stream for session {session_id}: {e}") + finally: + try: + await stream_registry.unsubscribe_from_task( + active_task.task_id, subscriber_queue ) - if not first_chunk_type: - first_chunk_type = str(chunk.type) - chunk_count += 1 - yield chunk.to_sse() - logger.info( - "Chat stream completed", - extra={ - "session_id": session_id, - "n_chunks": chunk_count, - "first_chunk_type": first_chunk_type, - }, - ) - # AI SDK protocol termination - yield "data: [DONE]\n\n" + except Exception as unsub_err: + logger.error( + f"Error unsubscribing from task {active_task.task_id}: {unsub_err}", + exc_info=True, + ) + logger.info( + "Resume stream completed", + extra={ + "session_id": session_id, + "n_chunks": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) + yield "data: [DONE]\n\n" return StreamingResponse( event_generator(), @@ -656,8 +639,8 @@ async def stream_chat_get( headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", - "X-Accel-Buffering": "no", # Disable nginx buffering - "x-vercel-ai-ui-message-stream": "v1", # AI SDK protocol header + "X-Accel-Buffering": "no", + "x-vercel-ai-ui-message-stream": "v1", }, ) @@ -969,3 +952,42 @@ async def health_check() -> dict: "service": "chat", "version": "0.1.0", } + + +# ========== Schema Export (for OpenAPI / Orval codegen) ========== + +ToolResponseUnion = ( + AgentsFoundResponse + | NoResultsResponse + | AgentDetailsResponse + | SetupRequirementsResponse + | ExecutionStartedResponse + | NeedLoginResponse + | ErrorResponse + | InputValidationErrorResponse + | AgentOutputResponse + | UnderstandingUpdatedResponse + | AgentPreviewResponse + | AgentSavedResponse + | ClarificationNeededResponse + | BlockListResponse + | BlockOutputResponse + | DocSearchResultsResponse + | DocPageResponse + | OperationStartedResponse + | OperationPendingResponse + | OperationInProgressResponse +) + + +@router.get( + "/schema/tool-responses", + response_model=ToolResponseUnion, + include_in_schema=True, + summary="[Dummy] Tool response type export for codegen", + 
description="This endpoint is not meant to be called. It exists solely to " + "expose tool response models in the OpenAPI schema for frontend codegen.", +) +async def _tool_response_schema() -> ToolResponseUnion: # type: ignore[return] + """Never called at runtime. Exists only so Orval generates TS types.""" + raise HTTPException(status_code=501, detail="Schema-only endpoint") diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index da18421b98..193566ea01 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -52,8 +52,10 @@ from .response_model import ( StreamBaseResponse, StreamError, StreamFinish, + StreamFinishStep, StreamHeartbeat, StreamStart, + StreamStartStep, StreamTextDelta, StreamTextEnd, StreamTextStart, @@ -351,6 +353,10 @@ async def stream_chat_completion( retry_count: int = 0, session: ChatSession | None = None, context: dict[str, str] | None = None, # {url: str, content: str} + _continuation_message_id: ( + str | None + ) = None, # Internal: reuse message ID for tool call continuations + _task_id: str | None = None, # Internal: task ID for SSE reconnection support ) -> AsyncGenerator[StreamBaseResponse, None]: """Main entry point for streaming chat completions with database handling. @@ -517,16 +523,21 @@ async def stream_chat_completion( # Generate unique IDs for AI SDK protocol import uuid as uuid_module - message_id = str(uuid_module.uuid4()) + is_continuation = _continuation_message_id is not None + message_id = _continuation_message_id or str(uuid_module.uuid4()) text_block_id = str(uuid_module.uuid4()) - # Yield message start + # Only yield message start for the initial call, not for continuations. setup_time = (time.monotonic() - completion_start) * 1000 logger.info( f"[TIMING] Setup complete, yielding StreamStart at {setup_time:.1f}ms", extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}}, ) - yield StreamStart(messageId=message_id) + if not is_continuation: + yield StreamStart(messageId=message_id, taskId=_task_id) + + # Emit start-step before each LLM call (AI SDK uses this to add step boundaries) + yield StreamStartStep() try: logger.info( @@ -632,6 +643,10 @@ async def stream_chat_completion( ) yield chunk elif isinstance(chunk, StreamFinish): + if has_done_tool_call: + # Tool calls happened — close the step but don't send message-level finish. + # The continuation will open a new step, and finish will come at the end. + yield StreamFinishStep() if not has_done_tool_call: # Emit text-end before finish if we received text but haven't closed it if has_received_text and not text_streaming_ended: @@ -663,6 +678,8 @@ async def stream_chat_completion( has_saved_assistant_message = True has_yielded_end = True + # Emit finish-step before finish (resets AI SDK text/reasoning state) + yield StreamFinishStep() yield chunk elif isinstance(chunk, StreamError): has_yielded_error = True @@ -712,6 +729,10 @@ async def stream_chat_completion( logger.info( f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}" ) + # Close the current step before retrying so the recursive call's + # StreamStartStep doesn't produce unbalanced step events. 
+ if not has_yielded_end: + yield StreamFinishStep() should_retry = True else: # Non-retryable error or max retries exceeded @@ -747,6 +768,7 @@ async def stream_chat_completion( error_response = StreamError(errorText=error_message) yield error_response if not has_yielded_end: + yield StreamFinishStep() yield StreamFinish() return @@ -761,6 +783,8 @@ async def stream_chat_completion( retry_count=retry_count + 1, session=session, context=context, + _continuation_message_id=message_id, # Reuse message ID since start was already sent + _task_id=_task_id, ): yield chunk return # Exit after retry to avoid double-saving in finally block @@ -776,9 +800,13 @@ async def stream_chat_completion( # Build the messages list in the correct order messages_to_save: list[ChatMessage] = [] - # Add assistant message with tool_calls if any + # Add assistant message with tool_calls if any. + # Use extend (not assign) to preserve tool_calls already added by + # _yield_tool_call for long-running tools. if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls + if not assistant_response.tool_calls: + assistant_response.tool_calls = [] + assistant_response.tool_calls.extend(accumulated_tool_calls) logger.info( f"Added {len(accumulated_tool_calls)} tool calls to assistant message" ) @@ -830,6 +858,8 @@ async def stream_chat_completion( session=session, # Pass session object to avoid Redis refetch context=context, tool_call_response=str(tool_response_messages), + _continuation_message_id=message_id, # Reuse message ID to avoid duplicates + _task_id=_task_id, ): yield chunk @@ -1040,6 +1070,10 @@ async def _stream_chat_chunks( :128 ] # OpenRouter limit + # Enable adaptive thinking for Anthropic models via OpenRouter + if config.thinking_enabled and "anthropic" in model.lower(): + extra_body["reasoning"] = {"enabled": True} + api_call_start = time_module.perf_counter() stream = await client.chat.completions.create( model=model, @@ -1374,13 +1408,9 @@ async def _yield_tool_call( operation_id=operation_id, ) - # Save assistant message with tool_call FIRST (required by LLM) - assistant_message = ChatMessage( - role="assistant", - content="", - tool_calls=[tool_calls[yield_idx]], - ) - session.messages.append(assistant_message) + # Attach the tool_call to the current turn's assistant message + # (or create one if this is a tool-only response with no text). 
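+ # OpenAI requires every "tool" result to follow an assistant message whose
+ # tool_calls includes the matching call ID; if text and tool calls end up in
+ # separate assistant entries (the old bug), _merge_consecutive_assistant_messages
+ # collapses them when the history is replayed via to_openai_messages().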
+ session.add_tool_call_to_current_turn(tool_calls[yield_idx]) # Then save pending tool result pending_message = ChatMessage( @@ -1686,6 +1716,7 @@ async def _execute_long_running_tool_with_streaming( task_id, StreamError(errorText=str(e)), ) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) await stream_registry.publish_chunk(task_id, StreamFinish()) await _update_pending_operation( @@ -1802,6 +1833,10 @@ async def _generate_llm_continuation( if session_id: extra_body["session_id"] = session_id[:128] + # Enable adaptive thinking for Anthropic models via OpenRouter + if config.thinking_enabled and "anthropic" in config.model.lower(): + extra_body["reasoning"] = {"enabled": True} + retry_count = 0 last_error: Exception | None = None response = None @@ -1932,6 +1967,10 @@ async def _generate_llm_continuation_with_streaming( if session_id: extra_body["session_id"] = session_id[:128] + # Enable adaptive thinking for Anthropic models via OpenRouter + if config.thinking_enabled and "anthropic" in config.model.lower(): + extra_body["reasoning"] = {"enabled": True} + # Make streaming LLM call (no tools - just text response) from typing import cast @@ -1943,6 +1982,7 @@ async def _generate_llm_continuation_with_streaming( # Publish start event await stream_registry.publish_chunk(task_id, StreamStart(messageId=message_id)) + await stream_registry.publish_chunk(task_id, StreamStartStep()) await stream_registry.publish_chunk(task_id, StreamTextStart(id=text_block_id)) # Stream the response @@ -1966,6 +2006,7 @@ async def _generate_llm_continuation_with_streaming( # Publish end events await stream_registry.publish_chunk(task_id, StreamTextEnd(id=text_block_id)) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) if assistant_content: # Reload session from DB to avoid race condition with user messages @@ -2007,4 +2048,5 @@ async def _generate_llm_continuation_with_streaming( task_id, StreamError(errorText=f"Failed to generate response: {e}"), ) + await stream_registry.publish_chunk(task_id, StreamFinishStep()) await stream_registry.publish_chunk(task_id, StreamFinish()) diff --git a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py index 509d20d9f4..abc34b1fc9 100644 --- a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py +++ b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py @@ -857,8 +857,10 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None: ResponseType, StreamError, StreamFinish, + StreamFinishStep, StreamHeartbeat, StreamStart, + StreamStartStep, StreamTextDelta, StreamTextEnd, StreamTextStart, @@ -872,6 +874,8 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None: type_to_class: dict[str, type[StreamBaseResponse]] = { ResponseType.START.value: StreamStart, ResponseType.FINISH.value: StreamFinish, + ResponseType.START_STEP.value: StreamStartStep, + ResponseType.FINISH_STEP.value: StreamFinishStep, ResponseType.TEXT_START.value: StreamTextStart, ResponseType.TEXT_DELTA.value: StreamTextDelta, ResponseType.TEXT_END.value: StreamTextEnd, diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py new file mode 100644 index 0000000000..cf0e76d3b3 --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py @@ -0,0 +1,154 @@ +"""Dummy Agent 
Generator for testing. + +Returns mock responses matching the format expected from the external service. +Enable via AGENTGENERATOR_USE_DUMMY=true in settings. + +WARNING: This is for testing only. Do not use in production. +""" + +import asyncio +import logging +import uuid +from typing import Any + +logger = logging.getLogger(__name__) + +# Dummy decomposition result (instructions type) +DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = { + "type": "instructions", + "steps": [ + { + "description": "Get input from user", + "action": "input", + "block_name": "AgentInputBlock", + }, + { + "description": "Process the input", + "action": "process", + "block_name": "TextFormatterBlock", + }, + { + "description": "Return output to user", + "action": "output", + "block_name": "AgentOutputBlock", + }, + ], +} + +# Block IDs from backend/blocks/io.py +AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b" +AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4" + + +def _generate_dummy_agent_json() -> dict[str, Any]: + """Generate a minimal valid agent JSON for testing.""" + input_node_id = str(uuid.uuid4()) + output_node_id = str(uuid.uuid4()) + + return { + "id": str(uuid.uuid4()), + "version": 1, + "is_active": True, + "name": "Dummy Test Agent", + "description": "A dummy agent generated for testing purposes", + "nodes": [ + { + "id": input_node_id, + "block_id": AGENT_INPUT_BLOCK_ID, + "input_default": { + "name": "input", + "title": "Input", + "description": "Enter your input", + "placeholder_values": [], + }, + "metadata": {"position": {"x": 0, "y": 0}}, + }, + { + "id": output_node_id, + "block_id": AGENT_OUTPUT_BLOCK_ID, + "input_default": { + "name": "output", + "title": "Output", + "description": "Agent output", + "format": "{output}", + }, + "metadata": {"position": {"x": 400, "y": 0}}, + }, + ], + "links": [ + { + "id": str(uuid.uuid4()), + "source_id": input_node_id, + "sink_id": output_node_id, + "source_name": "result", + "sink_name": "value", + "is_static": False, + }, + ], + } + + +async def decompose_goal_dummy( + description: str, + context: str = "", + library_agents: list[dict[str, Any]] | None = None, +) -> dict[str, Any]: + """Return dummy decomposition result.""" + logger.info("Using dummy agent generator for decompose_goal") + return DUMMY_DECOMPOSITION_RESULT.copy() + + +async def generate_agent_dummy( + instructions: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy agent JSON after a simulated delay.""" + logger.info("Using dummy agent generator for generate_agent (30s delay)") + await asyncio.sleep(30) + return _generate_dummy_agent_json() + + +async def generate_agent_patch_dummy( + update_request: str, + current_agent: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy patched agent (returns the current agent with updated description).""" + logger.info("Using dummy agent generator for generate_agent_patch") + patched = current_agent.copy() + patched["description"] = ( + f"{current_agent.get('description', '')} (updated: {update_request})" + ) + return patched + + +async def customize_template_dummy( + template_agent: dict[str, Any], + modification_request: str, + context: str = "", +) -> dict[str, Any]: + """Return dummy customized template (returns template with updated description).""" + logger.info("Using dummy 
agent generator for customize_template") + customized = template_agent.copy() + customized["description"] = ( + f"{template_agent.get('description', '')} (customized: {modification_request})" + ) + return customized + + +async def get_blocks_dummy() -> list[dict[str, Any]]: + """Return dummy blocks list.""" + logger.info("Using dummy agent generator for get_blocks") + return [ + {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"}, + {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"}, + ] + + +async def health_check_dummy() -> bool: + """Always returns healthy for dummy service.""" + return True diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index 62411b4e1b..2b40c6d6f3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -12,8 +12,19 @@ import httpx from backend.util.settings import Settings +from .dummy import ( + customize_template_dummy, + decompose_goal_dummy, + generate_agent_dummy, + generate_agent_patch_dummy, + get_blocks_dummy, + health_check_dummy, +) + logger = logging.getLogger(__name__) +_dummy_mode_warned = False + def _create_error_response( error_message: str, @@ -90,10 +101,26 @@ def _get_settings() -> Settings: return _settings -def is_external_service_configured() -> bool: - """Check if external Agent Generator service is configured.""" +def _is_dummy_mode() -> bool: + """Check if dummy mode is enabled for testing.""" + global _dummy_mode_warned settings = _get_settings() - return bool(settings.config.agentgenerator_host) + is_dummy = bool(settings.config.agentgenerator_use_dummy) + if is_dummy and not _dummy_mode_warned: + logger.warning( + "Agent Generator running in DUMMY MODE - returning mock responses. " + "Do not use in production!" 
+ ) + _dummy_mode_warned = True + return is_dummy + + +def is_external_service_configured() -> bool: + """Check if external Agent Generator service is configured (or dummy mode).""" + settings = _get_settings() + return bool(settings.config.agentgenerator_host) or bool( + settings.config.agentgenerator_use_dummy + ) def _get_base_url() -> str: @@ -137,6 +164,9 @@ async def decompose_goal_external( - {"type": "error", "error": "...", "error_type": "..."} on error Or None on unexpected error """ + if _is_dummy_mode(): + return await decompose_goal_dummy(description, context, library_agents) + client = _get_client() if context: @@ -226,6 +256,11 @@ async def generate_agent_external( Returns: Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error """ + if _is_dummy_mode(): + return await generate_agent_dummy( + instructions, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -297,6 +332,11 @@ async def generate_agent_patch_external( Returns: Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error """ + if _is_dummy_mode(): + return await generate_agent_patch_dummy( + update_request, current_agent, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -383,6 +423,11 @@ async def customize_template_external( Returns: Customized agent JSON, clarifying questions dict, or error dict on error """ + if _is_dummy_mode(): + return await customize_template_dummy( + template_agent, modification_request, context + ) + client = _get_client() request = modification_request @@ -445,6 +490,9 @@ async def get_blocks_external() -> list[dict[str, Any]] | None: Returns: List of block info dicts or None on error """ + if _is_dummy_mode(): + return await get_blocks_dummy() + client = _get_client() try: @@ -478,6 +526,9 @@ async def health_check() -> bool: if not is_external_service_configured(): return False + if _is_dummy_mode(): + return await health_check_dummy() + client = _get_client() try: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index f55cd567e8..6a8cfa9bbc 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -13,7 +13,8 @@ from backend.api.features.chat.tools.models import ( NoResultsResponse, ) from backend.api.features.store.hybrid_search import unified_hybrid_search -from backend.data.block import BlockType, get_block +from backend.blocks import get_block +from backend.blocks._base import BlockType logger = logging.getLogger(__name__) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py index 0f3d4cbfa5..d567a89bbe 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py @@ -10,7 +10,7 @@ from backend.api.features.chat.tools.find_block import ( FindBlockTool, ) from backend.api.features.chat.tools.models import BlockListResponse -from backend.data.block import BlockType +from backend.blocks._base import BlockType from ._test_data import make_session diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py 
b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index fc4a470fdd..8c29820f8e 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -12,7 +12,8 @@ from backend.api.features.chat.tools.find_block import ( COPILOT_EXCLUDED_BLOCK_IDS, COPILOT_EXCLUDED_BLOCK_TYPES, ) -from backend.data.block import AnyBlockSchema, get_block +from backend.blocks import get_block +from backend.blocks._base import AnyBlockSchema from backend.data.execution import ExecutionContext from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput from backend.data.workspace import get_or_create_workspace diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py index 2aae45e875..aadc161155 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py @@ -6,7 +6,7 @@ import pytest from backend.api.features.chat.tools.models import ErrorResponse from backend.api.features.chat.tools.run_block import RunBlockTool -from backend.data.block import BlockType +from backend.blocks._base import BlockType from ._test_data import make_session diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index b40ae5b160..9c04e8ceef 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -12,12 +12,11 @@ import backend.api.features.store.image_gen as store_image_gen import backend.api.features.store.media as store_media import backend.data.graph as graph_db import backend.data.integrations as integrations_db -from backend.data.block import BlockInput from backend.data.db import transaction from backend.data.execution import get_graph_execution from backend.data.graph import GraphSettings from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include -from backend.data.model import CredentialsMetaInput +from backend.data.model import CredentialsMetaInput, GraphInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.webhooks.graph_lifecycle_hooks import ( on_graph_activate, @@ -1130,7 +1129,7 @@ async def create_preset_from_graph_execution( async def update_preset( user_id: str, preset_id: str, - inputs: Optional[BlockInput] = None, + inputs: Optional[GraphInput] = None, credentials: Optional[dict[str, CredentialsMetaInput]] = None, name: Optional[str] = None, description: Optional[str] = None, diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py index c6bc0e0427..9ecbaecccb 100644 --- a/autogpt_platform/backend/backend/api/features/library/model.py +++ b/autogpt_platform/backend/backend/api/features/library/model.py @@ -6,9 +6,12 @@ import prisma.enums import prisma.models import pydantic -from backend.data.block import BlockInput from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo -from backend.data.model import CredentialsMetaInput, is_credentials_field_name +from backend.data.model import ( + CredentialsMetaInput, + GraphInput, + is_credentials_field_name, +) from backend.util.json import loads as json_loads from backend.util.models import 
Pagination @@ -323,7 +326,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel): graph_id: str graph_version: int - inputs: BlockInput + inputs: GraphInput credentials: dict[str, CredentialsMetaInput] name: str @@ -352,7 +355,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel): Request model used when updating a preset for a library agent. """ - inputs: Optional[BlockInput] = None + inputs: Optional[GraphInput] = None credentials: Optional[dict[str, CredentialsMetaInput]] = None name: Optional[str] = None @@ -395,7 +398,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable): "Webhook must be included in AgentPreset query when webhookId is set" ) - input_data: BlockInput = {} + input_data: GraphInput = {} input_credentials: dict[str, CredentialsMetaInput] = {} for preset_input in preset.InputPresets: diff --git a/autogpt_platform/backend/backend/api/features/otto/service.py b/autogpt_platform/backend/backend/api/features/otto/service.py index 5f00022ff2..992021c0ca 100644 --- a/autogpt_platform/backend/backend/api/features/otto/service.py +++ b/autogpt_platform/backend/backend/api/features/otto/service.py @@ -5,8 +5,8 @@ from typing import Optional import aiohttp from fastapi import HTTPException +from backend.blocks import get_block from backend.data import graph as graph_db -from backend.data.block import get_block from backend.util.settings import Settings from .models import ApiResponse, ChatRequest, GraphData diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers.py b/autogpt_platform/backend/backend/api/features/store/content_handlers.py index cbbdcfbebf..38fc1e27d0 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers.py @@ -152,7 +152,7 @@ class BlockHandler(ContentHandler): async def get_missing_items(self, batch_size: int) -> list[ContentItem]: """Fetch blocks without embeddings.""" - from backend.data.block import get_blocks + from backend.blocks import get_blocks # Get all available blocks all_blocks = get_blocks() @@ -249,7 +249,7 @@ class BlockHandler(ContentHandler): async def get_stats(self) -> dict[str, int]: """Get statistics about block embedding coverage.""" - from backend.data.block import get_blocks + from backend.blocks import get_blocks all_blocks = get_blocks() diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py index fee879fae0..c552e44a9d 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers_test.py @@ -93,7 +93,7 @@ async def test_block_handler_get_missing_items(mocker): mock_existing = [] with patch( - "backend.data.block.get_blocks", + "backend.blocks.get_blocks", return_value=mock_blocks, ): with patch( @@ -135,7 +135,7 @@ async def test_block_handler_get_stats(mocker): mock_embedded = [{"count": 2}] with patch( - "backend.data.block.get_blocks", + "backend.blocks.get_blocks", return_value=mock_blocks, ): with patch( @@ -327,7 +327,7 @@ async def test_block_handler_handles_missing_attributes(): mock_blocks = {"block-minimal": mock_block_class} with patch( - "backend.data.block.get_blocks", + "backend.blocks.get_blocks", return_value=mock_blocks, ): with patch( @@ -360,7 +360,7 @@ async def test_block_handler_skips_failed_blocks(): mock_blocks = {"good-block": good_block, "bad-block": 
bad_block} with patch( - "backend.data.block.get_blocks", + "backend.blocks.get_blocks", return_value=mock_blocks, ): with patch( diff --git a/autogpt_platform/backend/backend/api/features/store/embeddings.py b/autogpt_platform/backend/backend/api/features/store/embeddings.py index 434f2fe2ce..921e103618 100644 --- a/autogpt_platform/backend/backend/api/features/store/embeddings.py +++ b/autogpt_platform/backend/backend/api/features/store/embeddings.py @@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]: ) current_ids = {row["id"] for row in valid_agents} elif content_type == ContentType.BLOCK: - from backend.data.block import get_blocks + from backend.blocks import get_blocks current_ids = set(get_blocks().keys()) elif content_type == ContentType.DOCUMENTATION: diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py index e1b8f402c8..b10bfbcc06 100644 --- a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py +++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py @@ -8,6 +8,7 @@ Includes BM25 reranking for improved lexical relevance. import logging import re +import time from dataclasses import dataclass from typing import Any, Literal @@ -362,7 +363,11 @@ async def unified_hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema(sql_query, *params) + try: + results = await query_raw_with_schema(sql_query, *params) + except Exception as e: + await _log_vector_error_diagnostics(e) + raise total = results[0]["total_count"] if results else 0 # Apply BM25 reranking @@ -686,7 +691,11 @@ async def hybrid_search( LIMIT {limit_param} OFFSET {offset_param} """ - results = await query_raw_with_schema(sql_query, *params) + try: + results = await query_raw_with_schema(sql_query, *params) + except Exception as e: + await _log_vector_error_diagnostics(e) + raise total = results[0]["total_count"] if results else 0 @@ -718,6 +727,87 @@ async def hybrid_search_simple( return await hybrid_search(query=query, page=page, page_size=page_size) +# ============================================================================ +# Diagnostics +# ============================================================================ + +# Rate limit: only log vector error diagnostics once per this interval +_VECTOR_DIAG_INTERVAL_SECONDS = 60 +_last_vector_diag_time: float = 0 + + +async def _log_vector_error_diagnostics(error: Exception) -> None: + """Log diagnostic info when 'type vector does not exist' error occurs. + + Note: Diagnostic queries use query_raw_with_schema which may run on a different + pooled connection than the one that failed. Session-level search_path can differ, + so these diagnostics show cluster-wide state, not necessarily the failed session. + + Includes rate limiting to avoid log spam - only logs once per minute. + Caller should re-raise the error after calling this function. 
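+
+    Intended call pattern (mirrors the call sites in hybrid_search above):
+
+        try:
+            results = await query_raw_with_schema(sql_query, *params)
+        except Exception as e:
+            await _log_vector_error_diagnostics(e)
+            raise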
+ """ + global _last_vector_diag_time + + # Check if this is the vector type error + error_str = str(error).lower() + if not ( + "type" in error_str and "vector" in error_str and "does not exist" in error_str + ): + return + + # Rate limit: only log once per interval + now = time.time() + if now - _last_vector_diag_time < _VECTOR_DIAG_INTERVAL_SECONDS: + return + _last_vector_diag_time = now + + try: + diagnostics: dict[str, object] = {} + + try: + search_path_result = await query_raw_with_schema("SHOW search_path") + diagnostics["search_path"] = search_path_result + except Exception as e: + diagnostics["search_path"] = f"Error: {e}" + + try: + schema_result = await query_raw_with_schema("SELECT current_schema()") + diagnostics["current_schema"] = schema_result + except Exception as e: + diagnostics["current_schema"] = f"Error: {e}" + + try: + user_result = await query_raw_with_schema( + "SELECT current_user, session_user, current_database()" + ) + diagnostics["user_info"] = user_result + except Exception as e: + diagnostics["user_info"] = f"Error: {e}" + + try: + # Check pgvector extension installation (cluster-wide, stable info) + ext_result = await query_raw_with_schema( + "SELECT extname, extversion, nspname as schema " + "FROM pg_extension e " + "JOIN pg_namespace n ON e.extnamespace = n.oid " + "WHERE extname = 'vector'" + ) + diagnostics["pgvector_extension"] = ext_result + except Exception as e: + diagnostics["pgvector_extension"] = f"Error: {e}" + + logger.error( + f"Vector type error diagnostics:\n" + f" Error: {error}\n" + f" search_path: {diagnostics.get('search_path')}\n" + f" current_schema: {diagnostics.get('current_schema')}\n" + f" user_info: {diagnostics.get('user_info')}\n" + f" pgvector_extension: {diagnostics.get('pgvector_extension')}" + ) + except Exception as diag_error: + logger.error(f"Failed to collect vector error diagnostics: {diag_error}") + + # Backward compatibility alias - HybridSearchWeights maps to StoreAgentSearchWeights # for existing code that expects the popularity parameter HybridSearchWeights = StoreAgentSearchWeights diff --git a/autogpt_platform/backend/backend/api/features/store/image_gen.py b/autogpt_platform/backend/backend/api/features/store/image_gen.py index 087a7895ba..64ac203182 100644 --- a/autogpt_platform/backend/backend/api/features/store/image_gen.py +++ b/autogpt_platform/backend/backend/api/features/store/image_gen.py @@ -7,15 +7,6 @@ from replicate.client import Client as ReplicateClient from replicate.exceptions import ReplicateError from replicate.helpers import FileOutput -from backend.blocks.ideogram import ( - AspectRatio, - ColorPalettePreset, - IdeogramModelBlock, - IdeogramModelName, - MagicPromptOption, - StyleType, - UpscaleOption, -) from backend.data.graph import GraphBaseMeta from backend.data.model import CredentialsMetaInput, ProviderName from backend.integrations.credentials_store import ideogram_credentials @@ -50,6 +41,16 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes if not ideogram_credentials.api_key: raise ValueError("Missing Ideogram API key") + from backend.blocks.ideogram import ( + AspectRatio, + ColorPalettePreset, + IdeogramModelBlock, + IdeogramModelName, + MagicPromptOption, + StyleType, + UpscaleOption, + ) + name = graph.name description = f"{name} ({graph.description})" if graph.description else name diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index a8610702cc..dd8ef3611f 100644 --- 
a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -40,10 +40,11 @@ from backend.api.model import ( UpdateTimezoneRequest, UploadFileResponse, ) +from backend.blocks import get_block, get_blocks from backend.data import execution as execution_db from backend.data import graph as graph_db from backend.data.auth import api_key as api_key_db -from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks +from backend.data.block import BlockInput, CompletedBlockOutput from backend.data.credit import ( AutoTopUpConfig, RefundRequest, diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py index a6c16393c7..524e47c31d 100644 --- a/autogpt_platform/backend/backend/blocks/__init__.py +++ b/autogpt_platform/backend/backend/blocks/__init__.py @@ -3,22 +3,19 @@ import logging import os import re from pathlib import Path -from typing import TYPE_CHECKING, TypeVar +from typing import Sequence, Type, TypeVar +from backend.blocks._base import AnyBlockSchema, BlockType from backend.util.cache import cached logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from backend.data.block import Block - T = TypeVar("T") @cached(ttl_seconds=3600) -def load_all_blocks() -> dict[str, type["Block"]]: - from backend.data.block import Block +def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]: + from backend.blocks._base import Block from backend.util.settings import Config # Check if example blocks should be loaded from settings @@ -50,8 +47,8 @@ def load_all_blocks() -> dict[str, type["Block"]]: importlib.import_module(f".{module}", package=__name__) # Load all Block instances from the available modules - available_blocks: dict[str, type["Block"]] = {} - for block_cls in all_subclasses(Block): + available_blocks: dict[str, type["AnyBlockSchema"]] = {} + for block_cls in _all_subclasses(Block): class_name = block_cls.__name__ if class_name.endswith("Base"): @@ -64,7 +61,7 @@ def load_all_blocks() -> dict[str, type["Block"]]: "please name the class with 'Base' at the end" ) - block = block_cls.create() + block = block_cls() # pyright: ignore[reportAbstractUsage] if not isinstance(block.id, str) or len(block.id) != 36: raise ValueError( @@ -105,7 +102,7 @@ def load_all_blocks() -> dict[str, type["Block"]]: available_blocks[block.id] = block_cls # Filter out blocks with incomplete auth configs, e.g. 
missing OAuth server secrets - from backend.data.block import is_block_auth_configured + from ._utils import is_block_auth_configured filtered_blocks = {} for block_id, block_cls in available_blocks.items(): @@ -115,11 +112,48 @@ def load_all_blocks() -> dict[str, type["Block"]]: return filtered_blocks -__all__ = ["load_all_blocks"] - - -def all_subclasses(cls: type[T]) -> list[type[T]]: +def _all_subclasses(cls: type[T]) -> list[type[T]]: subclasses = cls.__subclasses__() for subclass in subclasses: - subclasses += all_subclasses(subclass) + subclasses += _all_subclasses(subclass) return subclasses + + +# ============== Block access helper functions ============== # + + +def get_blocks() -> dict[str, Type["AnyBlockSchema"]]: + return load_all_blocks() + + +# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281 +def get_block(block_id: str) -> "AnyBlockSchema | None": + cls = get_blocks().get(block_id) + return cls() if cls else None + + +@cached(ttl_seconds=3600) +def get_webhook_block_ids() -> Sequence[str]: + return [ + id + for id, B in get_blocks().items() + if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL) + ] + + +@cached(ttl_seconds=3600) +def get_io_block_ids() -> Sequence[str]: + return [ + id + for id, B in get_blocks().items() + if B().block_type in (BlockType.INPUT, BlockType.OUTPUT) + ] + + +@cached(ttl_seconds=3600) +def get_human_in_the_loop_block_ids() -> Sequence[str]: + return [ + id + for id, B in get_blocks().items() + if B().block_type == BlockType.HUMAN_IN_THE_LOOP + ] diff --git a/autogpt_platform/backend/backend/blocks/_base.py b/autogpt_platform/backend/backend/blocks/_base.py new file mode 100644 index 0000000000..0ba4daec40 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/_base.py @@ -0,0 +1,739 @@ +import inspect +import logging +from abc import ABC, abstractmethod +from enum import Enum +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Generic, + Optional, + Type, + TypeAlias, + TypeVar, + cast, + get_origin, +) + +import jsonref +import jsonschema +from pydantic import BaseModel + +from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry +from backend.data.model import ( + Credentials, + CredentialsFieldInfo, + CredentialsMetaInput, + SchemaField, + is_credentials_field_name, +) +from backend.integrations.providers import ProviderName +from backend.util import json +from backend.util.exceptions import ( + BlockError, + BlockExecutionError, + BlockInputError, + BlockOutputError, + BlockUnknownError, +) +from backend.util.settings import Config + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from backend.data.execution import ExecutionContext + from backend.data.model import ContributorDetails, NodeExecutionStats + + from ..data.graph import Link + +app_config = Config() + + +BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]] + + +class BlockType(Enum): + STANDARD = "Standard" + INPUT = "Input" + OUTPUT = "Output" + NOTE = "Note" + WEBHOOK = "Webhook" + WEBHOOK_MANUAL = "Webhook (manual)" + AGENT = "Agent" + AI = "AI" + AYRSHARE = "Ayrshare" + HUMAN_IN_THE_LOOP = "Human In The Loop" + + +class BlockCategory(Enum): + AI = "Block that leverages AI to perform a task." + SOCIAL = "Block that interacts with social media platforms." + TEXT = "Block that processes text data." + SEARCH = "Block that searches or extracts information from the internet." + BASIC = "Block that performs basic operations." 
+ INPUT = "Block that interacts with input of the graph." + OUTPUT = "Block that interacts with output of the graph." + LOGIC = "Programming logic to control the flow of your agent" + COMMUNICATION = "Block that interacts with communication platforms." + DEVELOPER_TOOLS = "Developer tools such as GitHub blocks." + DATA = "Block that interacts with structured data." + HARDWARE = "Block that interacts with hardware." + AGENT = "Block that interacts with other agents." + CRM = "Block that interacts with CRM services." + SAFETY = ( + "Block that provides AI safety mechanisms such as detecting harmful content" + ) + PRODUCTIVITY = "Block that helps with productivity" + ISSUE_TRACKING = "Block that helps with issue tracking" + MULTIMEDIA = "Block that interacts with multimedia content" + MARKETING = "Block that helps with marketing" + + def dict(self) -> dict[str, str]: + return {"category": self.name, "description": self.value} + + +class BlockCostType(str, Enum): + RUN = "run" # cost X credits per run + BYTE = "byte" # cost X credits per byte + SECOND = "second" # cost X credits per second + + +class BlockCost(BaseModel): + cost_amount: int + cost_filter: BlockInput + cost_type: BlockCostType + + def __init__( + self, + cost_amount: int, + cost_type: BlockCostType = BlockCostType.RUN, + cost_filter: Optional[BlockInput] = None, + **data: Any, + ) -> None: + super().__init__( + cost_amount=cost_amount, + cost_filter=cost_filter or {}, + cost_type=cost_type, + **data, + ) + + +class BlockInfo(BaseModel): + id: str + name: str + inputSchema: dict[str, Any] + outputSchema: dict[str, Any] + costs: list[BlockCost] + description: str + categories: list[dict[str, str]] + contributors: list[dict[str, Any]] + staticOutput: bool + uiType: str + + +class BlockSchema(BaseModel): + cached_jsonschema: ClassVar[dict[str, Any]] + + @classmethod + def jsonschema(cls) -> dict[str, Any]: + if cls.cached_jsonschema: + return cls.cached_jsonschema + + model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True) + + def ref_to_dict(obj): + if isinstance(obj, dict): + # OpenAPI <3.1 does not support sibling fields that has a $ref key + # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item. 
+ keys = {"allOf", "anyOf", "oneOf"} + one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None) + if one_key: + obj.update(obj[one_key][0]) + + return { + key: ref_to_dict(value) + for key, value in obj.items() + if not key.startswith("$") and key != one_key + } + elif isinstance(obj, list): + return [ref_to_dict(item) for item in obj] + + return obj + + cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model)) + + return cls.cached_jsonschema + + @classmethod + def validate_data(cls, data: BlockInput) -> str | None: + return json.validate_with_jsonschema( + schema=cls.jsonschema(), + data={k: v for k, v in data.items() if v is not None}, + ) + + @classmethod + def get_mismatch_error(cls, data: BlockInput) -> str | None: + return cls.validate_data(data) + + @classmethod + def get_field_schema(cls, field_name: str) -> dict[str, Any]: + model_schema = cls.jsonschema().get("properties", {}) + if not model_schema: + raise ValueError(f"Invalid model schema {cls}") + + property_schema = model_schema.get(field_name) + if not property_schema: + raise ValueError(f"Invalid property name {field_name}") + + return property_schema + + @classmethod + def validate_field(cls, field_name: str, data: BlockInput) -> str | None: + """ + Validate the data against a specific property (one of the input/output name). + Returns the validation error message if the data does not match the schema. + """ + try: + property_schema = cls.get_field_schema(field_name) + jsonschema.validate(json.to_dict(data), property_schema) + return None + except jsonschema.ValidationError as e: + return str(e) + + @classmethod + def get_fields(cls) -> set[str]: + return set(cls.model_fields.keys()) + + @classmethod + def get_required_fields(cls) -> set[str]: + return { + field + for field, field_info in cls.model_fields.items() + if field_info.is_required() + } + + @classmethod + def __pydantic_init_subclass__(cls, **kwargs): + """Validates the schema definition. Rules: + - Fields with annotation `CredentialsMetaInput` MUST be + named `credentials` or `*_credentials` + - Fields named `credentials` or `*_credentials` MUST be + of type `CredentialsMetaInput` + """ + super().__pydantic_init_subclass__(**kwargs) + + # Reset cached JSON schema to prevent inheriting it from parent class + cls.cached_jsonschema = {} + + credentials_fields = cls.get_credentials_fields() + + for field_name in cls.get_fields(): + if is_credentials_field_name(field_name): + if field_name not in credentials_fields: + raise TypeError( + f"Credentials field '{field_name}' on {cls.__qualname__} " + f"is not of type {CredentialsMetaInput.__name__}" + ) + + CredentialsMetaInput.validate_credentials_field_schema( + cls.get_field_schema(field_name), field_name + ) + + elif field_name in credentials_fields: + raise KeyError( + f"Credentials field '{field_name}' on {cls.__qualname__} " + "has invalid name: must be 'credentials' or *_credentials" + ) + + @classmethod + def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]: + return { + field_name: info.annotation + for field_name, info in cls.model_fields.items() + if ( + inspect.isclass(info.annotation) + and issubclass( + get_origin(info.annotation) or info.annotation, + CredentialsMetaInput, + ) + ) + } + + @classmethod + def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]: + """ + Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput). 
+ + Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config} + + Raises: + ValueError: If multiple fields have the same kwarg_name, as this would + cause silent overwriting and only the last field would be processed. + """ + result: dict[str, dict[str, Any]] = {} + schema = cls.jsonschema() + properties = schema.get("properties", {}) + + for field_name, field_schema in properties.items(): + auto_creds = field_schema.get("auto_credentials") + if auto_creds: + kwarg_name = auto_creds.get("kwarg_name", "credentials") + if kwarg_name in result: + raise ValueError( + f"Duplicate auto_credentials kwarg_name '{kwarg_name}' " + f"in fields '{result[kwarg_name]['field_name']}' and " + f"'{field_name}' on {cls.__qualname__}" + ) + result[kwarg_name] = { + "field_name": field_name, + "config": auto_creds, + } + return result + + @classmethod + def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]: + result = {} + + # Regular credentials fields + for field_name in cls.get_credentials_fields().keys(): + result[field_name] = CredentialsFieldInfo.model_validate( + cls.get_field_schema(field_name), by_alias=True + ) + + # Auto-generated credentials fields (from GoogleDriveFileInput etc.) + for kwarg_name, info in cls.get_auto_credentials_fields().items(): + config = info["config"] + # Build a schema-like dict that CredentialsFieldInfo can parse + auto_schema = { + "credentials_provider": [config.get("provider", "google")], + "credentials_types": [config.get("type", "oauth2")], + "credentials_scopes": config.get("scopes"), + } + result[kwarg_name] = CredentialsFieldInfo.model_validate( + auto_schema, by_alias=True + ) + + return result + + @classmethod + def get_input_defaults(cls, data: BlockInput) -> BlockInput: + return data # Return as is, by default. + + @classmethod + def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]: + input_fields_from_nodes = {link.sink_name for link in links} + return input_fields_from_nodes - set(data) + + @classmethod + def get_missing_input(cls, data: BlockInput) -> set[str]: + return cls.get_required_fields() - set(data) + + +class BlockSchemaInput(BlockSchema): + """ + Base schema class for block inputs. + All block input schemas should extend this class for consistency. + """ + + pass + + +class BlockSchemaOutput(BlockSchema): + """ + Base schema class for block outputs that includes a standard error field. + All block output schemas should extend this class to ensure consistent error handling. + """ + + error: str = SchemaField( + description="Error message if the operation failed", default="" + ) + + +BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput) +BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput) + + +class EmptyInputSchema(BlockSchemaInput): + pass + + +class EmptyOutputSchema(BlockSchemaOutput): + pass + + +# For backward compatibility - will be deprecated +EmptySchema = EmptyOutputSchema + + +# --8<-- [start:BlockWebhookConfig] +class BlockManualWebhookConfig(BaseModel): + """ + Configuration model for webhook-triggered blocks on which + the user has to manually set up the webhook at the provider. + """ + + provider: ProviderName + """The service provider that the webhook connects to""" + + webhook_type: str + """ + Identifier for the webhook type. E.g. GitHub has repo and organization level hooks. + + Only for use in the corresponding `WebhooksManager`. + """ + + event_filter_input: str = "" + """ + Name of the block's event filter input. 
+ Leave empty if the corresponding webhook doesn't have distinct event/payload types.
+ """
+
+ event_format: str = "{event}"
+ """
+ Template string for the event(s) that a block instance subscribes to.
+ Applied individually to each event selected in the event filter input.
+
+ Example: `"pull_request.{event}"` -> `"pull_request.opened"`
+ """
+
+
+class BlockWebhookConfig(BlockManualWebhookConfig):
+ """
+ Configuration model for webhook-triggered blocks for which
+ the webhook can be automatically set up through the provider's API.
+ """
+
+ resource_format: str
+ """
+ Template string for the resource that a block instance subscribes to.
+ Fields will be filled from the block's inputs (except `payload`).
+
+ Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)
+
+ Only for use in the corresponding `WebhooksManager`.
+ """
+ # --8<-- [end:BlockWebhookConfig]
+
+
+class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
+ def __init__(
+ self,
+ id: str = "",
+ description: str = "",
+ contributors: list["ContributorDetails"] = [],
+ categories: set[BlockCategory] | None = None,
+ input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
+ output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
+ test_input: BlockInput | list[BlockInput] | None = None,
+ test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
+ test_mock: dict[str, Any] | None = None,
+ test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
+ disabled: bool = False,
+ static_output: bool = False,
+ block_type: BlockType = BlockType.STANDARD,
+ webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
+ is_sensitive_action: bool = False,
+ ):
+ """
+ Initialize the block with the given schema.
+
+ Args:
+ id: The unique identifier for the block. This value is persisted in the
+ DB, so it must be unique and constant across application runs.
+ Use the UUID format for the ID.
+ description: The description of the block, explaining what the block does.
+ contributors: The list of contributors who contributed to the block.
+ input_schema: The schema, defined as a Pydantic model, for the input data.
+ output_schema: The schema, defined as a Pydantic model, for the output data.
+ test_input: The list or single sample input data for the block, for testing.
+ test_output: The list or single expected output if the test_input is run.
+ test_mock: Function names on the block implementation to mock during test runs.
+ disabled: If the block is disabled, it will not be available for execution.
+ static_output: Whether the output links of the block are static by default.
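+ test_credentials: The credentials, or a map of credentials, to use when
+ running the block's tests.
+ categories: The set of categories the block belongs to.
+ block_type: The type of the block (e.g. standard or webhook-triggered).
+ webhook_config: The webhook configuration, for webhook-triggered blocks.
+ is_sensitive_action: Whether executions of this block may require human
+ review when sensitive-action safe mode is enabled.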
+ """
+ from backend.data.model import NodeExecutionStats
+
+ self.id = id
+ self.input_schema = input_schema
+ self.output_schema = output_schema
+ self.test_input = test_input
+ self.test_output = test_output
+ self.test_mock = test_mock
+ self.test_credentials = test_credentials
+ self.description = description
+ self.categories = categories or set()
+ self.contributors = contributors or set()
+ self.disabled = disabled
+ self.static_output = static_output
+ self.block_type = block_type
+ self.webhook_config = webhook_config
+ self.is_sensitive_action = is_sensitive_action
+ self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()
+
+ if self.webhook_config:
+ if isinstance(self.webhook_config, BlockWebhookConfig):
+ # Enforce presence of credentials field on auto-setup webhook blocks
+ if not (cred_fields := self.input_schema.get_credentials_fields()):
+ raise TypeError(
+ "credentials field is required on auto-setup webhook blocks"
+ )
+ # Disallow multiple credentials inputs on webhook blocks
+ elif len(cred_fields) > 1:
+ raise ValueError(
+ "Multiple credentials inputs not supported on webhook blocks"
+ )
+
+ self.block_type = BlockType.WEBHOOK
+ else:
+ self.block_type = BlockType.WEBHOOK_MANUAL
+
+ # Enforce shape of webhook event filter, if present
+ if self.webhook_config.event_filter_input:
+ event_filter_field = self.input_schema.model_fields[
+ self.webhook_config.event_filter_input
+ ]
+ if not (
+ isinstance(event_filter_field.annotation, type)
+ and issubclass(event_filter_field.annotation, BaseModel)
+ and all(
+ field.annotation is bool
+ for field in event_filter_field.annotation.model_fields.values()
+ )
+ ):
+ raise NotImplementedError(
+ f"{self.name} has an invalid webhook event selector: "
+ "field must be a BaseModel and all its fields must be boolean"
+ )
+
+ # Enforce presence of 'payload' input
+ if "payload" not in self.input_schema.model_fields:
+ raise TypeError(
+ f"{self.name} is webhook-triggered but has no 'payload' input"
+ )
+
+ # Disable webhook-triggered block if webhook functionality not available
+ if not app_config.platform_base_url:
+ self.disabled = True
+
+ @abstractmethod
+ async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
+ """
+ Run the block with the given input data.
+ Args:
+ input_data: The input data with the structure of input_schema.
+
+ Kwargs: As of 2025-02-14, these include:
+ graph_id: The ID of the graph.
+ node_id: The ID of the node.
+ graph_exec_id: The ID of the graph execution.
+ node_exec_id: The ID of the node execution.
+ user_id: The ID of the user.
+
+ Returns:
+ An async generator that yields (output_name, output_data).
+ output_name: One of the output names defined in the Block's output_schema.
+ output_data: The data for the output_name, matching the defined schema.
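+
+ Example (illustrative):
+ yield "result", processed_value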
+ """ + # --- satisfy the type checker, never executed ------------- + if False: # noqa: SIM115 + yield "name", "value" # pyright: ignore[reportMissingYield] + raise NotImplementedError(f"{self.name} does not implement the run method.") + + async def run_once( + self, input_data: BlockSchemaInputType, output: str, **kwargs + ) -> Any: + async for item in self.run(input_data, **kwargs): + name, data = item + if name == output: + return data + raise ValueError(f"{self.name} did not produce any output for {output}") + + def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats": + self.execution_stats += stats + return self.execution_stats + + @property + def name(self): + return self.__class__.__name__ + + def to_dict(self): + return { + "id": self.id, + "name": self.name, + "inputSchema": self.input_schema.jsonschema(), + "outputSchema": self.output_schema.jsonschema(), + "description": self.description, + "categories": [category.dict() for category in self.categories], + "contributors": [ + contributor.model_dump() for contributor in self.contributors + ], + "staticOutput": self.static_output, + "uiType": self.block_type.value, + } + + def get_info(self) -> BlockInfo: + from backend.data.credit import get_block_cost + + return BlockInfo( + id=self.id, + name=self.name, + inputSchema=self.input_schema.jsonschema(), + outputSchema=self.output_schema.jsonschema(), + costs=get_block_cost(self), + description=self.description, + categories=[category.dict() for category in self.categories], + contributors=[ + contributor.model_dump() for contributor in self.contributors + ], + staticOutput=self.static_output, + uiType=self.block_type.value, + ) + + async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: + try: + async for output_name, output_data in self._execute(input_data, **kwargs): + yield output_name, output_data + except Exception as ex: + if isinstance(ex, BlockError): + raise ex + else: + raise ( + BlockExecutionError + if isinstance(ex, ValueError) + else BlockUnknownError + )( + message=str(ex), + block_name=self.name, + block_id=self.id, + ) from ex + + async def is_block_exec_need_review( + self, + input_data: BlockInput, + *, + user_id: str, + node_id: str, + node_exec_id: str, + graph_exec_id: str, + graph_id: str, + graph_version: int, + execution_context: "ExecutionContext", + **kwargs, + ) -> tuple[bool, BlockInput]: + """ + Check if this block execution needs human review and handle the review process. 
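+
+ A review is only required for sensitive-action blocks, and only when
+ sensitive_action_safe_mode is enabled on the execution context.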
+
+ Returns:
+ Tuple of (should_pause, input_data_to_use)
+ - should_pause: True if execution should be paused for review
+ - input_data_to_use: The input data to use (may be modified by the reviewer)
+ """
+ if not (
+ self.is_sensitive_action and execution_context.sensitive_action_safe_mode
+ ):
+ return False, input_data
+
+ from backend.blocks.helpers.review import HITLReviewHelper
+
+ # Handle the review request and get the decision
+ decision = await HITLReviewHelper.handle_review_decision(
+ input_data=input_data,
+ user_id=user_id,
+ node_id=node_id,
+ node_exec_id=node_exec_id,
+ graph_exec_id=graph_exec_id,
+ graph_id=graph_id,
+ graph_version=graph_version,
+ block_name=self.name,
+ editable=True,
+ )
+
+ if decision is None:
+ # We're awaiting review - pause execution
+ return True, input_data
+
+ if not decision.should_proceed:
+ # Review was rejected; raise an error to stop execution
+ raise BlockExecutionError(
+ message=f"Block execution rejected by reviewer: {decision.message}",
+ block_name=self.name,
+ block_id=self.id,
+ )
+
+ # Review was approved - use the potentially modified data
+ # ReviewResult.data must be a dict for block inputs
+ reviewed_data = decision.review_result.data
+ if not isinstance(reviewed_data, dict):
+ raise BlockExecutionError(
+ message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
+ block_name=self.name,
+ block_id=self.id,
+ )
+ return False, reviewed_data
+
+ async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
+ # Check for review requirement only if running within a graph execution context
+ # Direct block execution (e.g., from chat) skips the review process
+ has_graph_context = all(
+ key in kwargs
+ for key in (
+ "node_exec_id",
+ "graph_exec_id",
+ "graph_id",
+ "execution_context",
+ )
+ )
+ if has_graph_context:
+ should_pause, input_data = await self.is_block_exec_need_review(
+ input_data, **kwargs
+ )
+ if should_pause:
+ return
+
+ # Validate the input data (original or reviewer-modified) once
+ if error := self.input_schema.validate_data(input_data):
+ raise BlockInputError(
+ message=f"Unable to execute block with invalid input data: {error}",
+ block_name=self.name,
+ block_id=self.id,
+ )
+
+ # Use the validated input data
+ async for output_name, output_data in self.run(
+ self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
+ **kwargs,
+ ):
+ if output_name == "error":
+ raise BlockExecutionError(
+ message=output_data, block_name=self.name, block_id=self.id
+ )
+ if self.block_type == BlockType.STANDARD and (
+ error := self.output_schema.validate_field(output_name, output_data)
+ ):
+ raise BlockOutputError(
+ message=f"Block produced invalid output data: {error}",
+ block_name=self.name,
+ block_id=self.id,
+ )
+ yield output_name, output_data
+
+ def is_triggered_by_event_type(
+ self, trigger_config: dict[str, Any], event_type: str
+ ) -> bool:
+ if not self.webhook_config:
+ raise TypeError("This method can't be used on non-trigger blocks")
+ if not self.webhook_config.event_filter_input:
+ return True
+ event_filter = trigger_config.get(self.webhook_config.event_filter_input)
+ if not event_filter:
+ raise ValueError("Event filter is not configured on trigger")
+ return event_type in [
+ self.webhook_config.event_format.format(event=k)
+ for k in event_filter
+ if event_filter[k] is True
+ ]
+
+
+# Type alias for any block with standard input/output schemas
+AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]
diff --git a/autogpt_platform/backend/backend/blocks/_utils.py b/autogpt_platform/backend/backend/blocks/_utils.py
new file mode 100644
index 0000000000..bec033bd2c
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/_utils.py
@@ -0,0 +1,122 @@
+import logging
+import os
+
+from backend.integrations.providers import ProviderName
+
+from ._base import AnyBlockSchema
+
+logger = logging.getLogger(__name__)
+
+
+def is_block_auth_configured(
+ block_cls: type[AnyBlockSchema],
+) -> bool:
+ """
+ Check if a block has a valid authentication method configured at runtime.
+
+ For example, if a block is OAuth-only and the provider's OAuth env vars are
+ not set, it should not be shown in the UI.
+ """
+ from backend.sdk.registry import AutoRegistry
+
+ # Create an instance to access input_schema
+ try:
+ block = block_cls()
+ except Exception as e:
+ # If we can't create a block instance, assume it's not OAuth-only
+ logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
+ return True
+ logger.debug(
+ f"Checking if block {block_cls.__name__} has a valid provider configured"
+ )
+
+ # Get all credential inputs from input schema
+ credential_inputs = block.input_schema.get_credentials_fields_info()
+ required_inputs = block.input_schema.get_required_fields()
+ if not credential_inputs:
+ logger.debug(
+ f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
+ )
+ return True
+
+ # Check credential inputs
+ if len(required_inputs.intersection(credential_inputs.keys())) == 0:
+ logger.debug(
+ f"Block {block_cls.__name__} has only optional credential inputs"
+ " - will work without credentials configured"
+ )
+
+ # Check if the credential inputs for this block are correctly configured
+ for field_name, field_info in credential_inputs.items():
+ provider_names = field_info.provider
+ if not provider_names:
+ logger.warning(
+ f"Block {block_cls.__name__} "
+ f"has credential input '{field_name}' with no provider options"
+ " - Disabling"
+ )
+ return False
+
+ # If a field has multiple possible providers, each one needs to be usable to
+ # prevent breaking the UX
+ for _provider_name in provider_names:
+ provider_name = _provider_name.value
+ if provider_name in ProviderName.__members__.values():
+ logger.debug(
+ f"Block {block_cls.__name__} credential input '{field_name}' "
+ f"provider '{provider_name}' is part of the legacy provider system"
+ " - Treating as valid"
+ )
+ break
+
+ provider = AutoRegistry.get_provider(provider_name)
+ if not provider:
+ logger.warning(
+ f"Block {block_cls.__name__} credential input '{field_name}' "
+ f"refers to unknown provider '{provider_name}' - Disabling"
+ )
+ return False
+
+ # Check the provider's supported auth types
+ if field_info.supported_types != provider.supported_auth_types:
+ logger.warning(
+ f"Block {block_cls.__name__} credential input '{field_name}' "
+ f"has mismatched supported auth types (field <> Provider): "
+ f"{field_info.supported_types} != {provider.supported_auth_types}"
+ )
+
+ if not (supported_auth_types := provider.supported_auth_types):
+ # No auth methods have been configured for this provider
+ logger.warning(
+ f"Block {block_cls.__name__} credential input '{field_name}' "
+ f"provider '{provider_name}' "
+ "has no authentication methods configured - Disabling"
+ )
+ return False
+
+ # Check if provider supports OAuth
+ if "oauth2" in supported_auth_types:
+ # Check if OAuth environment variables are set
+ if (oauth_config := provider.oauth_config) and bool(
+ os.getenv(oauth_config.client_id_env_var)
+ and os.getenv(oauth_config.client_secret_env_var) + ): + logger.debug( + f"Block {block_cls.__name__} credential input '{field_name}' " + f"provider '{provider_name}' is configured for OAuth" + ) + else: + logger.error( + f"Block {block_cls.__name__} credential input '{field_name}' " + f"provider '{provider_name}' " + "is missing OAuth client ID or secret - Disabling" + ) + return False + + logger.debug( + f"Block {block_cls.__name__} credential input '{field_name}' is valid; " + f"supported credential types: {', '.join(field_info.supported_types)}" + ) + + return True diff --git a/autogpt_platform/backend/backend/blocks/agent.py b/autogpt_platform/backend/backend/blocks/agent.py index 0efc0a3369..574dbc2530 100644 --- a/autogpt_platform/backend/backend/blocks/agent.py +++ b/autogpt_platform/backend/backend/blocks/agent.py @@ -1,7 +1,7 @@ import logging -from typing import Any, Optional +from typing import TYPE_CHECKING, Any, Optional -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockInput, @@ -9,13 +9,15 @@ from backend.data.block import ( BlockSchema, BlockSchemaInput, BlockType, - get_block, ) from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks from backend.data.model import NodeExecutionStats, SchemaField from backend.util.json import validate_with_jsonschema from backend.util.retry import func_retry +if TYPE_CHECKING: + from backend.executor.utils import LogMetadata + _logger = logging.getLogger(__name__) @@ -124,9 +126,10 @@ class AgentExecutorBlock(Block): graph_version: int, graph_exec_id: str, user_id: str, - logger, + logger: "LogMetadata", ) -> BlockOutput: + from backend.blocks import get_block from backend.data.execution import ExecutionEventType from backend.executor import utils as execution_utils @@ -198,7 +201,7 @@ class AgentExecutorBlock(Block): self, graph_exec_id: str, user_id: str, - logger, + logger: "LogMetadata", ) -> None: from backend.executor import utils as execution_utils diff --git a/autogpt_platform/backend/backend/blocks/ai_condition.py b/autogpt_platform/backend/backend/blocks/ai_condition.py index 2a5cdcdeec..c28c1e9f7d 100644 --- a/autogpt_platform/backend/backend/blocks/ai_condition.py +++ b/autogpt_platform/backend/backend/blocks/ai_condition.py @@ -1,5 +1,11 @@ from typing import Any +from backend.blocks._base import ( + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.llm import ( DEFAULT_LLM_MODEL, TEST_CREDENTIALS, @@ -11,12 +17,6 @@ from backend.blocks.llm import ( LLMResponse, llm_call, ) -from backend.data.block import ( - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py index 91be33a60e..402e520ea0 100644 --- a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py @@ -6,7 +6,7 @@ from pydantic import SecretStr from replicate.client import Client as ReplicateClient from replicate.helpers import FileOutput -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py index e40731cd97..fcea24fb01 100644 --- 
a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py @@ -5,7 +5,12 @@ from pydantic import SecretStr from replicate.client import Client as ReplicateClient from replicate.helpers import FileOutput -from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import ( + Block, + BlockCategory, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.data.execution import ExecutionContext from backend.data.model import ( APIKeyCredentials, diff --git a/autogpt_platform/backend/backend/blocks/ai_music_generator.py b/autogpt_platform/backend/backend/blocks/ai_music_generator.py index 1ecb78f95e..9a0639a9c0 100644 --- a/autogpt_platform/backend/backend/blocks/ai_music_generator.py +++ b/autogpt_platform/backend/backend/blocks/ai_music_generator.py @@ -6,7 +6,7 @@ from typing import Literal from pydantic import SecretStr from replicate.client import Client as ReplicateClient -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py index eb60843185..2c53748fde 100644 --- a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py @@ -6,7 +6,7 @@ from typing import Literal from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/apollo/organization.py b/autogpt_platform/backend/backend/blocks/apollo/organization.py index 93acbff0b8..6722de4a79 100644 --- a/autogpt_platform/backend/backend/blocks/apollo/organization.py +++ b/autogpt_platform/backend/backend/blocks/apollo/organization.py @@ -1,3 +1,10 @@ +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._auth import ( TEST_CREDENTIALS, @@ -10,13 +17,6 @@ from backend.blocks.apollo.models import ( PrimaryPhone, SearchOrganizationsRequest, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/apollo/people.py b/autogpt_platform/backend/backend/blocks/apollo/people.py index a58321ecfc..b5059a2a26 100644 --- a/autogpt_platform/backend/backend/blocks/apollo/people.py +++ b/autogpt_platform/backend/backend/blocks/apollo/people.py @@ -1,5 +1,12 @@ import asyncio +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._auth import ( TEST_CREDENTIALS, @@ -14,13 +21,6 @@ from backend.blocks.apollo.models import ( SearchPeopleRequest, SenorityLevels, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/apollo/person.py b/autogpt_platform/backend/backend/blocks/apollo/person.py index 84b86d2bfd..4d586175e0 100644 --- 
a/autogpt_platform/backend/backend/blocks/apollo/person.py +++ b/autogpt_platform/backend/backend/blocks/apollo/person.py @@ -1,3 +1,10 @@ +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._auth import ( TEST_CREDENTIALS, @@ -6,13 +13,6 @@ from backend.blocks.apollo._auth import ( ApolloCredentialsInput, ) from backend.blocks.apollo.models import Contact, EnrichPersonRequest -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/_util.py b/autogpt_platform/backend/backend/blocks/ayrshare/_util.py index 8d0b9914f9..231239310f 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/_util.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/_util.py @@ -3,7 +3,7 @@ from typing import Optional from pydantic import BaseModel, Field -from backend.data.block import BlockSchemaInput +from backend.blocks._base import BlockSchemaInput from backend.data.model import SchemaField, UserIntegrations from backend.integrations.ayrshare import AyrshareClient from backend.util.clients import get_database_manager_async_client diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index 95193b3feb..f129d2707b 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -1,7 +1,7 @@ import enum from typing import Any -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py index 95c92a41ab..d3f482fc65 100644 --- a/autogpt_platform/backend/backend/blocks/block.py +++ b/autogpt_platform/backend/backend/blocks/block.py @@ -2,7 +2,7 @@ import os import re from typing import Type -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/branching.py b/autogpt_platform/backend/backend/blocks/branching.py index e9177a8b65..fa4d8089ff 100644 --- a/autogpt_platform/backend/backend/blocks/branching.py +++ b/autogpt_platform/backend/backend/blocks/branching.py @@ -1,7 +1,7 @@ from enum import Enum from typing import Any -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/claude_code.py b/autogpt_platform/backend/backend/blocks/claude_code.py index 4ef44603b2..2e870f02b6 100644 --- a/autogpt_platform/backend/backend/blocks/claude_code.py +++ b/autogpt_platform/backend/backend/blocks/claude_code.py @@ -1,12 +1,12 @@ import json import shlex import uuid -from typing import Literal, Optional +from typing import TYPE_CHECKING, Literal, Optional from e2b import AsyncSandbox as BaseAsyncSandbox -from pydantic import BaseModel, SecretStr +from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + 
from backend.executor.utils import ExecutionContext class ClaudeCodeExecutionError(Exception): @@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block): advanced=True, ) - class FileOutput(BaseModel): - """A file extracted from the sandbox.""" - - path: str - relative_path: str # Path relative to working directory (for GitHub, etc.) - name: str - content: str - class Output(BlockSchemaOutput): response: str = SchemaField( description="The output/response from Claude Code execution" ) - files: list["ClaudeCodeBlock.FileOutput"] = SchemaField( + files: list[SandboxFileOutput] = SchemaField( description=( "List of text files created/modified by Claude Code during this execution. " - "Each file has 'path', 'relative_path', 'name', and 'content' fields." + "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. " + "workspace_ref contains a workspace:// URI if the file was stored to workspace." ) ) conversation_history: str = SchemaField( @@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block): "relative_path": "index.html", "name": "index.html", "content": "Hello World", + "workspace_ref": None, } ], ), @@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block): "execute_claude_code": lambda *args, **kwargs: ( "Created index.html with hello world content", # response [ - ClaudeCodeBlock.FileOutput( + SandboxFileOutput( path="/home/user/index.html", relative_path="index.html", name="index.html", content="Hello World", + workspace_ref=None, ) ], # files "User: Create a hello world HTML file\n" @@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block): existing_sandbox_id: str, conversation_history: str, dispose_sandbox: bool, - ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]: + execution_context: "ExecutionContext", + ) -> tuple[str, list[SandboxFileOutput], str, str, str]: """ Execute Claude Code in an E2B sandbox. @@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block): else: new_conversation_history = turn_entry - # Extract files created/modified during this run - files = await self._extract_files( - sandbox, working_directory, start_timestamp + # Extract files created/modified during this run and store to workspace + sandbox_files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=True, ) return ( response, - files, + sandbox_files, # Already SandboxFileOutput objects new_conversation_history, current_session_id, sandbox_id, @@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block): if dispose_sandbox and sandbox: await sandbox.kill() - async def _extract_files( - self, - sandbox: BaseAsyncSandbox, - working_directory: str, - since_timestamp: str | None = None, - ) -> list["ClaudeCodeBlock.FileOutput"]: - """ - Extract text files created/modified during this Claude Code execution. 
- - Args: - sandbox: The E2B sandbox instance - working_directory: Directory to search for files - since_timestamp: ISO timestamp - only return files modified after this time - - Returns: - List of FileOutput objects with path, relative_path, name, and content - """ - files: list[ClaudeCodeBlock.FileOutput] = [] - - # Text file extensions we can safely read as text - text_extensions = { - ".txt", - ".md", - ".html", - ".htm", - ".css", - ".js", - ".ts", - ".jsx", - ".tsx", - ".json", - ".xml", - ".yaml", - ".yml", - ".toml", - ".ini", - ".cfg", - ".conf", - ".py", - ".rb", - ".php", - ".java", - ".c", - ".cpp", - ".h", - ".hpp", - ".cs", - ".go", - ".rs", - ".swift", - ".kt", - ".scala", - ".sh", - ".bash", - ".zsh", - ".sql", - ".graphql", - ".env", - ".gitignore", - ".dockerfile", - "Dockerfile", - ".vue", - ".svelte", - ".astro", - ".mdx", - ".rst", - ".tex", - ".csv", - ".log", - } - - try: - # List files recursively using find command - # Exclude node_modules and .git directories, but allow hidden files - # like .env and .gitignore (they're filtered by text_extensions later) - # Filter by timestamp to only get files created/modified during this run - safe_working_dir = shlex.quote(working_directory) - timestamp_filter = "" - if since_timestamp: - timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " - find_result = await sandbox.commands.run( - f"find {safe_working_dir} -type f " - f"{timestamp_filter}" - f"-not -path '*/node_modules/*' " - f"-not -path '*/.git/*' " - f"2>/dev/null" - ) - - if find_result.stdout: - for file_path in find_result.stdout.strip().split("\n"): - if not file_path: - continue - - # Check if it's a text file we can read - is_text = any( - file_path.endswith(ext) for ext in text_extensions - ) or file_path.endswith("Dockerfile") - - if is_text: - try: - content = await sandbox.files.read(file_path) - # Handle bytes or string - if isinstance(content, bytes): - content = content.decode("utf-8", errors="replace") - - # Extract filename from path - file_name = file_path.split("/")[-1] - - # Calculate relative path by stripping working directory - relative_path = file_path - if file_path.startswith(working_directory): - relative_path = file_path[len(working_directory) :] - # Remove leading slash if present - if relative_path.startswith("/"): - relative_path = relative_path[1:] - - files.append( - ClaudeCodeBlock.FileOutput( - path=file_path, - relative_path=relative_path, - name=file_name, - content=content, - ) - ) - except Exception: - # Skip files that can't be read - pass - - except Exception: - # If file extraction fails, return empty results - pass - - return files - def _escape_prompt(self, prompt: str) -> str: """Escape the prompt for safe shell execution.""" # Use single quotes and escape any single quotes in the prompt @@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block): *, e2b_credentials: APIKeyCredentials, anthropic_credentials: APIKeyCredentials, + execution_context: "ExecutionContext", **kwargs, ) -> BlockOutput: try: @@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block): existing_sandbox_id=input_data.sandbox_id, conversation_history=input_data.conversation_history, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, ) yield "response", response diff --git a/autogpt_platform/backend/backend/blocks/code_executor.py b/autogpt_platform/backend/backend/blocks/code_executor.py index be6f2bba55..26bf9acd4f 100644 --- a/autogpt_platform/backend/backend/blocks/code_executor.py +++ 
b/autogpt_platform/backend/backend/blocks/code_executor.py @@ -1,12 +1,12 @@ from enum import Enum -from typing import Any, Literal, Optional +from typing import TYPE_CHECKING, Any, Literal, Optional from e2b_code_interpreter import AsyncSandbox from e2b_code_interpreter import Result as E2BExecutionResult from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart from pydantic import BaseModel, Field, JsonValue, SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + from backend.executor.utils import ExecutionContext TEST_CREDENTIALS = APIKeyCredentials( id="01234567-89ab-cdef-0123-456789abcdef", @@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult): class BaseE2BExecutorMixin: """Shared implementation methods for E2B executor blocks.""" + # Default working directory in E2B sandboxes + WORKING_DIR = "/home/user" + async def execute_code( self, api_key: str, @@ -95,14 +105,21 @@ class BaseE2BExecutorMixin: timeout: Optional[int] = None, sandbox_id: Optional[str] = None, dispose_sandbox: bool = False, + execution_context: Optional["ExecutionContext"] = None, + extract_files: bool = False, ): """ Unified code execution method that handles all three use cases: 1. Create new sandbox and execute (ExecuteCodeBlock) 2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock) 3. Connect to existing sandbox and execute (ExecuteCodeStepBlock) + + Args: + extract_files: If True and execution_context provided, extract files + created/modified during execution and store to workspace. """ # noqa sandbox = None + files: list[SandboxFileOutput] = [] try: if sandbox_id: # Connect to existing sandbox (ExecuteCodeStepBlock case) @@ -118,6 +135,12 @@ class BaseE2BExecutorMixin: for cmd in setup_commands: await sandbox.commands.run(cmd) + # Capture timestamp before execution to scope file extraction + start_timestamp = None + if extract_files: + ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S") + start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None + # Execute the code execution = await sandbox.run_code( code, @@ -133,7 +156,24 @@ class BaseE2BExecutorMixin: stdout_logs = "".join(execution.logs.stdout) stderr_logs = "".join(execution.logs.stderr) - return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id + # Extract files created/modified during this execution + if extract_files and execution_context: + files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=self.WORKING_DIR, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=False, # Include binary files too + ) + + return ( + results, + text_output, + stdout_logs, + stderr_logs, + sandbox.sandbox_id, + files, + ) finally: # Dispose of sandbox if requested to reduce usage costs if dispose_sandbox and sandbox: @@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): description="Standard output logs from execution" ) stderr_logs: str = SchemaField(description="Standard error logs from execution") + files: list[SandboxFileOutput] = SchemaField( + description=( + "Files created or modified during execution. 
" + "Each file has path, name, content, and workspace_ref (if stored)." + ), + ) def __init__(self): super().__init__( @@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): ("results", []), ("response", "Hello World"), ("stdout_logs", "Hello World\n"), + ("files", []), ], test_mock={ - "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa + "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa [], # results "Hello World", # text_output "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # sandbox_id + [], # files ), }, ) async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: "ExecutionContext", + **kwargs, ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, files = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.code, language=input_data.language, @@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): setup_commands=input_data.setup_commands, timeout=input_data.timeout, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, + extract_files=True, ) # Determine result object shape & filter out empty formats @@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): yield "stdout_logs", stdout if stderr: yield "stderr_logs", stderr + # Always yield files (empty list if none) + yield "files", [f.model_dump() for f in files] except Exception as e: yield "error", str(e) @@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # sandbox_id + [], # files ), }, ) @@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - _, text_output, stdout, stderr, sandbox_id = await self.execute_code( + _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.setup_code, language=input_data.language, @@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs sandbox_id, # sandbox_id + [], # files ), }, ) @@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.step_code, language=input_data.language, diff --git a/autogpt_platform/backend/backend/blocks/code_extraction_block.py b/autogpt_platform/backend/backend/blocks/code_extraction_block.py index 98f40c7a8b..bde4bc9fc6 100644 --- a/autogpt_platform/backend/backend/blocks/code_extraction_block.py +++ b/autogpt_platform/backend/backend/blocks/code_extraction_block.py @@ -1,6 +1,6 @@ import re -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/codex.py b/autogpt_platform/backend/backend/blocks/codex.py index 
1b907cafce..07dffec39f 100644 --- a/autogpt_platform/backend/backend/blocks/codex.py +++ b/autogpt_platform/backend/backend/blocks/codex.py @@ -6,7 +6,7 @@ from openai import AsyncOpenAI from openai.types.responses import Response as OpenAIResponse from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/compass/triggers.py b/autogpt_platform/backend/backend/blocks/compass/triggers.py index f6ac8dfd81..2afd03852e 100644 --- a/autogpt_platform/backend/backend/blocks/compass/triggers.py +++ b/autogpt_platform/backend/backend/blocks/compass/triggers.py @@ -1,6 +1,6 @@ from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockManualWebhookConfig, diff --git a/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py b/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py index 20a5077a2d..041f1bfaa1 100644 --- a/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py +++ b/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py @@ -1,4 +1,4 @@ -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py index 1014236b8c..a8f25ecb18 100644 --- a/autogpt_platform/backend/backend/blocks/data_manipulation.py +++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py @@ -1,6 +1,6 @@ from typing import Any, List -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/decoder_block.py b/autogpt_platform/backend/backend/blocks/decoder_block.py index 7a7406bd1a..b9eb56e48f 100644 --- a/autogpt_platform/backend/backend/blocks/decoder_block.py +++ b/autogpt_platform/backend/backend/blocks/decoder_block.py @@ -1,6 +1,6 @@ import codecs -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py b/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py index 4438af1955..4ec3d0eec2 100644 --- a/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py +++ b/autogpt_platform/backend/backend/blocks/discord/bot_blocks.py @@ -8,7 +8,7 @@ from typing import Any, Literal, cast import discord from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/discord/oauth_blocks.py b/autogpt_platform/backend/backend/blocks/discord/oauth_blocks.py index ca20eb6337..74e9229776 100644 --- a/autogpt_platform/backend/backend/blocks/discord/oauth_blocks.py +++ b/autogpt_platform/backend/backend/blocks/discord/oauth_blocks.py @@ -2,7 +2,7 @@ Discord OAuth-based blocks. 
""" -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py index fad2f411cb..626bb6cdac 100644 --- a/autogpt_platform/backend/backend/blocks/email_block.py +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -7,7 +7,7 @@ from typing import Literal from pydantic import BaseModel, ConfigDict, SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/encoder_block.py b/autogpt_platform/backend/backend/blocks/encoder_block.py index b60a4ae828..bfab8f4555 100644 --- a/autogpt_platform/backend/backend/blocks/encoder_block.py +++ b/autogpt_platform/backend/backend/blocks/encoder_block.py @@ -2,7 +2,7 @@ import codecs -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py b/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py index 974ad28eed..de06230c00 100644 --- a/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py +++ b/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py @@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information. import logging from typing import Optional -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py index c2079ef159..945e53578c 100644 --- a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py +++ b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py @@ -3,6 +3,13 @@ import logging from enum import Enum from typing import Any +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.fal._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -10,13 +17,6 @@ from backend.blocks.fal._auth import ( FalCredentialsField, FalCredentialsInput, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import store_media_file diff --git a/autogpt_platform/backend/backend/blocks/flux_kontext.py b/autogpt_platform/backend/backend/blocks/flux_kontext.py index d56baa6d92..f2b35aee40 100644 --- a/autogpt_platform/backend/backend/blocks/flux_kontext.py +++ b/autogpt_platform/backend/backend/blocks/flux_kontext.py @@ -5,7 +5,7 @@ from pydantic import SecretStr from replicate.client import Client as ReplicateClient from replicate.helpers import FileOutput -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/checks.py b/autogpt_platform/backend/backend/blocks/github/checks.py index 02bc8d2400..99feefec88 100644 --- a/autogpt_platform/backend/backend/blocks/github/checks.py +++ b/autogpt_platform/backend/backend/blocks/github/checks.py @@ -3,7 +3,7 @@ from typing import Optional from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff 
--git a/autogpt_platform/backend/backend/blocks/github/ci.py b/autogpt_platform/backend/backend/blocks/github/ci.py index 8ba58e389e..c717be96e7 100644 --- a/autogpt_platform/backend/backend/blocks/github/ci.py +++ b/autogpt_platform/backend/backend/blocks/github/ci.py @@ -5,7 +5,7 @@ from typing import Optional from typing_extensions import TypedDict -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/issues.py b/autogpt_platform/backend/backend/blocks/github/issues.py index 22b4149663..7269c44f73 100644 --- a/autogpt_platform/backend/backend/blocks/github/issues.py +++ b/autogpt_platform/backend/backend/blocks/github/issues.py @@ -3,7 +3,7 @@ from urllib.parse import urlparse from typing_extensions import TypedDict -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/pull_requests.py b/autogpt_platform/backend/backend/blocks/github/pull_requests.py index 9049037716..b336c7bfa3 100644 --- a/autogpt_platform/backend/backend/blocks/github/pull_requests.py +++ b/autogpt_platform/backend/backend/blocks/github/pull_requests.py @@ -2,7 +2,7 @@ import re from typing_extensions import TypedDict -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/repo.py b/autogpt_platform/backend/backend/blocks/github/repo.py index 78ce26bfad..9b1e60b00c 100644 --- a/autogpt_platform/backend/backend/blocks/github/repo.py +++ b/autogpt_platform/backend/backend/blocks/github/repo.py @@ -2,7 +2,7 @@ import base64 from typing_extensions import TypedDict -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/reviews.py b/autogpt_platform/backend/backend/blocks/github/reviews.py index 11718d1402..932362c09a 100644 --- a/autogpt_platform/backend/backend/blocks/github/reviews.py +++ b/autogpt_platform/backend/backend/blocks/github/reviews.py @@ -4,7 +4,7 @@ from typing import Any, List, Optional from typing_extensions import TypedDict -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/statuses.py b/autogpt_platform/backend/backend/blocks/github/statuses.py index 42826a8a51..caa1282a9b 100644 --- a/autogpt_platform/backend/backend/blocks/github/statuses.py +++ b/autogpt_platform/backend/backend/blocks/github/statuses.py @@ -3,7 +3,7 @@ from typing import Optional from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/github/triggers.py b/autogpt_platform/backend/backend/blocks/github/triggers.py index 2fc568a468..e35dbb4123 100644 --- a/autogpt_platform/backend/backend/blocks/github/triggers.py +++ b/autogpt_platform/backend/backend/blocks/github/triggers.py @@ -4,7 +4,7 @@ from pathlib import Path from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/google/calendar.py b/autogpt_platform/backend/backend/blocks/google/calendar.py index 55c41f047c..b9fda2cf31 100644 --- 
a/autogpt_platform/backend/backend/blocks/google/calendar.py +++ b/autogpt_platform/backend/backend/blocks/google/calendar.py @@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials from googleapiclient.discovery import build from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/google/docs.py b/autogpt_platform/backend/backend/blocks/google/docs.py index 7840cbae73..33aab4638d 100644 --- a/autogpt_platform/backend/backend/blocks/google/docs.py +++ b/autogpt_platform/backend/backend/blocks/google/docs.py @@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials from googleapiclient.discovery import build from gravitas_md2gdocs import to_requests -from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField from backend.data.model import SchemaField from backend.util.settings import Settings diff --git a/autogpt_platform/backend/backend/blocks/google/gmail.py b/autogpt_platform/backend/backend/blocks/google/gmail.py index 2040cabe3f..2051f86b9e 100644 --- a/autogpt_platform/backend/backend/blocks/google/gmail.py +++ b/autogpt_platform/backend/backend/blocks/google/gmail.py @@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials from googleapiclient.discovery import build from pydantic import BaseModel, Field -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py index da541d3bf5..6e21008a23 100644 --- a/autogpt_platform/backend/backend/blocks/google/sheets.py +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -7,14 +7,14 @@ from enum import Enum from google.oauth2.credentials import Credentials from googleapiclient.discovery import build -from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField from backend.data.model import SchemaField from backend.util.settings import Settings diff --git a/autogpt_platform/backend/backend/blocks/google_maps.py b/autogpt_platform/backend/backend/blocks/google_maps.py index 2ee2959326..bab0841c5d 100644 --- a/autogpt_platform/backend/backend/blocks/google_maps.py +++ b/autogpt_platform/backend/backend/blocks/google_maps.py @@ -3,7 +3,7 @@ from typing import Literal import googlemaps from pydantic import BaseModel, SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/helpers/review.py b/autogpt_platform/backend/backend/blocks/helpers/review.py index 4bd85e424b..23d1af6db3 100644 --- a/autogpt_platform/backend/backend/blocks/helpers/review.py +++ b/autogpt_platform/backend/backend/blocks/helpers/review.py @@ -9,9 +9,7 @@ from typing import Any, Optional from prisma.enums import ReviewStatus from pydantic import BaseModel -from backend.data.execution import ExecutionStatus from backend.data.human_review import 
ReviewResult -from backend.executor.manager import async_update_node_execution_status from backend.util.clients import get_database_manager_async_client logger = logging.getLogger(__name__) @@ -43,6 +41,8 @@ class HITLReviewHelper: @staticmethod async def update_node_execution_status(**kwargs) -> None: """Update the execution status of a node.""" + from backend.executor.manager import async_update_node_execution_status + await async_update_node_execution_status( db_client=get_database_manager_async_client(), **kwargs ) @@ -88,12 +88,13 @@ class HITLReviewHelper: Raises: Exception: If review creation or status update fails """ + from backend.data.execution import ExecutionStatus + # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode) # are handled by the caller: # - HITL blocks check human_in_the_loop_safe_mode in their run() method # - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review() # This function only handles checking for existing approvals. - # Check if this node has already been approved (normal or auto-approval) if approval_result := await HITLReviewHelper.check_approval( node_exec_id=node_exec_id, diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 77e7fe243f..21c2964412 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -8,7 +8,7 @@ from typing import Literal import aiofiles from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/hubspot/company.py b/autogpt_platform/backend/backend/blocks/hubspot/company.py index dee9169e59..543d16db0c 100644 --- a/autogpt_platform/backend/backend/blocks/hubspot/company.py +++ b/autogpt_platform/backend/backend/blocks/hubspot/company.py @@ -1,15 +1,15 @@ -from backend.blocks.hubspot._auth import ( - HubSpotCredentials, - HubSpotCredentialsField, - HubSpotCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/hubspot/contact.py b/autogpt_platform/backend/backend/blocks/hubspot/contact.py index b4451c3b8b..1cdbf99b39 100644 --- a/autogpt_platform/backend/backend/blocks/hubspot/contact.py +++ b/autogpt_platform/backend/backend/blocks/hubspot/contact.py @@ -1,15 +1,15 @@ -from backend.blocks.hubspot._auth import ( - HubSpotCredentials, - HubSpotCredentialsField, - HubSpotCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/hubspot/engagement.py b/autogpt_platform/backend/backend/blocks/hubspot/engagement.py index 683607c5b3..9408a543b6 100644 --- a/autogpt_platform/backend/backend/blocks/hubspot/engagement.py +++ b/autogpt_platform/backend/backend/blocks/hubspot/engagement.py @@ -1,17 +1,17 @@ 
from datetime import datetime, timedelta -from backend.blocks.hubspot._auth import ( - HubSpotCredentials, - HubSpotCredentialsField, - HubSpotCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 568ac4b33f..69c52081d8 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -3,8 +3,7 @@ from typing import Any from prisma.enums import ReviewStatus -from backend.blocks.helpers.review import HITLReviewHelper -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, @@ -12,6 +11,7 @@ from backend.data.block import ( BlockSchemaOutput, BlockType, ) +from backend.blocks.helpers.review import HITLReviewHelper from backend.data.execution import ExecutionContext from backend.data.human_review import ReviewResult from backend.data.model import SchemaField @@ -21,43 +21,71 @@ logger = logging.getLogger(__name__) class HumanInTheLoopBlock(Block): """ - This block pauses execution and waits for human approval or modification of the data. + Pauses execution and waits for human approval or rejection of the data. - When executed, it creates a pending review entry and sets the node execution status - to REVIEW. The execution will remain paused until a human user either: - - Approves the data (with or without modifications) - - Rejects the data + When executed, this block creates a pending review entry and sets the node execution + status to REVIEW. The execution remains paused until a human user either approves + or rejects the data. - This is useful for workflows that require human validation or intervention before - proceeding to the next steps. + **How it works:** + - The input data is presented to a human reviewer + - The reviewer can approve or reject (and optionally modify the data if editable) + - On approval: the data flows out through the `approved_data` output pin + - On rejection: the data flows out through the `rejected_data` output pin + + **Important:** The output pins yield the actual data itself, NOT status strings. + The approval/rejection decision determines WHICH output pin fires, not the value. + You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect + downstream blocks to the appropriate output pin for each case. + + **Example usage:** + - Connect `approved_data` → next step in your workflow (data was approved) + - Connect `rejected_data` → error handling or notification (data was rejected) """ class Input(BlockSchemaInput): - data: Any = SchemaField(description="The data to be reviewed by a human user") + data: Any = SchemaField( + description="The data to be reviewed by a human user. " + "This exact data will be passed through to either approved_data or " + "rejected_data output based on the reviewer's decision." + ) name: str = SchemaField( - description="A descriptive name for what this data represents", + description="A descriptive name for what this data represents. 
" + "This helps the reviewer understand what they are reviewing.", ) editable: bool = SchemaField( - description="Whether the human reviewer can edit the data", + description="Whether the human reviewer can edit the data before " + "approving or rejecting it", default=True, advanced=True, ) class Output(BlockSchemaOutput): approved_data: Any = SchemaField( - description="The data when approved (may be modified by reviewer)" + description="Outputs the input data when the reviewer APPROVES it. " + "The value is the actual data itself (not a status string like 'APPROVED'). " + "If the reviewer edited the data, this contains the modified version. " + "Connect downstream blocks here for the 'approved' workflow path." ) rejected_data: Any = SchemaField( - description="The data when rejected (may be modified by reviewer)" + description="Outputs the input data when the reviewer REJECTS it. " + "The value is the actual data itself (not a status string like 'REJECTED'). " + "If the reviewer edited the data, this contains the modified version. " + "Connect downstream blocks here for the 'rejected' workflow path." ) review_message: str = SchemaField( - description="Any message provided by the reviewer", default="" + description="Optional message provided by the reviewer explaining their " + "decision. Only outputs when the reviewer provides a message; " + "this pin does not fire if no message was given.", + default="", ) def __init__(self): super().__init__( id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d", - description="Pause execution and wait for human approval or modification of data", + description="Pause execution for human review. Data flows through " + "approved_data or rejected_data output based on the reviewer's decision. " + "Outputs contain the actual data, not status strings.", categories={BlockCategory.BASIC}, input_schema=HumanInTheLoopBlock.Input, output_schema=HumanInTheLoopBlock.Output, diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py index 09a384c74a..5aed4aa5a9 100644 --- a/autogpt_platform/backend/backend/blocks/ideogram.py +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/io.py b/autogpt_platform/backend/backend/blocks/io.py index a9c3859490..94542790ef 100644 --- a/autogpt_platform/backend/backend/blocks/io.py +++ b/autogpt_platform/backend/backend/blocks/io.py @@ -2,9 +2,7 @@ import copy from datetime import date, time from typing import Any, Optional -# Import for Google Drive file input block -from backend.blocks.google._drive import AttachmentView, GoogleDriveFile -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, @@ -12,6 +10,9 @@ from backend.data.block import ( BlockSchemaInput, BlockType, ) + +# Import for Google Drive file input block +from backend.blocks.google._drive import AttachmentView, GoogleDriveFile from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import store_media_file diff --git a/autogpt_platform/backend/backend/blocks/iteration.py b/autogpt_platform/backend/backend/blocks/iteration.py index 441f73fc4a..a35bcac9c1 100644 --- a/autogpt_platform/backend/backend/blocks/iteration.py +++ 
b/autogpt_platform/backend/backend/blocks/iteration.py @@ -1,6 +1,6 @@ from typing import Any -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/jina/chunking.py b/autogpt_platform/backend/backend/blocks/jina/chunking.py index 9a9b242aae..c248e3dd24 100644 --- a/autogpt_platform/backend/backend/blocks/jina/chunking.py +++ b/autogpt_platform/backend/backend/blocks/jina/chunking.py @@ -1,15 +1,15 @@ -from backend.blocks.jina._auth import ( - JinaCredentials, - JinaCredentialsField, - JinaCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/jina/embeddings.py b/autogpt_platform/backend/backend/blocks/jina/embeddings.py index 0f6cf68c6c..f787de03b3 100644 --- a/autogpt_platform/backend/backend/blocks/jina/embeddings.py +++ b/autogpt_platform/backend/backend/blocks/jina/embeddings.py @@ -1,15 +1,15 @@ -from backend.blocks.jina._auth import ( - JinaCredentials, - JinaCredentialsField, - JinaCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/jina/fact_checker.py b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py index 3367ab99e6..df73ef94b1 100644 --- a/autogpt_platform/backend/backend/blocks/jina/fact_checker.py +++ b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py @@ -3,18 +3,18 @@ from urllib.parse import quote from typing_extensions import TypedDict -from backend.blocks.jina._auth import ( - JinaCredentials, - JinaCredentialsField, - JinaCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests diff --git a/autogpt_platform/backend/backend/blocks/jina/search.py b/autogpt_platform/backend/backend/blocks/jina/search.py index 05cddcc1df..22a883fa03 100644 --- a/autogpt_platform/backend/backend/blocks/jina/search.py +++ b/autogpt_platform/backend/backend/blocks/jina/search.py @@ -1,5 +1,12 @@ from urllib.parse import quote +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.jina._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -8,13 +15,6 @@ from backend.blocks.jina._auth import ( JinaCredentialsInput, ) from backend.blocks.search import GetRequest -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField from backend.util.exceptions import BlockExecutionError diff --git a/autogpt_platform/backend/backend/blocks/llm.py 
b/autogpt_platform/backend/backend/blocks/llm.py index 7a020593d7..1272a9ec1b 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -15,7 +15,7 @@ from anthropic.types import ToolParam from groq import AsyncGroq from pydantic import BaseModel, SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/maths.py b/autogpt_platform/backend/backend/blocks/maths.py index ad6dc67bbe..0f94075277 100644 --- a/autogpt_platform/backend/backend/blocks/maths.py +++ b/autogpt_platform/backend/backend/blocks/maths.py @@ -2,7 +2,7 @@ import operator from enum import Enum from typing import Any -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py index d54062d3ab..f511f19329 100644 --- a/autogpt_platform/backend/backend/blocks/medium.py +++ b/autogpt_platform/backend/backend/blocks/medium.py @@ -3,7 +3,7 @@ from typing import List, Literal from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/mem0.py b/autogpt_platform/backend/backend/blocks/mem0.py index b8dc11064a..ba0bd24290 100644 --- a/autogpt_platform/backend/backend/blocks/mem0.py +++ b/autogpt_platform/backend/backend/blocks/mem0.py @@ -3,7 +3,7 @@ from typing import Any, Literal, Optional, Union from mem0 import MemoryClient from pydantic import BaseModel, SecretStr -from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.model import ( APIKeyCredentials, CredentialsField, diff --git a/autogpt_platform/backend/backend/blocks/notion/create_page.py b/autogpt_platform/backend/backend/blocks/notion/create_page.py index 5edef144e3..315730d37c 100644 --- a/autogpt_platform/backend/backend/blocks/notion/create_page.py +++ b/autogpt_platform/backend/backend/blocks/notion/create_page.py @@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional from pydantic import model_validator -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/notion/read_database.py b/autogpt_platform/backend/backend/blocks/notion/read_database.py index 5720bea2f8..7b1dcf7be4 100644 --- a/autogpt_platform/backend/backend/blocks/notion/read_database.py +++ b/autogpt_platform/backend/backend/blocks/notion/read_database.py @@ -2,7 +2,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/notion/read_page.py b/autogpt_platform/backend/backend/blocks/notion/read_page.py index 400fd2a929..a2b5273ad9 100644 --- a/autogpt_platform/backend/backend/blocks/notion/read_page.py +++ b/autogpt_platform/backend/backend/blocks/notion/read_page.py @@ -1,6 +1,6 @@ from __future__ import annotations -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/notion/read_page_markdown.py 
b/autogpt_platform/backend/backend/blocks/notion/read_page_markdown.py index 7ed87eaef9..cad3e85e79 100644 --- a/autogpt_platform/backend/backend/blocks/notion/read_page_markdown.py +++ b/autogpt_platform/backend/backend/blocks/notion/read_page_markdown.py @@ -1,6 +1,6 @@ from __future__ import annotations -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/notion/search.py b/autogpt_platform/backend/backend/blocks/notion/search.py index 1983763537..71af844b64 100644 --- a/autogpt_platform/backend/backend/blocks/notion/search.py +++ b/autogpt_platform/backend/backend/blocks/notion/search.py @@ -4,7 +4,7 @@ from typing import List, Optional from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py b/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py index f60b649839..06b05ebc50 100644 --- a/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py +++ b/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py @@ -1,15 +1,15 @@ -from backend.blocks.nvidia._auth import ( - NvidiaCredentials, - NvidiaCredentialsField, - NvidiaCredentialsInput, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.nvidia._auth import ( + NvidiaCredentials, + NvidiaCredentialsField, + NvidiaCredentialsInput, +) from backend.data.model import SchemaField from backend.util.request import Requests from backend.util.type import MediaFileType diff --git a/autogpt_platform/backend/backend/blocks/perplexity.py b/autogpt_platform/backend/backend/blocks/perplexity.py index e2796718a9..270081a3a8 100644 --- a/autogpt_platform/backend/backend/blocks/perplexity.py +++ b/autogpt_platform/backend/backend/blocks/perplexity.py @@ -6,7 +6,7 @@ from typing import Any, Literal import openai from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/persistence.py b/autogpt_platform/backend/backend/blocks/persistence.py index a327fd22c7..7584993beb 100644 --- a/autogpt_platform/backend/backend/blocks/persistence.py +++ b/autogpt_platform/backend/backend/blocks/persistence.py @@ -1,7 +1,7 @@ import logging from typing import Any, Literal -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/pinecone.py b/autogpt_platform/backend/backend/blocks/pinecone.py index 878f6f72fb..f882212ab2 100644 --- a/autogpt_platform/backend/backend/blocks/pinecone.py +++ b/autogpt_platform/backend/backend/blocks/pinecone.py @@ -3,7 +3,7 @@ from typing import Any, Literal from pinecone import Pinecone, ServerlessSpec -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py index 1109d568db..6544c698a3 100644 --- a/autogpt_platform/backend/backend/blocks/reddit.py +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -6,7 +6,7 @@ import praw from praw.models import Comment, MoreComments, Submission from pydantic import BaseModel, SecretStr -from backend.data.block import ( +from 
backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py b/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py index c112ce75c4..e7a0a82cce 100644 --- a/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py +++ b/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py @@ -4,19 +4,19 @@ from enum import Enum from pydantic import SecretStr from replicate.client import Client as ReplicateClient -from backend.blocks.replicate._auth import ( - TEST_CREDENTIALS, - TEST_CREDENTIALS_INPUT, - ReplicateCredentialsInput, -) -from backend.blocks.replicate._helper import ReplicateOutputs, extract_result -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.replicate._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + ReplicateCredentialsInput, +) +from backend.blocks.replicate._helper import ReplicateOutputs, extract_result from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/replicate/replicate_block.py b/autogpt_platform/backend/backend/blocks/replicate/replicate_block.py index 7ee054d02e..2758c7cd06 100644 --- a/autogpt_platform/backend/backend/blocks/replicate/replicate_block.py +++ b/autogpt_platform/backend/backend/blocks/replicate/replicate_block.py @@ -4,19 +4,19 @@ from typing import Optional from pydantic import SecretStr from replicate.client import Client as ReplicateClient -from backend.blocks.replicate._auth import ( - TEST_CREDENTIALS, - TEST_CREDENTIALS_INPUT, - ReplicateCredentialsInput, -) -from backend.blocks.replicate._helper import ReplicateOutputs, extract_result -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.replicate._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + ReplicateCredentialsInput, +) +from backend.blocks.replicate._helper import ReplicateOutputs, extract_result from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField from backend.util.exceptions import BlockExecutionError, BlockInputError diff --git a/autogpt_platform/backend/backend/blocks/rss.py b/autogpt_platform/backend/backend/blocks/rss.py index a23b3ee25c..5d26bc592c 100644 --- a/autogpt_platform/backend/backend/blocks/rss.py +++ b/autogpt_platform/backend/backend/blocks/rss.py @@ -6,7 +6,7 @@ from typing import Any import feedparser import pydantic -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/sampling.py b/autogpt_platform/backend/backend/blocks/sampling.py index b4463947a7..eb5f47e80e 100644 --- a/autogpt_platform/backend/backend/blocks/sampling.py +++ b/autogpt_platform/backend/backend/blocks/sampling.py @@ -3,7 +3,7 @@ from collections import defaultdict from enum import Enum from typing import Any, Dict, List, Optional, Union -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/screenshotone.py b/autogpt_platform/backend/backend/blocks/screenshotone.py index ee998f8da2..1ce133af83 100644 --- a/autogpt_platform/backend/backend/blocks/screenshotone.py +++ b/autogpt_platform/backend/backend/blocks/screenshotone.py @@ 
-4,7 +4,7 @@ from typing import Literal from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py index 09e16034a3..61acb2108e 100644 --- a/autogpt_platform/backend/backend/blocks/search.py +++ b/autogpt_platform/backend/backend/blocks/search.py @@ -3,14 +3,14 @@ from urllib.parse import quote from pydantic import SecretStr -from backend.blocks.helpers.http import GetRequest -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.helpers.http import GetRequest from backend.data.model import ( APIKeyCredentials, CredentialsField, diff --git a/autogpt_platform/backend/backend/blocks/slant3d/base.py b/autogpt_platform/backend/backend/blocks/slant3d/base.py index e368a1b451..3ce24f8ddc 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/base.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/base.py @@ -1,6 +1,6 @@ from typing import Any, Dict -from backend.data.block import Block +from backend.blocks._base import Block from backend.util.request import Requests from ._api import Color, CustomerDetails, OrderItem, Profile diff --git a/autogpt_platform/backend/backend/blocks/slant3d/filament.py b/autogpt_platform/backend/backend/blocks/slant3d/filament.py index f2b9eae38d..723ebff59e 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/filament.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/filament.py @@ -1,6 +1,6 @@ from typing import List -from backend.data.block import BlockOutput, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.model import APIKeyCredentials, SchemaField from ._api import ( diff --git a/autogpt_platform/backend/backend/blocks/slant3d/order.py b/autogpt_platform/backend/backend/blocks/slant3d/order.py index 4ece3fc51e..36d2705ea5 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/order.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/order.py @@ -1,7 +1,7 @@ import uuid from typing import List -from backend.data.block import BlockOutput, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.model import APIKeyCredentials, SchemaField from backend.util.settings import BehaveAs, Settings diff --git a/autogpt_platform/backend/backend/blocks/slant3d/slicing.py b/autogpt_platform/backend/backend/blocks/slant3d/slicing.py index 1952b162d2..8740f9504f 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/slicing.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/slicing.py @@ -1,4 +1,4 @@ -from backend.data.block import BlockOutput, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.model import APIKeyCredentials, SchemaField from ._api import ( diff --git a/autogpt_platform/backend/backend/blocks/slant3d/webhook.py b/autogpt_platform/backend/backend/blocks/slant3d/webhook.py index e5a2d72568..f2cb86ec09 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/webhook.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/webhook.py @@ -1,6 +1,6 @@ from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, 
BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py index ff6042eaab..5e6b11eebd 100644 --- a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py @@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, Any from pydantic import BaseModel import backend.blocks.llm as llm -from backend.blocks.agent import AgentExecutorBlock -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockInput, @@ -17,6 +16,7 @@ from backend.data.block import ( BlockSchemaOutput, BlockType, ) +from backend.blocks.agent import AgentExecutorBlock from backend.data.dynamic_fields import ( extract_base_field_name, get_dynamic_field_description, diff --git a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py index c3bf930068..302a38f4db 100644 --- a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py +++ b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py @@ -1,3 +1,10 @@ +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.smartlead._api import SmartLeadClient from backend.blocks.smartlead._auth import ( TEST_CREDENTIALS, @@ -16,13 +23,6 @@ from backend.blocks.smartlead.models import ( SaveSequencesResponse, Sequence, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/blocks/spreadsheet.py b/autogpt_platform/backend/backend/blocks/spreadsheet.py index a13f9e2f6d..2bbfd6776f 100644 --- a/autogpt_platform/backend/backend/blocks/spreadsheet.py +++ b/autogpt_platform/backend/backend/blocks/spreadsheet.py @@ -1,6 +1,6 @@ from pathlib import Path -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/system/library_operations.py b/autogpt_platform/backend/backend/blocks/system/library_operations.py index 116da64599..b2433ce220 100644 --- a/autogpt_platform/backend/backend/blocks/system/library_operations.py +++ b/autogpt_platform/backend/backend/blocks/system/library_operations.py @@ -3,7 +3,7 @@ from typing import Any from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/system/store_operations.py b/autogpt_platform/backend/backend/blocks/system/store_operations.py index e9b7a01ebe..88958a5707 100644 --- a/autogpt_platform/backend/backend/blocks/system/store_operations.py +++ b/autogpt_platform/backend/backend/blocks/system/store_operations.py @@ -3,7 +3,7 @@ from typing import Literal from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py index e01e3d4023..f199d030ff 100644 --- a/autogpt_platform/backend/backend/blocks/talking_head.py +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -3,7 +3,7 @@ from typing import Literal from pydantic import SecretStr -from backend.data.block import ( 
+from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/test/test_block.py b/autogpt_platform/backend/backend/blocks/test/test_block.py index 7a1fdbcc73..c7f3ca62f2 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_block.py +++ b/autogpt_platform/backend/backend/blocks/test/test_block.py @@ -2,7 +2,8 @@ from typing import Any, Type import pytest -from backend.data.block import Block, BlockSchemaInput, get_blocks +from backend.blocks import get_blocks +from backend.blocks._base import Block, BlockSchemaInput from backend.data.model import SchemaField from backend.util.test import execute_block_test diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py index 359e22a84f..4276ff3a45 100644 --- a/autogpt_platform/backend/backend/blocks/text.py +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -4,7 +4,7 @@ from typing import Any import regex # Has built-in timeout support -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/text_to_speech_block.py b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py index 8fe9e1cda7..a408c8772f 100644 --- a/autogpt_platform/backend/backend/blocks/text_to_speech_block.py +++ b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py @@ -2,7 +2,7 @@ from typing import Any, Literal from pydantic import SecretStr -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py index 3a1f4c678e..5ee13db30b 100644 --- a/autogpt_platform/backend/backend/blocks/time_blocks.py +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -7,7 +7,7 @@ from zoneinfo import ZoneInfo from pydantic import BaseModel -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/todoist/comments.py b/autogpt_platform/backend/backend/blocks/todoist/comments.py index f11534cbe3..dc8eef3919 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/comments.py +++ b/autogpt_platform/backend/backend/blocks/todoist/comments.py @@ -4,6 +4,13 @@ from pydantic import BaseModel from todoist_api_python.api import TodoistAPI from typing_extensions import Optional +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.todoist._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -12,13 +19,6 @@ from backend.blocks.todoist._auth import ( TodoistCredentialsField, TodoistCredentialsInput, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/todoist/labels.py b/autogpt_platform/backend/backend/blocks/todoist/labels.py index 8107459567..0b0f26cc77 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/labels.py +++ b/autogpt_platform/backend/backend/blocks/todoist/labels.py @@ -1,6 +1,13 @@ from todoist_api_python.api import TodoistAPI from typing_extensions import Optional +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from 
backend.blocks.todoist._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -10,13 +17,6 @@ from backend.blocks.todoist._auth import ( TodoistCredentialsInput, ) from backend.blocks.todoist._types import Colors -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/todoist/projects.py b/autogpt_platform/backend/backend/blocks/todoist/projects.py index c6d345c116..a35bd3d41e 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/projects.py +++ b/autogpt_platform/backend/backend/blocks/todoist/projects.py @@ -1,6 +1,13 @@ from todoist_api_python.api import TodoistAPI from typing_extensions import Optional +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.todoist._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -10,13 +17,6 @@ from backend.blocks.todoist._auth import ( TodoistCredentialsInput, ) from backend.blocks.todoist._types import Colors -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/todoist/sections.py b/autogpt_platform/backend/backend/blocks/todoist/sections.py index 52dceb70b9..23cabdb661 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/sections.py +++ b/autogpt_platform/backend/backend/blocks/todoist/sections.py @@ -1,6 +1,13 @@ from todoist_api_python.api import TodoistAPI from typing_extensions import Optional +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.todoist._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -9,13 +16,6 @@ from backend.blocks.todoist._auth import ( TodoistCredentialsField, TodoistCredentialsInput, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/todoist/tasks.py b/autogpt_platform/backend/backend/blocks/todoist/tasks.py index 183a3340b3..6aaf766114 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/tasks.py +++ b/autogpt_platform/backend/backend/blocks/todoist/tasks.py @@ -4,6 +4,13 @@ from todoist_api_python.api import TodoistAPI from todoist_api_python.models import Task from typing_extensions import Optional +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.todoist._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -12,13 +19,6 @@ from backend.blocks.todoist._auth import ( TodoistCredentialsField, TodoistCredentialsInput, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/_types.py b/autogpt_platform/backend/backend/blocks/twitter/_types.py index 88050ed545..ead54677be 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/_types.py +++ b/autogpt_platform/backend/backend/blocks/twitter/_types.py @@ -3,7 +3,7 @@ from enum import Enum from pydantic import BaseModel -from backend.data.block import BlockSchemaInput +from backend.blocks._base import 
BlockSchemaInput from backend.data.model import SchemaField # -------------- Tweets ----------------- diff --git a/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py index 0ce8e08535..f4b07ca53e 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py @@ -4,8 +4,8 @@ # import tweepy # from tweepy.client import Response +# from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchema, BlockSchemaInput, BlockSchemaOutput # from backend.blocks.twitter._serializer import IncludesSerializer, ResponseDataSerializer -# from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockSchemaInput, BlockSchemaOutput # from backend.data.model import SchemaField # from backend.blocks.twitter._builders import DMExpansionsBuilder # from backend.blocks.twitter._types import DMEventExpansion, DMEventExpansionInputs, DMEventType, DMMediaField, DMTweetField, TweetUserFields diff --git a/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py b/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py index cbbe019f37..0104e3e9c5 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py +++ b/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py @@ -5,7 +5,7 @@ # import tweepy # from tweepy.client import Response -# from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockSchemaInput, BlockSchemaOutput +# from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchema, BlockSchemaInput, BlockSchemaOutput # from backend.data.model import SchemaField # from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception # from backend.blocks.twitter._auth import ( diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py index 5616e0ce14..93dfaef919 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py @@ -1,6 +1,13 @@ # from typing import cast import tweepy +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -13,13 +20,6 @@ from backend.blocks.twitter._auth import ( # from backend.blocks.twitter._builders import UserExpansionsBuilder # from backend.blocks.twitter._types import TweetFields, TweetUserFields, UserExpansionInputs, UserExpansions from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField # from tweepy.client import Response diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py index 6b46f00a37..a6a5607196 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py @@ -3,6 +3,7 @@ from typing 
import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -23,7 +24,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py index 32ffb9e5b6..5505f1457a 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -29,13 +36,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py index e43980683e..57dc6579c9 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py @@ -3,6 +3,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -26,7 +27,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py index 4092fbaa93..9bab05e98b 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -12,13 +19,6 @@ from backend.blocks.twitter._auth import ( TwitterCredentialsInput, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py b/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py index 
7bc5bb543f..0ebe9503b0 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -23,13 +30,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py b/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py index bd013cecc1..a38dc5452e 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py +++ b/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py @@ -3,6 +3,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -24,7 +25,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py index 2c99d3ba3a..c31f0efd38 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py @@ -4,6 +4,7 @@ import tweepy from pydantic import BaseModel from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -36,7 +37,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py index b69002837e..9d8bfccad9 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -26,13 +33,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model 
import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py index f9992ea7c0..72ed2096a7 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py @@ -1,5 +1,12 @@ import tweepy +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -9,13 +16,6 @@ from backend.blocks.twitter._auth import ( TwitterCredentialsInput, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py index 2d499257a9..c2a920276c 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -31,13 +38,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py index 875e22738b..68e379b895 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py @@ -5,6 +5,13 @@ import tweepy from pydantic import BaseModel from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -35,13 +42,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py index fc6c336e20..be8d5b3125 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py @@ -3,6 +3,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -27,7 +28,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from 
backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py index 1f65f90ea3..606e3b8a74 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -23,13 +30,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py index 9f07beba66..347ff5aee1 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py @@ -4,6 +4,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -31,7 +32,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py index 540aa1395f..f452848288 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py @@ -3,6 +3,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -26,7 +27,6 @@ from backend.blocks.twitter._types import ( TweetUserFieldsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py b/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py index 1c192aa6b5..12df24cfe2 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py +++ b/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py @@ -3,6 +3,7 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -20,7 +21,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from 
backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/follows.py b/autogpt_platform/backend/backend/blocks/twitter/users/follows.py index 537aea6031..20276b19b4 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/users/follows.py +++ b/autogpt_platform/backend/backend/blocks/twitter/users/follows.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -23,13 +30,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py b/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py index e22aec94dc..31927e2b71 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py +++ b/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py @@ -3,6 +3,13 @@ from typing import cast import tweepy from tweepy.client import Response +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -23,13 +30,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py index 67c7d14c9b..8d01876955 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py @@ -4,6 +4,7 @@ import tweepy from pydantic import BaseModel from tweepy.client import Response +from backend.blocks._base import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.blocks.twitter._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -24,7 +25,6 @@ from backend.blocks.twitter._types import ( UserExpansionsFilter, ) from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception -from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/video/add_audio.py b/autogpt_platform/backend/backend/blocks/video/add_audio.py index ebd4ab94f2..f91a82a758 100644 --- a/autogpt_platform/backend/backend/blocks/video/add_audio.py +++ b/autogpt_platform/backend/backend/blocks/video/add_audio.py @@ -3,14 +3,14 @@ from moviepy.audio.io.AudioFileClip import AudioFileClip from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace -from backend.data.block import ( +from 
backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import MediaFileType, get_exec_file_path, store_media_file diff --git a/autogpt_platform/backend/backend/blocks/video/clip.py b/autogpt_platform/backend/backend/blocks/video/clip.py index 05deea6530..990a8b2f31 100644 --- a/autogpt_platform/backend/backend/blocks/video/clip.py +++ b/autogpt_platform/backend/backend/blocks/video/clip.py @@ -4,18 +4,18 @@ from typing import Literal from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import ( - extract_source_name, - get_video_codecs, - strip_chapters_inplace, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import ( + extract_source_name, + get_video_codecs, + strip_chapters_inplace, +) from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.exceptions import BlockExecutionError diff --git a/autogpt_platform/backend/backend/blocks/video/concat.py b/autogpt_platform/backend/backend/blocks/video/concat.py index b49854fb40..3bf2b5142b 100644 --- a/autogpt_platform/backend/backend/blocks/video/concat.py +++ b/autogpt_platform/backend/backend/blocks/video/concat.py @@ -6,18 +6,18 @@ from moviepy import concatenate_videoclips from moviepy.video.fx import CrossFadeIn, CrossFadeOut, FadeIn, FadeOut from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import ( - extract_source_name, - get_video_codecs, - strip_chapters_inplace, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import ( + extract_source_name, + get_video_codecs, + strip_chapters_inplace, +) from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.exceptions import BlockExecutionError diff --git a/autogpt_platform/backend/backend/blocks/video/download.py b/autogpt_platform/backend/backend/blocks/video/download.py index 4046d5df42..c6d2617f73 100644 --- a/autogpt_platform/backend/backend/blocks/video/download.py +++ b/autogpt_platform/backend/backend/blocks/video/download.py @@ -9,7 +9,7 @@ import yt_dlp if typing.TYPE_CHECKING: from yt_dlp import _Params -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/video/duration.py b/autogpt_platform/backend/backend/blocks/video/duration.py index 9e05d35b00..ff904ad650 100644 --- a/autogpt_platform/backend/backend/blocks/video/duration.py +++ b/autogpt_platform/backend/backend/blocks/video/duration.py @@ -3,14 +3,14 @@ from moviepy.audio.io.AudioFileClip import AudioFileClip from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import strip_chapters_inplace -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import strip_chapters_inplace from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from 
backend.util.file import MediaFileType, get_exec_file_path, store_media_file diff --git a/autogpt_platform/backend/backend/blocks/video/loop.py b/autogpt_platform/backend/backend/blocks/video/loop.py index 461610f713..0cb360a5b2 100644 --- a/autogpt_platform/backend/backend/blocks/video/loop.py +++ b/autogpt_platform/backend/backend/blocks/video/loop.py @@ -5,14 +5,14 @@ from typing import Optional from moviepy.video.fx.Loop import Loop from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.file import MediaFileType, get_exec_file_path, store_media_file diff --git a/autogpt_platform/backend/backend/blocks/video/narration.py b/autogpt_platform/backend/backend/blocks/video/narration.py index adf41753c8..39b9c481b0 100644 --- a/autogpt_platform/backend/backend/blocks/video/narration.py +++ b/autogpt_platform/backend/backend/blocks/video/narration.py @@ -8,6 +8,13 @@ from moviepy import CompositeAudioClip from moviepy.audio.io.AudioFileClip import AudioFileClip from moviepy.video.io.VideoFileClip import VideoFileClip +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.elevenlabs._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, @@ -19,13 +26,6 @@ from backend.blocks.video._utils import ( get_video_codecs, strip_chapters_inplace, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.execution import ExecutionContext from backend.data.model import CredentialsField, SchemaField from backend.util.exceptions import BlockExecutionError diff --git a/autogpt_platform/backend/backend/blocks/video/text_overlay.py b/autogpt_platform/backend/backend/blocks/video/text_overlay.py index cb7cfe0420..86dd30318c 100644 --- a/autogpt_platform/backend/backend/blocks/video/text_overlay.py +++ b/autogpt_platform/backend/backend/blocks/video/text_overlay.py @@ -5,18 +5,18 @@ from typing import Literal from moviepy import CompositeVideoClip, TextClip from moviepy.video.io.VideoFileClip import VideoFileClip -from backend.blocks.video._utils import ( - extract_source_name, - get_video_codecs, - strip_chapters_inplace, -) -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, BlockSchemaInput, BlockSchemaOutput, ) +from backend.blocks.video._utils import ( + extract_source_name, + get_video_codecs, + strip_chapters_inplace, +) from backend.data.execution import ExecutionContext from backend.data.model import SchemaField from backend.util.exceptions import BlockExecutionError diff --git a/autogpt_platform/backend/backend/blocks/xml_parser.py b/autogpt_platform/backend/backend/blocks/xml_parser.py index 223f8ea367..a1274fa562 100644 --- a/autogpt_platform/backend/backend/blocks/xml_parser.py +++ b/autogpt_platform/backend/backend/blocks/xml_parser.py @@ -1,7 +1,7 @@ from gravitasml.parser import Parser from gravitasml.token import Token, tokenize -from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput +from backend.blocks._base import Block, 
BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.model import SchemaField diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index 6d81a86b4c..6ce705e4f5 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -9,7 +9,7 @@ from youtube_transcript_api._transcripts import FetchedTranscript from youtube_transcript_api.formatters import TextFormatter from youtube_transcript_api.proxies import WebshareProxyConfig -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockOutput, diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py index fa5283f324..6a461b4aa8 100644 --- a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py +++ b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py @@ -7,6 +7,13 @@ from zerobouncesdk.zb_validate_response import ( ZBValidateSubStatus, ) +from backend.blocks._base import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) from backend.blocks.zerobounce._api import ZeroBounceClient from backend.blocks.zerobounce._auth import ( TEST_CREDENTIALS, @@ -14,13 +21,6 @@ from backend.blocks.zerobounce._auth import ( ZeroBounceCredentials, ZeroBounceCredentialsInput, ) -from backend.data.block import ( - Block, - BlockCategory, - BlockOutput, - BlockSchemaInput, - BlockSchemaOutput, -) from backend.data.model import CredentialsField, SchemaField diff --git a/autogpt_platform/backend/backend/data/__init__.py b/autogpt_platform/backend/backend/data/__init__.py index c98667e362..8b13789179 100644 --- a/autogpt_platform/backend/backend/data/__init__.py +++ b/autogpt_platform/backend/backend/data/__init__.py @@ -1,8 +1 @@ -from backend.api.features.library.model import LibraryAgentPreset -from .graph import NodeModel -from .integrations import Webhook # noqa: F401 - -# Resolve Webhook forward references -NodeModel.model_rebuild() -LibraryAgentPreset.model_rebuild() diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index c3572f035e..a958011bc0 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -1,889 +1,32 @@ -import inspect import logging -import os -from abc import ABC, abstractmethod -from collections.abc import AsyncGenerator as AsyncGen -from enum import Enum -from typing import ( - TYPE_CHECKING, - Any, - Callable, - ClassVar, - Generic, - Optional, - Sequence, - Type, - TypeAlias, - TypeVar, - cast, - get_origin, -) +from typing import TYPE_CHECKING, Any, AsyncGenerator -import jsonref -import jsonschema from prisma.models import AgentBlock from prisma.types import AgentBlockCreateInput -from pydantic import BaseModel -from backend.data.model import NodeExecutionStats -from backend.integrations.providers import ProviderName from backend.util import json -from backend.util.cache import cached -from backend.util.exceptions import ( - BlockError, - BlockExecutionError, - BlockInputError, - BlockOutputError, - BlockUnknownError, -) -from backend.util.settings import Config -from .model import ( - ContributorDetails, - Credentials, - CredentialsFieldInfo, - CredentialsMetaInput, - SchemaField, - is_credentials_field_name, -) +if TYPE_CHECKING: + from backend.blocks._base import AnyBlockSchema logger = 
logging.getLogger(__name__) -if TYPE_CHECKING: - from backend.data.execution import ExecutionContext - from .graph import Link - -app_config = Config() - -BlockInput = dict[str, Any] # Input: 1 input pin consumes 1 data. +BlockInput = dict[str, Any] # Input: 1 input pin <- 1 data. BlockOutputEntry = tuple[str, Any] # Output data should be a tuple of (name, value). -BlockOutput = AsyncGen[BlockOutputEntry, None] # Output: 1 output pin produces n data. -BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]] +BlockOutput = AsyncGenerator[BlockOutputEntry, None] # Output: 1 output pin -> N data. CompletedBlockOutput = dict[str, list[Any]] # Completed stream, collected as a dict. -class BlockType(Enum): - STANDARD = "Standard" - INPUT = "Input" - OUTPUT = "Output" - NOTE = "Note" - WEBHOOK = "Webhook" - WEBHOOK_MANUAL = "Webhook (manual)" - AGENT = "Agent" - AI = "AI" - AYRSHARE = "Ayrshare" - HUMAN_IN_THE_LOOP = "Human In The Loop" - - -class BlockCategory(Enum): - AI = "Block that leverages AI to perform a task." - SOCIAL = "Block that interacts with social media platforms." - TEXT = "Block that processes text data." - SEARCH = "Block that searches or extracts information from the internet." - BASIC = "Block that performs basic operations." - INPUT = "Block that interacts with input of the graph." - OUTPUT = "Block that interacts with output of the graph." - LOGIC = "Programming logic to control the flow of your agent" - COMMUNICATION = "Block that interacts with communication platforms." - DEVELOPER_TOOLS = "Developer tools such as GitHub blocks." - DATA = "Block that interacts with structured data." - HARDWARE = "Block that interacts with hardware." - AGENT = "Block that interacts with other agents." - CRM = "Block that interacts with CRM services." - SAFETY = ( - "Block that provides AI safety mechanisms such as detecting harmful content" - ) - PRODUCTIVITY = "Block that helps with productivity" - ISSUE_TRACKING = "Block that helps with issue tracking" - MULTIMEDIA = "Block that interacts with multimedia content" - MARKETING = "Block that helps with marketing" - - def dict(self) -> dict[str, str]: - return {"category": self.name, "description": self.value} - - -class BlockCostType(str, Enum): - RUN = "run" # cost X credits per run - BYTE = "byte" # cost X credits per byte - SECOND = "second" # cost X credits per second - - -class BlockCost(BaseModel): - cost_amount: int - cost_filter: BlockInput - cost_type: BlockCostType - - def __init__( - self, - cost_amount: int, - cost_type: BlockCostType = BlockCostType.RUN, - cost_filter: Optional[BlockInput] = None, - **data: Any, - ) -> None: - super().__init__( - cost_amount=cost_amount, - cost_filter=cost_filter or {}, - cost_type=cost_type, - **data, - ) - - -class BlockInfo(BaseModel): - id: str - name: str - inputSchema: dict[str, Any] - outputSchema: dict[str, Any] - costs: list[BlockCost] - description: str - categories: list[dict[str, str]] - contributors: list[dict[str, Any]] - staticOutput: bool - uiType: str - - -class BlockSchema(BaseModel): - cached_jsonschema: ClassVar[dict[str, Any]] - - @classmethod - def jsonschema(cls) -> dict[str, Any]: - if cls.cached_jsonschema: - return cls.cached_jsonschema - - model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True) - - def ref_to_dict(obj): - if isinstance(obj, dict): - # OpenAPI <3.1 does not support sibling fields that has a $ref key - # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item. 
- keys = {"allOf", "anyOf", "oneOf"} - one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None) - if one_key: - obj.update(obj[one_key][0]) - - return { - key: ref_to_dict(value) - for key, value in obj.items() - if not key.startswith("$") and key != one_key - } - elif isinstance(obj, list): - return [ref_to_dict(item) for item in obj] - - return obj - - cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model)) - - return cls.cached_jsonschema - - @classmethod - def validate_data(cls, data: BlockInput) -> str | None: - return json.validate_with_jsonschema( - schema=cls.jsonschema(), - data={k: v for k, v in data.items() if v is not None}, - ) - - @classmethod - def get_mismatch_error(cls, data: BlockInput) -> str | None: - return cls.validate_data(data) - - @classmethod - def get_field_schema(cls, field_name: str) -> dict[str, Any]: - model_schema = cls.jsonschema().get("properties", {}) - if not model_schema: - raise ValueError(f"Invalid model schema {cls}") - - property_schema = model_schema.get(field_name) - if not property_schema: - raise ValueError(f"Invalid property name {field_name}") - - return property_schema - - @classmethod - def validate_field(cls, field_name: str, data: BlockInput) -> str | None: - """ - Validate the data against a specific property (one of the input/output name). - Returns the validation error message if the data does not match the schema. - """ - try: - property_schema = cls.get_field_schema(field_name) - jsonschema.validate(json.to_dict(data), property_schema) - return None - except jsonschema.ValidationError as e: - return str(e) - - @classmethod - def get_fields(cls) -> set[str]: - return set(cls.model_fields.keys()) - - @classmethod - def get_required_fields(cls) -> set[str]: - return { - field - for field, field_info in cls.model_fields.items() - if field_info.is_required() - } - - @classmethod - def __pydantic_init_subclass__(cls, **kwargs): - """Validates the schema definition. Rules: - - Fields with annotation `CredentialsMetaInput` MUST be - named `credentials` or `*_credentials` - - Fields named `credentials` or `*_credentials` MUST be - of type `CredentialsMetaInput` - """ - super().__pydantic_init_subclass__(**kwargs) - - # Reset cached JSON schema to prevent inheriting it from parent class - cls.cached_jsonschema = {} - - credentials_fields = cls.get_credentials_fields() - - for field_name in cls.get_fields(): - if is_credentials_field_name(field_name): - if field_name not in credentials_fields: - raise TypeError( - f"Credentials field '{field_name}' on {cls.__qualname__} " - f"is not of type {CredentialsMetaInput.__name__}" - ) - - CredentialsMetaInput.validate_credentials_field_schema( - cls.get_field_schema(field_name), field_name - ) - - elif field_name in credentials_fields: - raise KeyError( - f"Credentials field '{field_name}' on {cls.__qualname__} " - "has invalid name: must be 'credentials' or *_credentials" - ) - - @classmethod - def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]: - return { - field_name: info.annotation - for field_name, info in cls.model_fields.items() - if ( - inspect.isclass(info.annotation) - and issubclass( - get_origin(info.annotation) or info.annotation, - CredentialsMetaInput, - ) - ) - } - - @classmethod - def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]: - """ - Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput). 
- - Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config} - - Raises: - ValueError: If multiple fields have the same kwarg_name, as this would - cause silent overwriting and only the last field would be processed. - """ - result: dict[str, dict[str, Any]] = {} - schema = cls.jsonschema() - properties = schema.get("properties", {}) - - for field_name, field_schema in properties.items(): - auto_creds = field_schema.get("auto_credentials") - if auto_creds: - kwarg_name = auto_creds.get("kwarg_name", "credentials") - if kwarg_name in result: - raise ValueError( - f"Duplicate auto_credentials kwarg_name '{kwarg_name}' " - f"in fields '{result[kwarg_name]['field_name']}' and " - f"'{field_name}' on {cls.__qualname__}" - ) - result[kwarg_name] = { - "field_name": field_name, - "config": auto_creds, - } - return result - - @classmethod - def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]: - result = {} - - # Regular credentials fields - for field_name in cls.get_credentials_fields().keys(): - result[field_name] = CredentialsFieldInfo.model_validate( - cls.get_field_schema(field_name), by_alias=True - ) - - # Auto-generated credentials fields (from GoogleDriveFileInput etc.) - for kwarg_name, info in cls.get_auto_credentials_fields().items(): - config = info["config"] - # Build a schema-like dict that CredentialsFieldInfo can parse - auto_schema = { - "credentials_provider": [config.get("provider", "google")], - "credentials_types": [config.get("type", "oauth2")], - "credentials_scopes": config.get("scopes"), - "is_auto_credential": True, - "input_field_name": info["field_name"], - } - result[kwarg_name] = CredentialsFieldInfo.model_validate( - auto_schema, by_alias=True - ) - - return result - - @classmethod - def get_input_defaults(cls, data: BlockInput) -> BlockInput: - return data # Return as is, by default. - - @classmethod - def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]: - input_fields_from_nodes = {link.sink_name for link in links} - return input_fields_from_nodes - set(data) - - @classmethod - def get_missing_input(cls, data: BlockInput) -> set[str]: - return cls.get_required_fields() - set(data) - - -class BlockSchemaInput(BlockSchema): - """ - Base schema class for block inputs. - All block input schemas should extend this class for consistency. - """ - - pass - - -class BlockSchemaOutput(BlockSchema): - """ - Base schema class for block outputs that includes a standard error field. - All block output schemas should extend this class to ensure consistent error handling. - """ - - error: str = SchemaField( - description="Error message if the operation failed", default="" - ) - - -BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput) -BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput) - - -class EmptyInputSchema(BlockSchemaInput): - pass - - -class EmptyOutputSchema(BlockSchemaOutput): - pass - - -# For backward compatibility - will be deprecated -EmptySchema = EmptyOutputSchema - - -# --8<-- [start:BlockWebhookConfig] -class BlockManualWebhookConfig(BaseModel): - """ - Configuration model for webhook-triggered blocks on which - the user has to manually set up the webhook at the provider. - """ - - provider: ProviderName - """The service provider that the webhook connects to""" - - webhook_type: str - """ - Identifier for the webhook type. E.g. GitHub has repo and organization level hooks. - - Only for use in the corresponding `WebhooksManager`. 
- """ - - event_filter_input: str = "" - """ - Name of the block's event filter input. - Leave empty if the corresponding webhook doesn't have distinct event/payload types. - """ - - event_format: str = "{event}" - """ - Template string for the event(s) that a block instance subscribes to. - Applied individually to each event selected in the event filter input. - - Example: `"pull_request.{event}"` -> `"pull_request.opened"` - """ - - -class BlockWebhookConfig(BlockManualWebhookConfig): - """ - Configuration model for webhook-triggered blocks for which - the webhook can be automatically set up through the provider's API. - """ - - resource_format: str - """ - Template string for the resource that a block instance subscribes to. - Fields will be filled from the block's inputs (except `payload`). - - Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented) - - Only for use in the corresponding `WebhooksManager`. - """ - # --8<-- [end:BlockWebhookConfig] - - -class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): - def __init__( - self, - id: str = "", - description: str = "", - contributors: list[ContributorDetails] = [], - categories: set[BlockCategory] | None = None, - input_schema: Type[BlockSchemaInputType] = EmptyInputSchema, - output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema, - test_input: BlockInput | list[BlockInput] | None = None, - test_output: BlockTestOutput | list[BlockTestOutput] | None = None, - test_mock: dict[str, Any] | None = None, - test_credentials: Optional[Credentials | dict[str, Credentials]] = None, - disabled: bool = False, - static_output: bool = False, - block_type: BlockType = BlockType.STANDARD, - webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None, - is_sensitive_action: bool = False, - ): - """ - Initialize the block with the given schema. - - Args: - id: The unique identifier for the block, this value will be persisted in the - DB. So it should be a unique and constant across the application run. - Use the UUID format for the ID. - description: The description of the block, explaining what the block does. - contributors: The list of contributors who contributed to the block. - input_schema: The schema, defined as a Pydantic model, for the input data. - output_schema: The schema, defined as a Pydantic model, for the output data. - test_input: The list or single sample input data for the block, for testing. - test_output: The list or single expected output if the test_input is run. - test_mock: function names on the block implementation to mock on test run. - disabled: If the block is disabled, it will not be available for execution. - static_output: Whether the output links of the block are static by default. 
- """ - self.id = id - self.input_schema = input_schema - self.output_schema = output_schema - self.test_input = test_input - self.test_output = test_output - self.test_mock = test_mock - self.test_credentials = test_credentials - self.description = description - self.categories = categories or set() - self.contributors = contributors or set() - self.disabled = disabled - self.static_output = static_output - self.block_type = block_type - self.webhook_config = webhook_config - self.is_sensitive_action = is_sensitive_action - self.execution_stats: NodeExecutionStats = NodeExecutionStats() - - if self.webhook_config: - if isinstance(self.webhook_config, BlockWebhookConfig): - # Enforce presence of credentials field on auto-setup webhook blocks - if not (cred_fields := self.input_schema.get_credentials_fields()): - raise TypeError( - "credentials field is required on auto-setup webhook blocks" - ) - # Disallow multiple credentials inputs on webhook blocks - elif len(cred_fields) > 1: - raise ValueError( - "Multiple credentials inputs not supported on webhook blocks" - ) - - self.block_type = BlockType.WEBHOOK - else: - self.block_type = BlockType.WEBHOOK_MANUAL - - # Enforce shape of webhook event filter, if present - if self.webhook_config.event_filter_input: - event_filter_field = self.input_schema.model_fields[ - self.webhook_config.event_filter_input - ] - if not ( - isinstance(event_filter_field.annotation, type) - and issubclass(event_filter_field.annotation, BaseModel) - and all( - field.annotation is bool - for field in event_filter_field.annotation.model_fields.values() - ) - ): - raise NotImplementedError( - f"{self.name} has an invalid webhook event selector: " - "field must be a BaseModel and all its fields must be boolean" - ) - - # Enforce presence of 'payload' input - if "payload" not in self.input_schema.model_fields: - raise TypeError( - f"{self.name} is webhook-triggered but has no 'payload' input" - ) - - # Disable webhook-triggered block if webhook functionality not available - if not app_config.platform_base_url: - self.disabled = True - - @classmethod - def create(cls: Type["Block"]) -> "Block": - return cls() - - @abstractmethod - async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: - """ - Run the block with the given input data. - Args: - input_data: The input data with the structure of input_schema. - - Kwargs: Currently 14/02/2025 these include - graph_id: The ID of the graph. - node_id: The ID of the node. - graph_exec_id: The ID of the graph execution. - node_exec_id: The ID of the node execution. - user_id: The ID of the user. - - Returns: - A Generator that yields (output_name, output_data). - output_name: One of the output name defined in Block's output_schema. - output_data: The data for the output_name, matching the defined schema. 
- """ - # --- satisfy the type checker, never executed ------------- - if False: # noqa: SIM115 - yield "name", "value" # pyright: ignore[reportMissingYield] - raise NotImplementedError(f"{self.name} does not implement the run method.") - - async def run_once( - self, input_data: BlockSchemaInputType, output: str, **kwargs - ) -> Any: - async for item in self.run(input_data, **kwargs): - name, data = item - if name == output: - return data - raise ValueError(f"{self.name} did not produce any output for {output}") - - def merge_stats(self, stats: NodeExecutionStats) -> NodeExecutionStats: - self.execution_stats += stats - return self.execution_stats - - @property - def name(self): - return self.__class__.__name__ - - def to_dict(self): - return { - "id": self.id, - "name": self.name, - "inputSchema": self.input_schema.jsonschema(), - "outputSchema": self.output_schema.jsonschema(), - "description": self.description, - "categories": [category.dict() for category in self.categories], - "contributors": [ - contributor.model_dump() for contributor in self.contributors - ], - "staticOutput": self.static_output, - "uiType": self.block_type.value, - } - - def get_info(self) -> BlockInfo: - from backend.data.credit import get_block_cost - - return BlockInfo( - id=self.id, - name=self.name, - inputSchema=self.input_schema.jsonschema(), - outputSchema=self.output_schema.jsonschema(), - costs=get_block_cost(self), - description=self.description, - categories=[category.dict() for category in self.categories], - contributors=[ - contributor.model_dump() for contributor in self.contributors - ], - staticOutput=self.static_output, - uiType=self.block_type.value, - ) - - async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: - try: - async for output_name, output_data in self._execute(input_data, **kwargs): - yield output_name, output_data - except Exception as ex: - if isinstance(ex, BlockError): - raise ex - else: - raise ( - BlockExecutionError - if isinstance(ex, ValueError) - else BlockUnknownError - )( - message=str(ex), - block_name=self.name, - block_id=self.id, - ) from ex - - async def is_block_exec_need_review( - self, - input_data: BlockInput, - *, - user_id: str, - node_id: str, - node_exec_id: str, - graph_exec_id: str, - graph_id: str, - graph_version: int, - execution_context: "ExecutionContext", - **kwargs, - ) -> tuple[bool, BlockInput]: - """ - Check if this block execution needs human review and handle the review process. 
- - Returns: - Tuple of (should_pause, input_data_to_use) - - should_pause: True if execution should be paused for review - - input_data_to_use: The input data to use (may be modified by reviewer) - """ - if not ( - self.is_sensitive_action and execution_context.sensitive_action_safe_mode - ): - return False, input_data - - from backend.blocks.helpers.review import HITLReviewHelper - - # Handle the review request and get decision - decision = await HITLReviewHelper.handle_review_decision( - input_data=input_data, - user_id=user_id, - node_id=node_id, - node_exec_id=node_exec_id, - graph_exec_id=graph_exec_id, - graph_id=graph_id, - graph_version=graph_version, - block_name=self.name, - editable=True, - ) - - if decision is None: - # We're awaiting review - pause execution - return True, input_data - - if not decision.should_proceed: - # Review was rejected, raise an error to stop execution - raise BlockExecutionError( - message=f"Block execution rejected by reviewer: {decision.message}", - block_name=self.name, - block_id=self.id, - ) - - # Review was approved - use the potentially modified data - # ReviewResult.data must be a dict for block inputs - reviewed_data = decision.review_result.data - if not isinstance(reviewed_data, dict): - raise BlockExecutionError( - message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}", - block_name=self.name, - block_id=self.id, - ) - return False, reviewed_data - - async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: - # Check for review requirement only if running within a graph execution context - # Direct block execution (e.g., from chat) skips the review process - has_graph_context = all( - key in kwargs - for key in ( - "node_exec_id", - "graph_exec_id", - "graph_id", - "execution_context", - ) - ) - if has_graph_context: - should_pause, input_data = await self.is_block_exec_need_review( - input_data, **kwargs - ) - if should_pause: - return - - # Validate the input data (original or reviewer-modified) once - if error := self.input_schema.validate_data(input_data): - raise BlockInputError( - message=f"Unable to execute block with invalid input data: {error}", - block_name=self.name, - block_id=self.id, - ) - - # Use the validated input data - async for output_name, output_data in self.run( - self.input_schema(**{k: v for k, v in input_data.items() if v is not None}), - **kwargs, - ): - if output_name == "error": - raise BlockExecutionError( - message=output_data, block_name=self.name, block_id=self.id - ) - if self.block_type == BlockType.STANDARD and ( - error := self.output_schema.validate_field(output_name, output_data) - ): - raise BlockOutputError( - message=f"Block produced an invalid output data: {error}", - block_name=self.name, - block_id=self.id, - ) - yield output_name, output_data - - def is_triggered_by_event_type( - self, trigger_config: dict[str, Any], event_type: str - ) -> bool: - if not self.webhook_config: - raise TypeError("This method can't be used on non-trigger blocks") - if not self.webhook_config.event_filter_input: - return True - event_filter = trigger_config.get(self.webhook_config.event_filter_input) - if not event_filter: - raise ValueError("Event filter is not configured on trigger") - return event_type in [ - self.webhook_config.event_format.format(event=k) - for k in event_filter - if event_filter[k] is True - ] - - -# Type alias for any block with standard input/output schemas -AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput] - - -# 
======================= Block Helper Functions ======================= # - - -def get_blocks() -> dict[str, Type[Block]]: - from backend.blocks import load_all_blocks - - return load_all_blocks() - - -def is_block_auth_configured( - block_cls: type[AnyBlockSchema], -) -> bool: - """ - Check if a block has a valid authentication method configured at runtime. - - For example if a block is an OAuth-only block and there env vars are not set, - do not show it in the UI. - - """ - from backend.sdk.registry import AutoRegistry - - # Create an instance to access input_schema - try: - block = block_cls() - except Exception as e: - # If we can't create a block instance, assume it's not OAuth-only - logger.error(f"Error creating block instance for {block_cls.__name__}: {e}") - return True - logger.debug( - f"Checking if block {block_cls.__name__} has a valid provider configured" - ) - - # Get all credential inputs from input schema - credential_inputs = block.input_schema.get_credentials_fields_info() - required_inputs = block.input_schema.get_required_fields() - if not credential_inputs: - logger.debug( - f"Block {block_cls.__name__} has no credential inputs - Treating as valid" - ) - return True - - # Check credential inputs - if len(required_inputs.intersection(credential_inputs.keys())) == 0: - logger.debug( - f"Block {block_cls.__name__} has only optional credential inputs" - " - will work without credentials configured" - ) - - # Check if the credential inputs for this block are correctly configured - for field_name, field_info in credential_inputs.items(): - provider_names = field_info.provider - if not provider_names: - logger.warning( - f"Block {block_cls.__name__} " - f"has credential input '{field_name}' with no provider options" - " - Disabling" - ) - return False - - # If a field has multiple possible providers, each one needs to be usable to - # prevent breaking the UX - for _provider_name in provider_names: - provider_name = _provider_name.value - if provider_name in ProviderName.__members__.values(): - logger.debug( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"provider '{provider_name}' is part of the legacy provider system" - " - Treating as valid" - ) - break - - provider = AutoRegistry.get_provider(provider_name) - if not provider: - logger.warning( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"refers to unknown provider '{provider_name}' - Disabling" - ) - return False - - # Check the provider's supported auth types - if field_info.supported_types != provider.supported_auth_types: - logger.warning( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"has mismatched supported auth types (field <> Provider): " - f"{field_info.supported_types} != {provider.supported_auth_types}" - ) - - if not (supported_auth_types := provider.supported_auth_types): - # No auth methods are been configured for this provider - logger.warning( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"provider '{provider_name}' " - "has no authentication methods configured - Disabling" - ) - return False - - # Check if provider supports OAuth - if "oauth2" in supported_auth_types: - # Check if OAuth environment variables are set - if (oauth_config := provider.oauth_config) and bool( - os.getenv(oauth_config.client_id_env_var) - and os.getenv(oauth_config.client_secret_env_var) - ): - logger.debug( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"provider '{provider_name}' is configured for OAuth" - ) - else: - 
logger.error( - f"Block {block_cls.__name__} credential input '{field_name}' " - f"provider '{provider_name}' " - "is missing OAuth client ID or secret - Disabling" - ) - return False - - logger.debug( - f"Block {block_cls.__name__} credential input '{field_name}' is valid; " - f"supported credential types: {', '.join(field_info.supported_types)}" - ) - - return True - - async def initialize_blocks() -> None: + from backend.blocks import get_blocks from backend.sdk.cost_integration import sync_all_provider_costs from backend.util.retry import func_retry sync_all_provider_costs() @func_retry - async def sync_block_to_db(block: Block) -> None: + async def sync_block_to_db(block: "AnyBlockSchema") -> None: existing_block = await AgentBlock.prisma().find_first( where={"OR": [{"id": block.id}, {"name": block.name}]} ) @@ -934,36 +77,3 @@ async def initialize_blocks() -> None: f"Failed to sync {len(failed_blocks)} block(s) to database: " f"{', '.join(failed_blocks)}. These blocks are still available in memory." ) - - -# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281 -def get_block(block_id: str) -> AnyBlockSchema | None: - cls = get_blocks().get(block_id) - return cls() if cls else None - - -@cached(ttl_seconds=3600) -def get_webhook_block_ids() -> Sequence[str]: - return [ - id - for id, B in get_blocks().items() - if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL) - ] - - -@cached(ttl_seconds=3600) -def get_io_block_ids() -> Sequence[str]: - return [ - id - for id, B in get_blocks().items() - if B().block_type in (BlockType.INPUT, BlockType.OUTPUT) - ] - - -@cached(ttl_seconds=3600) -def get_human_in_the_loop_block_ids() -> Sequence[str]: - return [ - id - for id, B in get_blocks().items() - if B().block_type == BlockType.HUMAN_IN_THE_LOOP - ] diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py index ec35afa401..c7fb12deb6 100644 --- a/autogpt_platform/backend/backend/data/block_cost_config.py +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -1,5 +1,6 @@ from typing import Type +from backend.blocks._base import Block, BlockCost, BlockCostType from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel from backend.blocks.ai_image_generator_block import AIImageGeneratorBlock, ImageGenModel from backend.blocks.ai_music_generator import AIMusicGeneratorBlock @@ -37,7 +38,6 @@ from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock from backend.blocks.video.narration import VideoNarrationBlock -from backend.data.block import Block, BlockCost, BlockCostType from backend.integrations.credentials_store import ( aiml_api_credentials, anthropic_credentials, diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index f3c5365446..04f91d8d61 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -38,7 +38,7 @@ from backend.util.retry import func_retry from backend.util.settings import Settings if TYPE_CHECKING: - from backend.data.block import Block, BlockCost + from backend.blocks._base import Block, BlockCost settings = Settings() stripe.api_key = settings.secrets.stripe_api_key diff --git a/autogpt_platform/backend/backend/data/credit_test.py 
b/autogpt_platform/backend/backend/data/credit_test.py index 2b10c62882..cb5973c74f 100644 --- a/autogpt_platform/backend/backend/data/credit_test.py +++ b/autogpt_platform/backend/backend/data/credit_test.py @@ -4,8 +4,8 @@ import pytest from prisma.enums import CreditTransactionType from prisma.models import CreditTransaction, UserBalance +from backend.blocks import get_block from backend.blocks.llm import AITextGeneratorBlock -from backend.data.block import get_block from backend.data.credit import BetaUserCredit, UsageTransactionMetadata from backend.data.execution import ExecutionContext, NodeExecutionEntry from backend.data.user import DEFAULT_USER_ID diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index def3d14fda..2f9258dc55 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -4,7 +4,6 @@ from collections import defaultdict from datetime import datetime, timedelta, timezone from enum import Enum from typing import ( - TYPE_CHECKING, Annotated, Any, AsyncGenerator, @@ -39,6 +38,8 @@ from prisma.types import ( from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError from pydantic.fields import Field +from backend.blocks import get_block, get_io_block_ids, get_webhook_block_ids +from backend.blocks._base import BlockType from backend.util import type as type_utils from backend.util.exceptions import DatabaseError from backend.util.json import SafeJson @@ -47,14 +48,7 @@ from backend.util.retry import func_retry from backend.util.settings import Config from backend.util.truncate import truncate -from .block import ( - BlockInput, - BlockType, - CompletedBlockOutput, - get_block, - get_io_block_ids, - get_webhook_block_ids, -) +from .block import BlockInput, CompletedBlockOutput from .db import BaseDbModel, query_raw_with_schema from .event_bus import AsyncRedisEventBus, RedisEventBus from .includes import ( @@ -63,10 +57,12 @@ from .includes import ( GRAPH_EXECUTION_INCLUDE_WITH_NODES, graph_execution_include, ) -from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats - -if TYPE_CHECKING: - pass +from .model import ( + CredentialsMetaInput, + GraphExecutionStats, + GraphInput, + NodeExecutionStats, +) T = TypeVar("T") @@ -167,7 +163,7 @@ class GraphExecutionMeta(BaseDbModel): user_id: str graph_id: str graph_version: int - inputs: Optional[BlockInput] # no default -> required in the OpenAPI spec + inputs: Optional[GraphInput] # no default -> required in the OpenAPI spec credential_inputs: Optional[dict[str, CredentialsMetaInput]] nodes_input_masks: Optional[dict[str, BlockInput]] preset_id: Optional[str] @@ -272,7 +268,7 @@ class GraphExecutionMeta(BaseDbModel): user_id=_graph_exec.userId, graph_id=_graph_exec.agentGraphId, graph_version=_graph_exec.agentGraphVersion, - inputs=cast(BlockInput | None, _graph_exec.inputs), + inputs=cast(GraphInput | None, _graph_exec.inputs), credential_inputs=( { name: CredentialsMetaInput.model_validate(cmi) @@ -314,7 +310,7 @@ class GraphExecutionMeta(BaseDbModel): class GraphExecution(GraphExecutionMeta): - inputs: BlockInput # type: ignore - incompatible override is intentional + inputs: GraphInput # type: ignore - incompatible override is intentional outputs: CompletedBlockOutput @staticmethod @@ -447,7 +443,7 @@ class NodeExecutionResult(BaseModel): for name, messages in stats.cleared_inputs.items(): input_data[name] = messages[-1] if messages else "" elif 
_node_exec.executionData: - input_data = type_utils.convert(_node_exec.executionData, dict[str, Any]) + input_data = type_utils.convert(_node_exec.executionData, BlockInput) else: input_data: BlockInput = defaultdict() for data in _node_exec.Input or []: @@ -867,7 +863,7 @@ async def upsert_execution_output( async def get_execution_outputs_by_node_exec_id( node_exec_id: str, -) -> dict[str, Any]: +) -> CompletedBlockOutput: """ Get all execution outputs for a specific node execution ID. @@ -1498,7 +1494,7 @@ async def get_graph_execution_by_share_token( # The executionData contains the structured input with 'name' and 'value' fields if hasattr(node_exec, "executionData") and node_exec.executionData: exec_data = type_utils.convert( - node_exec.executionData, dict[str, Any] + node_exec.executionData, BlockInput ) if "name" in exec_data: name = exec_data["name"] diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 3340716da8..a6072ce43a 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -23,38 +23,29 @@ from prisma.types import ( from pydantic import BaseModel, BeforeValidator, Field from pydantic.fields import computed_field +from backend.blocks import get_block, get_blocks +from backend.blocks._base import Block, BlockType, EmptySchema from backend.blocks.agent import AgentExecutorBlock from backend.blocks.io import AgentInputBlock, AgentOutputBlock from backend.blocks.llm import LlmModel -from backend.data.db import prisma as db -from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name -from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH -from backend.data.model import ( - CredentialsFieldInfo, - CredentialsMetaInput, - is_credentials_field_name, -) from backend.integrations.providers import ProviderName from backend.util import type as type_utils from backend.util.exceptions import GraphNotAccessibleError, GraphNotInLibraryError from backend.util.json import SafeJson from backend.util.models import Pagination -from .block import ( - AnyBlockSchema, - Block, - BlockInput, - BlockType, - EmptySchema, - get_block, - get_blocks, -) -from .db import BaseDbModel, query_raw_with_schema, transaction -from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE +from .block import BlockInput +from .db import BaseDbModel +from .db import prisma as db +from .db import query_raw_with_schema, transaction +from .dynamic_fields import is_tool_pin, sanitize_pin_name +from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE, MAX_GRAPH_VERSIONS_FETCH +from .model import CredentialsFieldInfo, CredentialsMetaInput, is_credentials_field_name if TYPE_CHECKING: + from backend.blocks._base import AnyBlockSchema + from .execution import NodesInputMasks - from .integrations import Webhook logger = logging.getLogger(__name__) @@ -128,7 +119,7 @@ class Node(BaseDbModel): return self.metadata.get("credentials_optional", False) @property - def block(self) -> AnyBlockSchema | "_UnknownBlockBase": + def block(self) -> "AnyBlockSchema | _UnknownBlockBase": """Get the block for this node. 
Returns UnknownBlock if block is deleted/missing.""" block = get_block(self.block_id) if not block: @@ -145,21 +136,18 @@ class NodeModel(Node): graph_version: int webhook_id: Optional[str] = None - webhook: Optional["Webhook"] = None + # webhook: Optional["Webhook"] = None # deprecated @staticmethod def from_db(node: AgentNode, for_export: bool = False) -> "NodeModel": - from .integrations import Webhook - obj = NodeModel( id=node.id, block_id=node.agentBlockId, - input_default=type_utils.convert(node.constantInput, dict[str, Any]), + input_default=type_utils.convert(node.constantInput, BlockInput), metadata=type_utils.convert(node.metadata, dict[str, Any]), graph_id=node.agentGraphId, graph_version=node.agentGraphVersion, webhook_id=node.webhookId, - webhook=Webhook.from_db(node.Webhook) if node.Webhook else None, ) obj.input_links = [Link.from_db(link) for link in node.Input or []] obj.output_links = [Link.from_db(link) for link in node.Output or []] @@ -192,14 +180,13 @@ class NodeModel(Node): # Remove webhook info stripped_node.webhook_id = None - stripped_node.webhook = None return stripped_node @staticmethod def _filter_secrets_from_node_input( - input_data: dict[str, Any], schema: dict[str, Any] | None - ) -> dict[str, Any]: + input_data: BlockInput, schema: dict[str, Any] | None + ) -> BlockInput: sensitive_keys = ["credentials", "api_key", "password", "token", "secret"] field_schemas = schema.get("properties", {}) if schema else {} result = {} @@ -774,6 +761,11 @@ class GraphModel(Graph, GraphMeta): # For invalid blocks, we still raise immediately as this is a structural issue raise ValueError(f"Invalid block {node.block_id} for node #{node.id}") + if block.disabled: + raise ValueError( + f"Block {node.block_id} is disabled and cannot be used in graphs" + ) + node_input_mask = ( nodes_input_masks.get(node.id, {}) if nodes_input_masks else {} ) diff --git a/autogpt_platform/backend/backend/data/graph_test.py b/autogpt_platform/backend/backend/data/graph_test.py index a1147c5464..5930d3f253 100644 --- a/autogpt_platform/backend/backend/data/graph_test.py +++ b/autogpt_platform/backend/backend/data/graph_test.py @@ -9,9 +9,9 @@ from pytest_snapshot.plugin import Snapshot import backend.api.features.store.model as store from backend.api.model import CreateGraph +from backend.blocks._base import BlockSchema, BlockSchemaInput from backend.blocks.basic import StoreValueBlock from backend.blocks.io import AgentInputBlock, AgentOutputBlock -from backend.data.block import BlockSchema, BlockSchemaInput from backend.data.graph import Graph, Link, Node from backend.data.model import SchemaField from backend.data.user import DEFAULT_USER_ID @@ -323,7 +323,6 @@ async def test_clean_graph(server: SpinTestServer): # Verify webhook info is removed (if any nodes had it) for node in cleaned_graph.nodes: assert node.webhook_id is None - assert node.webhook is None @pytest.mark.asyncio(loop_scope="session") diff --git a/autogpt_platform/backend/backend/data/integrations.py b/autogpt_platform/backend/backend/data/integrations.py index 5f44f928bd..a6f007ce99 100644 --- a/autogpt_platform/backend/backend/data/integrations.py +++ b/autogpt_platform/backend/backend/data/integrations.py @@ -1,5 +1,5 @@ import logging -from typing import TYPE_CHECKING, AsyncGenerator, Literal, Optional, overload +from typing import AsyncGenerator, Literal, Optional, overload from prisma.models import AgentNode, AgentPreset, IntegrationWebhook from prisma.types import ( @@ -22,9 +22,6 @@ from 
backend.integrations.webhooks.utils import webhook_ingress_url from backend.util.exceptions import NotFoundError from backend.util.json import SafeJson -if TYPE_CHECKING: - from backend.api.features.library.model import LibraryAgentPreset - from .db import BaseDbModel from .graph import NodeModel @@ -64,9 +61,18 @@ class Webhook(BaseDbModel): ) +# LibraryAgentPreset must be imported after the Webhook definition to avoid +# a circular import: +# integrations.py → library/model.py → integrations.py (for Webhook) +from backend.api.features.library.model import LibraryAgentPreset # noqa: E402 + +# Resolve forward refs +LibraryAgentPreset.model_rebuild() + + class WebhookWithRelations(Webhook): triggered_nodes: list[NodeModel] - triggered_presets: list["LibraryAgentPreset"] + triggered_presets: list[LibraryAgentPreset] @staticmethod def from_db(webhook: IntegrationWebhook): @@ -75,11 +81,6 @@ class WebhookWithRelations(Webhook): "AgentNodes and AgentPresets must be included in " "IntegrationWebhook query with relations" ) - # LibraryAgentPreset import is moved to TYPE_CHECKING to avoid circular import: - # integrations.py → library/model.py → integrations.py (for Webhook) - # Runtime import is used in WebhookWithRelations.from_db() method instead - # Import at runtime to avoid circular dependency - from backend.api.features.library.model import LibraryAgentPreset return WebhookWithRelations( **Webhook.from_db(webhook).model_dump(), diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py index 45c0541111..1477a1fe0d 100644 --- a/autogpt_platform/backend/backend/data/model.py +++ b/autogpt_platform/backend/backend/data/model.py @@ -168,6 +168,9 @@ T = TypeVar("T") logger = logging.getLogger(__name__) +GraphInput = dict[str, Any] + + class BlockSecret: def __init__(self, key: Optional[str] = None, value: Optional[str] = None): if value is not None: diff --git a/autogpt_platform/backend/backend/executor/activity_status_generator.py b/autogpt_platform/backend/backend/executor/activity_status_generator.py index 3bc6bcb876..8cc1da8957 100644 --- a/autogpt_platform/backend/backend/executor/activity_status_generator.py +++ b/autogpt_platform/backend/backend/executor/activity_status_generator.py @@ -13,8 +13,8 @@ except ImportError: from pydantic import SecretStr +from backend.blocks import get_block from backend.blocks.llm import AIStructuredResponseGeneratorBlock, LlmModel -from backend.data.block import get_block from backend.data.execution import ExecutionStatus, NodeExecutionResult from backend.data.model import APIKeyCredentials, GraphExecutionStats from backend.util.feature_flag import Flag, is_feature_enabled diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 40b386a359..a1adc27c07 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -16,16 +16,12 @@ from pika.spec import Basic, BasicProperties from prometheus_client import Gauge, start_http_server from redis.asyncio.lock import Lock as AsyncRedisLock +from backend.blocks import get_block +from backend.blocks._base import BlockSchema from backend.blocks.agent import AgentExecutorBlock from backend.blocks.io import AgentOutputBlock from backend.data import redis_client as redis -from backend.data.block import ( - BlockInput, - BlockOutput, - BlockOutputEntry, - BlockSchema, - get_block, -) +from backend.data.block import BlockInput, BlockOutput, 
BlockOutputEntry from backend.data.credit import UsageTransactionMetadata from backend.data.dynamic_fields import parse_execution_output from backend.data.execution import ( @@ -288,6 +284,9 @@ async def execute_node( block_name=node_block.name, ) + if node_block.disabled: + raise ValueError(f"Block {node_block.id} is disabled and cannot be executed") + # Sanity check: validate the execution input. input_data, error = validate_exec(node, data.inputs, resolve_input=False) if input_data is None: diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index cbdc441718..94829f9837 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -24,9 +24,8 @@ from dotenv import load_dotenv from pydantic import BaseModel, Field, ValidationError from sqlalchemy import MetaData, create_engine -from backend.data.block import BlockInput from backend.data.execution import GraphExecutionWithNodes -from backend.data.model import CredentialsMetaInput +from backend.data.model import CredentialsMetaInput, GraphInput from backend.executor import utils as execution_utils from backend.monitoring import ( NotificationJobArgs, @@ -387,7 +386,7 @@ class GraphExecutionJobArgs(BaseModel): graph_version: int agent_name: str | None = None cron: str - input_data: BlockInput + input_data: GraphInput input_credentials: dict[str, CredentialsMetaInput] = Field(default_factory=dict) @@ -649,7 +648,7 @@ class Scheduler(AppService): graph_id: str, graph_version: int, cron: str, - input_data: BlockInput, + input_data: GraphInput, input_credentials: dict[str, CredentialsMetaInput], name: Optional[str] = None, user_timezone: str | None = None, diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index ffccd125b9..35725112d1 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -8,23 +8,18 @@ from typing import Mapping, Optional, cast from pydantic import BaseModel, JsonValue, ValidationError +from backend.blocks import get_block +from backend.blocks._base import Block, BlockCostType, BlockType from backend.data import execution as execution_db from backend.data import graph as graph_db from backend.data import human_review as human_review_db from backend.data import onboarding as onboarding_db from backend.data import user as user_db -from backend.data.block import ( - Block, - BlockCostType, - BlockInput, - BlockOutputEntry, - BlockType, - get_block, -) -from backend.data.block_cost_config import BLOCK_COSTS -from backend.data.db import prisma # Import dynamic field utilities from centralized location +from backend.data.block import BlockInput, BlockOutputEntry +from backend.data.block_cost_config import BLOCK_COSTS +from backend.data.db import prisma from backend.data.dynamic_fields import merge_execution_input from backend.data.execution import ( ExecutionContext, @@ -35,7 +30,7 @@ from backend.data.execution import ( NodesInputMasks, ) from backend.data.graph import GraphModel, Node -from backend.data.model import USER_TIMEZONE_NOT_SET, CredentialsMetaInput +from backend.data.model import USER_TIMEZONE_NOT_SET, CredentialsMetaInput, GraphInput from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig from backend.util.clients import ( get_async_execution_event_bus, @@ -469,7 +464,7 @@ async def validate_graph_with_credentials( async 
def _construct_starting_node_execution_input( graph: GraphModel, user_id: str, - graph_inputs: BlockInput, + graph_inputs: GraphInput, nodes_input_masks: Optional[NodesInputMasks] = None, ) -> tuple[list[tuple[str, BlockInput]], set[str]]: """ @@ -481,7 +476,7 @@ async def _construct_starting_node_execution_input( Args: graph (GraphModel): The graph model to execute. user_id (str): The ID of the user executing the graph. - data (BlockInput): The input data for the graph execution. + graph_inputs (GraphInput): The input data for the graph execution. node_credentials_map: `dict[node_id, dict[input_name, CredentialsMetaInput]]` Returns: @@ -539,7 +534,7 @@ async def _construct_starting_node_execution_input( async def validate_and_construct_node_execution_input( graph_id: str, user_id: str, - graph_inputs: BlockInput, + graph_inputs: GraphInput, graph_version: Optional[int] = None, graph_credentials_inputs: Optional[Mapping[str, CredentialsMetaInput]] = None, nodes_input_masks: Optional[NodesInputMasks] = None, @@ -839,7 +834,7 @@ async def stop_graph_execution( async def add_graph_execution( graph_id: str, user_id: str, - inputs: Optional[BlockInput] = None, + inputs: Optional[GraphInput] = None, preset_id: Optional[str] = None, graph_version: Optional[int] = None, graph_credentials_inputs: Optional[Mapping[str, CredentialsMetaInput]] = None, diff --git a/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py b/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py index 5fb9198c4d..99eee404b9 100--- a/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py +++ b/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py @@ -2,8 +2,9 @@ import asyncio import logging from typing import TYPE_CHECKING, Optional, cast, overload -from backend.data.block import BlockSchema +from backend.blocks._base import BlockSchema from backend.data.graph import set_node_webhook +from backend.data.integrations import get_webhook from backend.integrations.creds_manager import IntegrationCredentialsManager from . import get_webhook_manager, supports_webhooks @@ -113,31 +114,32 @@ async def on_node_deactivate( webhooks_manager = get_webhook_manager(provider) - if node.webhook_id: - logger.debug(f"Node #{node.id} has webhook_id {node.webhook_id}") - if not node.webhook: - logger.error(f"Node #{node.id} has webhook_id but no webhook object") - raise ValueError("node.webhook not included") + if webhook_id := node.webhook_id: + logger.warning( + f"Node #{node.id} still attached to webhook #{webhook_id} - " + "did the `migrate_legacy_triggered_graphs` migration fail? " + "Triggered nodes are deprecated since Significant-Gravitas/AutoGPT#10418."
+ ) + webhook = await get_webhook(webhook_id) # Detach webhook from node logger.debug(f"Detaching webhook from node #{node.id}") updated_node = await set_node_webhook(node.id, None) # Prune and deregister the webhook if it is no longer used anywhere - webhook = node.webhook logger.debug( f"Pruning{' and deregistering' if credentials else ''} " - f"webhook #{webhook.id}" + f"webhook #{webhook_id}" ) await webhooks_manager.prune_webhook_if_dangling( - user_id, webhook.id, credentials + user_id, webhook_id, credentials ) if ( cast(BlockSchema, block.input_schema).get_credentials_fields() and not credentials ): logger.warning( - f"Cannot deregister webhook #{webhook.id}: credentials " + f"Cannot deregister webhook #{webhook_id}: credentials " f"#{webhook.credentials_id} not available " f"({webhook.provider.value} webhook ID: {webhook.provider_webhook_id})" ) diff --git a/autogpt_platform/backend/backend/integrations/webhooks/utils.py b/autogpt_platform/backend/backend/integrations/webhooks/utils.py index 79316c4c0e..ffe910a2eb 100644 --- a/autogpt_platform/backend/backend/integrations/webhooks/utils.py +++ b/autogpt_platform/backend/backend/integrations/webhooks/utils.py @@ -9,7 +9,7 @@ from backend.util.settings import Config from . import get_webhook_manager, supports_webhooks if TYPE_CHECKING: - from backend.data.block import AnyBlockSchema + from backend.blocks._base import AnyBlockSchema from backend.data.integrations import Webhook from backend.data.model import Credentials from backend.integrations.providers import ProviderName @@ -42,7 +42,7 @@ async def setup_webhook_for_block( Webhook: The created or found webhook object, if successful. str: A feedback message, if any required inputs are missing. """ - from backend.data.block import BlockWebhookConfig + from backend.blocks._base import BlockWebhookConfig if not (trigger_base_config := trigger_block.webhook_config): raise ValueError(f"Block #{trigger_block.id} does not have a webhook_config") diff --git a/autogpt_platform/backend/backend/monitoring/block_error_monitor.py b/autogpt_platform/backend/backend/monitoring/block_error_monitor.py index ffd2ffc888..07565a37e8 100644 --- a/autogpt_platform/backend/backend/monitoring/block_error_monitor.py +++ b/autogpt_platform/backend/backend/monitoring/block_error_monitor.py @@ -6,7 +6,7 @@ from datetime import datetime, timedelta, timezone from pydantic import BaseModel -from backend.data.block import get_block +from backend.blocks import get_block from backend.data.execution import ExecutionStatus, NodeExecutionResult from backend.util.clients import ( get_database_manager_client, diff --git a/autogpt_platform/backend/backend/sdk/__init__.py b/autogpt_platform/backend/backend/sdk/__init__.py index b3a23dc735..dc7260d08f 100644 --- a/autogpt_platform/backend/backend/sdk/__init__.py +++ b/autogpt_platform/backend/backend/sdk/__init__.py @@ -17,7 +17,7 @@ This module provides: from pydantic import BaseModel, Field, SecretStr # === CORE BLOCK SYSTEM === -from backend.data.block import ( +from backend.blocks._base import ( Block, BlockCategory, BlockManualWebhookConfig, @@ -65,7 +65,7 @@ except ImportError: # Cost System try: - from backend.data.block import BlockCost, BlockCostType + from backend.blocks._base import BlockCost, BlockCostType except ImportError: from backend.data.block_cost_config import BlockCost, BlockCostType diff --git a/autogpt_platform/backend/backend/sdk/builder.py b/autogpt_platform/backend/backend/sdk/builder.py index 09949b256f..28dd4023f0 100644 --- 
a/autogpt_platform/backend/backend/sdk/builder.py +++ b/autogpt_platform/backend/backend/sdk/builder.py @@ -8,7 +8,7 @@ from typing import Callable, List, Optional, Type from pydantic import SecretStr -from backend.data.block import BlockCost, BlockCostType +from backend.blocks._base import BlockCost, BlockCostType from backend.data.model import ( APIKeyCredentials, Credentials, diff --git a/autogpt_platform/backend/backend/sdk/cost_integration.py b/autogpt_platform/backend/backend/sdk/cost_integration.py index 04c027ffa3..2eec1aece0 100644 --- a/autogpt_platform/backend/backend/sdk/cost_integration.py +++ b/autogpt_platform/backend/backend/sdk/cost_integration.py @@ -8,7 +8,7 @@ BLOCK_COSTS configuration used by the execution system. import logging from typing import List, Type -from backend.data.block import Block, BlockCost +from backend.blocks._base import Block, BlockCost from backend.data.block_cost_config import BLOCK_COSTS from backend.sdk.registry import AutoRegistry diff --git a/autogpt_platform/backend/backend/sdk/provider.py b/autogpt_platform/backend/backend/sdk/provider.py index 98afbf05d5..2933121703 100644 --- a/autogpt_platform/backend/backend/sdk/provider.py +++ b/autogpt_platform/backend/backend/sdk/provider.py @@ -7,7 +7,7 @@ from typing import Any, Callable, List, Optional, Set, Type from pydantic import BaseModel, SecretStr -from backend.data.block import BlockCost +from backend.blocks._base import BlockCost from backend.data.model import ( APIKeyCredentials, Credentials, diff --git a/autogpt_platform/backend/backend/util/prompt.py b/autogpt_platform/backend/backend/util/prompt.py index 5f904bbc8a..3ec25dd61b 100644 --- a/autogpt_platform/backend/backend/util/prompt.py +++ b/autogpt_platform/backend/backend/util/prompt.py @@ -364,6 +364,44 @@ def _remove_orphan_tool_responses( return result +def validate_and_remove_orphan_tool_responses( + messages: list[dict], + log_warning: bool = True, +) -> list[dict]: + """ + Validate tool_call/tool_response pairs and remove orphaned responses. + + Scans messages in order, tracking all tool_call IDs. Any tool response + referencing an ID not seen in a preceding message is considered orphaned + and removed. This prevents API errors like Anthropic's "unexpected tool_use_id". + + Args: + messages: List of messages to validate (OpenAI or Anthropic format) + log_warning: Whether to log a warning when orphans are found + + Returns: + A new list with orphaned tool responses removed + """ + available_ids: set[str] = set() + orphan_ids: set[str] = set() + + for msg in messages: + available_ids |= _extract_tool_call_ids_from_message(msg) + for resp_id in _extract_tool_response_ids_from_message(msg): + if resp_id not in available_ids: + orphan_ids.add(resp_id) + + if not orphan_ids: + return messages + + if log_warning: + logger.warning( + f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}" + ) + + return _remove_orphan_tool_responses(messages, orphan_ids) + + def _ensure_tool_pairs_intact( recent_messages: list[dict], all_messages: list[dict], @@ -723,6 +761,13 @@ async def compress_context( # Filter out any None values that may have been introduced final_msgs: list[dict] = [m for m in msgs if m is not None] + + # ---- STEP 6: Final tool-pair validation --------------------------------- + # After all compression steps, verify that every tool response has a + # matching tool_call in a preceding assistant message. Remove orphans + # to prevent API errors (e.g., Anthropic's "unexpected tool_use_id"). 
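A minimal sketch of what the new validator does, assuming OpenAI-style message dicts (the exact shapes accepted by the private _extract_* helpers are not shown in this diff):

from backend.util.prompt import validate_and_remove_orphan_tool_responses

# Toy input: one assistant message that issues tool call "call_a", a matching
# tool response, and a stale response for "call_b" that no message issued.
msgs = [
    {
        "role": "assistant",
        "tool_calls": [
            {"id": "call_a", "type": "function",
             "function": {"name": "lookup", "arguments": "{}"}},
        ],
    },
    {"role": "tool", "tool_call_id": "call_a", "content": "ok"},     # kept
    {"role": "tool", "tool_call_id": "call_b", "content": "stale"},  # orphan
]

cleaned = validate_and_remove_orphan_tool_responses(msgs)
# Expected: the "call_b" response is removed, everything else is untouched.
assert all(m.get("tool_call_id") != "call_b" for m in cleaned)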
+ final_msgs = validate_and_remove_orphan_tool_responses(final_msgs) + final_count = sum(_msg_tokens(m, enc) for m in final_msgs) error = None if final_count + reserve > target_tokens: diff --git a/autogpt_platform/backend/backend/util/sandbox_files.py b/autogpt_platform/backend/backend/util/sandbox_files.py new file mode 100644 index 0000000000..9db53ded14 --- /dev/null +++ b/autogpt_platform/backend/backend/util/sandbox_files.py @@ -0,0 +1,288 @@ +""" +Shared utilities for extracting and storing files from E2B sandboxes. + +This module provides common file extraction and workspace storage functionality +for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.). +""" + +import base64 +import logging +import mimetypes +import shlex +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from pydantic import BaseModel + +from backend.util.file import store_media_file +from backend.util.type import MediaFileType + +if TYPE_CHECKING: + from e2b import AsyncSandbox as BaseAsyncSandbox + + from backend.executor.utils import ExecutionContext + +logger = logging.getLogger(__name__) + +# Text file extensions that can be safely read and stored as text +TEXT_EXTENSIONS = { + ".txt", + ".md", + ".html", + ".htm", + ".css", + ".js", + ".ts", + ".jsx", + ".tsx", + ".json", + ".xml", + ".yaml", + ".yml", + ".toml", + ".ini", + ".cfg", + ".conf", + ".py", + ".rb", + ".php", + ".java", + ".c", + ".cpp", + ".h", + ".hpp", + ".cs", + ".go", + ".rs", + ".swift", + ".kt", + ".scala", + ".sh", + ".bash", + ".zsh", + ".sql", + ".graphql", + ".env", + ".gitignore", + ".dockerfile", + "Dockerfile", + ".vue", + ".svelte", + ".astro", + ".mdx", + ".rst", + ".tex", + ".csv", + ".log", +} + + +class SandboxFileOutput(BaseModel): + """A file extracted from a sandbox and optionally stored in workspace.""" + + path: str + """Full path in the sandbox.""" + + relative_path: str + """Path relative to the working directory.""" + + name: str + """Filename only.""" + + content: str + """File content as text (for backward compatibility).""" + + workspace_ref: str | None = None + """Workspace reference (workspace://{id}#mime) if stored, None otherwise.""" + + +@dataclass +class ExtractedFile: + """Internal representation of an extracted file before storage.""" + + path: str + relative_path: str + name: str + content: bytes + is_text: bool + + +async def extract_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[ExtractedFile]: + """ + Extract files from an E2B sandbox. + + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files (default). If False, extract all files. 
+ + Returns: + List of ExtractedFile objects with path, content, and metadata + """ + files: list[ExtractedFile] = [] + + try: + # Build find command + safe_working_dir = shlex.quote(working_directory) + timestamp_filter = "" + if since_timestamp: + timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " + + find_result = await sandbox.commands.run( + f"find {safe_working_dir} -type f " + f"{timestamp_filter}" + f"-not -path '*/node_modules/*' " + f"-not -path '*/.git/*' " + f"2>/dev/null" + ) + + if not find_result.stdout: + return files + + for file_path in find_result.stdout.strip().split("\n"): + if not file_path: + continue + + # Check if it's a text file + is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS) + + # Skip non-text files if text_only mode + if text_only and not is_text: + continue + + try: + # Read file content as bytes + content = await sandbox.files.read(file_path, format="bytes") + if isinstance(content, str): + content = content.encode("utf-8") + elif isinstance(content, bytearray): + content = bytes(content) + + # Extract filename from path + file_name = file_path.split("/")[-1] + + # Calculate relative path + relative_path = file_path + if file_path.startswith(working_directory): + relative_path = file_path[len(working_directory) :] + if relative_path.startswith("/"): + relative_path = relative_path[1:] + + files.append( + ExtractedFile( + path=file_path, + relative_path=relative_path, + name=file_name, + content=content, + is_text=is_text, + ) + ) + except Exception as e: + logger.debug(f"Failed to read file {file_path}: {e}") + continue + + except Exception as e: + logger.warning(f"File extraction failed: {e}") + + return files + + +async def store_sandbox_files( + extracted_files: list[ExtractedFile], + execution_context: "ExecutionContext", +) -> list[SandboxFileOutput]: + """ + Store extracted sandbox files to workspace and return output objects. + + Args: + extracted_files: List of files extracted from sandbox + execution_context: Execution context for workspace storage + + Returns: + List of SandboxFileOutput objects with workspace refs + """ + outputs: list[SandboxFileOutput] = [] + + for file in extracted_files: + # Decode content for text files (for backward compat content field) + if file.is_text: + try: + content_str = file.content.decode("utf-8", errors="replace") + except Exception: + content_str = "" + else: + content_str = f"[Binary file: {len(file.content)} bytes]" + + # Build data URI (needed for storage and as binary fallback) + mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream" + data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}" + + # Try to store in workspace + workspace_ref: str | None = None + try: + result = await store_media_file( + file=MediaFileType(data_uri), + execution_context=execution_context, + return_format="for_block_output", + ) + if result.startswith("workspace://"): + workspace_ref = result + elif not file.is_text: + # Non-workspace context (graph execution): store_media_file + # returned a data URI — use it as content so binary data isn't lost. 
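For reference, with hypothetical inputs the find command assembled above expands as follows (shlex.quote leaves these particular values unquoted because they contain no shell metacharacters):

import shlex

# Hypothetical values, for illustration only.
working_directory = "/home/user/project"
since_timestamp = "2025-01-01T00:00:00Z"

cmd = (
    f"find {shlex.quote(working_directory)} -type f "
    f"-newermt {shlex.quote(since_timestamp)} "
    f"-not -path '*/node_modules/*' "
    f"-not -path '*/.git/*' "
    f"2>/dev/null"
)
# -> find /home/user/project -type f -newermt 2025-01-01T00:00:00Z
#    -not -path '*/node_modules/*' -not -path '*/.git/*' 2>/dev/null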
+ content_str = result + except Exception as e: + logger.warning(f"Failed to store file {file.name} to workspace: {e}") + # For binary files, fall back to data URI to prevent data loss + if not file.is_text: + content_str = data_uri + + outputs.append( + SandboxFileOutput( + path=file.path, + relative_path=file.relative_path, + name=file.name, + content=content_str, + workspace_ref=workspace_ref, + ) + ) + + return outputs + + +async def extract_and_store_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + execution_context: "ExecutionContext", + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[SandboxFileOutput]: + """ + Extract files from sandbox and store them in workspace. + + This is the main entry point combining extraction and storage. + + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + execution_context: Execution context for workspace storage + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files + + Returns: + List of SandboxFileOutput objects with content and workspace refs + """ + extracted = await extract_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + since_timestamp=since_timestamp, + text_only=text_only, + ) + + return await store_sandbox_files(extracted, execution_context) diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 50b7428160..48dadb88f1 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -368,6 +368,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default=600, description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)", ) + agentgenerator_use_dummy: bool = Field( + default=False, + description="Use dummy agent generator responses for testing (bypasses external service)", + ) enable_example_blocks: bool = Field( default=False, diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 23d7c24147..279b3142a4 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -8,8 +8,9 @@ from typing import Sequence, cast from autogpt_libs.auth import get_user_id from backend.api.rest_api import AgentServer +from backend.blocks._base import Block, BlockSchema from backend.data import db -from backend.data.block import Block, BlockSchema, initialize_blocks +from backend.data.block import initialize_blocks from backend.data.execution import ( ExecutionContext, ExecutionStatus, diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 425b8d555a..53b5030da6 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5" [[package]] name = "aiofiles" -version = "24.1.0" +version = "25.1.0" description = "File support for asyncio." 
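An end-to-end usage sketch for the entry point defined above, under assumed inputs: the sandbox, execution context, and timestamp names are hypothetical and would come from the calling block.

from backend.util.sandbox_files import extract_and_store_sandbox_files


async def collect_run_outputs(sandbox, execution_context, run_started_at):
    # sandbox: e2b AsyncSandbox; execution_context: ExecutionContext;
    # run_started_at: hypothetical ISO timestamp of when the run began.
    outputs = await extract_and_store_sandbox_files(
        sandbox=sandbox,
        working_directory="/home/user",  # hypothetical
        execution_context=execution_context,
        since_timestamp=run_started_at,
        text_only=True,
    )
    for out in outputs:
        # workspace_ref is set when stored as workspace://{id}#mime;
        # otherwise the text (or data-URI fallback) content is inline.
        print(out.relative_path, out.workspace_ref or "(inline content)")
    return outputs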
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"}, - {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, + {file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"}, + {file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"}, ] [[package]] @@ -1382,14 +1382,14 @@ tzdata = "*" [[package]] name = "fastapi" -version = "0.128.5" +version = "0.128.6" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fastapi-0.128.5-py3-none-any.whl", hash = "sha256:bceec0de8aa6564599c5bcc0593b0d287703562c848271fca8546fd2c87bf4dd"}, - {file = "fastapi-0.128.5.tar.gz", hash = "sha256:a7173579fc162d6471e3c6fbd9a4b7610c7a3b367bcacf6c4f90d5d022cab711"}, + {file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"}, + {file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"}, ] [package.dependencies] @@ -3078,14 +3078,14 @@ type = ["pygobject-stubs", "pytest-mypy (>=1.0.1)", "shtab", "types-pywin32"] [[package]] name = "langfuse" -version = "3.13.0" +version = "3.14.1" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.10" groups = ["main"] files = [ - {file = "langfuse-3.13.0-py3-none-any.whl", hash = "sha256:71912ddac1cc831a65df895eae538a556f564c094ae51473e747426e9ded1a9d"}, - {file = "langfuse-3.13.0.tar.gz", hash = "sha256:dacea8111ca4442e97dbfec4f8d676cf9709b35357a26e468f8887b95de0012f"}, + {file = "langfuse-3.14.1-py3-none-any.whl", hash = "sha256:17bed605dbfc9947cbd1738a715f6d27c1b80b6da9f2946586171958fa5820d0"}, + {file = "langfuse-3.14.1.tar.gz", hash = "sha256:404a6104cd29353d7829aa417ec46565b04917e5599afdda96c5b0865f4bc991"}, ] [package.dependencies] @@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "14686ee0e2dc446a75d0db145b08dc410dc31c357e25085bb0f9b0174711c4b1" +content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 677b73b468..317663ee98 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -21,7 +21,7 @@ cryptography = "^46.0" discord-py = "^2.5.2" e2b-code-interpreter = "^1.5.2" elevenlabs = "^1.50.0" -fastapi = "^0.128.5" +fastapi = "^0.128.6" feedparser = "^6.0.11" flake8 = "^7.3.0" google-api-python-client = "^2.177.0" @@ -34,7 +34,7 @@ html2text = "^2024.2.26" jinja2 = "^3.1.6" jsonref = "^1.1.0" jsonschema = "^4.25.0" -langfuse = "^3.11.0" +langfuse = "^3.14.1" launchdarkly-server-sdk = "^9.14.1" mem0ai = "^0.1.115" moviepy = "^2.1.2" @@ -76,7 +76,7 @@ yt-dlp = "2025.12.08" zerobouncesdk = "^1.1.2" # NOTE: please insert new dependencies in their alphabetical location pytest-snapshot = "^0.9.0" -aiofiles = "^24.1.0" +aiofiles = "^25.1.0" tiktoken = "^0.12.0" aioclamd = "^1.0.0" setuptools = "^80.9.0" diff --git 
a/autogpt_platform/backend/scripts/generate_block_docs.py b/autogpt_platform/backend/scripts/generate_block_docs.py index bb60eddb5d..25ad0a3be7 100644 --- a/autogpt_platform/backend/scripts/generate_block_docs.py +++ b/autogpt_platform/backend/scripts/generate_block_docs.py @@ -24,7 +24,10 @@ import sys from collections import defaultdict from dataclasses import dataclass, field from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING, Any, Type + +if TYPE_CHECKING: + from backend.blocks._base import AnyBlockSchema # Add backend to path for imports backend_dir = Path(__file__).parent.parent @@ -242,9 +245,9 @@ def file_path_to_title(file_path: str) -> str: return apply_fixes(name.replace("_", " ").title()) -def extract_block_doc(block_cls: type) -> BlockDoc: +def extract_block_doc(block_cls: Type["AnyBlockSchema"]) -> BlockDoc: """Extract documentation data from a block class.""" - block = block_cls.create() + block = block_cls() # Get source file try: @@ -520,7 +523,7 @@ def generate_overview_table(blocks: list[BlockDoc], block_dir_prefix: str = "") lines.append("") # Group blocks by category - by_category = defaultdict(list) + by_category = defaultdict[str, list[BlockDoc]](list) for block in blocks: primary_cat = block.categories[0] if block.categories else "BASIC" by_category[primary_cat].append(block) diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index cc37c428c0..93c9b9dcc0 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -25,6 +25,7 @@ class TestServiceConfiguration: """Test that external service is not configured when host is empty.""" mock_settings = MagicMock() mock_settings.config.agentgenerator_host = "" + mock_settings.config.agentgenerator_use_dummy = False with patch.object(service, "_get_settings", return_value=mock_settings): assert service.is_external_service_configured() is False diff --git a/autogpt_platform/backend/test/load_store_agents.py b/autogpt_platform/backend/test/load_store_agents.py index b9d8e0478e..dfc5beb453 100644 --- a/autogpt_platform/backend/test/load_store_agents.py +++ b/autogpt_platform/backend/test/load_store_agents.py @@ -49,7 +49,7 @@ async def initialize_blocks(db: Prisma) -> set[str]: Returns a set of block IDs that exist in the database. 
""" - from backend.data.block import get_blocks + from backend.blocks import get_blocks print(" Initializing agent blocks...") blocks = get_blocks() diff --git a/autogpt_platform/backend/test/sdk/test_sdk_registry.py b/autogpt_platform/backend/test/sdk/test_sdk_registry.py index f82abd57cb..ab384ca955 100644 --- a/autogpt_platform/backend/test/sdk/test_sdk_registry.py +++ b/autogpt_platform/backend/test/sdk/test_sdk_registry.py @@ -377,7 +377,7 @@ class TestProviderBuilder: def test_provider_builder_with_base_cost(self): """Test building a provider with base costs.""" - from backend.data.block import BlockCostType + from backend.blocks._base import BlockCostType provider = ( ProviderBuilder("cost_test") @@ -418,7 +418,7 @@ class TestProviderBuilder: def test_provider_builder_complete_example(self): """Test building a complete provider with all features.""" - from backend.data.block import BlockCostType + from backend.blocks._base import BlockCostType class TestOAuth(BaseOAuthHandler): PROVIDER_NAME = ProviderName.GITHUB diff --git a/autogpt_platform/frontend/Dockerfile b/autogpt_platform/frontend/Dockerfile index 2b120af5e1..ab2708f1f9 100644 --- a/autogpt_platform/frontend/Dockerfile +++ b/autogpt_platform/frontend/Dockerfile @@ -25,8 +25,12 @@ RUN if [ -f .env.production ]; then \ cp .env.default .env; \ fi RUN pnpm run generate:api +# Disable source-map generation in Docker builds to halve webpack memory usage. +# Source maps are only useful when SENTRY_AUTH_TOKEN is set (Vercel deploys); +# the Docker image never uploads them, so generating them just wastes RAM. +ENV NEXT_PUBLIC_SOURCEMAPS="false" # In CI, we want NEXT_PUBLIC_PW_TEST=true during build so Next.js inlines it -RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=4096" pnpm build; else NODE_OPTIONS="--max-old-space-size=4096" pnpm build; fi +RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=8192" pnpm build; else NODE_OPTIONS="--max-old-space-size=8192" pnpm build; fi # Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile FROM node:21-alpine AS prod diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs index bb4410039d..9bb5983801 100644 --- a/autogpt_platform/frontend/next.config.mjs +++ b/autogpt_platform/frontend/next.config.mjs @@ -1,8 +1,12 @@ import { withSentryConfig } from "@sentry/nextjs"; +// Allow Docker builds to skip source-map generation (halves memory usage). +// Defaults to true so Vercel/local builds are unaffected. +const enableSourceMaps = process.env.NEXT_PUBLIC_SOURCEMAPS !== "false"; + /** @type {import('next').NextConfig} */ const nextConfig = { - productionBrowserSourceMaps: true, + productionBrowserSourceMaps: enableSourceMaps, // Externalize OpenTelemetry packages to fix Turbopack HMR issues serverExternalPackages: [ "@opentelemetry/instrumentation", @@ -14,9 +18,37 @@ const nextConfig = { serverActions: { bodySizeLimit: "256mb", }, - // Increase body size limit for API routes (file uploads) - 256MB to match backend limit - proxyClientMaxBodySize: "256mb", middlewareClientMaxBodySize: "256mb", + // Limit parallel webpack workers to reduce peak memory during builds. + cpus: 2, + }, + // Work around cssnano "Invalid array length" bug in Next.js's bundled + // cssnano-simple comment parser when processing very large CSS chunks. 
+ // CSS is still bundled correctly; gzip handles most of the size savings anyway. + webpack: (config, { dev }) => { + if (!dev) { + // Next.js adds CssMinimizerPlugin internally (after user config), so we + // can't filter it from config.plugins. Instead, intercept the webpack + // compilation hooks and replace the buggy plugin's tap with a no-op. + config.plugins.push({ + apply(compiler) { + compiler.hooks.compilation.tap( + "DisableCssMinimizer", + (compilation) => { + compilation.hooks.processAssets.intercept({ + register: (tap) => { + if (tap.name === "CssMinimizerPlugin") { + return { ...tap, fn: async () => {} }; + } + return tap; + }, + }); + }, + ); + }, + }); + } + return config; }, images: { domains: [ @@ -54,9 +86,16 @@ const nextConfig = { transpilePackages: ["geist"], }; -const isDevelopmentBuild = process.env.NODE_ENV !== "production"; +// Only run the Sentry webpack plugin when we can actually upload source maps +// (i.e. on Vercel with SENTRY_AUTH_TOKEN set). The Sentry *runtime* SDK +// (imported in app code) still captures errors without the plugin. +// Skipping the plugin saves ~1 GB of peak memory during `next build`. +const skipSentryPlugin = + process.env.NODE_ENV !== "production" || + !enableSourceMaps || + !process.env.SENTRY_AUTH_TOKEN; -export default isDevelopmentBuild +export default skipSentryPlugin ? nextConfig : withSentryConfig(nextConfig, { // For all available options, see: @@ -96,7 +135,7 @@ export default isDevelopmentBuild // This helps Sentry with sourcemaps... https://docs.sentry.io/platforms/javascript/guides/nextjs/sourcemaps/ sourcemaps: { - disable: false, + disable: !enableSourceMaps, assets: [".next/**/*.js", ".next/**/*.js.map"], ignore: ["**/node_modules/**"], deleteSourcemapsAfterUpload: false, // Source is public anyway :) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index e8c9871a72..5988e59c90 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -7,7 +7,7 @@ }, "scripts": { "dev": "pnpm run generate:api:force && next dev --turbo", - "build": "next build", + "build": "cross-env NODE_OPTIONS=--max-old-space-size=16384 next build", "start": "next start", "start:standalone": "cd .next/standalone && node server.js", "lint": "next lint && prettier --check .", @@ -30,6 +30,7 @@ "defaults" ], "dependencies": { + "@ai-sdk/react": "3.0.61", "@faker-js/faker": "10.0.0", "@hookform/resolvers": "5.2.2", "@next/third-parties": "15.4.6", @@ -60,6 +61,10 @@ "@rjsf/utils": "6.1.2", "@rjsf/validator-ajv8": "6.1.2", "@sentry/nextjs": "10.27.0", + "@streamdown/cjk": "1.0.1", + "@streamdown/code": "1.0.1", + "@streamdown/math": "1.0.1", + "@streamdown/mermaid": "1.0.1", "@supabase/ssr": "0.7.0", "@supabase/supabase-js": "2.78.0", "@tanstack/react-query": "5.90.6", @@ -68,6 +73,7 @@ "@vercel/analytics": "1.5.0", "@vercel/speed-insights": "1.2.0", "@xyflow/react": "12.9.2", + "ai": "6.0.59", "boring-avatars": "1.11.2", "class-variance-authority": "0.7.1", "clsx": "2.1.1", @@ -87,7 +93,6 @@ "launchdarkly-react-client-sdk": "3.9.0", "lodash": "4.17.21", "lucide-react": "0.552.0", - "moment": "2.30.1", "next": "15.4.10", "next-themes": "0.4.6", "nuqs": "2.7.2", @@ -112,9 +117,11 @@ "remark-math": "6.0.0", "shepherd.js": "14.5.1", "sonner": "2.0.7", + "streamdown": "2.1.0", "tailwind-merge": "2.6.0", "tailwind-scrollbar": "3.1.0", "tailwindcss-animate": "1.0.7", + "use-stick-to-bottom": "1.1.2", "uuid": "11.1.0", "vaul": "1.1.2", "zod": "3.25.76", @@ -172,7 +179,8 @@ }, 
"pnpm": { "overrides": { - "@opentelemetry/instrumentation": "0.209.0" + "@opentelemetry/instrumentation": "0.209.0", + "lodash-es": "4.17.23" } }, "packageManager": "pnpm@10.20.0+sha512.cf9998222162dd85864d0a8102e7892e7ba4ceadebbf5a31f9c2fce48dfce317a9c53b9f6464d1ef9042cba2e02ae02a9f7c143a2b438cd93c91840f0192b9dd" diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 377a298564..468e2f312d 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -6,11 +6,15 @@ settings: overrides: '@opentelemetry/instrumentation': 0.209.0 + lodash-es: 4.17.23 importers: .: dependencies: + '@ai-sdk/react': + specifier: 3.0.61 + version: 3.0.61(react@18.3.1)(zod@3.25.76) '@faker-js/faker': specifier: 10.0.0 version: 10.0.0 @@ -101,6 +105,18 @@ importers: '@sentry/nextjs': specifier: 10.27.0 version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.104.1(esbuild@0.25.12)) + '@streamdown/cjk': + specifier: 1.0.1 + version: 1.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(react@18.3.1)(unified@11.0.5) + '@streamdown/code': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) + '@streamdown/math': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) + '@streamdown/mermaid': + specifier: 1.0.1 + version: 1.0.1(react@18.3.1) '@supabase/ssr': specifier: 0.7.0 version: 0.7.0(@supabase/supabase-js@2.78.0) @@ -125,6 +141,9 @@ importers: '@xyflow/react': specifier: 12.9.2 version: 12.9.2(@types/react@18.3.17)(immer@11.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + ai: + specifier: 6.0.59 + version: 6.0.59(zod@3.25.76) boring-avatars: specifier: 1.11.2 version: 1.11.2 @@ -182,9 +201,6 @@ importers: lucide-react: specifier: 0.552.0 version: 0.552.0(react@18.3.1) - moment: - specifier: 2.30.1 - version: 2.30.1 next: specifier: 15.4.10 version: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -257,6 +273,9 @@ importers: sonner: specifier: 2.0.7 version: 2.0.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + streamdown: + specifier: 2.1.0 + version: 2.1.0(react@18.3.1) tailwind-merge: specifier: 2.6.0 version: 2.6.0 @@ -266,6 +285,9 @@ importers: tailwindcss-animate: specifier: 1.0.7 version: 1.0.7(tailwindcss@3.4.17) + use-stick-to-bottom: + specifier: 1.1.2 + version: 1.1.2(react@18.3.1) uuid: specifier: 11.1.0 version: 11.1.0 @@ -417,10 +439,35 @@ packages: '@adobe/css-tools@4.4.4': resolution: {integrity: sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==} + '@ai-sdk/gateway@3.0.27': + resolution: {integrity: sha512-Pr+ApS9k6/jcR3kNltJNxo60OdYvnVU4DeRhzVtxUAYTXCHx4qO+qTMG9nNRn+El1acJnNRA//Su47srjXkT/w==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider-utils@4.0.10': + resolution: {integrity: sha512-VeDAiCH+ZK8Xs4hb9Cw7pHlujWNL52RKe8TExOkrw6Ir1AmfajBZTb9XUdKOZO08RwQElIKA8+Ltm+Gqfo8djQ==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider@3.0.5': + resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} + engines: {node: '>=18'} + + 
'@ai-sdk/react@3.0.61': + resolution: {integrity: sha512-vCjZBnY2+TawFBXamSKt6elAt9n1MXMfcjSd9DSgT9peCJN27qNGVSXgaGNh/B3cUgeOktFfhB2GVmIqOjvmLQ==} + engines: {node: '>=18'} + peerDependencies: + react: ^18 || ~19.0.1 || ~19.1.2 || ^19.2.1 + '@alloc/quick-lru@5.2.0': resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} engines: {node: '>=10'} + '@antfu/install-pkg@1.1.0': + resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + '@apidevtools/json-schema-ref-parser@14.0.1': resolution: {integrity: sha512-Oc96zvmxx1fqoSEdUmfmvvb59/KDOnUoJ7s2t7bISyAn0XEz57LCCw8k2Y4Pf3mwKaZLMciESALORLgfe2frCw==} engines: {node: '>= 16'} @@ -1032,6 +1079,24 @@ packages: resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} engines: {node: '>=6.9.0'} + '@braintree/sanitize-url@7.1.2': + resolution: {integrity: sha512-jigsZK+sMF/cuiB7sERuo9V7N9jx+dhmHHnQyDSVdpZwVutaBu7WvNYqMDLSgFgfB30n452TP3vjDAvFC973mA==} + + '@chevrotain/cst-dts-gen@11.0.3': + resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + + '@chevrotain/gast@11.0.3': + resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} + + '@chevrotain/regexp-to-ast@11.0.3': + resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + + '@chevrotain/types@11.0.3': + resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + + '@chevrotain/utils@11.0.3': + resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} + '@chromatic-com/storybook@4.1.2': resolution: {integrity: sha512-QAWGtHwib0qsP5CcO64aJCF75zpFgpKK3jNpxILzQiPK3sVo4EmnVGJVdwcZWpWrGdH8E4YkncGoitw4EXzKMg==} engines: {node: '>=20.0.0', yarn: '>=1.22.18'} @@ -1486,6 +1551,12 @@ packages: resolution: {integrity: sha512-oT8USsTulFAA8FiBN0lA2rJqQI2lIt+HP2pdakGQXo3EviL2vqJTgpSCRwjl6mLJL158f1BVcdQUOEFGxomK3w==} engines: {node: '>=16.0.0'} + '@iconify/types@2.0.0': + resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + + '@iconify/utils@3.1.0': + resolution: {integrity: sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==} + '@img/colour@1.0.0': resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==} engines: {node: '>=18'} @@ -1705,6 +1776,9 @@ packages: '@types/react': '>=16' react: '>=16' + '@mermaid-js/parser@0.6.3': + resolution: {integrity: sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==} + '@mswjs/interceptors@0.40.0': resolution: {integrity: sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ==} engines: {node: '>=18'} @@ -3019,6 +3093,12 @@ packages: peerDependencies: webpack: '>=4.40.0' + '@shikijs/core@3.21.0': + resolution: {integrity: sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA==} + + '@shikijs/engine-javascript@3.21.0': + resolution: {integrity: sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ==} + 
'@shikijs/engine-oniguruma@3.21.0': resolution: {integrity: sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ==} @@ -3222,6 +3302,26 @@ packages: typescript: optional: true + '@streamdown/cjk@1.0.1': + resolution: {integrity: sha512-ElDoEfad2u8iFzmgmEEab15N4mt19r47xeUIPJtHaHVyEF5baojamGo+xw3MywMj2qUsAY3LnTnKbrUtL5tGkg==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + '@streamdown/code@1.0.1': + resolution: {integrity: sha512-U9LITfQ28tZYAoY922jdtw1ryg4kgRBdURopqK9hph7G2fBUwPeHthjH7SvaV0fvFv7EqjqCzARJuWUljLe9Ag==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + '@streamdown/math@1.0.1': + resolution: {integrity: sha512-R9WdHbpERiRU7WeO7oT1aIbnLJ/jraDr89F7X9x2OM//Y8G8UMATRnLD/RUwg4VLr8Nu7QSIJ0Pa8lXd2meM4Q==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + '@streamdown/mermaid@1.0.1': + resolution: {integrity: sha512-LVGbxYd6t1DKMCMqm3cpbfsdD4/EKpQelanOlJaBMKv83kbrl8syZJhVBsd/jka+CawhpeR9xsGQJzSJEpjoVw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + '@supabase/auth-js@2.78.0': resolution: {integrity: sha512-cXDtu1U0LeZj/xfnFoV7yCze37TcbNo8FCxy1FpqhMbB9u9QxxDSW6pA5gm/07Ei7m260Lof4CZx67Cu6DPeig==} @@ -3344,21 +3444,69 @@ packages: '@types/d3-array@3.2.2': resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} + '@types/d3-axis@3.0.6': + resolution: {integrity: sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==} + + '@types/d3-brush@3.0.6': + resolution: {integrity: sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==} + + '@types/d3-chord@3.0.6': + resolution: {integrity: sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==} + '@types/d3-color@3.1.3': resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} + '@types/d3-contour@3.0.6': + resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} + + '@types/d3-delaunay@6.0.4': + resolution: {integrity: sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==} + + '@types/d3-dispatch@3.0.7': + resolution: {integrity: sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==} + '@types/d3-drag@3.0.7': resolution: {integrity: sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==} + '@types/d3-dsv@3.0.7': + resolution: {integrity: sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==} + '@types/d3-ease@3.0.2': resolution: {integrity: sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} + '@types/d3-fetch@3.0.7': + resolution: {integrity: sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==} + + '@types/d3-force@3.0.10': + resolution: {integrity: sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==} + + '@types/d3-format@3.0.4': + resolution: {integrity: sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==} + + '@types/d3-geo@3.1.0': + resolution: {integrity: sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} + + '@types/d3-hierarchy@3.1.7': + resolution: {integrity: 
sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==} + '@types/d3-interpolate@3.0.4': resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} '@types/d3-path@3.1.1': resolution: {integrity: sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} + '@types/d3-polygon@3.0.2': + resolution: {integrity: sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==} + + '@types/d3-quadtree@3.0.6': + resolution: {integrity: sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==} + + '@types/d3-random@3.0.3': + resolution: {integrity: sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==} + + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + '@types/d3-scale@4.0.9': resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} @@ -3368,6 +3516,9 @@ packages: '@types/d3-shape@3.1.7': resolution: {integrity: sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==} + '@types/d3-time-format@4.0.3': + resolution: {integrity: sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==} + '@types/d3-time@3.0.4': resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} @@ -3380,6 +3531,9 @@ packages: '@types/d3-zoom@3.0.8': resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + '@types/d3@7.4.3': + resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} + '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} @@ -3404,6 +3558,9 @@ packages: '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/geojson@7946.0.16': + resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} + '@types/hast@3.0.4': resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} @@ -3692,6 +3849,10 @@ packages: vue-router: optional: true + '@vercel/oidc@3.1.0': + resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} + engines: {node: '>= 20'} + '@vercel/speed-insights@1.2.0': resolution: {integrity: sha512-y9GVzrUJ2xmgtQlzFP2KhVRoCglwfRQgjyfY607aU0hh0Un6d0OUyrJkjuAlsV18qR4zfoFPs/BiIj9YDS6Wzw==} peerDependencies: @@ -3873,6 +4034,12 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + ai@6.0.59: + resolution: {integrity: sha512-9SfCvcr4kVk4t8ZzIuyHpuL1hFYKsYMQfBSbBq3dipXPa+MphARvI8wHEjNaRqYl3JOsJbWxEBIMqHL0L92mUA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -4227,6 
+4394,14 @@ packages: resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} engines: {node: '>= 16'} + chevrotain-allstar@0.3.1: + resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} + peerDependencies: + chevrotain: ^11.0.0 + + chevrotain@11.0.3: + resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} + chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} @@ -4325,6 +4500,10 @@ packages: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + commander@8.3.0: resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} engines: {node: '>= 12'} @@ -4346,6 +4525,9 @@ packages: engines: {node: '>=18'} hasBin: true + confbox@0.1.8: + resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + console-browserify@1.2.0: resolution: {integrity: sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==} @@ -4374,6 +4556,12 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cose-base@1.0.3: + resolution: {integrity: sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + + cose-base@2.2.0: + resolution: {integrity: sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==} + cosmiconfig@7.1.0: resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==} engines: {node: '>=10'} @@ -4447,14 +4635,51 @@ packages: csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + cytoscape-cose-bilkent@4.1.0: + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape-fcose@2.2.0: + resolution: {integrity: sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + + d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + d3-array@3.2.4: resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} engines: {node: '>=12'} + d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + + d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + + 
d3-chord@3.0.1: + resolution: {integrity: sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + d3-color@3.1.0: resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} engines: {node: '>=12'} + d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + + d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + d3-dispatch@3.0.1: resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} engines: {node: '>=12'} @@ -4463,22 +4688,65 @@ packages: resolution: {integrity: sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} engines: {node: '>=12'} + d3-dsv@3.0.1: + resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + d3-ease@3.0.1: resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} engines: {node: '>=12'} + d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + + d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + d3-format@3.1.0: resolution: {integrity: sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==} engines: {node: '>=12'} + d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + + d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + d3-interpolate@3.0.1: resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} engines: {node: '>=12'} + d3-path@1.0.9: + resolution: {integrity: sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + d3-path@3.1.0: resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} engines: {node: '>=12'} + d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + + d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + + d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + + d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + + d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + d3-scale@4.0.2: resolution: {integrity: 
sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} engines: {node: '>=12'} @@ -4487,6 +4755,9 @@ packages: resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} engines: {node: '>=12'} + d3-shape@1.3.7: + resolution: {integrity: sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + d3-shape@3.2.0: resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} engines: {node: '>=12'} @@ -4513,6 +4784,13 @@ packages: resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} engines: {node: '>=12'} + d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + + dagre-d3-es@7.0.13: + resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + damerau-levenshtein@1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} @@ -4538,6 +4816,9 @@ packages: date-fns@4.1.0: resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==} + dayjs@1.11.19: + resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} + debug@3.2.7: resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} peerDependencies: @@ -4594,6 +4875,9 @@ packages: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} + delaunator@5.0.1: + resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} + dependency-graph@0.11.0: resolution: {integrity: sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==} engines: {node: '>= 0.6.0'} @@ -4974,6 +5258,10 @@ packages: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + evp_bytestokey@1.0.3: resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} @@ -5174,6 +5462,10 @@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} + get-east-asian-width@1.4.0: + resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} + engines: {node: '>=18'} + get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} @@ -5213,11 +5505,12 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the 
current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - deprecated: Glob versions prior to v9 are no longer supported + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me globals@13.24.0: resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -5248,6 +5541,9 @@ packages: resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + hachure-fill@0.5.2: + resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} + happy-dom@20.3.4: resolution: {integrity: sha512-rfbiwB6OKxZFIFQ7SRnCPB2WL9WhyXsFoTfecYgeCeFSOBxvkWLaXsdv5ehzJrfqwXQmDephAKWLRQoFoJwrew==} engines: {node: '>=20.0.0'} @@ -5311,9 +5607,21 @@ packages: hast-util-parse-selector@4.0.0: resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} + hast-util-raw@9.1.0: + resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + + hast-util-sanitize@5.0.2: + resolution: {integrity: sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg==} + + hast-util-to-html@9.0.5: + resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} + hast-util-to-jsx-runtime@2.3.6: resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==} + hast-util-to-parse5@8.0.1: + resolution: {integrity: sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==} + hast-util-to-string@3.0.1: resolution: {integrity: sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==} @@ -5358,6 +5666,9 @@ packages: html-url-attributes@3.0.1: resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + html-webpack-plugin@5.6.5: resolution: {integrity: sha512-4xynFbKNNk+WlzXeQQ+6YYsH2g7mpfPszQZUi3ovKlj+pDmngQ7vRXjrrmGROabmKwyQkcgcX5hqfOwHbFmK5g==} engines: {node: '>=10.13.0'} @@ -5395,6 +5706,10 @@ packages: resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} engines: {node: '>=10.17.0'} + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + icss-utils@5.1.0: resolution: {integrity: sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==} engines: {node: ^10 || ^12 || >= 14} @@ -5458,6 +5773,9 @@ packages: resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} engines: 
{node: '>= 0.4'} + internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + internmap@2.0.3: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} @@ -5698,6 +6016,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema@0.4.0: + resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} + json-stable-stringify-without-jsonify@1.0.1: resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} @@ -5740,9 +6061,20 @@ packages: resolution: {integrity: sha512-woHRUZ/iF23GBP1dkDQMh1QBad9dmr8/PAwNA54VrSOVYgI12MAcE14TqnDdQOdzyEonGzMepYnqBMYdsoAr8Q==} hasBin: true + katex@0.16.28: + resolution: {integrity: sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==} + hasBin: true + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + khroma@2.1.0: + resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + + langium@3.3.1: + resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} + engines: {node: '>=16.0.0'} + language-subtag-registry@0.3.23: resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==} @@ -5762,6 +6094,12 @@ packages: react: ^16.6.3 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0 + layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + + layout-base@2.0.1: + resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} + leven@3.1.0: resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} engines: {node: '>=6'} @@ -5804,8 +6142,8 @@ packages: resolution: {integrity: sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - lodash-es@4.17.22: - resolution: {integrity: sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==} + lodash-es@4.17.23: + resolution: {integrity: sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==} lodash.camelcase@4.3.0: resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} @@ -5912,6 +6250,16 @@ packages: react: optional: true + marked@16.4.2: + resolution: {integrity: sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==} + engines: {node: '>= 20'} + hasBin: true + + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + math-intrinsics@1.1.0: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} @@ 
-5984,9 +6332,41 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} + mermaid@11.12.2: + resolution: {integrity: sha512-n34QPDPEKmaeCG4WDMGy0OT6PSyxKCfy2pJgShP+Qow2KLrvWjclwbc3yXfSIf4BanqWEhQEpngWwNp/XhZt6w==} + micromark-core-commonmark@2.0.3: resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} + micromark-extension-cjk-friendly-gfm-strikethrough@1.2.3: + resolution: {integrity: sha512-gSPnxgHDDqXYOBvQRq6lerrq9mjDhdtKn+7XETuXjxWcL62yZEfUdA28Ml1I2vDIPfAOIKLa0h2XDSGkInGHFQ==} + engines: {node: '>=16'} + peerDependencies: + micromark: ^4.0.0 + micromark-util-types: ^2.0.0 + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-cjk-friendly-util@2.1.1: + resolution: {integrity: sha512-egs6+12JU2yutskHY55FyR48ZiEcFOJFyk9rsiyIhcJ6IvWB6ABBqVrBw8IobqJTDZ/wdSr9eoXDPb5S2nW1bg==} + engines: {node: '>=16'} + peerDependencies: + micromark-util-types: '*' + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-cjk-friendly@1.2.3: + resolution: {integrity: sha512-gRzVLUdjXBLX6zNPSnHGDoo+ZTp5zy+MZm0g3sv+3chPXY7l9gW+DnrcHcZh/jiPR6MjPKO4AEJNp4Aw6V9z5Q==} + engines: {node: '>=16'} + peerDependencies: + micromark: ^4.0.0 + micromark-util-types: ^2.0.0 + peerDependenciesMeta: + micromark-util-types: + optional: true + micromark-extension-gfm-autolink-literal@2.1.0: resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} @@ -6119,12 +6499,12 @@ packages: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} + mlly@1.8.0: + resolution: {integrity: sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + module-details-from-path@1.0.4: resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==} - moment@2.30.1: - resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} - motion-dom@12.24.8: resolution: {integrity: sha512-wX64WITk6gKOhaTqhsFqmIkayLAAx45SVFiMnJIxIrH5uqyrwrxjrfo8WX9Kh8CaUAixjeMn82iH0W0QT9wD5w==} @@ -6339,6 +6719,12 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.4: + resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} + open@8.4.2: resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} engines: {node: '>=12'} @@ -6398,6 +6784,9 @@ packages: package-json-from-dist@1.0.1: resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + package-manager-detector@1.6.0: + resolution: {integrity: sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} + pako@1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -6434,6 +6823,9 @@ packages: 
path-browserify@1.0.1: resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + path-data-parser@0.1.0: + resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} @@ -6513,6 +6905,9 @@ packages: resolution: {integrity: sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==} engines: {node: '>=14.16'} + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + playwright-core@1.56.1: resolution: {integrity: sha512-hutraynyn31F+Bifme+Ps9Vq59hKuUCz7H1kDOcBs+2oGguKkWTU50bBWrtz34OUWmIwpBTWDxaRPXrIXkgvmQ==} engines: {node: '>=18'} @@ -6523,6 +6918,12 @@ packages: engines: {node: '>=18'} hasBin: true + points-on-curve@0.2.0: + resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} + + points-on-path@0.2.1: + resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + pony-cause@1.1.1: resolution: {integrity: sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==} engines: {node: '>=12.0.0'} @@ -6962,6 +7363,15 @@ packages: regex-parser@2.3.1: resolution: {integrity: sha512-yXLRqatcCuKtVHsWrNg0JL3l1zGfdXeEvDa0bdu4tCDQw0RpMDZsqbkyRTUnKMR0tXF627V2oEWjBEaEdqTwtQ==} + regex-recursion@6.0.2: + resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} + + regex-utilities@2.3.0: + resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@6.1.0: + resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==} + regexp.prototype.flags@1.5.4: resolution: {integrity: sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==} engines: {node: '>= 0.4'} @@ -6980,12 +7390,21 @@ packages: rehype-autolink-headings@7.1.0: resolution: {integrity: sha512-rItO/pSdvnvsP4QRB1pmPiNHUskikqtPojZKJPPPAVx9Hj8i8TwMBhofrrAYRhYOOBZH9tgmG5lPqDLuIWPWmw==} + rehype-harden@1.1.7: + resolution: {integrity: sha512-j5DY0YSK2YavvNGV+qBHma15J9m0WZmRe8posT5AtKDS6TNWtMVTo6RiqF8SidfcASYz8f3k2J/1RWmq5zTXUw==} + rehype-highlight@7.0.2: resolution: {integrity: sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==} rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} + rehype-raw@7.0.0: + resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + + rehype-sanitize@6.0.0: + resolution: {integrity: sha512-CsnhKNsyI8Tub6L4sm5ZFsme4puGfc6pYylvXo1AeqaGbjOYyzNv3qZPwvs0oMJ39eryyeOdmxwUIo94IpEhqg==} + rehype-slug@6.0.0: resolution: {integrity: sha512-lWyvf/jwu+oS5+hL5eClVd3hNdmwM1kAC0BUvEGD19pajQMIzcNUd/k9GsfQ+FfECvX+JE+e9/btsKH0EjJT6A==} @@ -6993,6 +7412,26 @@ packages: resolution: {integrity: sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==} engines: {node: '>= 0.10'} + 
remark-cjk-friendly-gfm-strikethrough@1.2.3: + resolution: {integrity: sha512-bXfMZtsaomK6ysNN/UGRIcasQAYkC10NtPmP0oOHOV8YOhA2TXmwRXCku4qOzjIFxAPfish5+XS0eIug2PzNZA==} + engines: {node: '>=16'} + peerDependencies: + '@types/mdast': ^4.0.0 + unified: ^11.0.0 + peerDependenciesMeta: + '@types/mdast': + optional: true + + remark-cjk-friendly@1.2.3: + resolution: {integrity: sha512-UvAgxwlNk+l9Oqgl/9MWK2eWRS7zgBW/nXX9AthV7nd/3lNejF138E7Xbmk9Zs4WjTJGs721r7fAEc7tNFoH7g==} + engines: {node: '>=16'} + peerDependencies: + '@types/mdast': ^4.0.0 + unified: ^11.0.0 + peerDependenciesMeta: + '@types/mdast': + optional: true + remark-gfm@4.0.1: resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==} @@ -7008,6 +7447,9 @@ packages: remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + remend@1.1.0: + resolution: {integrity: sha512-JENGyuIhTwzUfCarW43X4r9cehoqTo9QyYxfNDZSud2AmqeuWjZ5pfybasTa4q0dxTJAj5m8NB+wR+YueAFpxQ==} + renderkid@3.0.0: resolution: {integrity: sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==} @@ -7066,14 +7508,23 @@ packages: resolution: {integrity: sha512-5Di9UC0+8h1L6ZD2d7awM7E/T4uA1fJRlx6zk/NvdCCVEoAnFqvHmCuNeIKoCeIixBX/q8uM+6ycDvF8woqosA==} engines: {node: '>= 0.8'} + robust-predicates@3.0.2: + resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + rollup@4.55.1: resolution: {integrity: sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true + roughjs@4.6.6: + resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + rxjs@7.8.2: resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} @@ -7098,6 +7549,9 @@ packages: safe-stable-stringify@1.1.1: resolution: {integrity: sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==} + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + sass-loader@16.0.6: resolution: {integrity: sha512-sglGzId5gmlfxNs4gK2U3h7HlVRfx278YK6Ono5lwzuvi1jxig80YiuHkaDBVsYIKFhx8wN7XSCI0M2IDS/3qA==} engines: {node: '>= 18.12.0'} @@ -7186,6 +7640,9 @@ packages: resolution: {integrity: sha512-VuvPvLG1QjNOLP7AIm2HGyfmxEIz8QdskvWOHwUcxLDibYWjLRBmCWd8LSL5FlwhBW7D/GU+3gNVC/ASxAWdxg==} engines: {node: 18.* || >= 20} + shiki@3.21.0: + resolution: {integrity: sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w==} + should-equal@2.0.0: resolution: {integrity: sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==} @@ -7301,6 +7758,11 @@ packages: stream-http@3.2.0: resolution: {integrity: sha512-Oq1bLqisTyK3TSCXpPbT4sdeYNdmyZJv1LxpEm2vu1ZhK89kSE5YXwZc3cWk0MagGaKriBh9mCFbVGtO+vY29A==} + streamdown@2.1.0: + resolution: {integrity: 
sha512-u9gWd0AmjKg1d+74P44XaPlGrMeC21oDOSIhjGNEYMAttDMzCzlJO6lpTyJ9JkSinQQF65YcK4eOd3q9iTvULw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + strict-event-emitter@0.5.1: resolution: {integrity: sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==} @@ -7414,6 +7876,9 @@ packages: babel-plugin-macros: optional: true + stylis@4.3.6: + resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + sucrase@3.35.1: resolution: {integrity: sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==} engines: {node: '>=16 || 14 >=14.17'} @@ -7435,12 +7900,20 @@ packages: resolution: {integrity: sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==} hasBin: true + swr@2.3.8: + resolution: {integrity: sha512-gaCPRVoMq8WGDcWj9p4YWzCMPHzE0WNl6W8ADIx9c3JBEIdMkJGMzW+uzXvxHMltwcYACr9jP+32H8/hgwMR7w==} + peerDependencies: + react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} tailwind-merge@2.6.0: resolution: {integrity: sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA==} + tailwind-merge@3.4.0: + resolution: {integrity: sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==} + tailwind-scrollbar@3.1.0: resolution: {integrity: sha512-pmrtDIZeHyu2idTejfV59SbaJyvp1VRjYxAjZBH0jnyrPRo6HL1kD5Glz8VPagasqr6oAx6M05+Tuw429Z8jxg==} engines: {node: '>=12.13.0'} @@ -7495,6 +7968,10 @@ packages: third-party-capital@1.0.20: resolution: {integrity: sha512-oB7yIimd8SuGptespDAZnNkzIz+NWaJCu2RMsbs4Wmp9zSDUM8Nhi3s2OOcqYuv3mN4hitXc8DVx+LyUmbUDiA==} + throttleit@2.1.0: + resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==} + engines: {node: '>=18'} + timers-browserify@2.0.12: resolution: {integrity: sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==} engines: {node: '>=0.6.0'} @@ -7677,6 +8154,9 @@ packages: uc.micro@2.1.0: resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + ufo@1.6.3: + resolution: {integrity: sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==} + unbox-primitive@1.1.0: resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==} engines: {node: '>= 0.4'} @@ -7781,6 +8261,11 @@ packages: '@types/react': optional: true + use-stick-to-bottom@1.1.2: + resolution: {integrity: sha512-ssUfMNvfH8a8hGLoAt5kcOsjbsVORknon2tbkECuf3EsVucFFBbyXl+Xnv3b58P8ZRuZelzO81fgb6M0eRo8cg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + use-sync-external-store@1.6.0: resolution: {integrity: sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==} peerDependencies: @@ -7918,6 +8403,26 @@ packages: vm-browserify@1.1.2: resolution: {integrity: sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==} + vscode-jsonrpc@8.2.0: + resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} + engines: {node: '>=14.0.0'} + + vscode-languageserver-protocol@3.17.5: + resolution: {integrity: 
sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==} + + vscode-languageserver-textdocument@1.0.12: + resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} + + vscode-languageserver-types@3.17.5: + resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==} + + vscode-languageserver@9.0.1: + resolution: {integrity: sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==} + hasBin: true + + vscode-uri@3.0.8: + resolution: {integrity: sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==} + w3c-xmlserializer@5.0.0: resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} engines: {node: '>=18'} @@ -8147,8 +8652,41 @@ snapshots: '@adobe/css-tools@4.4.4': {} + '@ai-sdk/gateway@3.0.27(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + '@vercel/oidc': 3.1.0 + zod: 3.25.76 + + '@ai-sdk/provider-utils@4.0.10(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + + '@ai-sdk/provider@3.0.5': + dependencies: + json-schema: 0.4.0 + + '@ai-sdk/react@3.0.61(react@18.3.1)(zod@3.25.76)': + dependencies: + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + ai: 6.0.59(zod@3.25.76) + react: 18.3.1 + swr: 2.3.8(react@18.3.1) + throttleit: 2.1.0 + transitivePeerDependencies: + - zod + '@alloc/quick-lru@5.2.0': {} + '@antfu/install-pkg@1.1.0': + dependencies: + package-manager-detector: 1.6.0 + tinyexec: 1.0.2 + '@apidevtools/json-schema-ref-parser@14.0.1': dependencies: '@types/json-schema': 7.0.15 @@ -8962,6 +9500,25 @@ snapshots: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 + '@braintree/sanitize-url@7.1.2': {} + + '@chevrotain/cst-dts-gen@11.0.3': + dependencies: + '@chevrotain/gast': 11.0.3 + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.23 + + '@chevrotain/gast@11.0.3': + dependencies: + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.23 + + '@chevrotain/regexp-to-ast@11.0.3': {} + + '@chevrotain/types@11.0.3': {} + + '@chevrotain/utils@11.0.3': {} + '@chromatic-com/storybook@4.1.2(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2)))': dependencies: '@neoconfetti/react': 1.0.0 @@ -9281,6 +9838,14 @@ snapshots: transitivePeerDependencies: - encoding + '@iconify/types@2.0.0': {} + + '@iconify/utils@3.1.0': + dependencies: + '@antfu/install-pkg': 1.1.0 + '@iconify/types': 2.0.0 + mlly: 1.8.0 + '@img/colour@1.0.0': optional: true @@ -9457,6 +10022,10 @@ snapshots: '@types/react': 18.3.17 react: 18.3.1 + '@mermaid-js/parser@0.6.3': + dependencies: + langium: 3.3.1 + '@mswjs/interceptors@0.40.0': dependencies: '@open-draft/deferred-promise': 2.2.0 @@ -10608,7 +11177,7 @@ snapshots: dependencies: '@rjsf/utils': 6.1.2(react@18.3.1) lodash: 4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 markdown-to-jsx: 8.0.0(react@18.3.1) prop-types: 15.8.1 react: 18.3.1 @@ -10619,7 +11188,7 @@ snapshots: fast-uri: 3.1.0 jsonpointer: 5.0.1 lodash: 4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 react: 18.3.1 react-is: 18.3.1 @@ -10629,7 +11198,7 @@ snapshots: ajv: 8.17.1 ajv-formats: 2.1.1(ajv@8.17.1) lodash: 
4.17.21 - lodash-es: 4.17.22 + lodash-es: 4.17.23 '@rolldown/pluginutils@1.0.0-beta.53': {} @@ -10936,6 +11505,19 @@ snapshots: - encoding - supports-color + '@shikijs/core@3.21.0': + dependencies: + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 + + '@shikijs/engine-javascript@3.21.0': + dependencies: + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + oniguruma-to-es: 4.3.4 + '@shikijs/engine-oniguruma@3.21.0': dependencies: '@shikijs/types': 3.21.0 @@ -11313,6 +11895,37 @@ snapshots: optionalDependencies: typescript: 5.9.3 + '@streamdown/cjk@1.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(react@18.3.1)(unified@11.0.5)': + dependencies: + react: 18.3.1 + remark-cjk-friendly: 1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) + remark-cjk-friendly-gfm-strikethrough: 1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) + unist-util-visit: 5.0.0 + transitivePeerDependencies: + - '@types/mdast' + - micromark + - micromark-util-types + - unified + + '@streamdown/code@1.0.1(react@18.3.1)': + dependencies: + react: 18.3.1 + shiki: 3.21.0 + + '@streamdown/math@1.0.1(react@18.3.1)': + dependencies: + katex: 0.16.28 + react: 18.3.1 + rehype-katex: 7.0.1 + remark-math: 6.0.0 + transitivePeerDependencies: + - supports-color + + '@streamdown/mermaid@1.0.1(react@18.3.1)': + dependencies: + mermaid: 11.12.2 + react: 18.3.1 + '@supabase/auth-js@2.78.0': dependencies: '@supabase/node-fetch': 2.6.15 @@ -11475,20 +12088,63 @@ snapshots: '@types/d3-array@3.2.2': {} + '@types/d3-axis@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-brush@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-chord@3.0.6': {} + '@types/d3-color@3.1.3': {} + '@types/d3-contour@3.0.6': + dependencies: + '@types/d3-array': 3.2.2 + '@types/geojson': 7946.0.16 + + '@types/d3-delaunay@6.0.4': {} + + '@types/d3-dispatch@3.0.7': {} + '@types/d3-drag@3.0.7': dependencies: '@types/d3-selection': 3.0.11 + '@types/d3-dsv@3.0.7': {} + '@types/d3-ease@3.0.2': {} + '@types/d3-fetch@3.0.7': + dependencies: + '@types/d3-dsv': 3.0.7 + + '@types/d3-force@3.0.10': {} + + '@types/d3-format@3.0.4': {} + + '@types/d3-geo@3.1.0': + dependencies: + '@types/geojson': 7946.0.16 + + '@types/d3-hierarchy@3.1.7': {} + '@types/d3-interpolate@3.0.4': dependencies: '@types/d3-color': 3.1.3 '@types/d3-path@3.1.1': {} + '@types/d3-polygon@3.0.2': {} + + '@types/d3-quadtree@3.0.6': {} + + '@types/d3-random@3.0.3': {} + + '@types/d3-scale-chromatic@3.1.0': {} + '@types/d3-scale@4.0.9': dependencies: '@types/d3-time': 3.0.4 @@ -11499,6 +12155,8 @@ snapshots: dependencies: '@types/d3-path': 3.1.1 + '@types/d3-time-format@4.0.3': {} + '@types/d3-time@3.0.4': {} '@types/d3-timer@3.0.2': {} @@ -11512,6 +12170,39 @@ snapshots: '@types/d3-interpolate': 3.0.4 '@types/d3-selection': 3.0.11 + '@types/d3@7.4.3': + dependencies: + '@types/d3-array': 3.2.2 + '@types/d3-axis': 3.0.6 + '@types/d3-brush': 3.0.6 + '@types/d3-chord': 3.0.6 + '@types/d3-color': 3.1.3 + '@types/d3-contour': 3.0.6 + '@types/d3-delaunay': 6.0.4 + '@types/d3-dispatch': 3.0.7 + '@types/d3-drag': 3.0.7 + '@types/d3-dsv': 3.0.7 + '@types/d3-ease': 3.0.2 + '@types/d3-fetch': 3.0.7 + '@types/d3-force': 3.0.10 + '@types/d3-format': 3.0.4 + '@types/d3-geo': 3.1.0 + '@types/d3-hierarchy': 3.1.7 + '@types/d3-interpolate': 3.0.4 + '@types/d3-path': 3.1.1 + '@types/d3-polygon': 3.0.2 + 
'@types/d3-quadtree': 3.0.6 + '@types/d3-random': 3.0.3 + '@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + '@types/d3-selection': 3.0.11 + '@types/d3-shape': 3.1.7 + '@types/d3-time': 3.0.4 + '@types/d3-time-format': 4.0.3 + '@types/d3-timer': 3.0.2 + '@types/d3-transition': 3.0.9 + '@types/d3-zoom': 3.0.8 + '@types/debug@4.1.12': dependencies: '@types/ms': 2.1.0 @@ -11540,6 +12231,8 @@ snapshots: '@types/estree@1.0.8': {} + '@types/geojson@7946.0.16': {} + '@types/hast@3.0.4': dependencies: '@types/unist': 3.0.3 @@ -11796,6 +12489,8 @@ snapshots: next: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 + '@vercel/oidc@3.1.0': {} + '@vercel/speed-insights@1.2.0(next@15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: next: 15.4.10(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -12023,6 +12718,14 @@ snapshots: agent-base@7.1.4: optional: true + ai@6.0.59(zod@3.25.76): + dependencies: + '@ai-sdk/gateway': 3.0.27(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.10(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + zod: 3.25.76 + ajv-draft-04@1.0.0(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -12411,6 +13114,20 @@ snapshots: check-error@2.1.3: {} + chevrotain-allstar@0.3.1(chevrotain@11.0.3): + dependencies: + chevrotain: 11.0.3 + lodash-es: 4.17.23 + + chevrotain@11.0.3: + dependencies: + '@chevrotain/cst-dts-gen': 11.0.3 + '@chevrotain/gast': 11.0.3 + '@chevrotain/regexp-to-ast': 11.0.3 + '@chevrotain/types': 11.0.3 + '@chevrotain/utils': 11.0.3 + lodash-es: 4.17.23 + chokidar@3.6.0: dependencies: anymatch: 3.1.3 @@ -12491,6 +13208,8 @@ snapshots: commander@4.1.1: {} + commander@7.2.0: {} + commander@8.3.0: {} common-path-prefix@3.0.0: {} @@ -12510,6 +13229,8 @@ snapshots: tree-kill: 1.2.2 yargs: 17.7.2 + confbox@0.1.8: {} + console-browserify@1.2.0: {} constants-browserify@1.0.0: {} @@ -12530,6 +13251,14 @@ snapshots: core-util-is@1.0.3: {} + cose-base@1.0.3: + dependencies: + layout-base: 1.0.2 + + cose-base@2.2.0: + dependencies: + layout-base: 2.0.1 + cosmiconfig@7.1.0: dependencies: '@types/parse-json': 4.0.2 @@ -12638,12 +13367,50 @@ snapshots: csstype@3.2.3: {} + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + + cytoscape-fcose@2.2.0(cytoscape@3.33.1): + dependencies: + cose-base: 2.2.0 + cytoscape: 3.33.1 + + cytoscape@3.33.1: {} + + d3-array@2.12.1: + dependencies: + internmap: 1.0.1 + d3-array@3.2.4: dependencies: internmap: 2.0.3 + d3-axis@3.0.0: {} + + d3-brush@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3-chord@3.0.1: + dependencies: + d3-path: 3.1.0 + d3-color@3.1.0: {} + d3-contour@4.0.2: + dependencies: + d3-array: 3.2.4 + + d3-delaunay@6.0.4: + dependencies: + delaunator: 5.0.1 + d3-dispatch@3.0.1: {} d3-drag@3.0.0: @@ -12651,16 +13418,56 @@ snapshots: d3-dispatch: 3.0.1 d3-selection: 3.0.0 + d3-dsv@3.0.1: + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + d3-ease@3.0.1: {} + d3-fetch@3.0.1: + dependencies: + d3-dsv: 3.0.1 + + d3-force@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + d3-format@3.1.0: {} + d3-geo@3.1.1: + dependencies: + d3-array: 3.2.4 
+ + d3-hierarchy@3.1.2: {} + d3-interpolate@3.0.1: dependencies: d3-color: 3.1.0 + d3-path@1.0.9: {} + d3-path@3.1.0: {} + d3-polygon@3.0.1: {} + + d3-quadtree@3.0.1: {} + + d3-random@3.0.1: {} + + d3-sankey@0.12.3: + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + + d3-scale-chromatic@3.1.0: + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + d3-scale@4.0.2: dependencies: d3-array: 3.2.4 @@ -12671,6 +13478,10 @@ snapshots: d3-selection@3.0.0: {} + d3-shape@1.3.7: + dependencies: + d3-path: 1.0.9 + d3-shape@3.2.0: dependencies: d3-path: 3.1.0 @@ -12702,6 +13513,44 @@ snapshots: d3-selection: 3.0.0 d3-transition: 3.0.1(d3-selection@3.0.0) + d3@7.9.0: + dependencies: + d3-array: 3.2.4 + d3-axis: 3.0.0 + d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.0 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + + dagre-d3-es@7.0.13: + dependencies: + d3: 7.9.0 + lodash-es: 4.17.23 + damerau-levenshtein@1.0.8: {} data-urls@6.0.1: @@ -12732,6 +13581,8 @@ snapshots: date-fns@4.1.0: {} + dayjs@1.11.19: {} + debug@3.2.7: dependencies: ms: 2.1.3 @@ -12773,6 +13624,10 @@ snapshots: has-property-descriptors: 1.0.2 object-keys: 1.1.1 + delaunator@5.0.1: + dependencies: + robust-predicates: 3.0.2 + dependency-graph@0.11.0: {} dequal@2.0.3: {} @@ -13347,6 +14202,8 @@ snapshots: events@3.3.0: {} + eventsource-parser@3.0.6: {} + evp_bytestokey@1.0.3: dependencies: md5.js: 1.3.5 @@ -13553,6 +14410,8 @@ snapshots: get-caller-file@2.0.5: {} + get-east-asian-width@1.4.0: {} + get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -13643,6 +14502,8 @@ snapshots: graphql@16.12.0: {} + hachure-fill@0.5.2: {} + happy-dom@20.3.4: dependencies: '@types/node': 24.10.0 @@ -13739,6 +14600,42 @@ snapshots: dependencies: '@types/hast': 3.0.4 + hast-util-raw@9.1.0: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + '@ungap/structured-clone': 1.3.0 + hast-util-from-parse5: 8.0.3 + hast-util-to-parse5: 8.0.1 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + parse5: 7.3.0 + unist-util-position: 5.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-sanitize@5.0.2: + dependencies: + '@types/hast': 3.0.4 + '@ungap/structured-clone': 1.3.0 + unist-util-position: 5.0.0 + + hast-util-to-html@9.0.5: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + hast-util-to-jsx-runtime@2.3.6: dependencies: '@types/estree': 1.0.8 @@ -13759,6 +14656,16 @@ snapshots: transitivePeerDependencies: - supports-color + hast-util-to-parse5@8.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + hast-util-to-string@3.0.1: dependencies: '@types/hast': 3.0.4 @@ -13819,6 +14726,8 @@ snapshots: html-url-attributes@3.0.1: {} + 
html-void-elements@3.0.0: {} + html-webpack-plugin@5.6.5(webpack@5.104.1(esbuild@0.25.12)): dependencies: '@types/html-minifier-terser': 6.1.0 @@ -13865,6 +14774,10 @@ snapshots: human-signals@2.1.0: {} + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + icss-utils@5.1.0(postcss@8.5.6): dependencies: postcss: 8.5.6 @@ -13916,6 +14829,8 @@ snapshots: hasown: 2.0.2 side-channel: 1.1.0 + internmap@1.0.1: {} + internmap@2.0.3: {} is-alphabetical@2.0.1: {} @@ -14164,6 +15079,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema@0.4.0: {} + json-stable-stringify-without-jsonify@1.0.1: {} json5@1.0.2: @@ -14207,10 +15124,24 @@ snapshots: dependencies: commander: 8.3.0 + katex@0.16.28: + dependencies: + commander: 8.3.0 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 + khroma@2.1.0: {} + + langium@3.3.1: + dependencies: + chevrotain: 11.0.3 + chevrotain-allstar: 0.3.1(chevrotain@11.0.3) + vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.12 + vscode-uri: 3.0.8 + language-subtag-registry@0.3.23: {} language-tags@1.0.9: @@ -14236,6 +15167,10 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + layout-base@1.0.2: {} + + layout-base@2.0.1: {} + leven@3.1.0: {} levn@0.4.1: @@ -14273,7 +15208,7 @@ snapshots: dependencies: p-locate: 6.0.0 - lodash-es@4.17.22: {} + lodash-es@4.17.23: {} lodash.camelcase@4.3.0: {} @@ -14363,6 +15298,10 @@ snapshots: optionalDependencies: react: 18.3.1 + marked@16.4.2: {} + + marked@17.0.1: {} + math-intrinsics@1.1.0: {} md5.js@1.3.5: @@ -14549,6 +15488,29 @@ snapshots: merge2@1.4.1: {} + mermaid@11.12.2: + dependencies: + '@braintree/sanitize-url': 7.1.2 + '@iconify/utils': 3.1.0 + '@mermaid-js/parser': 0.6.3 + '@types/d3': 7.4.3 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.13 + dayjs: 1.11.19 + dompurify: 3.3.1 + katex: 0.16.25 + khroma: 2.1.0 + lodash-es: 4.17.23 + marked: 16.4.2 + roughjs: 4.6.6 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 11.1.0 + micromark-core-commonmark@2.0.3: dependencies: decode-named-character-reference: 1.2.0 @@ -14568,6 +15530,38 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-extension-cjk-friendly-gfm-strikethrough@1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2): + dependencies: + devlop: 1.1.0 + get-east-asian-width: 1.4.0 + micromark: 4.0.2 + micromark-extension-cjk-friendly-util: 2.1.1(micromark-util-types@2.0.2) + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly-util@2.1.1(micromark-util-types@2.0.2): + dependencies: + get-east-asian-width: 1.4.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly@1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2): + dependencies: + devlop: 1.1.0 + micromark: 4.0.2 + micromark-extension-cjk-friendly-util: 2.1.1(micromark-util-types@2.0.2) + micromark-util-chunked: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + micromark-extension-gfm-autolink-literal@2.1.0: dependencies: micromark-util-character: 2.1.1 @@ -14790,9 +15784,14 @@ snapshots: minipass@7.1.2: {} - module-details-from-path@1.0.4: {} + mlly@1.8.0: + dependencies: + acorn: 8.15.0 + pathe: 2.0.3 + 
pkg-types: 1.3.1 + ufo: 1.6.3 - moment@2.30.1: {} + module-details-from-path@1.0.4: {} motion-dom@12.24.8: dependencies: @@ -15049,6 +16048,14 @@ snapshots: dependencies: mimic-fn: 2.1.0 + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.4: + dependencies: + oniguruma-parser: 0.12.1 + regex: 6.1.0 + regex-recursion: 6.0.2 + open@8.4.2: dependencies: define-lazy-prop: 2.0.0 @@ -15143,6 +16150,8 @@ snapshots: package-json-from-dist@1.0.1: {} + package-manager-detector@1.6.0: {} + pako@1.0.11: {} param-case@3.0.4: @@ -15197,6 +16206,8 @@ snapshots: path-browserify@1.0.1: {} + path-data-parser@0.1.0: {} + path-exists@4.0.0: {} path-exists@5.0.0: {} @@ -15259,6 +16270,12 @@ snapshots: dependencies: find-up: 6.3.0 + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + playwright-core@1.56.1: {} playwright@1.56.1: @@ -15267,6 +16284,13 @@ snapshots: optionalDependencies: fsevents: 2.3.2 + points-on-curve@0.2.0: {} + + points-on-path@0.2.1: + dependencies: + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + pony-cause@1.1.1: {} possible-typed-array-names@1.1.0: {} @@ -15693,6 +16717,16 @@ snapshots: regex-parser@2.3.1: {} + regex-recursion@6.0.2: + dependencies: + regex-utilities: 2.3.0 + + regex-utilities@2.3.0: {} + + regex@6.1.0: + dependencies: + regex-utilities: 2.3.0 + regexp.prototype.flags@1.5.4: dependencies: call-bind: 1.0.8 @@ -15726,6 +16760,10 @@ snapshots: unified: 11.0.5 unist-util-visit: 5.0.0 + rehype-harden@1.1.7: + dependencies: + unist-util-visit: 5.0.0 + rehype-highlight@7.0.2: dependencies: '@types/hast': 3.0.4 @@ -15744,6 +16782,17 @@ snapshots: unist-util-visit-parents: 6.0.2 vfile: 6.0.3 + rehype-raw@7.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-raw: 9.1.0 + vfile: 6.0.3 + + rehype-sanitize@6.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-sanitize: 5.0.2 + rehype-slug@6.0.0: dependencies: '@types/hast': 3.0.4 @@ -15754,6 +16803,26 @@ snapshots: relateurl@0.2.7: {} + remark-cjk-friendly-gfm-strikethrough@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): + dependencies: + micromark-extension-cjk-friendly-gfm-strikethrough: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) + unified: 11.0.5 + optionalDependencies: + '@types/mdast': 4.0.4 + transitivePeerDependencies: + - micromark + - micromark-util-types + + remark-cjk-friendly@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): + dependencies: + micromark-extension-cjk-friendly: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) + unified: 11.0.5 + optionalDependencies: + '@types/mdast': 4.0.4 + transitivePeerDependencies: + - micromark + - micromark-util-types + remark-gfm@4.0.1: dependencies: '@types/mdast': 4.0.4 @@ -15797,6 +16866,8 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + remend@1.1.0: {} + renderkid@3.0.0: dependencies: css-select: 4.3.0 @@ -15861,6 +16932,8 @@ snapshots: hash-base: 3.1.2 inherits: 2.0.4 + robust-predicates@3.0.2: {} + rollup@4.55.1: dependencies: '@types/estree': 1.0.8 @@ -15892,10 +16965,19 @@ snapshots: '@rollup/rollup-win32-x64-msvc': 4.55.1 fsevents: 2.3.3 + roughjs@4.6.6: + dependencies: + hachure-fill: 0.5.2 + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + points-on-path: 0.2.1 + run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 + rw@1.3.3: {} + rxjs@7.8.2: dependencies: tslib: 2.8.1 @@ -15925,6 +17007,8 @@ snapshots: safe-stable-stringify@1.1.1: {} + safer-buffer@2.1.2: {} + sass-loader@16.0.6(webpack@5.104.1(esbuild@0.25.12)): 
dependencies: neo-async: 2.6.2 @@ -16037,6 +17121,17 @@ snapshots: '@scarf/scarf': 1.4.0 deepmerge-ts: 7.1.5 + shiki@3.21.0: + dependencies: + '@shikijs/core': 3.21.0 + '@shikijs/engine-javascript': 3.21.0 + '@shikijs/engine-oniguruma': 3.21.0 + '@shikijs/langs': 3.21.0 + '@shikijs/themes': 3.21.0 + '@shikijs/types': 3.21.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + should-equal@2.0.0: dependencies: should-type: 1.4.0 @@ -16176,6 +17271,26 @@ snapshots: readable-stream: 3.6.2 xtend: 4.0.2 + streamdown@2.1.0(react@18.3.1): + dependencies: + clsx: 2.1.1 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + marked: 17.0.1 + react: 18.3.1 + rehype-harden: 1.1.7 + rehype-raw: 7.0.0 + rehype-sanitize: 6.0.0 + remark-gfm: 4.0.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + remend: 1.1.0 + tailwind-merge: 3.4.0 + unified: 11.0.5 + unist-util-visit: 5.0.0 + transitivePeerDependencies: + - supports-color + strict-event-emitter@0.5.1: {} string-argv@0.3.2: {} @@ -16301,6 +17416,8 @@ snapshots: optionalDependencies: '@babel/core': 7.28.5 + stylis@4.3.6: {} + sucrase@3.35.1: dependencies: '@jridgewell/gen-mapping': 0.3.13 @@ -16337,11 +17454,19 @@ snapshots: transitivePeerDependencies: - encoding + swr@2.3.8(react@18.3.1): + dependencies: + dequal: 2.0.3 + react: 18.3.1 + use-sync-external-store: 1.6.0(react@18.3.1) + symbol-tree@3.2.4: optional: true tailwind-merge@2.6.0: {} + tailwind-merge@3.4.0: {} + tailwind-scrollbar@3.1.0(tailwindcss@3.4.17): dependencies: tailwindcss: 3.4.17 @@ -16409,6 +17534,8 @@ snapshots: third-party-capital@1.0.20: {} + throttleit@2.1.0: {} + timers-browserify@2.0.12: dependencies: setimmediate: 1.0.5 @@ -16571,6 +17698,8 @@ snapshots: uc.micro@2.1.0: {} + ufo@1.6.3: {} + unbox-primitive@1.1.0: dependencies: call-bound: 1.0.4 @@ -16708,6 +17837,10 @@ snapshots: optionalDependencies: '@types/react': 18.3.17 + use-stick-to-bottom@1.1.2(react@18.3.1): + dependencies: + react: 18.3.1 + use-sync-external-store@1.6.0(react@18.3.1): dependencies: react: 18.3.1 @@ -16843,6 +17976,23 @@ snapshots: vm-browserify@1.1.2: {} + vscode-jsonrpc@8.2.0: {} + + vscode-languageserver-protocol@3.17.5: + dependencies: + vscode-jsonrpc: 8.2.0 + vscode-languageserver-types: 3.17.5 + + vscode-languageserver-textdocument@1.0.12: {} + + vscode-languageserver-types@3.17.5: {} + + vscode-languageserver@9.0.1: + dependencies: + vscode-languageserver-protocol: 3.17.5 + + vscode-uri@3.0.8: {} + w3c-xmlserializer@5.0.0: dependencies: xml-name-validator: 5.0.0 diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderViewTabs/BuilderViewTabs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderViewTabs/BuilderViewTabs.tsx deleted file mode 100644 index 4f4237445b..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderViewTabs/BuilderViewTabs.tsx +++ /dev/null @@ -1,31 +0,0 @@ -"use client"; - -import { Tabs, TabsList, TabsTrigger } from "@/components/__legacy__/ui/tabs"; - -export type BuilderView = "old" | "new"; - -export function BuilderViewTabs({ - value, - onChange, -}: { - value: BuilderView; - onChange: (value: BuilderView) => void; -}) { - return ( -