mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-11 15:25:16 -05:00)

Compare commits: 1 commit between add-llm-ma... and chore/comb... (both branch names truncated in the capture)
Commit SHA1: d3e5721c1e (author and date not captured)
(file header missing from the capture for this first hunk)

@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           ref: ${{ github.event.workflow_run.head_branch }}
           fetch-depth: 0
.github/workflows/claude-dependabot.yml (vendored, 2 changed lines)

@@ -30,7 +30,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 1
.github/workflows/claude.yml (vendored, 2 changed lines)

@@ -40,7 +40,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 1
.github/workflows/codeql.yml (vendored, 2 changed lines)

@@ -58,7 +58,7 @@ jobs:
     # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
.github/workflows/copilot-setup-steps.yml (vendored, 2 changed lines)

@@ -27,7 +27,7 @@ jobs:
     # If you do not check out your code, Copilot will do this for you.
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
           submodules: true
.github/workflows/docs-block-sync.yml (vendored, 2 changed lines)

@@ -23,7 +23,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 1
 
.github/workflows/docs-claude-review.yml (vendored, 2 changed lines)

@@ -23,7 +23,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
.github/workflows/docs-enhance.yml (vendored, 2 changed lines)

@@ -28,7 +28,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 1
 
(file header missing from the capture)

@@ -25,7 +25,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           ref: ${{ github.event.inputs.git_ref || github.ref_name }}
 
(file header missing from the capture)

@@ -17,7 +17,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           ref: ${{ github.ref_name || 'master' }}
 
.github/workflows/platform-backend-ci.yml (vendored, 2 changed lines)

@@ -68,7 +68,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
           submodules: true
.github/workflows/platform-frontend-ci.yml (vendored, 10 changed lines)

@@ -31,7 +31,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
 
       - name: Check for component changes
         uses: dorny/paths-filter@v3

@@ -71,7 +71,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
 
       - name: Set up Node.js
         uses: actions/setup-node@v6

@@ -107,7 +107,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 

@@ -148,7 +148,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           submodules: recursive
 

@@ -277,7 +277,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           submodules: recursive
 
.github/workflows/platform-fullstack-ci.yml (vendored, 4 changed lines)

@@ -29,7 +29,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
 
       - name: Set up Node.js
         uses: actions/setup-node@v6

@@ -63,7 +63,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           submodules: recursive
 
.github/workflows/repo-workflow-checker.yml (vendored, 2 changed lines)

@@ -11,7 +11,7 @@ jobs:
     steps:
       # - name: Wait some time for all actions to start
       #   run: sleep 30
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
       # with:
       #   fetch-depth: 0
       - name: Set up Python
autogpt_platform/autogpt_libs/poetry.lock (generated, 77 changed lines)

@@ -1062,14 +1062,14 @@ urllib3 = ">=1.26.0,<3"
 
 [[package]]
 name = "launchdarkly-server-sdk"
-version = "9.14.1"
+version = "9.15.0"
 description = "LaunchDarkly SDK for Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
-    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
+    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
+    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
 ]
 
 [package.dependencies]

@@ -1478,14 +1478,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
 
 [[package]]
 name = "postgrest"
-version = "2.27.2"
+version = "2.28.0"
 description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
-    {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
+    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
+    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
 ]
 
 [package.dependencies]

@@ -2135,21 +2135,21 @@ files = [
 
 [[package]]
 name = "pytest"
-version = "8.4.1"
+version = "9.0.2"
 description = "pytest: simple powerful testing with Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
 groups = ["dev"]
 files = [
-    {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"},
-    {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"},
+    {file = "pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b"},
+    {file = "pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11"},
 ]
 
 [package.dependencies]
 colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""}
 exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""}
-iniconfig = ">=1"
+iniconfig = ">=1.0.1"
-packaging = ">=20"
+packaging = ">=22"
 pluggy = ">=1.5,<2"
 pygments = ">=2.7.2"
 tomli = {version = ">=1", markers = "python_version < \"3.11\""}

@@ -2248,14 +2248,14 @@ cli = ["click (>=5.0)"]
 
 [[package]]
 name = "realtime"
-version = "2.27.2"
+version = "2.28.0"
 description = ""
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
-    {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
+    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
+    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
 ]
 
 [package.dependencies]

@@ -2265,20 +2265,21 @@ websockets = ">=11,<16"
 
 [[package]]
 name = "redis"
-version = "6.2.0"
+version = "7.1.1"
 description = "Python client for Redis database and key-value store"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "redis-6.2.0-py3-none-any.whl", hash = "sha256:c8ddf316ee0aab65f04a11229e94a64b2618451dab7a67cb2f77eb799d872d5e"},
-    {file = "redis-6.2.0.tar.gz", hash = "sha256:e821f129b75dde6cb99dd35e5c76e8c49512a5a0d8dfdc560b2fbd44b85ca977"},
+    {file = "redis-7.1.1-py3-none-any.whl", hash = "sha256:f77817f16071c2950492c67d40b771fa493eb3fccc630a424a10976dbb794b7a"},
+    {file = "redis-7.1.1.tar.gz", hash = "sha256:a2814b2bda15b39dad11391cc48edac4697214a8a5a4bd10abe936ab4892eb43"},
 ]
 
 [package.dependencies]
 async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}
 
 [package.extras]
+circuit-breaker = ["pybreaker (>=1.4.0)"]
 hiredis = ["hiredis (>=3.2.0)"]
 jwt = ["pyjwt (>=2.9.0)"]
 ocsp = ["cryptography (>=36.0.1)", "pyopenssl (>=20.0.1)", "requests (>=2.31.0)"]

@@ -2436,14 +2437,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
 
 [[package]]
 name = "storage3"
-version = "2.27.2"
+version = "2.28.0"
 description = "Supabase Storage client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
-    {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
+    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
+    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
 ]
 
 [package.dependencies]

@@ -2487,35 +2488,35 @@ python-dateutil = ">=2.6.0"
 
 [[package]]
 name = "supabase"
-version = "2.27.2"
+version = "2.28.0"
 description = "Supabase client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
-    {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
+    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
+    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
 ]
 
 [package.dependencies]
 httpx = ">=0.26,<0.29"
-postgrest = "2.27.2"
+postgrest = "2.28.0"
-realtime = "2.27.2"
+realtime = "2.28.0"
-storage3 = "2.27.2"
+storage3 = "2.28.0"
-supabase-auth = "2.27.2"
+supabase-auth = "2.28.0"
-supabase-functions = "2.27.2"
+supabase-functions = "2.28.0"
 yarl = ">=1.22.0"
 
 [[package]]
 name = "supabase-auth"
-version = "2.27.2"
+version = "2.28.0"
 description = "Python Client Library for Supabase Auth"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
-    {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
+    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
+    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
 ]
 
 [package.dependencies]

@@ -2525,14 +2526,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
 
 [[package]]
 name = "supabase-functions"
-version = "2.27.2"
+version = "2.28.0"
 description = "Library for Supabase Functions"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
-    {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
+    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
+    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
 ]
 
 [package.dependencies]

@@ -2911,4 +2912,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<4.0"
-content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
+content-hash = "3f738dbf158a0b9319387d7251cd557e8e143d4dec809c5ab720321d2b53e368"
(file header missing from the capture; the hunk is a pyproject.toml that matches the poetry.lock above)

@@ -13,17 +13,17 @@ cryptography = "^46.0"
 expiringdict = "^1.2.2"
 fastapi = "^0.128.0"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.14.1"
+launchdarkly-server-sdk = "^9.15.0"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = { version = "^2.11.0", extras = ["crypto"] }
-redis = "^6.2.0"
+redis = "^7.1.1"
-supabase = "^2.27.2"
+supabase = "^2.28.0"
 uvicorn = "^0.40.0"
 
 [tool.poetry.group.dev.dependencies]
 pyright = "^1.1.408"
-pytest = "^8.4.1"
+pytest = "^9.0.2"
 pytest-asyncio = "^1.3.0"
 pytest-mock = "^3.15.1"
 pytest-cov = "^7.0.0"
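Note the major-version jumps here: redis ^6.2 to ^7.1 and pytest ^8.4 to ^9.0, both of which (per the lock file above) now require Python >= 3.10. A minimal connectivity check for the upgraded async Redis client; this is an illustrative sketch, not part of the diff, and assumes a Redis server on localhost:6379:

import asyncio

import redis.asyncio as aioredis  # redis-py's asyncio client


async def main() -> None:
    # Connect to a local Redis server; adjust host/port for your setup.
    r = aioredis.Redis(host="localhost", port=6379, decode_responses=True)
    assert await r.ping()  # round-trip check that the v7 client connects
    await r.aclose()  # aclose() replaces the deprecated close() in redis-py 5+


asyncio.run(main())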
@@ -122,24 +122,6 @@ class ConnectionManager:
 
         return len(connections)
 
-    async def broadcast_to_all(self, *, method: WSMethod, data: dict) -> int:
-        """Broadcast a message to all active websocket connections."""
-        message = WSMessage(
-            method=method,
-            data=data,
-        ).model_dump_json()
-
-        connections = tuple(self.active_connections)
-        if not connections:
-            return 0
-
-        await asyncio.gather(
-            *(connection.send_text(message) for connection in connections),
-            return_exceptions=True,
-        )
-
-        return len(connections)
-
     async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
         if channel_key not in self.subscriptions:
            self.subscriptions[channel_key] = set()
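The removed broadcast_to_all snapshots the connection set and fans the send out with asyncio.gather(..., return_exceptions=True), so one failing socket cannot cancel the other sends. A standalone sketch of the same pattern (hypothetical duck-typed connections, not the project's classes); unlike the removed method, which returned the snapshot size, this variant counts only successful sends:

import asyncio


async def fan_out(message: str, connections) -> int:
    """Send `message` to every connection, swallowing per-connection failures."""
    conns = tuple(connections)  # snapshot, so concurrent adds/removes don't break iteration
    if not conns:
        return 0
    # return_exceptions=True collects errors instead of cancelling sibling sends
    results = await asyncio.gather(
        *(c.send_text(message) for c in conns),
        return_exceptions=True,
    )
    return sum(1 for r in results if not isinstance(r, Exception))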
@@ -176,64 +176,30 @@ async def get_execution_analytics_config(
         # Return with provider prefix for clarity
         return f"{provider_name}: {model_name}"
 
-    # Get all models from the registry (dynamic, not hardcoded enum)
-    from backend.data import llm_registry
-    from backend.server.v2.llm import db as llm_db
-
-    # Get the recommended model from the database (configurable via admin UI)
-    recommended_model_slug = await llm_db.get_recommended_model_slug()
-
-    # Build the available models list
-    first_enabled_slug = None
-    for registry_model in llm_registry.iter_dynamic_models():
-        # Only include enabled models in the list
-        if not registry_model.is_enabled:
-            continue
-
-        # Track first enabled model as fallback
-        if first_enabled_slug is None:
-            first_enabled_slug = registry_model.slug
-
-        model = LlmModel(registry_model.slug)
+    # Include all LlmModel values (no more filtering by hardcoded list)
+    recommended_model = LlmModel.GPT4O_MINI.value
+    for model in LlmModel:
         label = generate_model_label(model)
         # Add "(Recommended)" suffix to the recommended model
-        if registry_model.slug == recommended_model_slug:
+        if model.value == recommended_model:
             label += " (Recommended)"
 
         available_models.append(
             ModelInfo(
-                value=registry_model.slug,
+                value=model.value,
                 label=label,
-                provider=registry_model.metadata.provider,
+                provider=model.provider,
             )
         )
 
     # Sort models by provider and name for better UX
     available_models.sort(key=lambda x: (x.provider, x.label))
 
-    # Handle case where no models are available
-    if not available_models:
-        logger.warning(
-            "No enabled LLM models found in registry. "
-            "Ensure models are configured and enabled in the LLM Registry."
-        )
-        # Provide a placeholder entry so admins see meaningful feedback
-        available_models.append(
-            ModelInfo(
-                value="",
-                label="No models available - configure in LLM Registry",
-                provider="none",
-            )
-        )
-
-    # Use the DB recommended model, or fallback to first enabled model
-    final_recommended = recommended_model_slug or first_enabled_slug or ""
-
     return ExecutionAnalyticsConfig(
         available_models=available_models,
         default_system_prompt=DEFAULT_SYSTEM_PROMPT,
         default_user_prompt=DEFAULT_USER_PROMPT,
-        recommended_model=final_recommended,
+        recommended_model=recommended_model,
     )
 
 
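The replacement code builds the dropdown straight from the LlmModel enum rather than the database-backed registry, hardcoding GPT4O_MINI as the recommendation. A self-contained sketch of that pattern with hypothetical enum members (the real enum and its provider property live in the backend codebase):

from enum import Enum


class LlmModel(str, Enum):
    # Hypothetical members; the real enum is defined in the backend codebase.
    GPT4O = "gpt-4o"
    GPT4O_MINI = "gpt-4o-mini"
    CLAUDE_3_5_SONNET = "claude-3-5-sonnet"

    @property
    def provider(self) -> str:
        # Illustrative mapping; the real property looks up provider metadata.
        return "anthropic" if self.value.startswith("claude") else "openai"


recommended = LlmModel.GPT4O_MINI.value
options = [
    {
        "value": m.value,
        "provider": m.provider,
        "label": m.value + (" (Recommended)" if m.value == recommended else ""),
    }
    for m in LlmModel
]
options.sort(key=lambda o: (o["provider"], o["label"]))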
(deleted file; the header was not captured, but the test module below imports it as backend.api.features.admin.llm_routes)

@@ -1,593 +0,0 @@ (entire file removed)

import logging

import autogpt_libs.auth
import fastapi

from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model

logger = logging.getLogger(__name__)

router = fastapi.APIRouter(
    tags=["llm", "admin"],
    dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
)


async def _refresh_runtime_state() -> None:
    """Refresh the LLM registry and clear all related caches to ensure real-time updates."""
    logger.info("Refreshing LLM registry runtime state...")
    try:
        # Refresh registry from database
        await llm_registry.refresh_llm_registry()
        refresh_llm_costs()

        # Clear block schema caches so they're regenerated with updated model options
        from backend.data.block import BlockSchema

        BlockSchema.clear_all_schema_caches()
        logger.info("Cleared all block schema caches")

        # Clear the /blocks endpoint cache so frontend gets updated schemas
        try:
            from backend.api.features.v1 import _get_cached_blocks

            _get_cached_blocks.cache_clear()
            logger.info("Cleared /blocks endpoint cache")
        except Exception as e:
            logger.warning("Failed to clear /blocks cache: %s", e)

        # Clear the v2 builder caches
        try:
            from backend.api.features.builder import db as builder_db

            builder_db._get_all_providers.cache_clear()
            logger.info("Cleared v2 builder providers cache")
            builder_db._build_cached_search_results.cache_clear()
            logger.info("Cleared v2 builder search results cache")
        except Exception as e:
            logger.debug("Could not clear v2 builder cache: %s", e)

        # Notify all executor services to refresh their registry cache
        from backend.data.llm_registry import publish_registry_refresh_notification

        await publish_registry_refresh_notification()
        logger.info("Published registry refresh notification")
    except Exception as exc:
        logger.exception(
            "LLM runtime state refresh failed; caches may be stale: %s", exc
        )


@router.get(
    "/providers",
    summary="List LLM providers",
    response_model=llm_model.LlmProvidersResponse,
)
async def list_llm_providers(include_models: bool = True):
    providers = await llm_db.list_providers(include_models=include_models)
    return llm_model.LlmProvidersResponse(providers=providers)


@router.post(
    "/providers",
    summary="Create LLM provider",
    response_model=llm_model.LlmProvider,
)
async def create_llm_provider(request: llm_model.UpsertLlmProviderRequest):
    provider = await llm_db.upsert_provider(request=request)
    await _refresh_runtime_state()
    return provider


@router.patch(
    "/providers/{provider_id}",
    summary="Update LLM provider",
    response_model=llm_model.LlmProvider,
)
async def update_llm_provider(
    provider_id: str,
    request: llm_model.UpsertLlmProviderRequest,
):
    provider = await llm_db.upsert_provider(request=request, provider_id=provider_id)
    await _refresh_runtime_state()
    return provider


@router.delete(
    "/providers/{provider_id}",
    summary="Delete LLM provider",
    response_model=dict,
)
async def delete_llm_provider(provider_id: str):
    """
    Delete an LLM provider.

    A provider can only be deleted if it has no associated models.
    Delete all models from the provider first before deleting the provider.
    """
    try:
        await llm_db.delete_provider(provider_id)
        await _refresh_runtime_state()
        logger.info("Deleted LLM provider '%s'", provider_id)
        return {"success": True, "message": "Provider deleted successfully"}
    except ValueError as e:
        logger.warning("Failed to delete provider '%s': %s", provider_id, e)
        raise fastapi.HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.exception("Failed to delete provider '%s': %s", provider_id, e)
        raise fastapi.HTTPException(status_code=500, detail=str(e))


@router.get(
    "/models",
    summary="List LLM models",
    response_model=llm_model.LlmModelsResponse,
)
async def list_llm_models(
    provider_id: str | None = fastapi.Query(default=None),
    page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = fastapi.Query(
        default=50, ge=1, le=100, description="Number of models per page"
    ),
):
    return await llm_db.list_models(
        provider_id=provider_id, page=page, page_size=page_size
    )


@router.post(
    "/models",
    summary="Create LLM model",
    response_model=llm_model.LlmModel,
)
async def create_llm_model(request: llm_model.CreateLlmModelRequest):
    model = await llm_db.create_model(request=request)
    await _refresh_runtime_state()
    return model


@router.patch(
    "/models/{model_id}",
    summary="Update LLM model",
    response_model=llm_model.LlmModel,
)
async def update_llm_model(
    model_id: str,
    request: llm_model.UpdateLlmModelRequest,
):
    model = await llm_db.update_model(model_id=model_id, request=request)
    await _refresh_runtime_state()
    return model


@router.patch(
    "/models/{model_id}/toggle",
    summary="Toggle LLM model availability",
    response_model=llm_model.ToggleLlmModelResponse,
)
async def toggle_llm_model(
    model_id: str,
    request: llm_model.ToggleLlmModelRequest,
):
    """
    Toggle a model's enabled status, optionally migrating workflows when disabling.

    If disabling a model and `migrate_to_slug` is provided, all workflows using
    this model will be migrated to the specified replacement model before disabling.
    A migration record is created which can be reverted later using the revert endpoint.

    Optional fields:
    - `migration_reason`: Reason for the migration (e.g., "Provider outage")
    - `custom_credit_cost`: Custom pricing override for billing during migration
    """
    try:
        result = await llm_db.toggle_model(
            model_id=model_id,
            is_enabled=request.is_enabled,
            migrate_to_slug=request.migrate_to_slug,
            migration_reason=request.migration_reason,
            custom_credit_cost=request.custom_credit_cost,
        )
        await _refresh_runtime_state()
        if result.nodes_migrated > 0:
            logger.info(
                "Toggled model '%s' to %s and migrated %d nodes to '%s' (migration_id=%s)",
                result.model.slug,
                "enabled" if request.is_enabled else "disabled",
                result.nodes_migrated,
                result.migrated_to_slug,
                result.migration_id,
            )
        return result
    except ValueError as exc:
        logger.warning("Model toggle validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to toggle LLM model %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to toggle model availability",
        ) from exc


@router.get(
    "/models/{model_id}/usage",
    summary="Get model usage count",
    response_model=llm_model.LlmModelUsageResponse,
)
async def get_llm_model_usage(model_id: str):
    """Get the number of workflow nodes using this model."""
    try:
        return await llm_db.get_model_usage(model_id=model_id)
    except ValueError as exc:
        raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to get model usage %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get model usage",
        ) from exc


@router.delete(
    "/models/{model_id}",
    summary="Delete LLM model and migrate workflows",
    response_model=llm_model.DeleteLlmModelResponse,
)
async def delete_llm_model(
    model_id: str,
    replacement_model_slug: str | None = fastapi.Query(
        default=None,
        description="Slug of the model to migrate existing workflows to (required only if workflows use this model)",
    ),
):
    """
    Delete a model and optionally migrate workflows using it to a replacement model.

    If no workflows are using this model, it can be deleted without providing a
    replacement. If workflows exist, replacement_model_slug is required.

    This endpoint:
    1. Counts how many workflow nodes use the model being deleted
    2. If nodes exist, validates the replacement model and migrates them
    3. Deletes the model record
    4. Refreshes all caches and notifies executors

    Example: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o
    Example (no usage): DELETE /admin/llm/models/{id}
    """
    try:
        result = await llm_db.delete_model(
            model_id=model_id, replacement_model_slug=replacement_model_slug
        )
        await _refresh_runtime_state()
        logger.info(
            "Deleted model '%s' and migrated %d nodes to '%s'",
            result.deleted_model_slug,
            result.nodes_migrated,
            result.replacement_model_slug,
        )
        return result
    except ValueError as exc:
        # Validation errors (model not found, replacement invalid, etc.)
        logger.warning("Model deletion validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to delete LLM model %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to delete model and migrate workflows",
        ) from exc


# ============================================================================
# Migration Management Endpoints
# ============================================================================


@router.get(
    "/migrations",
    summary="List model migrations",
    response_model=llm_model.LlmMigrationsResponse,
)
async def list_llm_migrations(
    include_reverted: bool = fastapi.Query(
        default=False, description="Include reverted migrations in the list"
    ),
):
    """
    List all model migrations.

    Migrations are created when disabling a model with the migrate_to_slug option.
    They can be reverted to restore the original model configuration.
    """
    try:
        migrations = await llm_db.list_migrations(include_reverted=include_reverted)
        return llm_model.LlmMigrationsResponse(migrations=migrations)
    except Exception as exc:
        logger.exception("Failed to list migrations: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to list migrations",
        ) from exc


@router.get(
    "/migrations/{migration_id}",
    summary="Get migration details",
    response_model=llm_model.LlmModelMigration,
)
async def get_llm_migration(migration_id: str):
    """Get details of a specific migration."""
    try:
        migration = await llm_db.get_migration(migration_id)
        if not migration:
            raise fastapi.HTTPException(
                status_code=404, detail=f"Migration '{migration_id}' not found"
            )
        return migration
    except fastapi.HTTPException:
        raise
    except Exception as exc:
        logger.exception("Failed to get migration %s: %s", migration_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get migration",
        ) from exc


@router.post(
    "/migrations/{migration_id}/revert",
    summary="Revert a model migration",
    response_model=llm_model.RevertMigrationResponse,
)
async def revert_llm_migration(
    migration_id: str,
    request: llm_model.RevertMigrationRequest | None = None,
):
    """
    Revert a model migration, restoring affected workflows to their original model.

    This only reverts the specific nodes that were part of the migration.
    The source model must exist for the revert to succeed.

    Options:
    - `re_enable_source_model`: Whether to re-enable the source model if disabled (default: True)

    Response includes:
    - `nodes_reverted`: Number of nodes successfully reverted
    - `nodes_already_changed`: Number of nodes that were modified since migration (not reverted)
    - `source_model_re_enabled`: Whether the source model was re-enabled

    Requirements:
    - Migration must not already be reverted
    - Source model must exist
    """
    try:
        re_enable = request.re_enable_source_model if request else True
        result = await llm_db.revert_migration(
            migration_id,
            re_enable_source_model=re_enable,
        )
        await _refresh_runtime_state()
        logger.info(
            "Reverted migration '%s': %d nodes restored from '%s' to '%s' "
            "(%d already changed, source re-enabled=%s)",
            migration_id,
            result.nodes_reverted,
            result.target_model_slug,
            result.source_model_slug,
            result.nodes_already_changed,
            result.source_model_re_enabled,
        )
        return result
    except ValueError as exc:
        logger.warning("Migration revert validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to revert migration %s: %s", migration_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to revert migration",
        ) from exc


# ============================================================================
# Creator Management Endpoints
# ============================================================================


@router.get(
    "/creators",
    summary="List model creators",
    response_model=llm_model.LlmCreatorsResponse,
)
async def list_llm_creators():
    """
    List all model creators.

    Creators are organizations that create/train models (e.g., OpenAI, Meta, Anthropic).
    This is distinct from providers who host/serve the models (e.g., OpenRouter).
    """
    try:
        creators = await llm_db.list_creators()
        return llm_model.LlmCreatorsResponse(creators=creators)
    except Exception as exc:
        logger.exception("Failed to list creators: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to list creators",
        ) from exc


@router.get(
    "/creators/{creator_id}",
    summary="Get creator details",
    response_model=llm_model.LlmModelCreator,
)
async def get_llm_creator(creator_id: str):
    """Get details of a specific model creator."""
    try:
        creator = await llm_db.get_creator(creator_id)
        if not creator:
            raise fastapi.HTTPException(
                status_code=404, detail=f"Creator '{creator_id}' not found"
            )
        return creator
    except fastapi.HTTPException:
        raise
    except Exception as exc:
        logger.exception("Failed to get creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get creator",
        ) from exc


@router.post(
    "/creators",
    summary="Create model creator",
    response_model=llm_model.LlmModelCreator,
)
async def create_llm_creator(request: llm_model.UpsertLlmCreatorRequest):
    """
    Create a new model creator.

    A creator represents an organization that creates/trains AI models,
    such as OpenAI, Anthropic, Meta, or Google.
    """
    try:
        creator = await llm_db.upsert_creator(request=request)
        await _refresh_runtime_state()
        logger.info("Created model creator '%s' (%s)", creator.display_name, creator.id)
        return creator
    except Exception as exc:
        logger.exception("Failed to create creator: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to create creator",
        ) from exc


@router.patch(
    "/creators/{creator_id}",
    summary="Update model creator",
    response_model=llm_model.LlmModelCreator,
)
async def update_llm_creator(
    creator_id: str,
    request: llm_model.UpsertLlmCreatorRequest,
):
    """Update an existing model creator."""
    try:
        creator = await llm_db.upsert_creator(request=request, creator_id=creator_id)
        await _refresh_runtime_state()
        logger.info("Updated model creator '%s' (%s)", creator.display_name, creator_id)
        return creator
    except Exception as exc:
        logger.exception("Failed to update creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to update creator",
        ) from exc


@router.delete(
    "/creators/{creator_id}",
    summary="Delete model creator",
    response_model=dict,
)
async def delete_llm_creator(creator_id: str):
    """
    Delete a model creator.

    This will remove the creator association from all models that reference it
    (sets creatorId to NULL), but will not delete the models themselves.
    """
    try:
        await llm_db.delete_creator(creator_id)
        await _refresh_runtime_state()
        logger.info("Deleted model creator '%s'", creator_id)
        return {"success": True, "message": f"Creator '{creator_id}' deleted"}
    except ValueError as exc:
        logger.warning("Creator deletion validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to delete creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to delete creator",
        ) from exc


# ============================================================================
# Recommended Model Endpoints
# ============================================================================


@router.get(
    "/recommended-model",
    summary="Get recommended model",
    response_model=llm_model.RecommendedModelResponse,
)
async def get_recommended_model():
    """
    Get the currently recommended LLM model.

    The recommended model is shown to users as the default/suggested option
    in model selection dropdowns.
    """
    try:
        model = await llm_db.get_recommended_model()
        return llm_model.RecommendedModelResponse(
            model=model,
            slug=model.slug if model else None,
        )
    except Exception as exc:
        logger.exception("Failed to get recommended model: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get recommended model",
        ) from exc


@router.post(
    "/recommended-model",
    summary="Set recommended model",
    response_model=llm_model.SetRecommendedModelResponse,
)
async def set_recommended_model(request: llm_model.SetRecommendedModelRequest):
    """
    Set a model as the recommended model.

    This clears the recommended flag from any other model and sets it on
    the specified model. The model must be enabled to be set as recommended.

    The recommended model is displayed to users as the default/suggested
    option in model selection dropdowns throughout the platform.
    """
    try:
        model, previous_slug = await llm_db.set_recommended_model(request.model_id)
        await _refresh_runtime_state()
        logger.info(
            "Set recommended model to '%s' (previous: %s)",
            model.slug,
            previous_slug or "none",
        )
        return llm_model.SetRecommendedModelResponse(
            model=model,
            previous_recommended_slug=previous_slug,
            message=f"Model '{model.display_name}' is now the recommended model",
        )
    except ValueError as exc:
        logger.warning("Set recommended model validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to set recommended model: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to set recommended model",
        ) from exc
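For context on what was removed: the test module below mounts this router at the /admin/llm prefix, so a toggle-with-migration request would have looked roughly like the following. Illustrative only; the base URL, port, model id, and auth token are placeholders, not values from the diff:

import httpx

# Hypothetical base URL and token; the router itself only defines the
# /models/{model_id}/toggle path and requires an admin user.
resp = httpx.patch(
    "http://localhost:8000/admin/llm/models/some-model-id/toggle",
    json={
        "is_enabled": False,
        "migrate_to_slug": "gpt-4o",  # migrate workflows before disabling
        "migration_reason": "Provider outage",
    },
    headers={"Authorization": "Bearer <admin-jwt>"},
)
resp.raise_for_status()
print(resp.json())  # ToggleLlmModelResponse payload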
(deleted file; the header was not captured. This is the companion test module for llm_routes; the capture ends partway through the final test.)

@@ -1,491 +0,0 @@ (entire file removed)

import json
from unittest.mock import AsyncMock

import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from pytest_snapshot.plugin import Snapshot

import backend.api.features.admin.llm_routes as llm_routes
from backend.server.v2.llm import model as llm_model
from backend.util.models import Pagination

app = fastapi.FastAPI()
app.include_router(llm_routes.router, prefix="/admin/llm")

client = fastapi.testclient.TestClient(app)


@pytest.fixture(autouse=True)
def setup_app_admin_auth(mock_jwt_admin):
    """Setup admin auth overrides for all tests in this module"""
    app.dependency_overrides[get_jwt_payload] = mock_jwt_admin["get_jwt_payload"]
    yield
    app.dependency_overrides.clear()


def test_list_llm_providers_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM providers"""
    # Mock the database function
    mock_providers = [
        {
            "id": "provider-1",
            "name": "openai",
            "display_name": "OpenAI",
            "description": "OpenAI LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
        {
            "id": "provider-2",
            "name": "anthropic",
            "display_name": "Anthropic",
            "description": "Anthropic LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
    ]

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.list_providers",
        new=AsyncMock(return_value=mock_providers),
    )

    response = client.get("/admin/llm/providers")

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["providers"]) == 2
    assert response_data["providers"][0]["name"] == "openai"

    # Snapshot test the response (must be string)
    configured_snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "list_llm_providers_success.json",
    )


def test_list_llm_models_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM models with pagination"""
    # Mock the database function - now returns LlmModelsResponse
    mock_model = llm_model.LlmModel(
        id="model-1",
        slug="gpt-4o",
        display_name="GPT-4o",
        description="GPT-4 Optimized",
        provider_id="provider-1",
        context_window=128000,
        max_output_tokens=16384,
        is_enabled=True,
        capabilities={},
        metadata={},
        costs=[
            llm_model.LlmModelCost(
                id="cost-1",
                credit_cost=10,
                credential_provider="openai",
                metadata={},
            )
        ],
    )

    mock_response = llm_model.LlmModelsResponse(
        models=[mock_model],
        pagination=Pagination(
            total_items=1,
            total_pages=1,
            current_page=1,
            page_size=50,
        ),
    )

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.list_models",
        new=AsyncMock(return_value=mock_response),
    )

    response = client.get("/admin/llm/models")

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["models"]) == 1
    assert response_data["models"][0]["slug"] == "gpt-4o"
    assert response_data["pagination"]["total_items"] == 1
    assert response_data["pagination"]["page_size"] == 50

    # Snapshot test the response (must be string)
    configured_snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "list_llm_models_success.json",
    )


def test_create_llm_provider_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM provider"""
    mock_provider = {
        "id": "new-provider-id",
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.upsert_provider",
        new=AsyncMock(return_value=mock_provider),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }

    response = client.post("/admin/llm/providers", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["name"] == "groq"
    assert response_data["display_name"] == "Groq"

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response (must be string)
    configured_snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "create_llm_provider_success.json",
    )


def test_create_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM model"""
    mock_model = {
        "id": "new-model-id",
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-id",
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.create_model",
        new=AsyncMock(return_value=mock_model),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    response = client.post("/admin/llm/models", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["slug"] == "gpt-4.1-mini"
    assert response_data["is_enabled"] is True

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response (must be string)
    configured_snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "create_llm_model_success.json",
    )


def test_update_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful update of LLM model"""
    mock_model = {
        "id": "model-1",
        "slug": "gpt-4o",
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "provider_id": "provider-1",
        "context_window": 256000,
        "max_output_tokens": 32768,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-1",
                "credit_cost": 15,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.update_model",
        new=AsyncMock(return_value=mock_model),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "context_window": 256000,
        "max_output_tokens": 32768,
    }

    response = client.patch("/admin/llm/models/model-1", json=request_data)
|
|
||||||
|
|
||||||
assert response.status_code == 200
|
|
||||||
response_data = response.json()
|
|
||||||
assert response_data["display_name"] == "GPT-4o Updated"
|
|
||||||
assert response_data["context_window"] == 256000
|
|
||||||
|
|
||||||
# Verify refresh was called
|
|
||||||
mock_refresh.assert_called_once()
|
|
||||||
|
|
||||||
# Snapshot test the response (must be string)
|
|
||||||
configured_snapshot.assert_match(
|
|
||||||
json.dumps(response_data, indent=2, sort_keys=True),
|
|
||||||
"update_llm_model_success.json",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_toggle_llm_model_success(
|
|
||||||
mocker: pytest_mock.MockFixture,
|
|
||||||
configured_snapshot: Snapshot,
|
|
||||||
) -> None:
|
|
||||||
"""Test successful toggling of LLM model enabled status"""
|
|
||||||
# Create a proper mock model object
|
|
||||||
mock_model = llm_model.LlmModel(
|
|
||||||
id="model-1",
|
|
||||||
slug="gpt-4o",
|
|
||||||
display_name="GPT-4o",
|
|
||||||
description="GPT-4 Optimized",
|
|
||||||
provider_id="provider-1",
|
|
||||||
context_window=128000,
|
|
||||||
max_output_tokens=16384,
|
|
||||||
is_enabled=False,
|
|
||||||
capabilities={},
|
|
||||||
metadata={},
|
|
||||||
costs=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create a proper ToggleLlmModelResponse
|
|
||||||
mock_response = llm_model.ToggleLlmModelResponse(
|
|
||||||
model=mock_model,
|
|
||||||
nodes_migrated=0,
|
|
||||||
migrated_to_slug=None,
|
|
||||||
migration_id=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes.llm_db.toggle_model",
|
|
||||||
new=AsyncMock(return_value=mock_response),
|
|
||||||
)
|
|
||||||
|
|
||||||
mock_refresh = mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
|
||||||
new=AsyncMock(),
|
|
||||||
)
|
|
||||||
|
|
||||||
request_data = {"is_enabled": False}
|
|
||||||
|
|
||||||
response = client.patch("/admin/llm/models/model-1/toggle", json=request_data)
|
|
||||||
|
|
||||||
assert response.status_code == 200
|
|
||||||
response_data = response.json()
|
|
||||||
assert response_data["model"]["is_enabled"] is False
|
|
||||||
|
|
||||||
# Verify refresh was called
|
|
||||||
mock_refresh.assert_called_once()
|
|
||||||
|
|
||||||
# Snapshot test the response (must be string)
|
|
||||||
configured_snapshot.assert_match(
|
|
||||||
json.dumps(response_data, indent=2, sort_keys=True),
|
|
||||||
"toggle_llm_model_success.json",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_delete_llm_model_success(
|
|
||||||
mocker: pytest_mock.MockFixture,
|
|
||||||
configured_snapshot: Snapshot,
|
|
||||||
) -> None:
|
|
||||||
"""Test successful deletion of LLM model with migration"""
|
|
||||||
# Create a proper DeleteLlmModelResponse
|
|
||||||
mock_response = llm_model.DeleteLlmModelResponse(
|
|
||||||
deleted_model_slug="gpt-3.5-turbo",
|
|
||||||
deleted_model_display_name="GPT-3.5 Turbo",
|
|
||||||
replacement_model_slug="gpt-4o-mini",
|
|
||||||
nodes_migrated=42,
|
|
||||||
message="Successfully deleted model 'GPT-3.5 Turbo' (gpt-3.5-turbo) "
|
|
||||||
"and migrated 42 workflow node(s) to 'gpt-4o-mini'.",
|
|
||||||
)
|
|
||||||
|
|
||||||
mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
|
||||||
new=AsyncMock(return_value=mock_response),
|
|
||||||
)
|
|
||||||
|
|
||||||
mock_refresh = mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
|
||||||
new=AsyncMock(),
|
|
||||||
)
|
|
||||||
|
|
||||||
response = client.delete(
|
|
||||||
"/admin/llm/models/model-1?replacement_model_slug=gpt-4o-mini"
|
|
||||||
)
|
|
||||||
|
|
||||||
assert response.status_code == 200
|
|
||||||
response_data = response.json()
|
|
||||||
assert response_data["deleted_model_slug"] == "gpt-3.5-turbo"
|
|
||||||
assert response_data["nodes_migrated"] == 42
|
|
||||||
assert response_data["replacement_model_slug"] == "gpt-4o-mini"
|
|
||||||
|
|
||||||
# Verify refresh was called
|
|
||||||
mock_refresh.assert_called_once()
|
|
||||||
|
|
||||||
# Snapshot test the response (must be string)
|
|
||||||
configured_snapshot.assert_match(
|
|
||||||
json.dumps(response_data, indent=2, sort_keys=True),
|
|
||||||
"delete_llm_model_success.json",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_delete_llm_model_validation_error(
|
|
||||||
mocker: pytest_mock.MockFixture,
|
|
||||||
) -> None:
|
|
||||||
"""Test deletion fails with proper error when validation fails"""
|
|
||||||
mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
|
||||||
new=AsyncMock(side_effect=ValueError("Replacement model 'invalid' not found")),
|
|
||||||
)
|
|
||||||
|
|
||||||
response = client.delete("/admin/llm/models/model-1?replacement_model_slug=invalid")
|
|
||||||
|
|
||||||
assert response.status_code == 400
|
|
||||||
assert "Replacement model 'invalid' not found" in response.json()["detail"]
|
|
||||||
|
|
||||||
|
|
||||||
def test_delete_llm_model_no_replacement_with_usage(
|
|
||||||
mocker: pytest_mock.MockFixture,
|
|
||||||
) -> None:
|
|
||||||
"""Test deletion fails when nodes exist but no replacement is provided"""
|
|
||||||
mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
|
||||||
new=AsyncMock(
|
|
||||||
side_effect=ValueError(
|
|
||||||
"Cannot delete model 'test-model': 5 workflow node(s) are using it. "
|
|
||||||
"Please provide a replacement_model_slug to migrate them."
|
|
||||||
)
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
response = client.delete("/admin/llm/models/model-1")
|
|
||||||
|
|
||||||
assert response.status_code == 400
|
|
||||||
assert "workflow node(s) are using it" in response.json()["detail"]
|
|
||||||
|
|
||||||
|
|
||||||
def test_delete_llm_model_no_replacement_no_usage(
|
|
||||||
mocker: pytest_mock.MockFixture,
|
|
||||||
) -> None:
|
|
||||||
"""Test deletion succeeds when no nodes use the model and no replacement is provided"""
|
|
||||||
mock_response = llm_model.DeleteLlmModelResponse(
|
|
||||||
deleted_model_slug="unused-model",
|
|
||||||
deleted_model_display_name="Unused Model",
|
|
||||||
replacement_model_slug=None,
|
|
||||||
nodes_migrated=0,
|
|
||||||
message="Successfully deleted model 'Unused Model' (unused-model). No workflows were using this model.",
|
|
||||||
)
|
|
||||||
|
|
||||||
mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
|
||||||
new=AsyncMock(return_value=mock_response),
|
|
||||||
)
|
|
||||||
|
|
||||||
mock_refresh = mocker.patch(
|
|
||||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
|
||||||
new=AsyncMock(),
|
|
||||||
)
|
|
||||||
|
|
||||||
response = client.delete("/admin/llm/models/model-1")
|
|
||||||
|
|
||||||
assert response.status_code == 200
|
|
||||||
response_data = response.json()
|
|
||||||
assert response_data["deleted_model_slug"] == "unused-model"
|
|
||||||
assert response_data["nodes_migrated"] == 0
|
|
||||||
assert response_data["replacement_model_slug"] is None
|
|
||||||
mock_refresh.assert_called_once()
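The tests above exercise the admin LLM routes through a shared `client` and a `configured_snapshot` fixture defined earlier in the test module, outside this excerpt. A minimal sketch of what that harness plausibly looks like; the app wiring and mount point here are assumptions, not the file's actual setup:

import fastapi
from fastapi.testclient import TestClient

# Hypothetical harness: the real module builds the app with the admin LLM
# router mounted and overrides auth dependencies before creating the client.
app = fastapi.FastAPI()
# app.include_router(llm_routes.router, prefix="/admin/llm")  # assumed mount point
client = TestClient(app)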
@@ -15,7 +15,6 @@ from backend.blocks import load_all_blocks
 from backend.blocks.llm import LlmModel
 from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
 from backend.data.db import query_raw_with_schema
-from backend.data.llm_registry import get_all_model_slugs_for_validation
 from backend.integrations.providers import ProviderName
 from backend.util.cache import cached
 from backend.util.models import Pagination
@@ -32,14 +31,7 @@ from .model import (
 )
 
 logger = logging.getLogger(__name__)
-
-
-def _get_llm_models() -> list[str]:
-    """Get LLM model names for search matching from the registry."""
-    return [
-        slug.lower().replace("-", " ") for slug in get_all_model_slugs_for_validation()
-    ]
+llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
 
 
 MAX_LIBRARY_AGENT_RESULTS = 100
 MAX_MARKETPLACE_AGENT_RESULTS = 100
@@ -504,8 +496,8 @@ async def _get_static_counts():
 def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
     for field in schema_cls.model_fields.values():
         if field.annotation == LlmModel:
-            # Check if query matches any value in llm_models from registry
-            if any(query in name for name in _get_llm_models()):
+            # Check if query matches any value in llm_models
+            if any(query in name for name in llm_models):
                 return True
     return False
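The restored matcher is a plain substring test over normalized model names; only the source of the name list changes (registry slugs with hyphens folded to spaces, versus enum member names with underscores folded to spaces). A small self-contained illustration with sample values:

# Illustrative values only - both normalizations feed the same substring check.
slugs = ["gpt-4o", "claude-3-haiku-20240307"]   # registry variant (removed)
registry_names = [s.lower().replace("-", " ") for s in slugs]

member_names = ["GPT4O_MINI", "CLAUDE_3_HAIKU"]  # enum variant (restored)
enum_names = [n.lower().replace("_", " ") for n in member_names]

query = "gpt"
assert any(query in name for name in registry_names)
assert any(query in name for name in enum_names)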
@@ -393,7 +393,6 @@ async def get_creators(
 @router.get(
     "/creator/{username}",
     summary="Get creator details",
-    operation_id="getV2GetCreatorDetails",
     tags=["store", "public"],
     response_model=store_model.CreatorDetails,
 )
@@ -18,7 +18,6 @@ from prisma.errors import PrismaError
 
 import backend.api.features.admin.credit_admin_routes
 import backend.api.features.admin.execution_analytics_routes
-import backend.api.features.admin.llm_routes
 import backend.api.features.admin.store_admin_routes
 import backend.api.features.builder
 import backend.api.features.builder.routes
@@ -39,15 +38,13 @@ import backend.data.db
 import backend.data.graph
 import backend.data.user
 import backend.integrations.webhooks.utils
-import backend.server.v2.llm.routes as public_llm_routes
 import backend.util.service
 import backend.util.settings
 from backend.api.features.chat.completion_consumer import (
     start_completion_consumer,
     stop_completion_consumer,
 )
-from backend.data import llm_registry
-from backend.data.block_cost_config import refresh_llm_costs
+from backend.blocks.llm import DEFAULT_LLM_MODEL
 from backend.data.model import Credentials
 from backend.integrations.providers import ProviderName
 from backend.monitoring.instrumentation import instrument_fastapi
@@ -118,27 +115,11 @@ async def lifespan_context(app: fastapi.FastAPI):
 
     AutoRegistry.patch_integrations()
 
-    # Refresh LLM registry before initializing blocks so blocks can use registry data
-    await llm_registry.refresh_llm_registry()
-    refresh_llm_costs()
-
-    # Clear block schema caches so they're regenerated with updated discriminator_mapping
-    from backend.data.block import BlockSchema
-
-    BlockSchema.clear_all_schema_caches()
-
     await backend.data.block.initialize_blocks()
 
     await backend.data.user.migrate_and_encrypt_user_integrations()
     await backend.data.graph.fix_llm_provider_credentials()
-    # migrate_llm_models uses registry default model
-    from backend.blocks.llm import LlmModel
-
-    default_model_slug = llm_registry.get_default_model_slug()
-    if default_model_slug:
-        await backend.data.graph.migrate_llm_models(LlmModel(default_model_slug))
-    else:
-        logger.warning("Skipping LLM model migration: no default model available")
+    await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
     await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
 
     # Start chat completion consumer for Redis Streams notifications
@@ -340,16 +321,6 @@ app.include_router(
     tags=["v2", "executions", "review"],
     prefix="/api/review",
 )
-app.include_router(
-    backend.api.features.admin.llm_routes.router,
-    tags=["v2", "admin", "llm"],
-    prefix="/api/llm/admin",
-)
-app.include_router(
-    public_llm_routes.router,
-    tags=["v2", "llm"],
-    prefix="/api",
-)
 app.include_router(
     backend.api.features.library.routes.router, tags=["v2"], prefix="/api/library"
 )
@@ -79,39 +79,7 @@ async def event_broadcaster(manager: ConnectionManager):
                 payload=notification.payload,
             )
 
-        async def registry_refresh_worker():
-            """Listen for LLM registry refresh notifications and broadcast to all clients."""
-            from backend.data.llm_registry import REGISTRY_REFRESH_CHANNEL
-            from backend.data.redis_client import connect_async
-
-            redis = await connect_async()
-            pubsub = redis.pubsub()
-            await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
-            logger.info(
-                "Subscribed to LLM registry refresh notifications for WebSocket broadcast"
-            )
-
-            async for message in pubsub.listen():
-                if (
-                    message["type"] == "message"
-                    and message["channel"] == REGISTRY_REFRESH_CHANNEL
-                ):
-                    logger.info(
-                        "Broadcasting LLM registry refresh to all WebSocket clients"
-                    )
-                    await manager.broadcast_to_all(
-                        method=WSMethod.NOTIFICATION,
-                        data={
-                            "type": "LLM_REGISTRY_REFRESH",
-                            "event": "registry_updated",
-                        },
-                    )
-
-        await asyncio.gather(
-            execution_worker(),
-            notification_worker(),
-            registry_refresh_worker(),
-        )
+        await asyncio.gather(execution_worker(), notification_worker())
     finally:
         # Ensure PubSub connections are closed on any exit to prevent leaks
         await execution_bus.close()
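The removed worker is the subscriber half of a Redis pub/sub pair; the publishing half (not shown in this diff) would run after an admin edits the registry. A rough sketch of that publisher with redis-py's asyncio client; the channel value and client construction are assumptions, since the diff only references the constant by name:

import redis.asyncio as redis

REGISTRY_REFRESH_CHANNEL = "llm-registry-refresh"  # assumed value

async def notify_registry_refresh() -> None:
    # Fire-and-forget: every subscribed ws_api process receives this and
    # rebroadcasts an LLM_REGISTRY_REFRESH notification to its WebSocket clients.
    client = redis.Redis()
    try:
        await client.publish(REGISTRY_REFRESH_CHANNEL, "registry_updated")
    finally:
        await client.aclose()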
@@ -1,6 +1,7 @@
 from typing import Any
 
 from backend.blocks.llm import (
+    DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     AIBlockBase,
@@ -9,7 +10,6 @@ from backend.blocks.llm import (
     LlmModel,
     LLMResponse,
     llm_call,
-    llm_model_schema_extra,
 )
 from backend.data.block import (
     BlockCategory,
@@ -50,10 +50,9 @@ class AIConditionBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default_factory=LlmModel.default,
+        default=DEFAULT_LLM_MODEL,
         description="The language model to use for evaluating the condition.",
         advanced=False,
-        json_schema_extra=llm_model_schema_extra(),
     )
     credentials: AICredentials = AICredentialsField()
 
@@ -83,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
                 "condition": "the input is an email address",
                 "yes_value": "Valid email",
                 "no_value": "Not an email",
-                "model": LlmModel.default(),
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
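The `default_factory=LlmModel.default` to `default=DEFAULT_LLM_MODEL` swap changes when the default is computed: a factory runs on every instantiation (so it can consult a registry populated after import), while a plain default is fixed when the class body executes. A minimal pydantic illustration with stand-in values:

from pydantic import BaseModel, Field

CURRENT_DEFAULT = "gpt-4o"

class WithFactory(BaseModel):
    # Re-evaluated per instance; observes later changes to the global.
    model: str = Field(default_factory=lambda: CURRENT_DEFAULT)

class WithDefault(BaseModel):
    # Captured once at class-definition time.
    model: str = "gpt-4o"

CURRENT_DEFAULT = "gpt-5"
assert WithFactory().model == "gpt-5"
assert WithDefault().model == "gpt-4o"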
@@ -4,19 +4,17 @@ import logging
 import re
 import secrets
 from abc import ABC
-from enum import Enum
+from enum import Enum, EnumMeta
 from json import JSONDecodeError
-from typing import Any, Iterable, List, Literal, Optional
+from typing import Any, Iterable, List, Literal, NamedTuple, Optional
 
 import anthropic
 import ollama
 import openai
 from anthropic.types import ToolParam
 from groq import AsyncGroq
-from pydantic import BaseModel, GetCoreSchemaHandler, SecretStr
-from pydantic_core import CoreSchema, core_schema
+from pydantic import BaseModel, SecretStr
 
-from backend.data import llm_registry
 from backend.data.block import (
     Block,
     BlockCategory,
@@ -24,7 +22,6 @@ from backend.data.block import (
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.data.llm_registry import ModelMetadata
 from backend.data.model import (
     APIKeyCredentials,
     CredentialsField,
@@ -69,123 +66,114 @@ TEST_CREDENTIALS_INPUT = {
 
 
 def AICredentialsField() -> AICredentials:
-    """
-    Returns a CredentialsField for LLM providers.
-
-    The discriminator_mapping will be refreshed when the schema is generated
-    if it's empty, ensuring the LLM registry is loaded.
-    """
-    # Get the mapping now - it may be empty initially, but will be refreshed
-    # when the schema is generated via CredentialsMetaInput._add_json_schema_extra
-    mapping = llm_registry.get_llm_discriminator_mapping()
-
     return CredentialsField(
         description="API key for the LLM provider.",
         discriminator="model",
-        discriminator_mapping=mapping,  # May be empty initially, refreshed later
+        discriminator_mapping={
+            model.value: model.metadata.provider for model in LlmModel
+        },
     )
 
 
-def llm_model_schema_extra() -> dict[str, Any]:
-    return {"options": llm_registry.get_llm_model_schema_options()}
+class ModelMetadata(NamedTuple):
+    provider: str
+    context_window: int
+    max_output_tokens: int | None
+    display_name: str
+    provider_name: str
+    creator_name: str
+    price_tier: Literal[1, 2, 3]
 
 
-class LlmModelMeta(type):
-    """
-    Metaclass for LlmModel that enables attribute-style access to dynamic models.
-
-    This allows code like `LlmModel.GPT4O` to work by converting the attribute
-    name to a slug format:
-    - GPT4O -> gpt-4o
-    - GPT4O_MINI -> gpt-4o-mini
-    - CLAUDE_3_5_SONNET -> claude-3-5-sonnet
-    """
-
-    def __getattr__(cls, name: str):
-        # Don't intercept private/dunder attributes
-        if name.startswith("_"):
-            raise AttributeError(f"type object 'LlmModel' has no attribute '{name}'")
-
-        # Convert attribute name to slug format:
-        # 1. Lowercase: GPT4O -> gpt4o
-        # 2. Underscores to hyphens: GPT4O_MINI -> gpt4o-mini
-        slug = name.lower().replace("_", "-")
-
-        # Check for exact match in registry first (e.g., "o1" stays "o1")
-        registry_slugs = llm_registry.get_dynamic_model_slugs()
-        if slug in registry_slugs:
-            return cls(slug)
-
-        # If no exact match, try inserting hyphen between letter and digit
-        # e.g., gpt4o -> gpt-4o
-        transformed_slug = re.sub(r"([a-z])(\d)", r"\1-\2", slug)
-        return cls(transformed_slug)
-
-    def __iter__(cls):
-        """Iterate over all models from the registry.
-
-        Yields LlmModel instances for each model in the dynamic registry.
-        Used by __get_pydantic_json_schema__ to build model metadata.
-        """
-        for model in llm_registry.iter_dynamic_models():
-            yield cls(model.slug)
+class LlmModelMeta(EnumMeta):
+    pass
 
 
-class LlmModel(str, metaclass=LlmModelMeta):
-    """
-    Dynamic LLM model type that accepts any model slug from the registry.
-
-    This is a string subclass (not an Enum) that allows any model slug value.
-    All models are managed via the LLM Registry in the database.
-
-    Usage:
-        model = LlmModel("gpt-4o")  # Direct construction
-        model = LlmModel.GPT4O  # Attribute access (converted to "gpt-4o")
-        model.value  # Returns the slug string
-        model.provider  # Returns the provider from registry
-    """
-
-    def __new__(cls, value: str):
-        if isinstance(value, LlmModel):
-            return value
-        return str.__new__(cls, value)
-
-    @classmethod
-    def __get_pydantic_core_schema__(
-        cls, source_type: Any, handler: GetCoreSchemaHandler
-    ) -> CoreSchema:
-        """
-        Tell Pydantic how to validate LlmModel.
-
-        Accepts strings and converts them to LlmModel instances.
-        """
-        return core_schema.no_info_after_validator_function(
-            cls,  # The validator function (LlmModel constructor)
-            core_schema.str_schema(),  # Accept string input
-            serialization=core_schema.to_string_ser_schema(),  # Serialize as string
-        )
-
-    @property
-    def value(self) -> str:
-        """Return the model slug (for compatibility with enum-style access)."""
-        return str(self)
-
-    @classmethod
-    def default(cls) -> "LlmModel":
-        """
-        Get the default model from the registry.
-
-        Returns the recommended model if set, otherwise gpt-4o if available
-        and enabled, otherwise the first enabled model from the registry.
-        Falls back to "gpt-4o" if registry is empty (e.g., at module import time).
-        """
-        from backend.data.llm_registry import get_default_model_slug
-
-        slug = get_default_model_slug()
-        if slug is None:
-            # Registry is empty (e.g., at module import time before DB connection).
-            # Fall back to gpt-4o for backward compatibility.
-            slug = "gpt-4o"
-        return cls(slug)
+class LlmModel(str, Enum, metaclass=LlmModelMeta):
+    # OpenAI models
+    O3_MINI = "o3-mini"
+    O3 = "o3-2025-04-16"
+    O1 = "o1"
+    O1_MINI = "o1-mini"
+    # GPT-5 models
+    GPT5_2 = "gpt-5.2-2025-12-11"
+    GPT5_1 = "gpt-5.1-2025-11-13"
+    GPT5 = "gpt-5-2025-08-07"
+    GPT5_MINI = "gpt-5-mini-2025-08-07"
+    GPT5_NANO = "gpt-5-nano-2025-08-07"
+    GPT5_CHAT = "gpt-5-chat-latest"
+    GPT41 = "gpt-4.1-2025-04-14"
+    GPT41_MINI = "gpt-4.1-mini-2025-04-14"
+    GPT4O_MINI = "gpt-4o-mini"
+    GPT4O = "gpt-4o"
+    GPT4_TURBO = "gpt-4-turbo"
+    GPT3_5_TURBO = "gpt-3.5-turbo"
+    # Anthropic models
+    CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
+    CLAUDE_4_OPUS = "claude-opus-4-20250514"
+    CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
+    CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
+    CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
+    CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
+    CLAUDE_4_6_OPUS = "claude-opus-4-6"
+    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
+    # AI/ML API models
+    AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
+    AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
+    AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
+    AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
+    AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
+    # Groq models
+    LLAMA3_3_70B = "llama-3.3-70b-versatile"
+    LLAMA3_1_8B = "llama-3.1-8b-instant"
+    # Ollama models
+    OLLAMA_LLAMA3_3 = "llama3.3"
+    OLLAMA_LLAMA3_2 = "llama3.2"
+    OLLAMA_LLAMA3_8B = "llama3"
+    OLLAMA_LLAMA3_405B = "llama3.1:405b"
+    OLLAMA_DOLPHIN = "dolphin-mistral:latest"
+    # OpenRouter models
+    OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
+    OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
+    GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
+    GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
+    GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
+    GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
+    GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
+    GEMINI_2_0_FLASH_LITE = "google/gemini-2.0-flash-lite-001"
+    MISTRAL_NEMO = "mistralai/mistral-nemo"
+    COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
+    COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
+    DEEPSEEK_CHAT = "deepseek/deepseek-chat"  # Actually: DeepSeek V3
+    DEEPSEEK_R1_0528 = "deepseek/deepseek-r1-0528"
+    PERPLEXITY_SONAR = "perplexity/sonar"
+    PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
+    PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
+    NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
+    NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
+    AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1"
+    AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
+    AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
+    MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
+    GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
+    META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
+    META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
+    GROK_4 = "x-ai/grok-4"
+    GROK_4_FAST = "x-ai/grok-4-fast"
+    GROK_4_1_FAST = "x-ai/grok-4.1-fast"
+    GROK_CODE_FAST_1 = "x-ai/grok-code-fast-1"
+    KIMI_K2 = "moonshotai/kimi-k2"
+    QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
+    QWEN3_CODER = "qwen/qwen3-coder"
+    # Llama API models
+    LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
+    LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
+    LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
+    LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
+    # v0 by Vercel models
+    V0_1_5_MD = "v0-1.5-md"
+    V0_1_5_LG = "v0-1.5-lg"
+    V0_1_0_MD = "v0-1.0-md"
 
     @classmethod
     def __get_pydantic_json_schema__(cls, schema, handler):
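The removed `__getattr__` hook boils down to a deterministic name-to-slug transform plus a registry escape hatch. Isolated as a plain function for illustration (same regex as the removed code; the registry lookup is stubbed with a set):

import re

def attr_to_slug(name: str, registry_slugs: set[str]) -> str:
    # GPT4O_MINI -> gpt4o-mini (lowercase, underscores to hyphens)
    slug = name.lower().replace("_", "-")
    # Exact registry match wins, so "O1" -> "o1" rather than "o-1"
    if slug in registry_slugs:
        return slug
    # Otherwise insert a hyphen between letter and digit: gpt4o -> gpt-4o
    return re.sub(r"([a-z])(\d)", r"\1-\2", slug)

assert attr_to_slug("GPT4O", set()) == "gpt-4o"
assert attr_to_slug("GPT4O_MINI", set()) == "gpt-4o-mini"
assert attr_to_slug("O1", {"o1"}) == "o1"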
@@ -193,15 +181,7 @@ class LlmModel(str, metaclass=LlmModelMeta):
         llm_model_metadata = {}
         for model in cls:
             model_name = model.value
-            # Skip disabled models - only show enabled models in the picker
-            if not llm_registry.is_model_enabled(model_name):
-                continue
-            # Use registry directly with None check to gracefully handle
-            # missing metadata during startup/import before registry is populated
-            metadata = llm_registry.get_llm_model_metadata(model_name)
-            if metadata is None:
-                # Skip models without metadata (registry not yet populated)
-                continue
+            metadata = model.metadata
             llm_model_metadata[model_name] = {
                 "creator": metadata.creator_name,
                 "creator_name": metadata.creator_name,
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def metadata(self) -> ModelMetadata:
|
def metadata(self) -> ModelMetadata:
|
||||||
metadata = llm_registry.get_llm_model_metadata(self.value)
|
return MODEL_METADATA[self]
|
||||||
if metadata:
|
|
||||||
return metadata
|
|
||||||
raise ValueError(
|
|
||||||
f"Missing metadata for model: {self.value}. Model not found in LLM registry."
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def provider(self) -> str:
|
def provider(self) -> str:
|
||||||
@@ -237,9 +212,300 @@ class LlmModel(str, metaclass=LlmModelMeta):
         return self.metadata.max_output_tokens
 
 
-# Default model constant for backward compatibility
-# Uses the dynamic registry to get the default model
-DEFAULT_LLM_MODEL = LlmModel.default()
+MODEL_METADATA = {
+    # https://platform.openai.com/docs/models
+    LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
+    LlmModel.O3_MINI: ModelMetadata(
+        "openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
+    ),  # o3-mini-2025-01-31
+    LlmModel.O1: ModelMetadata(
+        "openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
+    ),  # o1-2024-12-17
+    LlmModel.O1_MINI: ModelMetadata(
+        "openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
+    ),  # o1-mini-2024-09-12
+    # GPT-5 models
+    LlmModel.GPT5_2: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
+    ),
+    LlmModel.GPT5_1: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
+    ),
+    LlmModel.GPT5: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_MINI: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_NANO: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_CHAT: ModelMetadata(
+        "openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
+    ),
+    LlmModel.GPT41: ModelMetadata(
+        "openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT41_MINI: ModelMetadata(
+        "openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT4O_MINI: ModelMetadata(
+        "openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1
+    ),  # gpt-4o-mini-2024-07-18
+    LlmModel.GPT4O: ModelMetadata(
+        "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
+    ),  # gpt-4o-2024-08-06
+    LlmModel.GPT4_TURBO: ModelMetadata(
+        "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
+    ),  # gpt-4-turbo-2024-04-09
+    LlmModel.GPT3_5_TURBO: ModelMetadata(
+        "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
+    ),  # gpt-3.5-turbo-0125
+    # https://docs.anthropic.com/en/docs/about-claude/models
+    LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
+        "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
+    ),  # claude-opus-4-1-20250805
+    LlmModel.CLAUDE_4_OPUS: ModelMetadata(
+        "anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3
+    ),  # claude-4-opus-20250514
+    LlmModel.CLAUDE_4_SONNET: ModelMetadata(
+        "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
+    ),  # claude-4-sonnet-20250514
+    LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
+        "anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
+    ),  # claude-opus-4-6
+    LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
+        "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
+    ),  # claude-opus-4-5-20251101
+    LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
+        "anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3
+    ),  # claude-sonnet-4-5-20250929
+    LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
+        "anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
+    ),  # claude-haiku-4-5-20251001
+    LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
+        "anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
+    ),  # claude-3-haiku-20240307
+    # https://docs.aimlapi.com/api-overview/model-database/text-models
+    LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata(
+        "aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
+    ),
+    LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
+        "aiml_api",
+        128000,
+        40000,
+        "Llama 3.1 Nemotron 70B Instruct",
+        "AI/ML",
+        "Nvidia",
+        1,
+    ),
+    LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
+        "aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
+        "aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
+        "aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    # https://console.groq.com/docs/models
+    LlmModel.LLAMA3_3_70B: ModelMetadata(
+        "groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
+    ),
+    LlmModel.LLAMA3_1_8B: ModelMetadata(
+        "groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
+    ),
+    # https://ollama.com/library
+    LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
+        "ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
+        "ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
+    ),
+    # https://openrouter.ai/models
+    LlmModel.GEMINI_2_5_PRO: ModelMetadata(
+        "open_router",
+        1050000,
+        8192,
+        "Gemini 2.5 Pro Preview 03.25",
+        "OpenRouter",
+        "Google",
+        2,
+    ),
+    LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
+        "open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
+    ),
+    LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
+        "open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
+    ),
+    LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
+        "open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
+    ),
+    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
+        "open_router",
+        1048576,
+        65535,
+        "Gemini 2.5 Flash Lite Preview 06.17",
+        "OpenRouter",
+        "Google",
+        1,
+    ),
+    LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
+        "open_router",
+        1048576,
+        8192,
+        "Gemini 2.0 Flash Lite 001",
+        "OpenRouter",
+        "Google",
+        1,
+    ),
+    LlmModel.MISTRAL_NEMO: ModelMetadata(
+        "open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
+    ),
+    LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
+        "open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
+    ),
+    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
+        "open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
+    ),
+    LlmModel.DEEPSEEK_CHAT: ModelMetadata(
+        "open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
+    ),
+    LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
+        "open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
+    ),
+    LlmModel.PERPLEXITY_SONAR: ModelMetadata(
+        "open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
+    ),
+    LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
+        "open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
+    ),
+    LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
+        "open_router",
+        128000,
+        16000,
+        "Sonar Deep Research",
+        "OpenRouter",
+        "Perplexity",
+        3,
+    ),
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
+        "open_router",
+        131000,
+        4096,
+        "Hermes 3 Llama 3.1 405B",
+        "OpenRouter",
+        "Nous Research",
+        1,
+    ),
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
+        "open_router",
+        12288,
+        12288,
+        "Hermes 3 Llama 3.1 70B",
+        "OpenRouter",
+        "Nous Research",
+        1,
+    ),
+    LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
+        "open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
+    ),
+    LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
+        "open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
+    ),
+    LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
+        "open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
+        "open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
+        "open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
+        "open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
+    ),
+    LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
+        "open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
+    ),
+    LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
+        "open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
+    ),
+    LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
+        "open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
+    ),
+    LlmModel.GROK_4: ModelMetadata(
+        "open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
+    ),
+    LlmModel.GROK_4_FAST: ModelMetadata(
+        "open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.GROK_4_1_FAST: ModelMetadata(
+        "open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.GROK_CODE_FAST_1: ModelMetadata(
+        "open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.KIMI_K2: ModelMetadata(
+        "open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
+    ),
+    LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
+        "open_router",
+        262144,
+        262144,
+        "Qwen 3 235B A22B Thinking 2507",
+        "OpenRouter",
+        "Qwen",
+        1,
+    ),
+    LlmModel.QWEN3_CODER: ModelMetadata(
+        "open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
+    ),
+    # Llama API models
+    LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
+        "llama_api",
+        128000,
+        4028,
+        "Llama 4 Scout 17B 16E Instruct FP8",
+        "Llama API",
+        "Meta",
+        1,
+    ),
+    LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
+        "llama_api",
+        128000,
+        4028,
+        "Llama 4 Maverick 17B 128E Instruct FP8",
+        "Llama API",
+        "Meta",
+        1,
+    ),
+    LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
+        "llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
+    ),
+    LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
+        "llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
+    ),
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
+    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
+    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
+}
+
+DEFAULT_LLM_MODEL = LlmModel.GPT5_2
+
+for model in LlmModel:
+    if model not in MODEL_METADATA:
+        raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
 
 
 class ToolCall(BaseModel):
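With the static table restored, a metadata lookup is plain dict indexing keyed by the enum member, and the import-time loop above guarantees every member has an entry. A usage sketch against the values shown in the table:

# Illustrative usage; mirrors the `metadata` property restored above.
meta = MODEL_METADATA[LlmModel.GPT4O]
assert meta.provider == "openai"
assert meta.context_window == 128000
assert meta.max_output_tokens == 16384
assert meta.price_tier == 2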
@@ -332,11 +598,8 @@ def get_parallel_tool_calls_param(
     llm_model: LlmModel, parallel_tool_calls: bool | None
 ) -> bool | openai.Omit:
     """Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
-    # Check for o-series models (o1, o1-mini, o3-mini, etc.) which don't support
-    # parallel tool calls. Use regex to avoid false positives like "openai/gpt-oss".
-    is_o_series = re.match(r"^o\d", llm_model) is not None
-    if is_o_series or parallel_tool_calls is None:
-        return openai.NOT_GIVEN
+    if llm_model.startswith("o") or parallel_tool_calls is None:
+        return openai.omit
     return parallel_tool_calls
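The two sides detect o-series models differently: the removed regex requires a leading `o` followed directly by a digit, while the restored `startswith("o")` also matches any slug that merely begins with `o`. A quick check of the false positive the removed comment warns about:

import re

def is_o_series(model: str) -> bool:
    # Matches o1, o1-mini, o3-mini... but not "openai/gpt-oss-120b"
    return re.match(r"^o\d", model) is not None

assert is_o_series("o1")
assert is_o_series("o3-mini")
assert not is_o_series("openai/gpt-oss-120b")
assert "openai/gpt-oss-120b".startswith("o")  # the restored check over-matches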
@@ -371,93 +634,15 @@ async def llm_call(
         - prompt_tokens: The number of tokens used in the prompt.
         - completion_tokens: The number of tokens used in the completion.
     """
-    # Get model metadata and check if enabled - with fallback support
-    # The model we'll actually use (may differ if original is disabled)
-    model_to_use = llm_model.value
-
-    # Check if model is in registry and if it's enabled
-    from backend.data.llm_registry import (
-        get_fallback_model_for_disabled,
-        get_model_info,
-    )
-
-    model_info = get_model_info(llm_model.value)
-
-    if model_info and not model_info.is_enabled:
-        # Model is disabled - try to find a fallback from the same provider
-        fallback = get_fallback_model_for_disabled(llm_model.value)
-        if fallback:
-            logger.warning(
-                f"Model '{llm_model.value}' is disabled. Using fallback model '{fallback.slug}' from the same provider ({fallback.metadata.provider})."
-            )
-            model_to_use = fallback.slug
-            # Use fallback model's metadata
-            provider = fallback.metadata.provider
-            context_window = fallback.metadata.context_window
-            model_max_output = fallback.metadata.max_output_tokens or int(2**15)
-        else:
-            # No fallback available - raise error
-            raise ValueError(
-                f"LLM model '{llm_model.value}' is disabled and no fallback model "
-                f"from the same provider is available. Please enable the model or "
-                f"select a different model in the block configuration."
-            )
-    else:
-        # Model is enabled or not in registry (legacy/static model)
-        try:
-            provider = llm_model.metadata.provider
-            context_window = llm_model.context_window
-            model_max_output = llm_model.max_output_tokens or int(2**15)
-        except ValueError:
-            # Model not in cache - try refreshing the registry once if we have DB access
-            logger.warning(f"Model {llm_model.value} not found in registry cache")
-
-            # Try refreshing the registry if we have database access
-            from backend.data.db import is_connected
-
-            if is_connected():
-                try:
-                    logger.info(
-                        f"Refreshing LLM registry and retrying lookup for {llm_model.value}"
-                    )
-                    await llm_registry.refresh_llm_registry()
-                    # Try again after refresh
-                    try:
-                        provider = llm_model.metadata.provider
-                        context_window = llm_model.context_window
-                        model_max_output = llm_model.max_output_tokens or int(2**15)
-                        logger.info(
-                            f"Successfully loaded model {llm_model.value} metadata after registry refresh"
-                        )
-                    except ValueError:
-                        # Still not found after refresh
-                        raise ValueError(
-                            f"LLM model '{llm_model.value}' not found in registry after refresh. "
-                            "Please ensure the model is added and enabled in the LLM registry via the admin UI."
-                        )
-                except Exception as refresh_exc:
-                    logger.error(f"Failed to refresh LLM registry: {refresh_exc}")
-                    raise ValueError(
-                        f"LLM model '{llm_model.value}' not found in registry and failed to refresh. "
-                        "Please ensure the model is added to the LLM registry via the admin UI."
-                    ) from refresh_exc
-            else:
-                # No DB access (e.g., in executor without direct DB connection)
-                # The registry should have been loaded on startup
-                raise ValueError(
-                    f"LLM model '{llm_model.value}' not found in registry cache. "
-                    "The registry may need to be refreshed. Please contact support or try again later."
-                )
-
-    # Create effective model for model-specific parameter resolution (e.g., o-series check)
-    # This uses the resolved model_to_use which may differ from llm_model if fallback occurred
-    effective_model = LlmModel(model_to_use)
-
+    provider = llm_model.metadata.provider
+    context_window = llm_model.context_window
+
     if compress_prompt_to_fit:
         result = await compress_context(
             messages=prompt,
-            target_tokens=context_window // 2,
-            lossy_ok=True,
+            target_tokens=llm_model.context_window // 2,
+            client=None,  # Truncation-only, no LLM summarization
+            reserve=0,  # Caller handles response token budget separately
         )
         if result.error:
             logger.warning(
@@ -468,7 +653,7 @@ async def llm_call(
 
     # Calculate available tokens based on context window and input length
     estimated_input_tokens = estimate_token_count(prompt)
-    # model_max_output already set above
+    model_max_output = llm_model.max_output_tokens or int(2**15)
     user_max = max_tokens or model_max_output
     available_tokens = max(context_window - estimated_input_tokens, 0)
     max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
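The clamp above resolves the completion budget from three limits: remaining context, the model's output cap, and the caller's request. A worked example with assumed numbers — a 128k-context model with a 16,384-token output cap and a prompt estimated at 120,000 tokens:

context_window = 128_000
model_max_output = 16_384
user_max = 16_384  # caller passed no explicit max_tokens
estimated_input_tokens = 120_000

available_tokens = max(context_window - estimated_input_tokens, 0)      # 8000
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)  # 8000
assert max_tokens == 8_000  # capped by remaining context, floored at 1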
@@ -479,14 +664,14 @@ async def llm_call(
         response_format = None
 
         parallel_tool_calls = get_parallel_tool_calls_param(
-            effective_model, parallel_tool_calls
+            llm_model, parallel_tool_calls
         )
 
         if force_json_output:
             response_format = {"type": "json_object"}
 
         response = await oai_client.chat.completions.create(
-            model=model_to_use,
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             response_format=response_format,  # type: ignore
             max_completion_tokens=max_tokens,
@@ -533,7 +718,7 @@ async def llm_call(
         )
         try:
             resp = await client.messages.create(
-                model=model_to_use,
+                model=llm_model.value,
                 system=sysprompt,
                 messages=messages,
                 max_tokens=max_tokens,
@@ -597,7 +782,7 @@ async def llm_call(
         client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
         response_format = {"type": "json_object"} if force_json_output else None
         response = await client.chat.completions.create(
-            model=model_to_use,
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             response_format=response_format,  # type: ignore
             max_tokens=max_tokens,
@@ -619,7 +804,7 @@ async def llm_call(
         sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
         usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
         response = await client.generate(
-            model=model_to_use,
+            model=llm_model.value,
             prompt=f"{sys_messages}\n\n{usr_messages}",
             stream=False,
             options={"num_ctx": max_tokens},
@@ -641,7 +826,7 @@ async def llm_call(
         )
 
         parallel_tool_calls_param = get_parallel_tool_calls_param(
-            effective_model, parallel_tool_calls
+            llm_model, parallel_tool_calls
         )
 
         response = await client.chat.completions.create(
@@ -649,7 +834,7 @@ async def llm_call(
                 "HTTP-Referer": "https://agpt.co",
                 "X-Title": "AutoGPT",
             },
-            model=model_to_use,
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             max_tokens=max_tokens,
             tools=tools_param,  # type: ignore
@@ -683,7 +868,7 @@ async def llm_call(
         )

         parallel_tool_calls_param = get_parallel_tool_calls_param(
-            effective_model, parallel_tool_calls
+            llm_model, parallel_tool_calls
         )

         response = await client.chat.completions.create(
@@ -691,7 +876,7 @@ async def llm_call(
                 "HTTP-Referer": "https://agpt.co",
                 "X-Title": "AutoGPT",
             },
-            model=model_to_use,
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             max_tokens=max_tokens,
             tools=tools_param,  # type: ignore
@@ -718,7 +903,7 @@ async def llm_call(
             reasoning=reasoning,
         )
     elif provider == "aiml_api":
-        client = openai.AsyncOpenAI(
+        client = openai.OpenAI(
             base_url="https://api.aimlapi.com/v2",
             api_key=credentials.api_key.get_secret_value(),
             default_headers={
@@ -728,8 +913,8 @@ async def llm_call(
             },
         )

-        completion = await client.chat.completions.create(
-            model=model_to_use,
+        completion = client.chat.completions.create(
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             max_tokens=max_tokens,
         )
@@ -757,11 +942,11 @@ async def llm_call(
             response_format = {"type": "json_object"}

         parallel_tool_calls_param = get_parallel_tool_calls_param(
-            effective_model, parallel_tool_calls
+            llm_model, parallel_tool_calls
         )

         response = await client.chat.completions.create(
-            model=model_to_use,
+            model=llm_model.value,
             messages=prompt,  # type: ignore
             response_format=response_format,  # type: ignore
             max_tokens=max_tokens,
@@ -812,10 +997,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=LlmModel.default,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
-            json_schema_extra=llm_model_schema_extra(),
         )
         force_json_output: bool = SchemaField(
             title="Restrict LLM to pure JSON output",
@@ -878,7 +1062,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             input_schema=AIStructuredResponseGeneratorBlock.Input,
             output_schema=AIStructuredResponseGeneratorBlock.Output,
             test_input={
-                "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
                 "expected_format": {
                     "key1": "value1",
@@ -1244,10 +1428,9 @@ class AITextGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=LlmModel.default,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
-            json_schema_extra=llm_model_schema_extra(),
         )
         credentials: AICredentials = AICredentialsField()
         sys_prompt: str = SchemaField(
@@ -1341,9 +1524,8 @@ class AITextSummarizerBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=LlmModel.default,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for summarizing the text.",
-            json_schema_extra=llm_model_schema_extra(),
         )
         focus: str = SchemaField(
             title="Focus",
@@ -1559,9 +1741,8 @@ class AIConversationBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=LlmModel.default,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for the conversation.",
-            json_schema_extra=llm_model_schema_extra(),
         )
         credentials: AICredentials = AICredentialsField()
         max_tokens: int | None = SchemaField(
@@ -1598,7 +1779,7 @@ class AIConversationBlock(AIBlockBase):
                 },
                 {"role": "user", "content": "Where was it played?"},
             ],
-            "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
+            "model": DEFAULT_LLM_MODEL,
             "credentials": TEST_CREDENTIALS_INPUT,
         },
         test_credentials=TEST_CREDENTIALS,
@@ -1661,10 +1842,9 @@ class AIListGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=LlmModel.default,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for generating the list.",
             advanced=True,
-            json_schema_extra=llm_model_schema_extra(),
         )
         credentials: AICredentials = AICredentialsField()
         max_retries: int = SchemaField(
@@ -1719,7 +1899,7 @@ class AIListGeneratorBlock(AIBlockBase):
                 "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
                 "fictional worlds."
             ),
-            "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
+            "model": DEFAULT_LLM_MODEL,
             "credentials": TEST_CREDENTIALS_INPUT,
             "max_retries": 3,
             "force_json_output": False,
@@ -226,10 +226,9 @@ class SmartDecisionMakerBlock(Block):
         )
         model: llm.LlmModel = SchemaField(
             title="LLM Model",
-            default_factory=llm.LlmModel.default,
+            default=llm.DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
-            json_schema_extra=llm.llm_model_schema_extra(),
         )
         credentials: llm.AICredentials = llm.AICredentialsField()
         multiple_tool_calls: bool = SchemaField(
@@ -10,13 +10,13 @@ import stagehand.main
 from stagehand import Stagehand

 from backend.blocks.llm import (
+    MODEL_METADATA,
     AICredentials,
     AICredentialsField,
     LlmModel,
     ModelMetadata,
 )
 from backend.blocks.stagehand._config import stagehand as stagehand_provider
-from backend.data import llm_registry
 from backend.sdk import (
     APIKeyCredentials,
     Block,
@@ -91,7 +91,7 @@ class StagehandRecommendedLlmModel(str, Enum):
         Returns the provider name for the model in the required format for Stagehand:
         provider/model_name
         """
-        model_metadata = self.metadata
+        model_metadata = MODEL_METADATA[LlmModel(self.value)]
         model_name = self.value

         if len(model_name.split("/")) == 1 and not self.value.startswith(
@@ -107,23 +107,19 @@ class StagehandRecommendedLlmModel(str, Enum):

     @property
     def provider(self) -> str:
-        return self.metadata.provider
+        return MODEL_METADATA[LlmModel(self.value)].provider

     @property
     def metadata(self) -> ModelMetadata:
-        metadata = llm_registry.get_llm_model_metadata(self.value)
-        if metadata:
-            return metadata
-        # Fallback to LlmModel enum if registry lookup fails
-        return LlmModel(self.value).metadata
+        return MODEL_METADATA[LlmModel(self.value)]

     @property
     def context_window(self) -> int:
-        return self.metadata.context_window
+        return MODEL_METADATA[LlmModel(self.value)].context_window

     @property
     def max_output_tokens(self) -> int | None:
-        return self.metadata.max_output_tokens
+        return MODEL_METADATA[LlmModel(self.value)].max_output_tokens


 class StagehandObserveBlock(Block):
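Stagehand expects model identifiers in `provider/model_name` form, which is what the property logic above produces. A small sketch of that normalization with a stand-in metadata table (the real `MODEL_METADATA` lives in `backend.blocks.llm`):

```python
from typing import NamedTuple


class Meta(NamedTuple):  # stand-in for the real ModelMetadata
    provider: str


METADATA = {
    "gpt-4o": Meta("openai"),
    "claude-3-haiku-20240307": Meta("anthropic"),
}


def stagehand_model_id(slug: str) -> str:
    """Prefix a bare model slug with its provider, Stagehand-style."""
    if "/" in slug:
        return slug  # already qualified, e.g. "meta-llama/llama-4-scout"
    return f"{METADATA[slug].provider}/{slug}"


assert stagehand_model_id("gpt-4o") == "openai/gpt-4o"
```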
@@ -25,7 +25,6 @@ from prisma.models import AgentBlock
 from prisma.types import AgentBlockCreateInput
 from pydantic import BaseModel

-from backend.data.llm_registry import update_schema_with_llm_registry
 from backend.data.model import NodeExecutionStats
 from backend.integrations.providers import ProviderName
 from backend.util import json
@@ -144,59 +143,35 @@ class BlockInfo(BaseModel):


 class BlockSchema(BaseModel):
-    cached_jsonschema: ClassVar[dict[str, Any] | None] = None
-
-    @classmethod
-    def clear_schema_cache(cls) -> None:
-        """Clear the cached JSON schema for this class."""
-        # Use None instead of {} because {} is truthy and would prevent regeneration
-        cls.cached_jsonschema = None  # type: ignore
-
-    @staticmethod
-    def clear_all_schema_caches() -> None:
-        """Clear cached JSON schemas for all BlockSchema subclasses."""
-
-        def clear_recursive(cls: type) -> None:
-            """Recursively clear cache for class and all subclasses."""
-            if hasattr(cls, "clear_schema_cache"):
-                cls.clear_schema_cache()
-            for subclass in cls.__subclasses__():
-                clear_recursive(subclass)
-
-        clear_recursive(BlockSchema)
+    cached_jsonschema: ClassVar[dict[str, Any]]

     @classmethod
     def jsonschema(cls) -> dict[str, Any]:
-        # Generate schema if not cached
-        if not cls.cached_jsonschema:
-            model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
-
-            def ref_to_dict(obj):
-                if isinstance(obj, dict):
-                    # OpenAPI <3.1 does not support sibling fields that has a $ref key
-                    # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
-                    keys = {"allOf", "anyOf", "oneOf"}
-                    one_key = next(
-                        (k for k in keys if k in obj and len(obj[k]) == 1), None
-                    )
-                    if one_key:
-                        obj.update(obj[one_key][0])
-
-                    return {
-                        key: ref_to_dict(value)
-                        for key, value in obj.items()
-                        if not key.startswith("$") and key != one_key
-                    }
-                elif isinstance(obj, list):
-                    return [ref_to_dict(item) for item in obj]
-
-                return obj
-
-            cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
-
-        # Always post-process to ensure LLM registry data is up-to-date
-        # This refreshes model options and discriminator mappings even if schema was cached
-        update_schema_with_llm_registry(cls.cached_jsonschema, cls)
+        if cls.cached_jsonschema:
+            return cls.cached_jsonschema
+
+        model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
+
+        def ref_to_dict(obj):
+            if isinstance(obj, dict):
+                # OpenAPI <3.1 does not support sibling fields that has a $ref key
+                # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
+                keys = {"allOf", "anyOf", "oneOf"}
+                one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
+                if one_key:
+                    obj.update(obj[one_key][0])
+
+                return {
+                    key: ref_to_dict(value)
+                    for key, value in obj.items()
+                    if not key.startswith("$") and key != one_key
+                }
+            elif isinstance(obj, list):
+                return [ref_to_dict(item) for item in obj]
+
+            return obj
+
+        cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))

         return cls.cached_jsonschema
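`ref_to_dict` both strips `$`-prefixed keys and collapses single-item `allOf`/`anyOf`/`oneOf` wrappers, which pre-3.1 OpenAPI tooling chokes on. A standalone sketch of just the collapsing step:

```python
def collapse_single_wrappers(obj):
    """Merge a one-element allOf/anyOf/oneOf into its parent and drop $-keys."""
    if isinstance(obj, dict):
        one_key = next(
            (k for k in ("allOf", "anyOf", "oneOf") if len(obj.get(k, [])) == 1),
            None,
        )
        if one_key:
            obj.update(obj[one_key][0])
        return {
            k: collapse_single_wrappers(v)
            for k, v in obj.items()
            if not k.startswith("$") and k != one_key
        }
    if isinstance(obj, list):
        return [collapse_single_wrappers(i) for i in obj]
    return obj


schema = {"allOf": [{"type": "string", "title": "Name"}], "description": "x"}
assert collapse_single_wrappers(schema) == {
    "type": "string",
    "title": "Name",
    "description": "x",
}
```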
@@ -259,7 +234,7 @@ class BlockSchema(BaseModel):
         super().__pydantic_init_subclass__(**kwargs)

         # Reset cached JSON schema to prevent inheriting it from parent class
-        cls.cached_jsonschema = None
+        cls.cached_jsonschema = {}

         credentials_fields = cls.get_credentials_fields()

@@ -900,30 +875,6 @@ def is_block_auth_configured(


 async def initialize_blocks() -> None:
-    # Refresh LLM registry before initializing blocks so blocks can use registry data
-    # This ensures the registry cache is populated even in executor context
-    try:
-        from backend.data import llm_registry
-        from backend.data.block_cost_config import refresh_llm_costs
-
-        # Only refresh if we have DB access (check if Prisma is connected)
-        from backend.data.db import is_connected
-
-        if is_connected():
-            await llm_registry.refresh_llm_registry()
-            refresh_llm_costs()
-            logger.info("LLM registry refreshed during block initialization")
-        else:
-            logger.warning(
-                "Prisma not connected, skipping LLM registry refresh during block initialization"
-            )
-    except Exception as exc:
-        logger.warning(
-            "Failed to refresh LLM registry during block initialization: %s", exc
-        )
-
-    # First, sync all provider costs to blocks
-    # Imported here to avoid circular import
     from backend.sdk.cost_integration import sync_all_provider_costs
     from backend.util.retry import func_retry

@@ -1,4 +1,3 @@
-import logging
 from typing import Type

 from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel
@@ -24,11 +23,13 @@ from backend.blocks.ideogram import IdeogramModelBlock
 from backend.blocks.jina.embeddings import JinaEmbeddingBlock
 from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
 from backend.blocks.llm import (
+    MODEL_METADATA,
     AIConversationBlock,
     AIListGeneratorBlock,
     AIStructuredResponseGeneratorBlock,
     AITextGeneratorBlock,
     AITextSummarizerBlock,
+    LlmModel,
 )
 from backend.blocks.replicate.flux_advanced import ReplicateFluxAdvancedModelBlock
 from backend.blocks.replicate.replicate_block import ReplicateModelBlock
@@ -36,7 +37,6 @@ from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
 from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
 from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
 from backend.blocks.video.narration import VideoNarrationBlock
-from backend.data import llm_registry
 from backend.data.block import Block, BlockCost, BlockCostType
 from backend.integrations.credentials_store import (
     aiml_api_credentials,
@@ -57,63 +57,210 @@ from backend.integrations.credentials_store import (
     v0_credentials,
 )

-logger = logging.getLogger(__name__)
-
-PROVIDER_CREDENTIALS = {
-    "openai": openai_credentials,
-    "anthropic": anthropic_credentials,
-    "groq": groq_credentials,
-    "open_router": open_router_credentials,
-    "llama_api": llama_api_credentials,
-    "aiml_api": aiml_api_credentials,
-    "v0": v0_credentials,
-}
-
-# =============== Configure the cost for each LLM Model call =============== #
-# All LLM costs now come from the database via llm_registry
-LLM_COST: list[BlockCost] = []
-
-
-def _build_llm_costs_from_registry() -> list[BlockCost]:
-    """Build BlockCost list from all models in the LLM registry."""
-    costs: list[BlockCost] = []
-    for model in llm_registry.iter_dynamic_models():
-        for cost in model.costs:
-            credentials = PROVIDER_CREDENTIALS.get(cost.credential_provider)
-            if not credentials:
-                logger.warning(
-                    "Skipping cost entry for %s due to unknown credentials provider %s",
-                    model.slug,
-                    cost.credential_provider,
-                )
-                continue
-            cost_filter = {
-                "model": model.slug,
-                "credentials": {
-                    "id": credentials.id,
-                    "provider": credentials.provider,
-                    "type": credentials.type,
-                },
-            }
-            costs.append(
-                BlockCost(
-                    cost_type=BlockCostType.RUN,
-                    cost_filter=cost_filter,
-                    cost_amount=cost.credit_cost,
-                )
-            )
-    return costs
-
-
-def refresh_llm_costs() -> None:
-    """Refresh LLM costs from the registry. All costs now come from the database."""
-    LLM_COST.clear()
-    LLM_COST.extend(_build_llm_costs_from_registry())
-
-
-# Initial load will happen after registry is refreshed at startup
-# Don't call refresh_llm_costs() here - it will be called after registry refresh
+# =============== Configure the cost for each LLM Model call =============== #
+
+MODEL_COST: dict[LlmModel, int] = {
+    LlmModel.O3: 4,
+    LlmModel.O3_MINI: 2,
+    LlmModel.O1: 16,
+    LlmModel.O1_MINI: 4,
+    # GPT-5 models
+    LlmModel.GPT5_2: 6,
+    LlmModel.GPT5_1: 5,
+    LlmModel.GPT5: 2,
+    LlmModel.GPT5_MINI: 1,
+    LlmModel.GPT5_NANO: 1,
+    LlmModel.GPT5_CHAT: 5,
+    LlmModel.GPT41: 2,
+    LlmModel.GPT41_MINI: 1,
+    LlmModel.GPT4O_MINI: 1,
+    LlmModel.GPT4O: 3,
+    LlmModel.GPT4_TURBO: 10,
+    LlmModel.GPT3_5_TURBO: 1,
+    LlmModel.CLAUDE_4_1_OPUS: 21,
+    LlmModel.CLAUDE_4_OPUS: 21,
+    LlmModel.CLAUDE_4_SONNET: 5,
+    LlmModel.CLAUDE_4_6_OPUS: 14,
+    LlmModel.CLAUDE_4_5_HAIKU: 4,
+    LlmModel.CLAUDE_4_5_OPUS: 14,
+    LlmModel.CLAUDE_4_5_SONNET: 9,
+    LlmModel.CLAUDE_3_HAIKU: 1,
+    LlmModel.AIML_API_QWEN2_5_72B: 1,
+    LlmModel.AIML_API_LLAMA3_1_70B: 1,
+    LlmModel.AIML_API_LLAMA3_3_70B: 1,
+    LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
+    LlmModel.AIML_API_LLAMA_3_2_3B: 1,
+    LlmModel.LLAMA3_3_70B: 1,
+    LlmModel.LLAMA3_1_8B: 1,
+    LlmModel.OLLAMA_LLAMA3_3: 1,
+    LlmModel.OLLAMA_LLAMA3_2: 1,
+    LlmModel.OLLAMA_LLAMA3_8B: 1,
+    LlmModel.OLLAMA_LLAMA3_405B: 1,
+    LlmModel.OLLAMA_DOLPHIN: 1,
+    LlmModel.OPENAI_GPT_OSS_120B: 1,
+    LlmModel.OPENAI_GPT_OSS_20B: 1,
+    LlmModel.GEMINI_2_5_PRO: 4,
+    LlmModel.GEMINI_3_PRO_PREVIEW: 5,
+    LlmModel.GEMINI_2_5_FLASH: 1,
+    LlmModel.GEMINI_2_0_FLASH: 1,
+    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
+    LlmModel.GEMINI_2_0_FLASH_LITE: 1,
+    LlmModel.MISTRAL_NEMO: 1,
+    LlmModel.COHERE_COMMAND_R_08_2024: 1,
+    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
+    LlmModel.DEEPSEEK_CHAT: 2,
+    LlmModel.DEEPSEEK_R1_0528: 1,
+    LlmModel.PERPLEXITY_SONAR: 1,
+    LlmModel.PERPLEXITY_SONAR_PRO: 5,
+    LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
+    LlmModel.AMAZON_NOVA_LITE_V1: 1,
+    LlmModel.AMAZON_NOVA_MICRO_V1: 1,
+    LlmModel.AMAZON_NOVA_PRO_V1: 1,
+    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
+    LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
+    LlmModel.META_LLAMA_4_SCOUT: 1,
+    LlmModel.META_LLAMA_4_MAVERICK: 1,
+    LlmModel.LLAMA_API_LLAMA_4_SCOUT: 1,
+    LlmModel.LLAMA_API_LLAMA4_MAVERICK: 1,
+    LlmModel.LLAMA_API_LLAMA3_3_8B: 1,
+    LlmModel.LLAMA_API_LLAMA3_3_70B: 1,
+    LlmModel.GROK_4: 9,
+    LlmModel.GROK_4_FAST: 1,
+    LlmModel.GROK_4_1_FAST: 1,
+    LlmModel.GROK_CODE_FAST_1: 1,
+    LlmModel.KIMI_K2: 1,
+    LlmModel.QWEN3_235B_A22B_THINKING: 1,
+    LlmModel.QWEN3_CODER: 9,
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: 1,
+    LlmModel.V0_1_5_LG: 2,
+    LlmModel.V0_1_0_MD: 1,
+}
+
+for model in LlmModel:
+    if model not in MODEL_COST:
+        raise ValueError(f"Missing MODEL_COST for model: {model}")
+
+
+LLM_COST = (
+    # Anthropic Models
+    [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": anthropic_credentials.id,
+                    "provider": anthropic_credentials.provider,
+                    "type": anthropic_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "anthropic"
+    ]
+    # OpenAI Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": openai_credentials.id,
+                    "provider": openai_credentials.provider,
+                    "type": openai_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "openai"
+    ]
+    # Groq Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {"id": groq_credentials.id},
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "groq"
+    ]
+    # Open Router Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": open_router_credentials.id,
+                    "provider": open_router_credentials.provider,
+                    "type": open_router_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "open_router"
+    ]
+    # Llama API Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": llama_api_credentials.id,
+                    "provider": llama_api_credentials.provider,
+                    "type": llama_api_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "llama_api"
+    ]
+    # v0 by Vercel Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": v0_credentials.id,
+                    "provider": v0_credentials.provider,
+                    "type": v0_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "v0"
+    ]
+    # AI/ML Api Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": aiml_api_credentials.id,
+                    "provider": aiml_api_credentials.provider,
+                    "type": aiml_api_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "aiml_api"
+    ]
+)

 # =============== This is the exhaustive list of cost for each Block =============== #
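Two patterns in the restored config are worth calling out: the import-time exhaustiveness check, and the per-provider list comprehensions. A compressed sketch with stand-in enums (values are illustrative, not the repository's real prices):

```python
from enum import Enum


class Model(str, Enum):  # stand-in for LlmModel
    GPT4O = "gpt-4o"
    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"


PROVIDER = {Model.GPT4O: "openai", Model.CLAUDE_3_HAIKU: "anthropic"}
COST = {Model.GPT4O: 3, Model.CLAUDE_3_HAIKU: 1}

# Exhaustiveness check: adding an enum member without a price fails at
# import time instead of silently running a model for free.
for m in Model:
    if m not in COST:
        raise ValueError(f"Missing COST for model: {m}")

# Per-provider grouping, mirroring the LLM_COST comprehensions above.
openai_costs = [(m, c) for m, c in COST.items() if PROVIDER[m] == "openai"]
assert openai_costs == [(Model.GPT4O, 3)]
```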
@@ -743,11 +743,6 @@ class GraphModel(Graph, GraphMeta):
             # For invalid blocks, we still raise immediately as this is a structural issue
             raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

-            if block.disabled:
-                raise ValueError(
-                    f"Block {node.block_id} is disabled and cannot be used in graphs"
-                )
-
             node_input_mask = (
                 nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
             )
@@ -1638,10 +1633,8 @@ async def migrate_llm_models(migrate_to: LlmModel):
             if field.annotation == LlmModel:
                 llm_model_fields[block.id] = field_name

-    # Get all model slugs from the registry (dynamic, not hardcoded enum)
-    from backend.data import llm_registry
-
-    enum_values = list(llm_registry.get_all_model_slugs_for_validation())
+    # Convert enum values to a list of strings for the SQL query
+    enum_values = [v.value for v in LlmModel]
    escaped_enum_values = repr(tuple(enum_values))  # hack but works

    # Update each block
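The `repr(tuple(...))` line flagged as "hack but works" serializes the slug list into a SQL-style value list. For illustration:

```python
enum_values = ["gpt-4o", "claude-3-haiku-20240307"]
escaped = repr(tuple(enum_values))
assert escaped == "('gpt-4o', 'claude-3-haiku-20240307')"
# Presumably interpolated into a raw query along the lines of f"... IN {escaped}".
# Safe only because the values come from a trusted enum, never from user input.
```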
@@ -1,72 +0,0 @@
-"""
-LLM Registry module for managing LLM models, providers, and costs dynamically.
-
-This module provides a database-driven registry system for LLM models,
-replacing hardcoded model configurations with a flexible admin-managed system.
-"""
-
-from backend.data.llm_registry.model import ModelMetadata
-
-# Re-export for backwards compatibility
-from backend.data.llm_registry.notifications import (
-    REGISTRY_REFRESH_CHANNEL,
-    publish_registry_refresh_notification,
-    subscribe_to_registry_refresh,
-)
-from backend.data.llm_registry.registry import (
-    RegistryModel,
-    RegistryModelCost,
-    RegistryModelCreator,
-    get_all_model_slugs_for_validation,
-    get_default_model_slug,
-    get_dynamic_model_slugs,
-    get_fallback_model_for_disabled,
-    get_llm_discriminator_mapping,
-    get_llm_model_cost,
-    get_llm_model_metadata,
-    get_llm_model_schema_options,
-    get_model_info,
-    is_model_enabled,
-    iter_dynamic_models,
-    refresh_llm_registry,
-    register_static_costs,
-    register_static_metadata,
-)
-from backend.data.llm_registry.schema_utils import (
-    is_llm_model_field,
-    refresh_llm_discriminator_mapping,
-    refresh_llm_model_options,
-    update_schema_with_llm_registry,
-)
-
-__all__ = [
-    # Types
-    "ModelMetadata",
-    "RegistryModel",
-    "RegistryModelCost",
-    "RegistryModelCreator",
-    # Registry functions
-    "get_all_model_slugs_for_validation",
-    "get_default_model_slug",
-    "get_dynamic_model_slugs",
-    "get_fallback_model_for_disabled",
-    "get_llm_discriminator_mapping",
-    "get_llm_model_cost",
-    "get_llm_model_metadata",
-    "get_llm_model_schema_options",
-    "get_model_info",
-    "is_model_enabled",
-    "iter_dynamic_models",
-    "refresh_llm_registry",
-    "register_static_costs",
-    "register_static_metadata",
-    # Notifications
-    "REGISTRY_REFRESH_CHANNEL",
-    "publish_registry_refresh_notification",
-    "subscribe_to_registry_refresh",
-    # Schema utilities
-    "is_llm_model_field",
-    "refresh_llm_discriminator_mapping",
-    "refresh_llm_model_options",
-    "update_schema_with_llm_registry",
-]
@@ -1,25 +0,0 @@
-"""Type definitions for LLM model metadata."""
-
-from typing import Literal, NamedTuple
-
-
-class ModelMetadata(NamedTuple):
-    """Metadata for an LLM model.
-
-    Attributes:
-        provider: The provider identifier (e.g., "openai", "anthropic")
-        context_window: Maximum context window size in tokens
-        max_output_tokens: Maximum output tokens (None if unlimited)
-        display_name: Human-readable name for the model
-        provider_name: Human-readable provider name (e.g., "OpenAI", "Anthropic")
-        creator_name: Name of the organization that created the model
-        price_tier: Relative cost tier (1=cheapest, 2=medium, 3=expensive)
-    """
-
-    provider: str
-    context_window: int
-    max_output_tokens: int | None
-    display_name: str
-    provider_name: str
-    creator_name: str
-    price_tier: Literal[1, 2, 3]
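The deleted `ModelMetadata` is a plain `NamedTuple`, so instances are immutable and fields are accessed by name. A usage sketch (example values are illustrative, not from the repository's tables):

```python
from typing import Literal, NamedTuple


class ModelMetadata(NamedTuple):
    provider: str
    context_window: int
    max_output_tokens: int | None
    display_name: str
    provider_name: str
    creator_name: str
    price_tier: Literal[1, 2, 3]


meta = ModelMetadata("openai", 128_000, 16_384, "GPT-4o", "OpenAI", "OpenAI", 2)
assert meta.context_window == 128_000
assert meta._replace(price_tier=3).price_tier == 3  # copies, never mutates
```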
@@ -1,89 +0,0 @@
-"""
-Redis pub/sub notifications for LLM registry updates.
-
-When models are added/updated/removed via the admin UI, this module
-publishes notifications to Redis that all executor services subscribe to,
-ensuring they refresh their registry cache in real-time.
-"""
-
-import asyncio
-import logging
-from typing import Any
-
-from backend.data.redis_client import connect_async
-
-logger = logging.getLogger(__name__)
-
-# Redis channel name for LLM registry refresh notifications
-REGISTRY_REFRESH_CHANNEL = "llm_registry:refresh"
-
-
-async def publish_registry_refresh_notification() -> None:
-    """
-    Publish a notification to Redis that the LLM registry has been updated.
-    All executor services subscribed to this channel will refresh their registry.
-    """
-    try:
-        redis = await connect_async()
-        await redis.publish(REGISTRY_REFRESH_CHANNEL, "refresh")
-        logger.info("Published LLM registry refresh notification to Redis")
-    except Exception as exc:
-        logger.warning(
-            "Failed to publish LLM registry refresh notification: %s",
-            exc,
-            exc_info=True,
-        )
-
-
-async def subscribe_to_registry_refresh(
-    on_refresh: Any,  # Async callable that takes no args
-) -> None:
-    """
-    Subscribe to Redis notifications for LLM registry updates.
-    This runs in a loop and processes messages as they arrive.
-
-    Args:
-        on_refresh: Async callable to execute when a refresh notification is received
-    """
-    try:
-        redis = await connect_async()
-        pubsub = redis.pubsub()
-        await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
-        logger.info(
-            "Subscribed to LLM registry refresh notifications on channel: %s",
-            REGISTRY_REFRESH_CHANNEL,
-        )
-
-        # Process messages in a loop
-        while True:
-            try:
-                message = await pubsub.get_message(
-                    ignore_subscribe_messages=True, timeout=1.0
-                )
-                if (
-                    message
-                    and message["type"] == "message"
-                    and message["channel"] == REGISTRY_REFRESH_CHANNEL
-                ):
-                    logger.info("Received LLM registry refresh notification")
-                    try:
-                        await on_refresh()
-                    except Exception as exc:
-                        logger.error(
-                            "Error refreshing LLM registry from notification: %s",
-                            exc,
-                            exc_info=True,
-                        )
-            except Exception as exc:
-                logger.warning(
-                    "Error processing registry refresh message: %s", exc, exc_info=True
-                )
-                # Continue listening even if one message fails
-                await asyncio.sleep(1)
-    except Exception as exc:
-        logger.error(
-            "Failed to subscribe to LLM registry refresh notifications: %s",
-            exc,
-            exc_info=True,
-        )
-        raise
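The deleted notifications module is a thin publish/subscribe loop. A minimal sketch of the same pattern using redis-py's asyncio client directly (the project instead goes through its own `connect_async` wrapper):

```python
import redis.asyncio as redis

CHANNEL = "llm_registry:refresh"


async def listen(on_refresh) -> None:
    r = redis.Redis()
    pubsub = r.pubsub()
    await pubsub.subscribe(CHANNEL)
    while True:  # poll with a timeout so the loop stays cancellable
        msg = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
        if msg and msg["type"] == "message":
            await on_refresh()  # refresh the in-process registry cache


async def notify() -> None:
    # Any writer (e.g. an admin UI) publishes one token message.
    await redis.Redis().publish(CHANNEL, "refresh")
```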
@@ -1,388 +0,0 @@
-"""Core LLM registry implementation for managing models dynamically."""
-
-from __future__ import annotations
-
-import asyncio
-import logging
-from dataclasses import dataclass, field
-from typing import Any, Iterable
-
-import prisma.models
-
-from backend.data.llm_registry.model import ModelMetadata
-
-logger = logging.getLogger(__name__)
-
-
-def _json_to_dict(value: Any) -> dict[str, Any]:
-    """Convert Prisma Json type to dict, with fallback to empty dict."""
-    if value is None:
-        return {}
-    if isinstance(value, dict):
-        return value
-    # Prisma Json type should always be a dict at runtime
-    return dict(value) if value else {}
-
-
-@dataclass(frozen=True)
-class RegistryModelCost:
-    """Cost configuration for an LLM model."""
-
-    credit_cost: int
-    credential_provider: str
-    credential_id: str | None
-    credential_type: str | None
-    currency: str | None
-    metadata: dict[str, Any]
-
-
-@dataclass(frozen=True)
-class RegistryModelCreator:
-    """Creator information for an LLM model."""
-
-    id: str
-    name: str
-    display_name: str
-    description: str | None
-    website_url: str | None
-    logo_url: str | None
-
-
-@dataclass(frozen=True)
-class RegistryModel:
-    """Represents a model in the LLM registry."""
-
-    slug: str
-    display_name: str
-    description: str | None
-    metadata: ModelMetadata
-    capabilities: dict[str, Any]
-    extra_metadata: dict[str, Any]
-    provider_display_name: str
-    is_enabled: bool
-    is_recommended: bool = False
-    costs: tuple[RegistryModelCost, ...] = field(default_factory=tuple)
-    creator: RegistryModelCreator | None = None
-
-
-_static_metadata: dict[str, ModelMetadata] = {}
-_static_costs: dict[str, int] = {}
-_dynamic_models: dict[str, RegistryModel] = {}
-_schema_options: list[dict[str, str]] = []
-_discriminator_mapping: dict[str, str] = {}
-_lock = asyncio.Lock()
-
-
-def register_static_metadata(metadata: dict[Any, ModelMetadata]) -> None:
-    """Register static metadata for legacy models (deprecated)."""
-    _static_metadata.update({str(key): value for key, value in metadata.items()})
-    _refresh_cached_schema()
-
-
-def register_static_costs(costs: dict[Any, int]) -> None:
-    """Register static costs for legacy models (deprecated)."""
-    _static_costs.update({str(key): value for key, value in costs.items()})
-
-
-def _build_schema_options() -> list[dict[str, str]]:
-    """Build schema options for model selection dropdown. Only includes enabled models."""
-    options: list[dict[str, str]] = []
-    # Only include enabled models in the dropdown options
-    for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
-        if model.is_enabled:
-            options.append(
-                {
-                    "label": model.display_name,
-                    "value": model.slug,
-                    "group": model.metadata.provider,
-                    "description": model.description or "",
-                }
-            )
-
-    for slug, metadata in _static_metadata.items():
-        if slug in _dynamic_models:
-            continue
-        options.append(
-            {
-                "label": slug,
-                "value": slug,
-                "group": metadata.provider,
-                "description": "",
-            }
-        )
-    return options
-
-
-async def refresh_llm_registry() -> None:
-    """Refresh the LLM registry from the database. Loads all models (enabled and disabled)."""
-    async with _lock:
-        try:
-            records = await prisma.models.LlmModel.prisma().find_many(
-                include={
-                    "Provider": True,
-                    "Costs": True,
-                    "Creator": True,
-                }
-            )
-            logger.debug("Found %d LLM model records in database", len(records))
-        except Exception as exc:
-            logger.error(
-                "Failed to refresh LLM registry from DB: %s", exc, exc_info=True
-            )
-            return
-
-        dynamic: dict[str, RegistryModel] = {}
-        for record in records:
-            provider_name = (
-                record.Provider.name if record.Provider else record.providerId
-            )
-            provider_display_name = (
-                record.Provider.displayName if record.Provider else record.providerId
-            )
-            # Creator name: prefer Creator.name, fallback to provider display name
-            creator_name = (
-                record.Creator.name if record.Creator else provider_display_name
-            )
-            # Price tier: default to 1 (cheapest) if not set
-            price_tier = getattr(record, "priceTier", 1) or 1
-            # Clamp to valid range 1-3
-            price_tier = max(1, min(3, price_tier))
-
-            metadata = ModelMetadata(
-                provider=provider_name,
-                context_window=record.contextWindow,
-                max_output_tokens=record.maxOutputTokens,
-                display_name=record.displayName,
-                provider_name=provider_display_name,
-                creator_name=creator_name,
-                price_tier=price_tier,  # type: ignore[arg-type]
-            )
-            costs = tuple(
-                RegistryModelCost(
-                    credit_cost=cost.creditCost,
-                    credential_provider=cost.credentialProvider,
-                    credential_id=cost.credentialId,
-                    credential_type=cost.credentialType,
-                    currency=cost.currency,
-                    metadata=_json_to_dict(cost.metadata),
-                )
-                for cost in (record.Costs or [])
-            )
-
-            # Map creator if present
-            creator = None
-            if record.Creator:
-                creator = RegistryModelCreator(
-                    id=record.Creator.id,
-                    name=record.Creator.name,
-                    display_name=record.Creator.displayName,
-                    description=record.Creator.description,
-                    website_url=record.Creator.websiteUrl,
-                    logo_url=record.Creator.logoUrl,
-                )
-
-            dynamic[record.slug] = RegistryModel(
-                slug=record.slug,
-                display_name=record.displayName,
-                description=record.description,
-                metadata=metadata,
-                capabilities=_json_to_dict(record.capabilities),
-                extra_metadata=_json_to_dict(record.metadata),
-                provider_display_name=(
-                    record.Provider.displayName
-                    if record.Provider
-                    else record.providerId
-                ),
-                is_enabled=record.isEnabled,
-                is_recommended=record.isRecommended,
-                costs=costs,
-                creator=creator,
-            )
-
-        # Atomic swap - build new structures then replace references
-        # This ensures readers never see partially updated state
-        global _dynamic_models
-        _dynamic_models = dynamic
-        _refresh_cached_schema()
-        logger.info(
-            "LLM registry refreshed with %s dynamic models (enabled: %s, disabled: %s)",
-            len(dynamic),
-            sum(1 for m in dynamic.values() if m.is_enabled),
-            sum(1 for m in dynamic.values() if not m.is_enabled),
-        )
-
-
-def _refresh_cached_schema() -> None:
-    """Refresh cached schema options and discriminator mapping."""
-    global _schema_options, _discriminator_mapping
-
-    # Build new structures
-    new_options = _build_schema_options()
-    new_mapping = {
-        slug: entry.metadata.provider for slug, entry in _dynamic_models.items()
-    }
-    for slug, metadata in _static_metadata.items():
-        new_mapping.setdefault(slug, metadata.provider)
-
-    # Atomic swap - replace references to ensure readers see consistent state
-    _schema_options = new_options
-    _discriminator_mapping = new_mapping
-
-
-def get_llm_model_metadata(slug: str) -> ModelMetadata | None:
-    """Get model metadata by slug. Checks dynamic models first, then static metadata."""
-    if slug in _dynamic_models:
-        return _dynamic_models[slug].metadata
-    return _static_metadata.get(slug)
-
-
-def get_llm_model_cost(slug: str) -> tuple[RegistryModelCost, ...]:
-    """Get model cost configuration by slug."""
-    if slug in _dynamic_models:
-        return _dynamic_models[slug].costs
-    cost_value = _static_costs.get(slug)
-    if cost_value is None:
-        return tuple()
-    return (
-        RegistryModelCost(
-            credit_cost=cost_value,
-            credential_provider="static",
-            credential_id=None,
-            credential_type=None,
-            currency=None,
-            metadata={},
-        ),
-    )
-
-
-def get_llm_model_schema_options() -> list[dict[str, str]]:
-    """
-    Get schema options for LLM model selection dropdown.
-
-    Returns a copy of cached schema options that are refreshed when the registry is
-    updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
-    """
-    # Return a copy to prevent external mutation
-    return list(_schema_options)
-
-
-def get_llm_discriminator_mapping() -> dict[str, str]:
-    """
-    Get discriminator mapping for LLM models.
-
-    Returns a copy of cached discriminator mapping that is refreshed when the registry
-    is updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
-    """
-    # Return a copy to prevent external mutation
-    return dict(_discriminator_mapping)
-
-
-def get_dynamic_model_slugs() -> set[str]:
-    """Get all dynamic model slugs from the registry."""
-    return set(_dynamic_models.keys())
-
-
-def get_all_model_slugs_for_validation() -> set[str]:
-    """
-    Get ALL model slugs (both enabled and disabled) for validation purposes.
-
-    This is used for JSON schema enum validation - we need to accept any known
-    model value (even disabled ones) so that existing graphs don't fail validation.
-    The actual fallback/enforcement happens at runtime in llm_call().
-    """
-    all_slugs = set(_dynamic_models.keys())
-    all_slugs.update(_static_metadata.keys())
-    return all_slugs
-
-
-def iter_dynamic_models() -> Iterable[RegistryModel]:
-    """Iterate over all dynamic models in the registry."""
-    return tuple(_dynamic_models.values())
-
-
-def get_fallback_model_for_disabled(disabled_model_slug: str) -> RegistryModel | None:
-    """
-    Find a fallback model when the requested model is disabled.
-
-    Looks for an enabled model from the same provider. Prefers models with
-    similar names or capabilities if possible.
-
-    Args:
-        disabled_model_slug: The slug of the disabled model
-
-    Returns:
-        An enabled RegistryModel from the same provider, or None if no fallback found
-    """
-    disabled_model = _dynamic_models.get(disabled_model_slug)
-    if not disabled_model:
-        return None
-
-    provider = disabled_model.metadata.provider
-
-    # Find all enabled models from the same provider
-    candidates = [
-        model
-        for model in _dynamic_models.values()
-        if model.is_enabled and model.metadata.provider == provider
-    ]
-
-    if not candidates:
-        return None
-
-    # Sort by: prefer models with similar context window, then by name
-    candidates.sort(
-        key=lambda m: (
-            abs(m.metadata.context_window - disabled_model.metadata.context_window),
-            m.display_name.lower(),
-        )
-    )
-
-    return candidates[0]
-
-
-def is_model_enabled(model_slug: str) -> bool:
-    """Check if a model is enabled in the registry."""
-    model = _dynamic_models.get(model_slug)
-    if not model:
-        # Model not in registry - assume it's a static/legacy model and allow it
-        return True
-    return model.is_enabled
-
-
-def get_model_info(model_slug: str) -> RegistryModel | None:
-    """Get model info from the registry."""
-    return _dynamic_models.get(model_slug)
-
-
-def get_default_model_slug() -> str | None:
-    """
-    Get the default model slug to use for block defaults.
-
-    Returns the recommended model if set (configured via admin UI),
-    otherwise returns the first enabled model alphabetically.
-    Returns None if no models are available or enabled.
-    """
-    # Return the recommended model if one is set and enabled
-    for model in _dynamic_models.values():
-        if model.is_recommended and model.is_enabled:
-            return model.slug
-
-    # No recommended model set - find first enabled model alphabetically
-    for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
-        if model.is_enabled:
-            logger.warning(
-                "No recommended model set, using '%s' as default",
-                model.slug,
-            )
-            return model.slug
-
-    # No enabled models available
-    if _dynamic_models:
-        logger.error(
-            "No enabled models found in registry (%d models registered but all disabled)",
-            len(_dynamic_models),
-        )
-    else:
-        logger.error("No models registered in LLM registry")
-
-    return None
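Worth noting in the deleted registry: `get_fallback_model_for_disabled` ranks same-provider candidates by context-window proximity, with display name as the tie-break. The sort key in isolation:

```python
candidates = [
    {"slug": "a", "ctx": 8_000, "name": "Alpha"},
    {"slug": "b", "ctx": 128_000, "name": "Beta"},
    {"slug": "c", "ctx": 120_000, "name": "Gamma"},
]
disabled_ctx = 128_000  # context window of the disabled model

candidates.sort(key=lambda m: (abs(m["ctx"] - disabled_ctx), m["name"].lower()))
assert candidates[0]["slug"] == "b"  # closest context-window match wins
```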
@@ -1,130 +0,0 @@
|
|||||||
"""
|
|
||||||
Helper utilities for LLM registry integration with block schemas.
|
|
||||||
|
|
||||||
This module handles the dynamic injection of discriminator mappings
|
|
||||||
and model options from the LLM registry into block schemas.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
from backend.data.llm_registry.registry import (
|
|
    get_all_model_slugs_for_validation,
    get_default_model_slug,
    get_llm_discriminator_mapping,
    get_llm_model_schema_options,
)

logger = logging.getLogger(__name__)


def is_llm_model_field(field_name: str, field_info: Any) -> bool:
    """
    Check if a field is an LLM model selection field.

    Returns True if the field has 'options' in json_schema_extra
    (set by llm_model_schema_extra() in blocks/llm.py).
    """
    if not hasattr(field_info, "json_schema_extra"):
        return False

    extra = field_info.json_schema_extra
    if isinstance(extra, dict):
        return "options" in extra

    return False


def refresh_llm_model_options(field_schema: dict[str, Any]) -> None:
    """
    Refresh LLM model options from the registry.

    Updates 'options' (for frontend dropdown) to show only enabled models,
    but keeps the 'enum' (for validation) inclusive of ALL known models.

    This is important because:
    - Options: What users see in the dropdown (enabled models only)
    - Enum: What values pass validation (all known models, including disabled)

    Existing graphs may have disabled models selected - they should pass validation
    and the fallback logic in llm_call() will handle using an alternative model.
    """
    fresh_options = get_llm_model_schema_options()
    if not fresh_options:
        return

    # Update options array (UI dropdown) - only enabled models
    if "options" in field_schema:
        field_schema["options"] = fresh_options

    all_known_slugs = get_all_model_slugs_for_validation()
    if all_known_slugs and "enum" in field_schema:
        existing_enum = set(field_schema.get("enum", []))
        combined_enum = existing_enum | all_known_slugs
        field_schema["enum"] = sorted(combined_enum)

    # Set the default value from the registry (gpt-4o if available, else first enabled)
    # This ensures new blocks have a sensible default pre-selected
    default_slug = get_default_model_slug()
    if default_slug:
        field_schema["default"] = default_slug


def refresh_llm_discriminator_mapping(field_schema: dict[str, Any]) -> None:
    """
    Refresh discriminator_mapping for fields that use model-based discrimination.

    The discriminator is already set when AICredentialsField() creates the field.
    We only need to refresh the mapping when models are added/removed.
    """
    if field_schema.get("discriminator") != "model":
        return

    # Always refresh the mapping to get latest models
    fresh_mapping = get_llm_discriminator_mapping()
    if fresh_mapping is not None:
        field_schema["discriminator_mapping"] = fresh_mapping


def update_schema_with_llm_registry(
    schema: dict[str, Any], model_class: type | None = None
) -> None:
    """
    Update a JSON schema with current LLM registry data.

    Refreshes:
    1. Model options for LLM model selection fields (dropdown choices)
    2. Discriminator mappings for credentials fields (model → provider)

    Args:
        schema: The JSON schema to update (mutated in-place)
        model_class: The Pydantic model class (optional, for field introspection)
    """
    properties = schema.get("properties", {})

    for field_name, field_schema in properties.items():
        if not isinstance(field_schema, dict):
            continue

        # Refresh model options for LLM model fields
        if model_class and hasattr(model_class, "model_fields"):
            field_info = model_class.model_fields.get(field_name)
            if field_info and is_llm_model_field(field_name, field_info):
                try:
                    refresh_llm_model_options(field_schema)
                except Exception as exc:
                    logger.warning(
                        "Failed to refresh LLM options for field %s: %s",
                        field_name,
                        exc,
                    )

        # Refresh discriminator mapping for fields that use model discrimination
        try:
            refresh_llm_discriminator_mapping(field_schema)
        except Exception as exc:
            logger.warning(
                "Failed to refresh discriminator mapping for field %s: %s",
                field_name,
                exc,
            )
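
A minimal usage sketch of the helper above (not part of the changeset): the schema dict is mutated in place, so callers regenerate it per request rather than caching stale options. `DemoInput` is a hypothetical block input model, and the registry getters need a populated registry to return anything.

# Illustrative sketch, assuming the registry has been loaded.
from typing import Any
from pydantic import BaseModel, Field

class DemoInput(BaseModel):  # hypothetical block input schema
    # "options" in json_schema_extra is what is_llm_model_field() looks for
    model: str = Field(default="gpt-4o", json_schema_extra={"options": []})

schema: dict[str, Any] = DemoInput.model_json_schema()
update_schema_with_llm_registry(schema, DemoInput)  # mutates schema in place
print(schema["properties"]["model"].get("options"))  # enabled models only
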
@@ -39,7 +39,6 @@ from pydantic_core import (
)
from typing_extensions import TypedDict

from backend.data.llm_registry import update_schema_with_llm_registry
from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.request import parse_url
@@ -548,9 +547,7 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
        else:
            schema["credentials_provider"] = allowed_providers
        schema["credentials_types"] = model_class.allowed_cred_types()

        # Ensure LLM discriminators are populated (delegates to shared helper)
        update_schema_with_llm_registry(schema, model_class)
        # Do not return anything, just mutate schema in place

    model_config = ConfigDict(
        json_schema_extra=_add_json_schema_extra,  # type: ignore
@@ -705,20 +702,16 @@ def CredentialsField(
    This is enforced by the `BlockSchema` base class.
    """

    # Build field_schema_extra - always include discriminator and mapping if discriminator is set
    field_schema_extra: dict[str, Any] = {}

    # Always include discriminator if provided
    if discriminator is not None:
        field_schema_extra["discriminator"] = discriminator
        # Always include discriminator_mapping when discriminator is set (even if empty initially)
        field_schema_extra["discriminator_mapping"] = discriminator_mapping or {}

    # Include other optional fields (only if not None)
    if required_scopes:
        field_schema_extra["credentials_scopes"] = list(required_scopes)
    if discriminator_values:
        field_schema_extra["discriminator_values"] = discriminator_values
    field_schema_extra = {
        k: v
        for k, v in {
            "credentials_scopes": list(required_scopes) or None,
            "discriminator": discriminator,
            "discriminator_mapping": discriminator_mapping,
            "discriminator_values": discriminator_values,
        }.items()
        if v is not None
    }

    # Merge any json_schema_extra passed in kwargs
    if "json_schema_extra" in kwargs:
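
A standalone illustration (not from the diff) of the None-filtering pattern the reverted CredentialsField code uses: optional entries are staged in one dict and anything still None is dropped, so json_schema_extra only carries keys that were actually provided.

# Values below are illustrative only.
candidates = {
    "credentials_scopes": None,                    # empty scopes -> dropped
    "discriminator": "model",
    "discriminator_mapping": {"gpt-4o": "openai"},
    "discriminator_values": None,                  # dropped
}
field_schema_extra = {k: v for k, v in candidates.items() if v is not None}
assert "credentials_scopes" not in field_schema_extra
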
@@ -1,66 +0,0 @@
"""
Helper functions for LLM registry initialization in executor context.

These functions handle refreshing the LLM registry when the executor starts
and subscribing to real-time updates via Redis pub/sub.
"""

import logging

from backend.data import db, llm_registry
from backend.data.block import BlockSchema, initialize_blocks
from backend.data.block_cost_config import refresh_llm_costs
from backend.data.llm_registry import subscribe_to_registry_refresh

logger = logging.getLogger(__name__)


async def initialize_registry_for_executor() -> None:
    """
    Initialize blocks and refresh LLM registry in the executor context.

    This must run in the executor's event loop to have access to the database.
    """
    try:
        # Connect to database if not already connected
        if not db.is_connected():
            await db.connect()
            logger.info("[GraphExecutor] Connected to database for registry refresh")

        # Initialize blocks (internally refreshes LLM registry and costs)
        await initialize_blocks()
        logger.info("[GraphExecutor] Blocks initialized")
    except Exception as exc:
        logger.warning(
            "[GraphExecutor] Failed to refresh LLM registry on startup: %s",
            exc,
            exc_info=True,
        )


async def refresh_registry_on_notification() -> None:
    """Refresh LLM registry when notified via Redis pub/sub."""
    try:
        # Ensure DB is connected
        if not db.is_connected():
            await db.connect()

        # Refresh registry and costs
        await llm_registry.refresh_llm_registry()
        refresh_llm_costs()

        # Clear block schema caches so they regenerate with new model options
        BlockSchema.clear_all_schema_caches()

        logger.info("[GraphExecutor] LLM registry refreshed from notification")
    except Exception as exc:
        logger.error(
            "[GraphExecutor] Failed to refresh LLM registry from notification: %s",
            exc,
            exc_info=True,
        )


async def subscribe_to_registry_updates() -> None:
    """Subscribe to Redis pub/sub for LLM registry refresh notifications."""
    await subscribe_to_registry_refresh(refresh_registry_on_notification)
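
A sketch of driving this (now deleted) module from a synchronous entrypoint, assuming it is importable; whether subscribe_to_registry_refresh returns after registering the callback or keeps consuming the channel depends on its implementation, which is not shown here.

import asyncio

async def _startup() -> None:
    await initialize_registry_for_executor()
    await subscribe_to_registry_updates()

asyncio.run(_startup())
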
@@ -213,9 +213,6 @@ async def execute_node(
        block_name=node_block.name,
    )

    if node_block.disabled:
        raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")

    # Sanity check: validate the execution input.
    input_data, error = validate_exec(node, data.inputs, resolve_input=False)
    if input_data is None:
@@ -712,20 +709,6 @@ class ExecutionProcessor:
        )
        self.node_execution_thread.start()
        self.node_evaluation_thread.start()

        # Initialize LLM registry and subscribe to updates
        from backend.executor.llm_registry_init import (
            initialize_registry_for_executor,
            subscribe_to_registry_updates,
        )

        asyncio.run_coroutine_threadsafe(
            initialize_registry_for_executor(), self.node_execution_loop
        )
        asyncio.run_coroutine_threadsafe(
            subscribe_to_registry_updates(), self.node_execution_loop
        )

        logger.info(f"[GraphExecutor] {self.tid} started")

    @error_logged(swallow=False)
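
Why run_coroutine_threadsafe in the removed block above: the registry coroutines must run on the executor's own event loop, which lives in a different thread from the caller. A minimal standalone sketch of that pattern:

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def work() -> str:
    return "done"

# Schedules the coroutine on the other thread's loop; returns a
# concurrent.futures.Future that the calling thread can wait on.
future = asyncio.run_coroutine_threadsafe(work(), loop)
print(future.result(timeout=5))
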
@@ -1,935 +0,0 @@
from __future__ import annotations

from typing import Any, Iterable, Sequence, cast

import prisma
import prisma.models

from backend.data.db import transaction
from backend.server.v2.llm import model as llm_model
from backend.util.models import Pagination


def _json_dict(value: Any | None) -> dict[str, Any]:
    if not value:
        return {}
    if isinstance(value, dict):
        return value
    return {}


def _map_cost(record: prisma.models.LlmModelCost) -> llm_model.LlmModelCost:
    return llm_model.LlmModelCost(
        id=record.id,
        unit=record.unit,
        credit_cost=record.creditCost,
        credential_provider=record.credentialProvider,
        credential_id=record.credentialId,
        credential_type=record.credentialType,
        currency=record.currency,
        metadata=_json_dict(record.metadata),
    )


def _map_creator(
    record: prisma.models.LlmModelCreator,
) -> llm_model.LlmModelCreator:
    return llm_model.LlmModelCreator(
        id=record.id,
        name=record.name,
        display_name=record.displayName,
        description=record.description,
        website_url=record.websiteUrl,
        logo_url=record.logoUrl,
        metadata=_json_dict(record.metadata),
    )


def _map_model(record: prisma.models.LlmModel) -> llm_model.LlmModel:
    costs = []
    if record.Costs:
        costs = [_map_cost(cost) for cost in record.Costs]

    creator = None
    if hasattr(record, "Creator") and record.Creator:
        creator = _map_creator(record.Creator)

    return llm_model.LlmModel(
        id=record.id,
        slug=record.slug,
        display_name=record.displayName,
        description=record.description,
        provider_id=record.providerId,
        creator_id=record.creatorId,
        creator=creator,
        context_window=record.contextWindow,
        max_output_tokens=record.maxOutputTokens,
        is_enabled=record.isEnabled,
        is_recommended=record.isRecommended,
        capabilities=_json_dict(record.capabilities),
        metadata=_json_dict(record.metadata),
        costs=costs,
    )


def _map_provider(record: prisma.models.LlmProvider) -> llm_model.LlmProvider:
    models: list[llm_model.LlmModel] = []
    if record.Models:
        models = [_map_model(model) for model in record.Models]

    return llm_model.LlmProvider(
        id=record.id,
        name=record.name,
        display_name=record.displayName,
        description=record.description,
        default_credential_provider=record.defaultCredentialProvider,
        default_credential_id=record.defaultCredentialId,
        default_credential_type=record.defaultCredentialType,
        supports_tools=record.supportsTools,
        supports_json_output=record.supportsJsonOutput,
        supports_reasoning=record.supportsReasoning,
        supports_parallel_tool=record.supportsParallelTool,
        metadata=_json_dict(record.metadata),
        models=models,
    )


async def list_providers(
    include_models: bool = True, enabled_only: bool = False
) -> list[llm_model.LlmProvider]:
    """
    List all LLM providers.

    Args:
        include_models: Whether to include models for each provider
        enabled_only: If True, only include enabled models (for public routes)
    """
    include: Any = None
    if include_models:
        model_where = {"isEnabled": True} if enabled_only else None
        include = {
            "Models": {
                "include": {"Costs": True, "Creator": True},
                "where": model_where,
            }
        }
    records = await prisma.models.LlmProvider.prisma().find_many(include=include)
    return [_map_provider(record) for record in records]


async def upsert_provider(
    request: llm_model.UpsertLlmProviderRequest,
    provider_id: str | None = None,
) -> llm_model.LlmProvider:
    data: Any = {
        "name": request.name,
        "displayName": request.display_name,
        "description": request.description,
        "defaultCredentialProvider": request.default_credential_provider,
        "defaultCredentialId": request.default_credential_id,
        "defaultCredentialType": request.default_credential_type,
        "supportsTools": request.supports_tools,
        "supportsJsonOutput": request.supports_json_output,
        "supportsReasoning": request.supports_reasoning,
        "supportsParallelTool": request.supports_parallel_tool,
        "metadata": prisma.Json(request.metadata or {}),
    }
    include: Any = {"Models": {"include": {"Costs": True, "Creator": True}}}
    if provider_id:
        record = await prisma.models.LlmProvider.prisma().update(
            where={"id": provider_id},
            data=data,
            include=include,
        )
    else:
        record = await prisma.models.LlmProvider.prisma().create(
            data=data,
            include=include,
        )
    if record is None:
        raise ValueError("Failed to create/update provider")
    return _map_provider(record)


async def delete_provider(provider_id: str) -> bool:
    """
    Delete an LLM provider.

    A provider can only be deleted if it has no associated models.
    Due to onDelete: Restrict on LlmModel.Provider, the database will
    block deletion if models exist.

    Args:
        provider_id: UUID of the provider to delete

    Returns:
        True if deleted successfully

    Raises:
        ValueError: If provider not found or has associated models
    """
    # Check if provider exists
    provider = await prisma.models.LlmProvider.prisma().find_unique(
        where={"id": provider_id},
        include={"Models": True},
    )
    if not provider:
        raise ValueError(f"Provider with id '{provider_id}' not found")

    # Check if provider has any models
    model_count = len(provider.Models) if provider.Models else 0
    if model_count > 0:
        raise ValueError(
            f"Cannot delete provider '{provider.displayName}' because it has "
            f"{model_count} model(s). Delete all models first."
        )

    # Safe to delete
    await prisma.models.LlmProvider.prisma().delete(where={"id": provider_id})
    return True


async def list_models(
    provider_id: str | None = None,
    enabled_only: bool = False,
    page: int = 1,
    page_size: int = 50,
) -> llm_model.LlmModelsResponse:
    """
    List LLM models with pagination.

    Args:
        provider_id: Optional filter by provider ID
        enabled_only: If True, only return enabled models (for public routes)
        page: Page number (1-indexed)
        page_size: Number of models per page
    """
    where: Any = {}
    if provider_id:
        where["providerId"] = provider_id
    if enabled_only:
        where["isEnabled"] = True

    # Get total count for pagination
    total_items = await prisma.models.LlmModel.prisma().count(
        where=where if where else None
    )

    # Calculate pagination
    skip = (page - 1) * page_size
    total_pages = (total_items + page_size - 1) // page_size if total_items > 0 else 0

    records = await prisma.models.LlmModel.prisma().find_many(
        where=where if where else None,
        include={"Costs": True, "Creator": True},
        skip=skip,
        take=page_size,
    )
    models = [_map_model(record) for record in records]

    return llm_model.LlmModelsResponse(
        models=models,
        pagination=Pagination(
            total_items=total_items,
            total_pages=total_pages,
            current_page=page,
            page_size=page_size,
        ),
    )


def _cost_create_payload(
    costs: Sequence[llm_model.LlmModelCostInput],
) -> dict[str, Iterable[dict[str, Any]]]:

    create_items = []
    for cost in costs:
        item: dict[str, Any] = {
            "unit": cost.unit,
            "creditCost": cost.credit_cost,
            "credentialProvider": cost.credential_provider,
        }
        # Only include optional fields if they have values
        if cost.credential_id:
            item["credentialId"] = cost.credential_id
        if cost.credential_type:
            item["credentialType"] = cost.credential_type
        if cost.currency:
            item["currency"] = cost.currency
        # Handle metadata - use Prisma Json type
        if cost.metadata is not None and cost.metadata != {}:
            item["metadata"] = prisma.Json(cost.metadata)
        create_items.append(item)
    return {"create": create_items}


async def create_model(
    request: llm_model.CreateLlmModelRequest,
) -> llm_model.LlmModel:
    data: Any = {
        "slug": request.slug,
        "displayName": request.display_name,
        "description": request.description,
        "Provider": {"connect": {"id": request.provider_id}},
        "contextWindow": request.context_window,
        "maxOutputTokens": request.max_output_tokens,
        "isEnabled": request.is_enabled,
        "capabilities": prisma.Json(request.capabilities or {}),
        "metadata": prisma.Json(request.metadata or {}),
        "Costs": _cost_create_payload(request.costs),
    }
    if request.creator_id:
        data["Creator"] = {"connect": {"id": request.creator_id}}

    record = await prisma.models.LlmModel.prisma().create(
        data=data,
        include={"Costs": True, "Creator": True, "Provider": True},
    )
    return _map_model(record)


async def update_model(
    model_id: str,
    request: llm_model.UpdateLlmModelRequest,
) -> llm_model.LlmModel:
    # Build scalar field updates (non-relation fields)
    scalar_data: Any = {}
    if request.display_name is not None:
        scalar_data["displayName"] = request.display_name
    if request.description is not None:
        scalar_data["description"] = request.description
    if request.context_window is not None:
        scalar_data["contextWindow"] = request.context_window
    if request.max_output_tokens is not None:
        scalar_data["maxOutputTokens"] = request.max_output_tokens
    if request.is_enabled is not None:
        scalar_data["isEnabled"] = request.is_enabled
    if request.capabilities is not None:
        scalar_data["capabilities"] = request.capabilities
    if request.metadata is not None:
        scalar_data["metadata"] = request.metadata
    # Foreign keys can be updated directly as scalar fields
    if request.provider_id is not None:
        scalar_data["providerId"] = request.provider_id
    if request.creator_id is not None:
        # Empty string means remove the creator
        scalar_data["creatorId"] = request.creator_id if request.creator_id else None

    # If we have costs to update, we need to handle them separately
    # because nested writes have different constraints
    if request.costs is not None:
        # Wrap cost replacement in a transaction for atomicity
        async with transaction() as tx:
            # First update scalar fields
            if scalar_data:
                await tx.llmmodel.update(
                    where={"id": model_id},
                    data=scalar_data,
                )
            # Then handle costs: delete existing and create new
            await tx.llmmodelcost.delete_many(where={"llmModelId": model_id})
            if request.costs:
                cost_payload = _cost_create_payload(request.costs)
                for cost_item in cost_payload["create"]:
                    cost_item["llmModelId"] = model_id
                    await tx.llmmodelcost.create(data=cast(Any, cost_item))
        # Fetch the updated record (outside transaction)
        record = await prisma.models.LlmModel.prisma().find_unique(
            where={"id": model_id},
            include={"Costs": True, "Creator": True},
        )
    else:
        # No costs update - simple update
        record = await prisma.models.LlmModel.prisma().update(
            where={"id": model_id},
            data=scalar_data,
            include={"Costs": True, "Creator": True},
        )

    if not record:
        raise ValueError(f"Model with id '{model_id}' not found")
    return _map_model(record)


async def toggle_model(
    model_id: str,
    is_enabled: bool,
    migrate_to_slug: str | None = None,
    migration_reason: str | None = None,
    custom_credit_cost: int | None = None,
) -> llm_model.ToggleLlmModelResponse:
    """
    Toggle a model's enabled status, optionally migrating workflows when disabling.

    Args:
        model_id: UUID of the model to toggle
        is_enabled: New enabled status
        migrate_to_slug: If disabling and this is provided, migrate all workflows
            using this model to the specified replacement model
        migration_reason: Optional reason for the migration (e.g., "Provider outage")
        custom_credit_cost: Optional custom pricing override for migrated workflows.
            When set, the billing system should use this cost instead
            of the target model's cost for affected nodes.

    Returns:
        ToggleLlmModelResponse with the updated model and optional migration stats
    """
    import json

    # Get the model being toggled
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    nodes_migrated = 0
    migration_id: str | None = None

    # If disabling with migration, perform migration first
    if not is_enabled and migrate_to_slug:
        # Validate replacement model exists and is enabled
        replacement = await prisma.models.LlmModel.prisma().find_unique(
            where={"slug": migrate_to_slug}
        )
        if not replacement:
            raise ValueError(f"Replacement model '{migrate_to_slug}' not found")
        if not replacement.isEnabled:
            raise ValueError(
                f"Replacement model '{migrate_to_slug}' is disabled. "
                f"Please enable it before using it as a replacement."
            )

        # Perform all operations atomically within a single transaction
        # This ensures no nodes are missed between query and update
        async with transaction() as tx:
            # Get the IDs of nodes that will be migrated (inside transaction for consistency)
            node_ids_result = await tx.query_raw(
                """
                SELECT id
                FROM "AgentNode"
                WHERE "constantInput"::jsonb->>'model' = $1
                FOR UPDATE
                """,
                model.slug,
            )
            migrated_node_ids = (
                [row["id"] for row in node_ids_result] if node_ids_result else []
            )
            nodes_migrated = len(migrated_node_ids)

            if nodes_migrated > 0:
                # Update by IDs to ensure we only update the exact nodes we queried
                # Use JSON array and jsonb_array_elements_text for safe parameterization
                node_ids_json = json.dumps(migrated_node_ids)
                await tx.execute_raw(
                    """
                    UPDATE "AgentNode"
                    SET "constantInput" = JSONB_SET(
                        "constantInput"::jsonb,
                        '{model}',
                        to_jsonb($1::text)
                    )
                    WHERE id::text IN (
                        SELECT jsonb_array_elements_text($2::jsonb)
                    )
                    """,
                    migrate_to_slug,
                    node_ids_json,
                )

            record = await tx.llmmodel.update(
                where={"id": model_id},
                data={"isEnabled": is_enabled},
                include={"Costs": True},
            )

            # Create migration record for revert capability
            if nodes_migrated > 0:
                migration_data: Any = {
                    "sourceModelSlug": model.slug,
                    "targetModelSlug": migrate_to_slug,
                    "reason": migration_reason,
                    "migratedNodeIds": json.dumps(migrated_node_ids),
                    "nodeCount": nodes_migrated,
                    "customCreditCost": custom_credit_cost,
                }
                migration_record = await tx.llmmodelmigration.create(
                    data=migration_data
                )
                migration_id = migration_record.id
    else:
        # Simple toggle without migration
        record = await prisma.models.LlmModel.prisma().update(
            where={"id": model_id},
            data={"isEnabled": is_enabled},
            include={"Costs": True},
        )

    if record is None:
        raise ValueError(f"Model with id '{model_id}' not found")
    return llm_model.ToggleLlmModelResponse(
        model=_map_model(record),
        nodes_migrated=nodes_migrated,
        migrated_to_slug=migrate_to_slug if nodes_migrated > 0 else None,
        migration_id=migration_id,
    )


async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:
    """Get usage count for a model."""
    import prisma as prisma_module

    model = await prisma.models.LlmModel.prisma().find_unique(where={"id": model_id})
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        model.slug,
    )
    node_count = int(count_result[0]["count"]) if count_result else 0

    return llm_model.LlmModelUsageResponse(model_slug=model.slug, node_count=node_count)


async def delete_model(
    model_id: str, replacement_model_slug: str | None = None
) -> llm_model.DeleteLlmModelResponse:
    """
    Delete a model and optionally migrate all AgentNodes using it to a replacement model.

    This performs an atomic operation within a database transaction:
    1. Validates the model exists
    2. Counts affected nodes
    3. If nodes exist, validates replacement model and migrates them
    4. Deletes the LlmModel record (CASCADE deletes costs)

    Args:
        model_id: UUID of the model to delete
        replacement_model_slug: Slug of the model to migrate to (required only if nodes use this model)

    Returns:
        DeleteLlmModelResponse with migration stats

    Raises:
        ValueError: If model not found, nodes exist but no replacement provided,
            replacement not found, or replacement is disabled
    """
    # 1. Get the model being deleted (validation - outside transaction)
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    deleted_slug = model.slug
    deleted_display_name = model.displayName

    # 2. Count affected nodes first to determine if replacement is needed
    import prisma as prisma_module

    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        deleted_slug,
    )
    nodes_to_migrate = int(count_result[0]["count"]) if count_result else 0

    # 3. Validate replacement model only if there are nodes to migrate
    if nodes_to_migrate > 0:
        if not replacement_model_slug:
            raise ValueError(
                f"Cannot delete model '{deleted_slug}': {nodes_to_migrate} workflow node(s) "
                f"are using it. Please provide a replacement_model_slug to migrate them."
            )
        replacement = await prisma.models.LlmModel.prisma().find_unique(
            where={"slug": replacement_model_slug}
        )
        if not replacement:
            raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
        if not replacement.isEnabled:
            raise ValueError(
                f"Replacement model '{replacement_model_slug}' is disabled. "
                f"Please enable it before using it as a replacement."
            )

    # 4. Perform migration (if needed) and deletion atomically within a transaction
    async with transaction() as tx:
        # Migrate all AgentNode.constantInput->model to replacement
        if nodes_to_migrate > 0 and replacement_model_slug:
            await tx.execute_raw(
                """
                UPDATE "AgentNode"
                SET "constantInput" = JSONB_SET(
                    "constantInput"::jsonb,
                    '{model}',
                    to_jsonb($1::text)
                )
                WHERE "constantInput"::jsonb->>'model' = $2
                """,
                replacement_model_slug,
                deleted_slug,
            )

        # Delete the model (CASCADE will delete costs automatically)
        await tx.llmmodel.delete(where={"id": model_id})

    # Build appropriate message based on whether migration happened
    if nodes_to_migrate > 0:
        message = (
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
            f"and migrated {nodes_to_migrate} workflow node(s) to '{replacement_model_slug}'."
        )
    else:
        message = (
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}). "
            f"No workflows were using this model."
        )

    return llm_model.DeleteLlmModelResponse(
        deleted_model_slug=deleted_slug,
        deleted_model_display_name=deleted_display_name,
        replacement_model_slug=replacement_model_slug,
        nodes_migrated=nodes_to_migrate,
        message=message,
    )


def _map_migration(
    record: prisma.models.LlmModelMigration,
) -> llm_model.LlmModelMigration:
    return llm_model.LlmModelMigration(
        id=record.id,
        source_model_slug=record.sourceModelSlug,
        target_model_slug=record.targetModelSlug,
        reason=record.reason,
        node_count=record.nodeCount,
        custom_credit_cost=record.customCreditCost,
        is_reverted=record.isReverted,
        created_at=record.createdAt.isoformat(),
        reverted_at=record.revertedAt.isoformat() if record.revertedAt else None,
    )


async def list_migrations(
    include_reverted: bool = False,
) -> list[llm_model.LlmModelMigration]:
    """
    List model migrations, optionally including reverted ones.

    Args:
        include_reverted: If True, include reverted migrations. Default is False.

    Returns:
        List of LlmModelMigration records
    """
    where: Any = None if include_reverted else {"isReverted": False}
    records = await prisma.models.LlmModelMigration.prisma().find_many(
        where=where,
        order={"createdAt": "desc"},
    )
    return [_map_migration(record) for record in records]


async def get_migration(migration_id: str) -> llm_model.LlmModelMigration | None:
    """Get a specific migration by ID."""
    record = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    return _map_migration(record) if record else None


async def revert_migration(
    migration_id: str,
    re_enable_source_model: bool = True,
) -> llm_model.RevertMigrationResponse:
    """
    Revert a model migration, restoring affected nodes to their original model.

    This only reverts the specific nodes that were migrated, not all nodes
    currently using the target model.

    Args:
        migration_id: UUID of the migration to revert
        re_enable_source_model: Whether to re-enable the source model if it's disabled

    Returns:
        RevertMigrationResponse with revert stats

    Raises:
        ValueError: If migration not found, already reverted, or source model not available
    """
    import json
    from datetime import datetime, timezone

    # Get the migration record
    migration = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    if not migration:
        raise ValueError(f"Migration with id '{migration_id}' not found")

    if migration.isReverted:
        raise ValueError(
            f"Migration '{migration_id}' has already been reverted "
            f"on {migration.revertedAt.isoformat() if migration.revertedAt else 'unknown date'}"
        )

    # Check if source model exists
    source_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"slug": migration.sourceModelSlug}
    )
    if not source_model:
        raise ValueError(
            f"Source model '{migration.sourceModelSlug}' no longer exists. "
            f"Cannot revert migration."
        )

    # Get the migrated node IDs (Prisma auto-parses JSONB to list)
    migrated_node_ids: list[str] = (
        migration.migratedNodeIds
        if isinstance(migration.migratedNodeIds, list)
        else json.loads(migration.migratedNodeIds)  # type: ignore
    )
    if not migrated_node_ids:
        raise ValueError("No nodes to revert in this migration")

    # Track if we need to re-enable the source model
    source_model_was_disabled = not source_model.isEnabled
    should_re_enable = source_model_was_disabled and re_enable_source_model
    source_model_re_enabled = False

    # Perform revert atomically
    async with transaction() as tx:
        # Re-enable the source model if requested and it was disabled
        if should_re_enable:
            await tx.llmmodel.update(
                where={"id": source_model.id},
                data={"isEnabled": True},
            )
            source_model_re_enabled = True

        # Update only the specific nodes that were migrated
        # We need to check that they still have the target model (haven't been changed since)
        # Use a single batch update for efficiency
        # Use JSON array and jsonb_array_elements_text for safe parameterization
        node_ids_json = json.dumps(migrated_node_ids)
        result = await tx.execute_raw(
            """
            UPDATE "AgentNode"
            SET "constantInput" = JSONB_SET(
                "constantInput"::jsonb,
                '{model}',
                to_jsonb($1::text)
            )
            WHERE id::text IN (
                SELECT jsonb_array_elements_text($2::jsonb)
            )
            AND "constantInput"::jsonb->>'model' = $3
            """,
            migration.sourceModelSlug,
            node_ids_json,
            migration.targetModelSlug,
        )
        nodes_reverted = result if result else 0

        # Mark migration as reverted
        await tx.llmmodelmigration.update(
            where={"id": migration_id},
            data={
                "isReverted": True,
                "revertedAt": datetime.now(timezone.utc),
            },
        )

    # Calculate nodes that were already changed since migration
    nodes_already_changed = len(migrated_node_ids) - nodes_reverted

    # Build appropriate message
    message_parts = [
        f"Successfully reverted migration: {nodes_reverted} node(s) restored "
        f"from '{migration.targetModelSlug}' to '{migration.sourceModelSlug}'."
    ]
    if nodes_already_changed > 0:
        message_parts.append(
            f" {nodes_already_changed} node(s) were already changed and not reverted."
        )
    if source_model_re_enabled:
        message_parts.append(
            f" Model '{migration.sourceModelSlug}' has been re-enabled."
        )

    return llm_model.RevertMigrationResponse(
        migration_id=migration_id,
        source_model_slug=migration.sourceModelSlug,
        target_model_slug=migration.targetModelSlug,
        nodes_reverted=nodes_reverted,
        nodes_already_changed=nodes_already_changed,
        source_model_re_enabled=source_model_re_enabled,
        message="".join(message_parts),
    )


# ============================================================================
# Creator CRUD operations
# ============================================================================


async def list_creators() -> list[llm_model.LlmModelCreator]:
    """List all LLM model creators."""
    records = await prisma.models.LlmModelCreator.prisma().find_many(
        order={"displayName": "asc"}
    )
    return [_map_creator(record) for record in records]


async def get_creator(creator_id: str) -> llm_model.LlmModelCreator | None:
    """Get a specific creator by ID."""
    record = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    return _map_creator(record) if record else None


async def upsert_creator(
    request: llm_model.UpsertLlmCreatorRequest,
    creator_id: str | None = None,
) -> llm_model.LlmModelCreator:
    """Create or update a model creator."""
    data: Any = {
        "name": request.name,
        "displayName": request.display_name,
        "description": request.description,
        "websiteUrl": request.website_url,
        "logoUrl": request.logo_url,
        "metadata": prisma.Json(request.metadata or {}),
    }
    if creator_id:
        record = await prisma.models.LlmModelCreator.prisma().update(
            where={"id": creator_id},
            data=data,
        )
    else:
        record = await prisma.models.LlmModelCreator.prisma().create(data=data)
    if record is None:
        raise ValueError("Failed to create/update creator")
    return _map_creator(record)


async def delete_creator(creator_id: str) -> bool:
    """
    Delete a model creator.

    This will set creatorId to NULL on all associated models (due to onDelete: SetNull).

    Args:
        creator_id: UUID of the creator to delete

    Returns:
        True if deleted successfully

    Raises:
        ValueError: If creator not found
    """
    creator = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    if not creator:
        raise ValueError(f"Creator with id '{creator_id}' not found")

    await prisma.models.LlmModelCreator.prisma().delete(where={"id": creator_id})
    return True


async def get_recommended_model() -> llm_model.LlmModel | None:
    """
    Get the currently recommended LLM model.

    Returns:
        The recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
        include={"Costs": True, "Creator": True},
    )
    return _map_model(record) if record else None


async def set_recommended_model(
    model_id: str,
) -> tuple[llm_model.LlmModel, str | None]:
    """
    Set a model as the recommended model.

    This will clear the isRecommended flag from any other model and set it
    on the specified model. The model must be enabled.

    Args:
        model_id: UUID of the model to set as recommended

    Returns:
        Tuple of (the updated model, previous recommended model slug or None)

    Raises:
        ValueError: If model not found or not enabled
    """
    # First, verify the model exists and is enabled
    target_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}
    )
    if not target_model:
        raise ValueError(f"Model with id '{model_id}' not found")
    if not target_model.isEnabled:
        raise ValueError(
            f"Cannot set disabled model '{target_model.slug}' as recommended"
        )

    # Get the current recommended model (if any)
    current_recommended = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True}
    )
    previous_slug = current_recommended.slug if current_recommended else None

    # Use a transaction to ensure atomicity
    async with transaction() as tx:
        # Clear isRecommended from all models
        await tx.llmmodel.update_many(
            where={"isRecommended": True},
            data={"isRecommended": False},
        )
        # Set the new recommended model
        await tx.llmmodel.update(
            where={"id": model_id},
            data={"isRecommended": True},
        )

    # Fetch and return the updated model
    updated_record = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id},
        include={"Costs": True, "Creator": True},
    )
    if not updated_record:
        raise ValueError("Failed to fetch updated model")

    return _map_model(updated_record), previous_slug


async def get_recommended_model_slug() -> str | None:
    """
    Get the slug of the currently recommended LLM model.

    Returns:
        The slug of the recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
    )
    return record.slug if record else None
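
An end-to-end sketch (not from the changeset; IDs and slugs below are hypothetical) of the disable-migrate-revert flow the deleted module implements. Each call runs its own transaction, and the revert only touches the nodes recorded in migratedNodeIds; running it requires a connected Prisma client.

async def demo() -> None:
    resp = await toggle_model(
        model_id="11111111-1111-1111-1111-111111111111",  # hypothetical UUID
        is_enabled=False,
        migrate_to_slug="gpt-4o",
        migration_reason="Provider outage",
    )
    if resp.migration_id:
        # Restores only the recorded nodes, skipping any that were edited
        # to a different model in the meantime.
        await revert_migration(resp.migration_id, re_enable_source_model=True)
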
@@ -1,235 +0,0 @@
from __future__ import annotations

import re
from datetime import datetime
from typing import Any, Optional

import prisma.enums
import pydantic

from backend.util.models import Pagination

# Pattern for valid model slugs: alphanumeric start, then alphanumeric, dots, underscores, slashes, hyphens
SLUG_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9._/-]*$")


class LlmModelCost(pydantic.BaseModel):
    id: str
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = None
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCreator(pydantic.BaseModel):
    """Represents the organization that created/trained the model (e.g., OpenAI, Meta)."""

    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModel(pydantic.BaseModel):
    id: str
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    creator: Optional[LlmModelCreator] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    is_recommended: bool = False
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCost] = pydantic.Field(default_factory=list)


class LlmProvider(pydantic.BaseModel):
    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = None
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    models: list[LlmModel] = pydantic.Field(default_factory=list)


class LlmProvidersResponse(pydantic.BaseModel):
    providers: list[LlmProvider]


class LlmModelsResponse(pydantic.BaseModel):
    models: list[LlmModel]
    pagination: Optional[Pagination] = None


class LlmCreatorsResponse(pydantic.BaseModel):
    creators: list[LlmModelCreator]


class UpsertLlmProviderRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = "api_key"
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class UpsertLlmCreatorRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCostInput(pydantic.BaseModel):
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = "api_key"
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class CreateLlmModelRequest(pydantic.BaseModel):
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCostInput]

    @pydantic.field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str) -> str:
        if not v or len(v) > 100:
            raise ValueError("Slug must be 1-100 characters")
        if not SLUG_PATTERN.match(v):
            raise ValueError(
                "Slug must start with alphanumeric and contain only "
                "alphanumeric characters, dots, underscores, slashes, or hyphens"
            )
        return v


class UpdateLlmModelRequest(pydantic.BaseModel):
    display_name: Optional[str] = None
    description: Optional[str] = None
    context_window: Optional[int] = None
    max_output_tokens: Optional[int] = None
    is_enabled: Optional[bool] = None
    capabilities: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    provider_id: Optional[str] = None
    creator_id: Optional[str] = None
    costs: Optional[list[LlmModelCostInput]] = None


class ToggleLlmModelRequest(pydantic.BaseModel):
    is_enabled: bool
    migrate_to_slug: Optional[str] = None
    migration_reason: Optional[str] = None  # e.g., "Provider outage"
    # Custom pricing override for migrated workflows. When set, billing should use
    # this cost instead of the target model's cost for affected nodes.
    # See LlmModelMigration in schema.prisma for full documentation.
    custom_credit_cost: Optional[int] = None


class ToggleLlmModelResponse(pydantic.BaseModel):
    model: LlmModel
    nodes_migrated: int = 0
    migrated_to_slug: Optional[str] = None
    migration_id: Optional[str] = None  # ID of the migration record for revert


class DeleteLlmModelResponse(pydantic.BaseModel):
    deleted_model_slug: str
    deleted_model_display_name: str
    replacement_model_slug: Optional[str] = None
    nodes_migrated: int
    message: str


class LlmModelUsageResponse(pydantic.BaseModel):
    model_slug: str
    node_count: int


# Migration tracking models
class LlmModelMigration(pydantic.BaseModel):
    id: str
    source_model_slug: str
    target_model_slug: str
    reason: Optional[str] = None
    node_count: int
    # Custom pricing override - billing should use this instead of target model's cost
    custom_credit_cost: Optional[int] = None
    is_reverted: bool = False
    created_at: datetime
    reverted_at: Optional[datetime] = None


class LlmMigrationsResponse(pydantic.BaseModel):
    migrations: list[LlmModelMigration]


class RevertMigrationRequest(pydantic.BaseModel):
    re_enable_source_model: bool = (
        True  # Whether to re-enable the source model if disabled
    )


class RevertMigrationResponse(pydantic.BaseModel):
    migration_id: str
    source_model_slug: str
    target_model_slug: str
    nodes_reverted: int
    nodes_already_changed: int = (
        0  # Nodes that were modified since migration (not reverted)
    )
    source_model_re_enabled: bool = False  # Whether the source model was re-enabled
    message: str


class SetRecommendedModelRequest(pydantic.BaseModel):
    model_id: str


class SetRecommendedModelResponse(pydantic.BaseModel):
    model: LlmModel
    previous_recommended_slug: Optional[str] = None
    message: str


class RecommendedModelResponse(pydantic.BaseModel):
    model: Optional[LlmModel] = None
    slug: Optional[str] = None
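
A sketch of the slug validation above: SLUG_PATTERN accepts provider-style slugs such as "meta-llama/llama-3.1-70b" but rejects a leading punctuation character. Values below are illustrative only.

import pydantic

try:
    CreateLlmModelRequest(
        slug="-bad-slug",  # fails: must start with an alphanumeric character
        display_name="Bad",
        provider_id="22222222-2222-2222-2222-222222222222",  # hypothetical UUID
        context_window=8192,
        costs=[],
    )
except pydantic.ValidationError as err:
    print(err.errors()[0]["msg"])
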
@@ -1,29 +0,0 @@
import autogpt_libs.auth
import fastapi

from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model

router = fastapi.APIRouter(
    prefix="/llm",
    tags=["llm"],
    dependencies=[fastapi.Security(autogpt_libs.auth.requires_user)],
)


@router.get("/models", response_model=llm_model.LlmModelsResponse)
async def list_models(
    page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = fastapi.Query(
        default=50, ge=1, le=100, description="Number of models per page"
    ),
):
    """List all enabled LLM models available to users."""
    return await llm_db.list_models(enabled_only=True, page=page, page_size=page_size)


@router.get("/providers", response_model=llm_model.LlmProvidersResponse)
async def list_providers():
    """List all LLM providers with their enabled models."""
    providers = await llm_db.list_providers(include_models=True, enabled_only=True)
    return llm_model.LlmProvidersResponse(providers=providers)
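
A sketch of mounting the deleted router (standard FastAPI include_router; the "/api" prefix is an assumption, not taken from this repo).

import fastapi

app = fastapi.FastAPI()
app.include_router(router, prefix="/api")
# GET /api/llm/models?page=1&page_size=50 -> LlmModelsResponse
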
@@ -1,81 +0,0 @@
-- CreateEnum
CREATE TYPE "LlmCostUnit" AS ENUM ('RUN', 'TOKENS');

-- CreateTable
CREATE TABLE "LlmProvider" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "defaultCredentialProvider" TEXT,
    "defaultCredentialId" TEXT,
    "defaultCredentialType" TEXT,
    "supportsTools" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsJsonOutput" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsReasoning" BOOLEAN NOT NULL DEFAULT FALSE,
    "supportsParallelTool" BOOLEAN NOT NULL DEFAULT FALSE,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmProvider_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmProvider_name_key" UNIQUE ("name")
);

-- CreateTable
CREATE TABLE "LlmModel" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "slug" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "providerId" TEXT NOT NULL,
    "contextWindow" INTEGER NOT NULL,
    "maxOutputTokens" INTEGER,
    "isEnabled" BOOLEAN NOT NULL DEFAULT TRUE,
    "capabilities" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmModel_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmModel_slug_key" UNIQUE ("slug")
);

-- CreateTable
CREATE TABLE "LlmModelCost" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "unit" "LlmCostUnit" NOT NULL DEFAULT 'RUN',
    "creditCost" INTEGER NOT NULL,
    "credentialProvider" TEXT NOT NULL,
    "credentialId" TEXT,
    "credentialType" TEXT,
    "currency" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "llmModelId" TEXT NOT NULL,

    CONSTRAINT "LlmModelCost_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModel_providerId_isEnabled_idx" ON "LlmModel"("providerId", "isEnabled");

-- CreateIndex
CREATE INDEX "LlmModel_slug_idx" ON "LlmModel"("slug");

-- CreateIndex
CREATE INDEX "LlmModelCost_llmModelId_idx" ON "LlmModelCost"("llmModelId");

-- CreateIndex
CREATE INDEX "LlmModelCost_credentialProvider_idx" ON "LlmModelCost"("credentialProvider");

-- CreateIndex
CREATE UNIQUE INDEX "LlmModelCost_llmModelId_credentialProvider_unit_key" ON "LlmModelCost"("llmModelId", "credentialProvider", "unit");

-- AddForeignKey
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_providerId_fkey" FOREIGN KEY ("providerId") REFERENCES "LlmProvider"("id") ON DELETE RESTRICT ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "LlmModelCost" ADD CONSTRAINT "LlmModelCost_llmModelId_fkey" FOREIGN KEY ("llmModelId") REFERENCES "LlmModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -1,226 +0,0 @@
-- Seed LLM Registry from existing hard-coded data
-- This migration populates the LlmProvider, LlmModel, and LlmModelCost tables
-- with data from the existing MODEL_METADATA and MODEL_COST dictionaries

-- Insert Providers
INSERT INTO "LlmProvider" ("id", "name", "displayName", "description", "defaultCredentialProvider", "defaultCredentialType", "supportsTools", "supportsJsonOutput", "supportsReasoning", "supportsParallelTool", "metadata")
VALUES
    (gen_random_uuid(), 'openai', 'OpenAI', 'OpenAI language models', 'openai', 'api_key', true, true, true, true, '{}'::jsonb),
    (gen_random_uuid(), 'anthropic', 'Anthropic', 'Anthropic Claude models', 'anthropic', 'api_key', true, true, true, false, '{}'::jsonb),
    (gen_random_uuid(), 'groq', 'Groq', 'Groq inference API', 'groq', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'open_router', 'OpenRouter', 'OpenRouter unified API', 'open_router', 'api_key', true, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'aiml_api', 'AI/ML API', 'AI/ML API models', 'aiml_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'ollama', 'Ollama', 'Ollama local models', 'ollama', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'llama_api', 'Llama API', 'Llama API models', 'llama_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'v0', 'v0', 'v0 by Vercel models', 'v0', 'api_key', true, true, false, false, '{}'::jsonb)
ON CONFLICT ("name") DO NOTHING;

-- Insert Models (using CTEs to reference provider IDs)
WITH provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    model_slug,
    model_display_name,
    NULL,
    p."id",
    context_window,
    max_output_tokens,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM (VALUES
    -- OpenAI models
    ('o3', 'O3', 'openai', 200000, 100000),
    ('o3-mini', 'O3 Mini', 'openai', 200000, 100000),
    ('o1', 'O1', 'openai', 200000, 100000),
    ('o1-mini', 'O1 Mini', 'openai', 128000, 65536),
    ('gpt-5-2025-08-07', 'GPT 5', 'openai', 400000, 128000),
    ('gpt-5.1-2025-11-13', 'GPT 5.1', 'openai', 400000, 128000),
    ('gpt-5-mini-2025-08-07', 'GPT 5 Mini', 'openai', 400000, 128000),
    ('gpt-5-nano-2025-08-07', 'GPT 5 Nano', 'openai', 400000, 128000),
    ('gpt-5-chat-latest', 'GPT 5 Chat', 'openai', 400000, 16384),
    ('gpt-4.1-2025-04-14', 'GPT 4.1', 'openai', 1000000, 32768),
    ('gpt-4.1-mini-2025-04-14', 'GPT 4.1 Mini', 'openai', 1047576, 32768),
    ('gpt-4o-mini', 'GPT 4o Mini', 'openai', 128000, 16384),
    ('gpt-4o', 'GPT 4o', 'openai', 128000, 16384),
    ('gpt-4-turbo', 'GPT 4 Turbo', 'openai', 128000, 4096),
    ('gpt-3.5-turbo', 'GPT 3.5 Turbo', 'openai', 16385, 4096),
    -- Anthropic models
    ('claude-opus-4-1-20250805', 'Claude 4.1 Opus', 'anthropic', 200000, 32000),
    ('claude-opus-4-20250514', 'Claude 4 Opus', 'anthropic', 200000, 32000),
    ('claude-sonnet-4-20250514', 'Claude 4 Sonnet', 'anthropic', 200000, 64000),
    ('claude-opus-4-5-20251101', 'Claude 4.5 Opus', 'anthropic', 200000, 64000),
    ('claude-sonnet-4-5-20250929', 'Claude 4.5 Sonnet', 'anthropic', 200000, 64000),
    ('claude-haiku-4-5-20251001', 'Claude 4.5 Haiku', 'anthropic', 200000, 64000),
    ('claude-3-7-sonnet-20250219', 'Claude 3.7 Sonnet', 'anthropic', 200000, 64000),
    ('claude-3-haiku-20240307', 'Claude 3 Haiku', 'anthropic', 200000, 4096),
    -- AI/ML API models
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 'Qwen 2.5 72B', 'aiml_api', 32000, 8000),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 'Llama 3.1 Nemotron 70B', 'aiml_api', 128000, 40000),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 'Llama 3.3 70B', 'aiml_api', 128000, NULL),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'Meta Llama 3.1 70B', 'aiml_api', 131000, 2000),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 'Llama 3.2 3B', 'aiml_api', 128000, NULL),
    -- Groq models
    ('llama-3.3-70b-versatile', 'Llama 3.3 70B', 'groq', 128000, 32768),
    ('llama-3.1-8b-instant', 'Llama 3.1 8B', 'groq', 128000, 8192),
    -- Ollama models
    ('llama3.3', 'Llama 3.3', 'ollama', 8192, NULL),
    ('llama3.2', 'Llama 3.2', 'ollama', 8192, NULL),
    ('llama3', 'Llama 3', 'ollama', 8192, NULL),
    ('llama3.1:405b', 'Llama 3.1 405B', 'ollama', 8192, NULL),
    ('dolphin-mistral:latest', 'Dolphin Mistral', 'ollama', 32768, NULL),
    -- OpenRouter models
    ('google/gemini-2.5-pro-preview-03-25', 'Gemini 2.5 Pro', 'open_router', 1050000, 8192),
    ('google/gemini-3-pro-preview', 'Gemini 3 Pro Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.5-flash', 'Gemini 2.5 Flash', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-001', 'Gemini 2.0 Flash', 'open_router', 1048576, 8192),
    ('google/gemini-2.5-flash-lite-preview-06-17', 'Gemini 2.5 Flash Lite Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-lite-001', 'Gemini 2.0 Flash Lite', 'open_router', 1048576, 8192),
    ('mistralai/mistral-nemo', 'Mistral Nemo', 'open_router', 128000, 4096),
    ('cohere/command-r-08-2024', 'Command R', 'open_router', 128000, 4096),
    ('cohere/command-r-plus-08-2024', 'Command R Plus', 'open_router', 128000, 4096),
    ('deepseek/deepseek-chat', 'DeepSeek Chat', 'open_router', 64000, 2048),
    ('deepseek/deepseek-r1-0528', 'DeepSeek R1', 'open_router', 163840, 163840),
    ('perplexity/sonar', 'Perplexity Sonar', 'open_router', 127000, 8000),
    ('perplexity/sonar-pro', 'Perplexity Sonar Pro', 'open_router', 200000, 8000),
    ('perplexity/sonar-deep-research', 'Perplexity Sonar Deep Research', 'open_router', 128000, 16000),
    ('nousresearch/hermes-3-llama-3.1-405b', 'Hermes 3 Llama 3.1 405B', 'open_router', 131000, 4096),
    ('nousresearch/hermes-3-llama-3.1-70b', 'Hermes 3 Llama 3.1 70B', 'open_router', 12288, 12288),
    ('openai/gpt-oss-120b', 'GPT OSS 120B', 'open_router', 131072, 131072),
    ('openai/gpt-oss-20b', 'GPT OSS 20B', 'open_router', 131072, 32768),
    ('amazon/nova-lite-v1', 'Amazon Nova Lite', 'open_router', 300000, 5120),
    ('amazon/nova-micro-v1', 'Amazon Nova Micro', 'open_router', 128000, 5120),
    ('amazon/nova-pro-v1', 'Amazon Nova Pro', 'open_router', 300000, 5120),
    ('microsoft/wizardlm-2-8x22b', 'WizardLM 2 8x22B', 'open_router', 65536, 4096),
    ('gryphe/mythomax-l2-13b', 'MythoMax L2 13B', 'open_router', 4096, 4096),
    ('meta-llama/llama-4-scout', 'Llama 4 Scout', 'open_router', 131072, 131072),
    ('meta-llama/llama-4-maverick', 'Llama 4 Maverick', 'open_router', 1048576, 1000000),
    ('x-ai/grok-4', 'Grok 4', 'open_router', 256000, 256000),
    ('x-ai/grok-4-fast', 'Grok 4 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-4.1-fast', 'Grok 4.1 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-code-fast-1', 'Grok Code Fast 1', 'open_router', 256000, 10000),
    ('moonshotai/kimi-k2', 'Kimi K2', 'open_router', 131000, 131000),
    ('qwen/qwen3-235b-a22b-thinking-2507', 'Qwen 3 235B Thinking', 'open_router', 262144, 262144),
    ('qwen/qwen3-coder', 'Qwen 3 Coder', 'open_router', 262144, 262144),
    -- Llama API models
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 'Llama 4 Scout', 'llama_api', 128000, 4028),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 'Llama 4 Maverick', 'llama_api', 128000, 4028),
    ('Llama-3.3-8B-Instruct', 'Llama 3.3 8B', 'llama_api', 128000, 4028),
    ('Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'llama_api', 128000, 4028),
    -- v0 models
    ('v0-1.5-md', 'v0 1.5 MD', 'v0', 128000, 64000),
    ('v0-1.5-lg', 'v0 1.5 LG', 'v0', 512000, 64000),
    ('v0-1.0-md', 'v0 1.0 MD', 'v0', 128000, 64000)
) AS models(model_slug, model_display_name, provider_name, context_window, max_output_tokens)
JOIN provider_ids p ON p."name" = models.provider_name
ON CONFLICT ("slug") DO NOTHING;

-- Insert Costs (using CTEs to reference model IDs)
WITH model_ids AS (
    SELECT "id", "slug", "providerId" FROM "LlmModel"
),
provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    cost,
    p."name",
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM (VALUES
    -- OpenAI costs
    ('o3', 4),
    ('o3-mini', 2),
    ('o1', 16),
    ('o1-mini', 4),
    ('gpt-5-2025-08-07', 2),
    ('gpt-5.1-2025-11-13', 5),
    ('gpt-5-mini-2025-08-07', 1),
    ('gpt-5-nano-2025-08-07', 1),
    ('gpt-5-chat-latest', 5),
    ('gpt-4.1-2025-04-14', 2),
    ('gpt-4.1-mini-2025-04-14', 1),
    ('gpt-4o-mini', 1),
    ('gpt-4o', 3),
    ('gpt-4-turbo', 10),
    ('gpt-3.5-turbo', 1),
    -- Anthropic costs
    ('claude-opus-4-1-20250805', 21),
    ('claude-opus-4-20250514', 21),
    ('claude-sonnet-4-20250514', 5),
    ('claude-haiku-4-5-20251001', 4),
    ('claude-opus-4-5-20251101', 14),
    ('claude-sonnet-4-5-20250929', 9),
    ('claude-3-7-sonnet-20250219', 5),
    ('claude-3-haiku-20240307', 1),
    -- AI/ML API costs
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 1),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 1),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 1),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 1),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 1),
    -- Groq costs
    ('llama-3.3-70b-versatile', 1),
    ('llama-3.1-8b-instant', 1),
    -- Ollama costs
    ('llama3.3', 1),
    ('llama3.2', 1),
    ('llama3', 1),
    ('llama3.1:405b', 1),
    ('dolphin-mistral:latest', 1),
    -- OpenRouter costs
    ('google/gemini-2.5-pro-preview-03-25', 4),
    ('google/gemini-3-pro-preview', 5),
    ('mistralai/mistral-nemo', 1),
    ('cohere/command-r-08-2024', 1),
    ('cohere/command-r-plus-08-2024', 3),
    ('deepseek/deepseek-chat', 2),
    ('perplexity/sonar', 1),
    ('perplexity/sonar-pro', 5),
    ('perplexity/sonar-deep-research', 10),
    ('nousresearch/hermes-3-llama-3.1-405b', 1),
    ('nousresearch/hermes-3-llama-3.1-70b', 1),
    ('amazon/nova-lite-v1', 1),
    ('amazon/nova-micro-v1', 1),
    ('amazon/nova-pro-v1', 1),
    ('microsoft/wizardlm-2-8x22b', 1),
    ('gryphe/mythomax-l2-13b', 1),
    ('meta-llama/llama-4-scout', 1),
    ('meta-llama/llama-4-maverick', 1),
    ('x-ai/grok-4', 9),
    ('x-ai/grok-4-fast', 1),
    ('x-ai/grok-4.1-fast', 1),
    ('x-ai/grok-code-fast-1', 1),
    ('moonshotai/kimi-k2', 1),
    ('qwen/qwen3-235b-a22b-thinking-2507', 1),
    ('qwen/qwen3-coder', 9),
    ('google/gemini-2.5-flash', 1),
    ('google/gemini-2.0-flash-001', 1),
    ('google/gemini-2.5-flash-lite-preview-06-17', 1),
    ('google/gemini-2.0-flash-lite-001', 1),
    ('deepseek/deepseek-r1-0528', 1),
    ('openai/gpt-oss-120b', 1),
    ('openai/gpt-oss-20b', 1),
    -- Llama API costs
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 1),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 1),
    ('Llama-3.3-8B-Instruct', 1),
    ('Llama-3.3-70B-Instruct', 1),
    -- v0 costs
    ('v0-1.5-md', 1),
    ('v0-1.5-lg', 2),
    ('v0-1.0-md', 1)
) AS costs(model_slug, cost)
JOIN model_ids m ON m."slug" = costs.model_slug
JOIN provider_ids p ON p."id" = m."providerId"
ON CONFLICT ("llmModelId", "credentialProvider", "unit") DO NOTHING;
@@ -1,25 +0,0 @@
-- CreateTable
CREATE TABLE "LlmModelMigration" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "sourceModelSlug" TEXT NOT NULL,
    "targetModelSlug" TEXT NOT NULL,
    "reason" TEXT,
    "migratedNodeIds" JSONB NOT NULL DEFAULT '[]',
    "nodeCount" INTEGER NOT NULL,
    "customCreditCost" INTEGER,
    "isReverted" BOOLEAN NOT NULL DEFAULT false,
    "revertedAt" TIMESTAMP(3),

    CONSTRAINT "LlmModelMigration_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModelMigration_sourceModelSlug_idx" ON "LlmModelMigration"("sourceModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_targetModelSlug_idx" ON "LlmModelMigration"("targetModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_isReverted_idx" ON "LlmModelMigration"("isReverted");
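To make the revert mechanism concrete, here is a rough sketch of what a disable-with-migration flow would record in this table. The db handle (asyncpg-style) and the two node helpers are assumptions for illustration, not code from this change:

# Sketch only: 'db' is assumed to be an asyncpg connection; find_nodes_using_model
# and set_node_model are hypothetical helpers.
import json
from datetime import datetime, timezone

async def migrate_model(db, source_slug: str, target_slug: str, reason: str | None):
    node_ids = await find_nodes_using_model(db, source_slug)  # hypothetical helper
    for node_id in node_ids:
        await set_node_model(db, node_id, target_slug)  # hypothetical helper
    # Record exactly which nodes were touched so the change can be undone later;
    # isReverted defaults to false and revertedAt stays NULL until a revert.
    await db.execute(
        'INSERT INTO "LlmModelMigration" '
        '("id", "updatedAt", "sourceModelSlug", "targetModelSlug", "reason", '
        ' "migratedNodeIds", "nodeCount") '
        "VALUES (gen_random_uuid()::text, $1, $2, $3, $4, $5::jsonb, $6)",
        datetime.now(timezone.utc), source_slug, target_slug, reason,
        json.dumps(node_ids), len(node_ids),
    )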
@@ -1,127 +0,0 @@
-- Add LlmModelCreator table
-- Creator represents who made/trained the model (e.g., OpenAI, Meta)
-- This is distinct from Provider who hosts/serves the model (e.g., OpenRouter)

-- Create the LlmModelCreator table
CREATE TABLE "LlmModelCreator" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "websiteUrl" TEXT,
    "logoUrl" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}',

    CONSTRAINT "LlmModelCreator_pkey" PRIMARY KEY ("id")
);

-- Create unique index on name
CREATE UNIQUE INDEX "LlmModelCreator_name_key" ON "LlmModelCreator"("name");

-- Add creatorId column to LlmModel
ALTER TABLE "LlmModel" ADD COLUMN "creatorId" TEXT;

-- Add foreign key constraint
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
    FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;

-- Create index on creatorId
CREATE INDEX "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");

-- Seed creators based on known model creators
INSERT INTO "LlmModelCreator" ("id", "updatedAt", "name", "displayName", "description", "websiteUrl", "metadata")
VALUES
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'openai', 'OpenAI', 'Creator of GPT models', 'https://openai.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'anthropic', 'Anthropic', 'Creator of Claude models', 'https://anthropic.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'meta', 'Meta', 'Creator of Llama models', 'https://ai.meta.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'google', 'Google', 'Creator of Gemini models', 'https://deepmind.google', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'mistral', 'Mistral AI', 'Creator of Mistral models', 'https://mistral.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cohere', 'Cohere', 'Creator of Command models', 'https://cohere.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'deepseek', 'DeepSeek', 'Creator of DeepSeek models', 'https://deepseek.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'perplexity', 'Perplexity AI', 'Creator of Sonar models', 'https://perplexity.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'qwen', 'Qwen (Alibaba)', 'Creator of Qwen models', 'https://qwenlm.github.io', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'xai', 'xAI', 'Creator of Grok models', 'https://x.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'amazon', 'Amazon', 'Creator of Nova models', 'https://aws.amazon.com/bedrock', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'microsoft', 'Microsoft', 'Creator of WizardLM models', 'https://microsoft.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'moonshot', 'Moonshot AI', 'Creator of Kimi models', 'https://moonshot.cn', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nvidia', 'NVIDIA', 'Creator of Nemotron models', 'https://nvidia.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nous_research', 'Nous Research', 'Creator of Hermes models', 'https://nousresearch.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'vercel', 'Vercel', 'Creator of v0 models', 'https://vercel.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cognitive_computations', 'Cognitive Computations', 'Creator of Dolphin models', 'https://erichartford.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'gryphe', 'Gryphe', 'Creator of MythoMax models', 'https://huggingface.co/Gryphe', '{}')
ON CONFLICT ("name") DO NOTHING;

-- Update existing models with their creators
-- OpenAI models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'openai')
WHERE "slug" LIKE 'gpt-%' OR "slug" LIKE 'o1%' OR "slug" LIKE 'o3%' OR "slug" LIKE 'openai/%';

-- Anthropic models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'anthropic')
WHERE "slug" LIKE 'claude-%';

-- Meta/Llama models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'meta')
WHERE "slug" LIKE 'llama%' OR "slug" LIKE 'Llama%' OR "slug" LIKE 'meta-llama/%' OR "slug" LIKE '%/llama-%';

-- Google models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'google')
WHERE "slug" LIKE 'google/%' OR "slug" LIKE 'gemini%';

-- Mistral models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'mistral')
WHERE "slug" LIKE 'mistral%' OR "slug" LIKE 'mistralai/%';

-- Cohere models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cohere')
WHERE "slug" LIKE 'cohere/%' OR "slug" LIKE 'command-%';

-- DeepSeek models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'deepseek')
WHERE "slug" LIKE 'deepseek/%' OR "slug" LIKE 'deepseek-%';

-- Perplexity models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'perplexity')
WHERE "slug" LIKE 'perplexity/%' OR "slug" LIKE 'sonar%';

-- Qwen models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'qwen')
WHERE "slug" LIKE 'Qwen/%' OR "slug" LIKE 'qwen/%';

-- xAI/Grok models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'xai')
WHERE "slug" LIKE 'x-ai/%' OR "slug" LIKE 'grok%';

-- Amazon models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'amazon')
WHERE "slug" LIKE 'amazon/%' OR "slug" LIKE 'nova-%';

-- Microsoft models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'microsoft')
WHERE "slug" LIKE 'microsoft/%' OR "slug" LIKE 'wizardlm%';

-- Moonshot models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'moonshot')
WHERE "slug" LIKE 'moonshotai/%' OR "slug" LIKE 'kimi%';

-- NVIDIA models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nvidia')
WHERE "slug" LIKE 'nvidia/%' OR "slug" LIKE '%nemotron%';

-- Nous Research models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nous_research')
WHERE "slug" LIKE 'nousresearch/%' OR "slug" LIKE 'hermes%';

-- Vercel/v0 models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'vercel')
WHERE "slug" LIKE 'v0-%';

-- Dolphin models (Cognitive Computations / Eric Hartford)
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cognitive_computations')
WHERE "slug" LIKE 'dolphin-%';

-- Gryphe models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'gryphe')
WHERE "slug" LIKE 'gryphe/%' OR "slug" LIKE 'mythomax%';
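The creator/provider split above means one model row answers both "who trained it" and "who serves it". A small illustrative lookup follows; psycopg2 and the connection string are assumptions, not part of this migration:

# Illustrative query joining a model to its creator and its provider.
import psycopg2

conn = psycopg2.connect("dbname=platform")  # assumed connection string
with conn, conn.cursor() as cur:
    cur.execute(
        """
        SELECT m."slug", c."displayName" AS creator, p."displayName" AS provider
        FROM "LlmModel" m
        LEFT JOIN "LlmModelCreator" c ON c."id" = m."creatorId"
        JOIN "LlmProvider" p ON p."id" = m."providerId"
        WHERE m."slug" = %s
        """,
        ("meta-llama/llama-4-scout",),
    )
    print(cur.fetchone())  # e.g. ('meta-llama/llama-4-scout', 'Meta', 'OpenRouter')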
@@ -1,4 +0,0 @@
-- CreateIndex
-- Index for efficient LLM model lookups on AgentNode.constantInput->>'model'
-- This improves performance of model migration queries in the LLM registry
CREATE INDEX "AgentNode_constantInput_model_idx" ON "AgentNode" ((("constantInput"->>'model')));
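An expression index is only used when the query predicate repeats the indexed expression verbatim. A sketch of the lookup this index exists for (psycopg2 and the connection string are assumptions):

# The index covers ("constantInput"->>'model'), so the WHERE clause must use the
# exact same expression for the planner to choose an index scan.
import psycopg2

conn = psycopg2.connect("dbname=platform")  # assumed connection string
with conn, conn.cursor() as cur:
    cur.execute(
        """SELECT "id" FROM "AgentNode" WHERE "constantInput"->>'model' = %s""",
        ("gpt-4o-mini",),
    )
    node_ids = [row[0] for row in cur.fetchall()]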
@@ -1,52 +0,0 @@
-- Add GPT-5.2 model and update O3 slug
-- This migration adds the new GPT-5.2 model added in dev branch

-- Update O3 slug to match dev branch format
UPDATE "LlmModel"
SET "slug" = 'o3-2025-04-16'
WHERE "slug" = 'o3';

-- Update cost reference for O3 if needed
-- (costs are linked by model ID, so no update needed)

-- Add GPT-5.2 model
WITH provider_id AS (
    SELECT "id" FROM "LlmProvider" WHERE "name" = 'openai'
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    'gpt-5.2-2025-12-11',
    'GPT 5.2',
    'OpenAI GPT-5.2 model',
    p."id",
    400000,
    128000,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM provider_id p
ON CONFLICT ("slug") DO NOTHING;

-- Add cost for GPT-5.2
WITH model_id AS (
    SELECT m."id", p."name" as provider_name
    FROM "LlmModel" m
    JOIN "LlmProvider" p ON p."id" = m."providerId"
    WHERE m."slug" = 'gpt-5.2-2025-12-11'
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    3, -- Same cost tier as GPT-5.1
    m.provider_name,
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM model_id m
WHERE NOT EXISTS (
    SELECT 1 FROM "LlmModelCost" c WHERE c."llmModelId" = m."id"
);
@@ -1,11 +0,0 @@
-- Add isRecommended field to LlmModel table
-- This allows admins to mark a model as the recommended default

ALTER TABLE "LlmModel" ADD COLUMN "isRecommended" BOOLEAN NOT NULL DEFAULT false;

-- Set gpt-4o-mini as the default recommended model (if it exists)
UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = 'gpt-4o-mini' AND "isEnabled" = true;

-- Create unique partial index to enforce only one recommended model at the database level
-- This prevents multiple rows from having isRecommended = true
CREATE UNIQUE INDEX "LlmModel_single_recommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;
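Because of the partial unique index, no committed state can hold two rows with isRecommended = true, so swapping the recommendation has to clear the old flag before setting the new one, inside a single transaction. A sketch, with psycopg2 and the connection string as assumptions:

# Atomic "set recommended" swap under the partial unique index above.
import psycopg2

conn = psycopg2.connect("dbname=platform")  # assumed connection string
with conn:  # psycopg2 commits on success, rolls back on error
    with conn.cursor() as cur:
        # Clear the old flag first; setting the new one first would violate
        # the partial unique index while the old row is still true.
        cur.execute('UPDATE "LlmModel" SET "isRecommended" = false WHERE "isRecommended" = true')
        cur.execute(
            'UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = %s AND "isEnabled" = true',
            ("gpt-4o-mini",),
        )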
@@ -1,61 +0,0 @@
-- Add new columns to LlmModel table for extended model metadata
-- These columns support the LLM Picker UI enhancements

-- Add priceTier column: 1=cheapest, 2=medium, 3=expensive
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "priceTier" INTEGER NOT NULL DEFAULT 1;

-- Add creatorId column for model creator relationship (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "creatorId" TEXT;

-- Add isRecommended column (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "isRecommended" BOOLEAN NOT NULL DEFAULT FALSE;

-- Add index on creatorId if not exists
CREATE INDEX IF NOT EXISTS "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");

-- Add foreign key for creatorId if not exists
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'LlmModel_creatorId_fkey') THEN
        -- Only add FK if LlmModelCreator table exists
        IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'LlmModelCreator') THEN
            ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
                FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;
        END IF;
    END IF;
END $$;

-- Update priceTier values for existing models based on original MODEL_METADATA
-- Tier 1 = cheapest, Tier 2 = medium, Tier 3 = expensive

-- OpenAI models
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o3';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'o3-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'o1';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o1-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-5.2';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5.1';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-mini';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-nano';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5-chat-latest';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" LIKE 'gpt-4.1%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-4o-mini';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-4o';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-4-turbo';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-3.5-turbo';

-- Anthropic models
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude-opus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude%-4-5-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude%-haiku%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'claude-3-haiku-20240307';

-- OpenRouter models - Pro/expensive tiers
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'google/gemini%-pro%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%command-r-plus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%sonar-pro%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%sonar-deep-research%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'x-ai/grok-4';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%qwen3-coder%';
autogpt_platform/backend/poetry.lock (generated, 756 lines changed)
File diff suppressed because it is too large
@@ -11,7 +11,7 @@ packages = [{ include = "backend", format = "sdist" }]
 python = ">=3.10,<3.14"
 aio-pika = "^9.5.5"
 aiohttp = "^3.10.0"
-aiodns = "^3.5.0"
+aiodns = "^4.0.0"
 anthropic = "^0.79.0"
 apscheduler = "^3.11.1"
 autogpt-libs = { path = "../autogpt_libs", develop = true }
@@ -19,7 +19,7 @@ bleach = { extras = ["css"], version = "^6.2.0" }
 click = "^8.2.0"
 cryptography = "^46.0"
 discord-py = "^2.5.2"
-e2b-code-interpreter = "^1.5.2"
+e2b-code-interpreter = "^2.4.1"
 elevenlabs = "^1.50.0"
 fastapi = "^0.128.6"
 feedparser = "^6.0.11"
@@ -29,7 +29,7 @@ google-auth-oauthlib = "^1.2.2"
 google-cloud-storage = "^3.2.0"
 googlemaps = "^4.10.0"
 gravitasml = "^0.1.4"
-groq = "^0.30.0"
+groq = "^1.0.0"
 html2text = "^2024.2.26"
 jinja2 = "^3.1.6"
 jsonref = "^1.1.0"
@@ -58,21 +58,21 @@ pytest = "^8.4.1"
 pytest-asyncio = "^1.1.0"
 python-dotenv = "^1.1.1"
 python-multipart = "^0.0.22"
-redis = "^6.2.0"
+redis = "^7.1.1"
 regex = "^2025.9.18"
 replicate = "^1.0.6"
 sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlalchemy"], version = "^2.44.0"}
 sqlalchemy = "^2.0.40"
 strenum = "^0.4.9"
 stripe = "^11.5.0"
-supabase = "2.27.3"
+supabase = "2.28.0"
 tenacity = "^9.1.4"
-todoist-api-python = "^2.1.7"
+todoist-api-python = "^3.2.1"
 tweepy = "^4.16.0"
 uvicorn = { extras = ["standard"], version = "^0.40.0" }
 websockets = "^15.0"
 youtube-transcript-api = "^1.2.1"
-yt-dlp = "2025.12.08"
+yt-dlp = "2026.2.4"
 zerobouncesdk = "^1.1.2"
 # NOTE: please insert new dependencies in their alphabetical location
 pytest-snapshot = "^0.9.0"
@@ -85,7 +85,7 @@ pandas = "^2.3.1"
 firecrawl-py = "^4.3.6"
 exa-py = "^1.14.20"
 croniter = "^6.0.0"
-stagehand = "^0.5.1"
+stagehand = "^3.5.0"
 gravitas-md2gdocs = "^0.1.0"
 posthog = "^7.6.0"

@@ -94,7 +94,7 @@ aiohappyeyeballs = "^2.6.1"
 black = "^24.10.0"
 faker = "^38.2.0"
 httpx = "^0.28.1"
-isort = "^5.13.2"
+isort = "^7.0.0"
 poethepoet = "^0.41.0"
 pre-commit = "^4.4.0"
 pyright = "^1.1.407"
@@ -1143,153 +1143,6 @@ enum APIKeyStatus {
|
|||||||
|
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
///////////// LLM REGISTRY AND BILLING DATA /////////////
|
|
||||||
////////////////////////////////////////////////////////////
|
|
||||||
////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// LlmCostUnit: Defines how LLM MODEL costs are calculated (per run or per token).
|
|
||||||
// This is distinct from BlockCostType (in backend/data/block.py) which defines
|
|
||||||
// how BLOCK EXECUTION costs are calculated (per run, per byte, or per second).
|
|
||||||
// LlmCostUnit is for pricing individual LLM model API calls in the registry,
|
|
||||||
// while BlockCostType is for billing platform block executions.
|
|
||||||
enum LlmCostUnit {
|
|
||||||
RUN
|
|
||||||
TOKENS
|
|
||||||
}
|
|
||||||
|
|
||||||
model LlmModelCreator {
|
|
||||||
id String @id @default(uuid())
|
|
||||||
createdAt DateTime @default(now())
|
|
||||||
updatedAt DateTime @updatedAt
|
|
||||||
|
|
||||||
name String @unique // e.g., "openai", "anthropic", "meta"
|
|
||||||
displayName String // e.g., "OpenAI", "Anthropic", "Meta"
|
|
||||||
description String?
|
|
||||||
websiteUrl String? // Link to creator's website
|
|
||||||
logoUrl String? // URL to creator's logo
|
|
||||||
|
|
||||||
metadata Json @default("{}")
|
|
||||||
|
|
||||||
Models LlmModel[]
|
|
||||||
}
|
|
||||||
|
|
||||||
model LlmProvider {
|
|
||||||
id String @id @default(uuid())
|
|
||||||
createdAt DateTime @default(now())
|
|
||||||
updatedAt DateTime @updatedAt
|
|
||||||
|
|
||||||
name String @unique
|
|
||||||
displayName String
|
|
||||||
description String?
|
|
||||||
|
|
||||||
defaultCredentialProvider String?
|
|
||||||
defaultCredentialId String?
|
|
||||||
defaultCredentialType String?
|
|
||||||
|
|
||||||
supportsTools Boolean @default(true)
|
|
||||||
supportsJsonOutput Boolean @default(true)
|
|
||||||
supportsReasoning Boolean @default(false)
|
|
||||||
supportsParallelTool Boolean @default(false)
|
|
||||||
|
|
||||||
metadata Json @default("{}")
|
|
||||||
|
|
||||||
Models LlmModel[]
|
|
||||||
}
|
|
||||||
|
|
||||||
model LlmModel {
|
|
||||||
id String @id @default(uuid())
|
|
||||||
createdAt DateTime @default(now())
|
|
||||||
updatedAt DateTime @updatedAt
|
|
||||||
|
|
||||||
slug String @unique
|
|
||||||
displayName String
|
|
||||||
description String?
|
|
||||||
|
|
||||||
providerId String
|
|
||||||
Provider LlmProvider @relation(fields: [providerId], references: [id], onDelete: Restrict)
|
|
||||||
|
|
||||||
// Creator is the organization that created/trained the model (e.g., OpenAI, Meta)
|
|
||||||
// This is distinct from the provider who hosts/serves the model (e.g., OpenRouter)
|
|
||||||
creatorId String?
|
|
||||||
Creator LlmModelCreator? @relation(fields: [creatorId], references: [id], onDelete: SetNull)
|
|
||||||
|
|
||||||
contextWindow Int
|
|
||||||
maxOutputTokens Int?
|
|
||||||
priceTier Int @default(1) // 1=cheapest, 2=medium, 3=expensive
|
|
||||||
isEnabled Boolean @default(true)
|
|
||||||
isRecommended Boolean @default(false)
|
|
||||||
|
|
||||||
capabilities Json @default("{}")
|
|
||||||
metadata Json @default("{}")
|
|
||||||
|
|
||||||
Costs LlmModelCost[]
|
|
||||||
|
|
||||||
@@index([providerId, isEnabled])
|
|
||||||
@@index([creatorId])
|
|
||||||
@@index([slug])
|
|
||||||
}
|
|
||||||
|
|
||||||
model LlmModelCost {
|
|
||||||
id String @id @default(uuid())
|
|
||||||
createdAt DateTime @default(now())
|
|
||||||
updatedAt DateTime @updatedAt
|
|
||||||
unit LlmCostUnit @default(RUN)
|
|
||||||
|
|
||||||
creditCost Int
|
|
||||||
|
|
||||||
credentialProvider String
|
|
||||||
credentialId String?
|
|
||||||
credentialType String?
|
|
||||||
currency String?
|
|
||||||
|
|
||||||
metadata Json @default("{}")
|
|
||||||
|
|
||||||
llmModelId String
|
|
||||||
Model LlmModel @relation(fields: [llmModelId], references: [id], onDelete: Cascade)
|
|
||||||
|
|
||||||
@@unique([llmModelId, credentialProvider, unit])
|
|
||||||
@@index([llmModelId])
|
|
||||||
@@index([credentialProvider])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tracks model migrations for revert capability
|
|
||||||
// When a model is disabled with migration, we record which nodes were affected
|
|
||||||
// so they can be reverted when the original model is back online
|
|
||||||
model LlmModelMigration {
|
|
||||||
id String @id @default(uuid())
|
|
||||||
createdAt DateTime @default(now())
|
|
||||||
updatedAt DateTime @updatedAt
|
|
||||||
|
|
||||||
sourceModelSlug String // The original model that was disabled
|
|
||||||
targetModelSlug String // The model workflows were migrated to
|
|
||||||
reason String? // Why the migration happened (e.g., "Provider outage")
|
|
||||||
|
|
||||||
// Track affected nodes as JSON array of node IDs
|
|
||||||
// Format: ["node-uuid-1", "node-uuid-2", ...]
|
|
||||||
migratedNodeIds Json @default("[]")
|
|
||||||
nodeCount Int // Number of nodes migrated
|
|
||||||
|
|
||||||
// Custom pricing override for migrated workflows during the migration period.
|
|
||||||
// Use case: When migrating users from an expensive model (e.g., GPT-4) to a cheaper
|
|
||||||
// one (e.g., GPT-3.5), you may want to temporarily maintain the original pricing
|
|
||||||
// to avoid billing surprises, or offer a discount during the transition.
|
|
||||||
//
|
|
||||||
// IMPORTANT: This field is intended for integration with the billing system.
|
|
||||||
// When billing calculates costs for nodes affected by this migration, it should
|
|
||||||
// check if customCreditCost is set and use it instead of the target model's cost.
|
|
||||||
// If null, the target model's normal cost applies.
|
|
||||||
//
|
|
||||||
// TODO: Integrate with billing system to apply this override during cost calculation.
|
|
||||||
customCreditCost Int?
|
|
||||||
|
|
||||||
// Revert tracking
|
|
||||||
isReverted Boolean @default(false)
|
|
||||||
revertedAt DateTime?
|
|
||||||
|
|
||||||
@@index([sourceModelSlug])
|
|
||||||
@@index([targetModelSlug])
|
|
||||||
@@index([isReverted])
|
|
||||||
}
|
|
||||||
////////////// OAUTH PROVIDER TABLES //////////////////
|
////////////// OAUTH PROVIDER TABLES //////////////////
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
|
|||||||
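The customCreditCost comment in LlmModelMigration implies a cost-resolution step on the billing side. A rough sketch of that check follows; the helper names are hypothetical and this is not the platform's actual billing code:

# Sketch: resolve the per-RUN credit cost for a node, honoring any active
# migration override (see the customCreditCost comment in the schema above).
async def resolve_credit_cost(db, node_id: str, model_slug: str) -> int:
    # Hypothetical helper: the non-reverted LlmModelMigration row (if any)
    # whose migratedNodeIds array contains this node.
    migration = await find_active_migration_for_node(db, node_id)
    if migration is not None and migration.custom_credit_cost is not None:
        return migration.custom_credit_cost  # billing override during migration
    # Otherwise the target model's normal cost applies.
    cost = await get_model_run_cost(db, model_slug)  # hypothetical helper
    return cost.credit_cost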
@@ -1,8 +1,5 @@
 "use client";

 import { Sidebar } from "@/components/__legacy__/Sidebar";
 import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
-import { Cpu } from "@phosphor-icons/react";
-
 import { IconSliders } from "@/components/__legacy__/ui/icons";
-
@@ -29,11 +26,6 @@ const sidebarLinkGroups = [
       href: "/admin/execution-analytics",
       icon: <FileText className="h-6 w-6" />,
     },
-    {
-      text: "LLM Registry",
-      href: "/admin/llms",
-      icon: <Cpu size={24} />,
-    },
     {
       text: "Admin User Management",
       href: "/admin/settings",
@@ -1,493 +0,0 @@
|
|||||||
"use server";
|
|
||||||
|
|
||||||
import { revalidatePath } from "next/cache";
|
|
||||||
|
|
||||||
// Generated API functions
|
|
||||||
import {
|
|
||||||
getV2ListLlmProviders,
|
|
||||||
postV2CreateLlmProvider,
|
|
||||||
patchV2UpdateLlmProvider,
|
|
||||||
deleteV2DeleteLlmProvider,
|
|
||||||
getV2ListLlmModels,
|
|
||||||
postV2CreateLlmModel,
|
|
||||||
patchV2UpdateLlmModel,
|
|
||||||
patchV2ToggleLlmModelAvailability,
|
|
||||||
deleteV2DeleteLlmModelAndMigrateWorkflows,
|
|
||||||
getV2GetModelUsageCount,
|
|
||||||
getV2ListModelMigrations,
|
|
||||||
postV2RevertAModelMigration,
|
|
||||||
getV2ListModelCreators,
|
|
||||||
postV2CreateModelCreator,
|
|
||||||
patchV2UpdateModelCreator,
|
|
||||||
deleteV2DeleteModelCreator,
|
|
||||||
postV2SetRecommendedModel,
|
|
||||||
} from "@/app/api/__generated__/endpoints/admin/admin";
|
|
||||||
|
|
||||||
// Generated types
|
|
||||||
import type { LlmProvidersResponse } from "@/app/api/__generated__/models/llmProvidersResponse";
|
|
||||||
import type { LlmModelsResponse } from "@/app/api/__generated__/models/llmModelsResponse";
|
|
||||||
import type { UpsertLlmProviderRequest } from "@/app/api/__generated__/models/upsertLlmProviderRequest";
|
|
||||||
import type { CreateLlmModelRequest } from "@/app/api/__generated__/models/createLlmModelRequest";
|
|
||||||
import type { UpdateLlmModelRequest } from "@/app/api/__generated__/models/updateLlmModelRequest";
|
|
||||||
import type { ToggleLlmModelRequest } from "@/app/api/__generated__/models/toggleLlmModelRequest";
|
|
||||||
import type { LlmMigrationsResponse } from "@/app/api/__generated__/models/llmMigrationsResponse";
|
|
||||||
import type { LlmCreatorsResponse } from "@/app/api/__generated__/models/llmCreatorsResponse";
|
|
||||||
import type { UpsertLlmCreatorRequest } from "@/app/api/__generated__/models/upsertLlmCreatorRequest";
|
|
||||||
import type { LlmModelUsageResponse } from "@/app/api/__generated__/models/llmModelUsageResponse";
|
|
||||||
import { LlmCostUnit } from "@/app/api/__generated__/models/llmCostUnit";
|
|
||||||
|
|
||||||
const ADMIN_LLM_PATH = "/admin/llms";
|
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// Utilities
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts and validates a required string field from FormData.
|
|
||||||
* Throws an error if the field is missing or empty.
|
|
||||||
*/
|
|
||||||
function getRequiredFormField(
|
|
||||||
formData: FormData,
|
|
||||||
fieldName: string,
|
|
||||||
displayName?: string,
|
|
||||||
): string {
|
|
||||||
const raw = formData.get(fieldName);
|
|
||||||
const value = raw ? String(raw).trim() : "";
|
|
||||||
if (!value) {
|
|
||||||
throw new Error(`${displayName || fieldName} is required`);
|
|
||||||
}
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts and validates a required positive number field from FormData.
|
|
||||||
* Throws an error if the field is missing, empty, or not a positive number.
|
|
||||||
*/
|
|
||||||
function getRequiredPositiveNumber(
|
|
||||||
formData: FormData,
|
|
||||||
fieldName: string,
|
|
||||||
displayName?: string,
|
|
||||||
): number {
|
|
||||||
const raw = formData.get(fieldName);
|
|
||||||
const value = Number(raw);
|
|
||||||
if (raw === null || raw === "" || !Number.isFinite(value) || value <= 0) {
|
|
||||||
throw new Error(`${displayName || fieldName} must be a positive number`);
|
|
||||||
}
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts and validates a required number field from FormData.
|
|
||||||
* Throws an error if the field is missing, empty, or not a finite number.
|
|
||||||
*/
|
|
||||||
function getRequiredNumber(
|
|
||||||
formData: FormData,
|
|
||||||
fieldName: string,
|
|
||||||
displayName?: string,
|
|
||||||
): number {
|
|
||||||
const raw = formData.get(fieldName);
|
|
||||||
const value = Number(raw);
|
|
||||||
if (raw === null || raw === "" || !Number.isFinite(value)) {
|
|
||||||
throw new Error(`${displayName || fieldName} is required`);
|
|
||||||
}
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// Provider Actions
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export async function fetchLlmProviders(): Promise<LlmProvidersResponse> {
|
|
||||||
const response = await getV2ListLlmProviders({ include_models: true });
|
|
||||||
if (response.status !== 200) {
|
|
||||||
throw new Error("Failed to fetch LLM providers");
|
|
||||||
}
|
|
||||||
return response.data;
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function createLlmProviderAction(formData: FormData) {
|
|
||||||
const payload: UpsertLlmProviderRequest = {
|
|
||||||
name: String(formData.get("name") || "").trim(),
|
|
||||||
display_name: String(formData.get("display_name") || "").trim(),
|
|
||||||
description: formData.get("description")
|
|
||||||
? String(formData.get("description"))
|
|
||||||
: undefined,
|
|
||||||
default_credential_provider: formData.get("default_credential_provider")
|
|
||||||
? String(formData.get("default_credential_provider")).trim()
|
|
||||||
: undefined,
|
|
||||||
default_credential_id: formData.get("default_credential_id")
|
|
||||||
? String(formData.get("default_credential_id")).trim()
|
|
||||||
: undefined,
|
|
||||||
default_credential_type: formData.get("default_credential_type")
|
|
||||||
? String(formData.get("default_credential_type")).trim()
|
|
||||||
: "api_key",
|
|
||||||
supports_tools: formData.getAll("supports_tools").includes("on"),
|
|
||||||
supports_json_output: formData
|
|
||||||
.getAll("supports_json_output")
|
|
||||||
.includes("on"),
|
|
||||||
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
|
|
||||||
supports_parallel_tool: formData
|
|
||||||
.getAll("supports_parallel_tool")
|
|
||||||
.includes("on"),
|
|
||||||
metadata: {},
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await postV2CreateLlmProvider(payload);
|
|
||||||
if (response.status !== 200) {
|
|
||||||
throw new Error("Failed to create LLM provider");
|
|
||||||
}
|
|
||||||
revalidatePath(ADMIN_LLM_PATH);
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function deleteLlmProviderAction(
|
|
||||||
formData: FormData,
|
|
||||||
): Promise<void> {
|
|
||||||
const providerId = getRequiredFormField(
|
|
||||||
formData,
|
|
||||||
"provider_id",
|
|
||||||
"Provider id",
|
|
||||||
);
|
|
||||||
|
|
||||||
const response = await deleteV2DeleteLlmProvider(providerId);
|
|
||||||
if (response.status !== 200) {
|
|
||||||
const errorData = response.data as { detail?: string };
|
|
||||||
throw new Error(errorData?.detail || "Failed to delete provider");
|
|
||||||
}
|
|
||||||
revalidatePath(ADMIN_LLM_PATH);
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function updateLlmProviderAction(formData: FormData) {
|
|
||||||
const providerId = getRequiredFormField(
|
|
||||||
formData,
|
|
||||||
"provider_id",
|
|
||||||
"Provider id",
|
|
||||||
);
|
|
||||||
|
|
||||||
const payload: UpsertLlmProviderRequest = {
|
|
||||||
name: String(formData.get("name") || "").trim(),
|
|
||||||
display_name: String(formData.get("display_name") || "").trim(),
|
|
||||||
description: formData.get("description")
|
|
||||||
? String(formData.get("description"))
|
|
||||||
: undefined,
|
|
||||||
default_credential_provider: formData.get("default_credential_provider")
|
|
||||||
? String(formData.get("default_credential_provider")).trim()
|
|
||||||
: undefined,
|
|
||||||
default_credential_id: formData.get("default_credential_id")
|
|
||||||
? String(formData.get("default_credential_id")).trim()
|
|
||||||
: undefined,
|
|
||||||
default_credential_type: formData.get("default_credential_type")
|
|
||||||
? String(formData.get("default_credential_type")).trim()
|
|
||||||
: "api_key",
|
|
||||||
supports_tools: formData.getAll("supports_tools").includes("on"),
|
|
||||||
supports_json_output: formData
|
|
||||||
.getAll("supports_json_output")
|
|
||||||
.includes("on"),
|
|
||||||
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
|
|
||||||
supports_parallel_tool: formData
|
|
||||||
.getAll("supports_parallel_tool")
|
|
||||||
.includes("on"),
|
|
||||||
metadata: {},
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await patchV2UpdateLlmProvider(providerId, payload);
|
|
||||||
if (response.status !== 200) {
|
|
||||||
throw new Error("Failed to update LLM provider");
|
|
||||||
}
|
|
||||||
revalidatePath(ADMIN_LLM_PATH);
|
|
||||||
}
|
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// Model Actions
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export async function fetchLlmModels(): Promise<LlmModelsResponse> {
|
|
||||||
const response = await getV2ListLlmModels();
|
|
||||||
if (response.status !== 200) {
|
|
||||||
throw new Error("Failed to fetch LLM models");
|
|
||||||
}
|
|
||||||
return response.data;
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function createLlmModelAction(formData: FormData) {
|
|
||||||
const providerId = getRequiredFormField(formData, "provider_id", "Provider");
|
|
||||||
const creatorId = formData.get("creator_id");
|
|
||||||
const contextWindow = getRequiredPositiveNumber(
|
|
||||||
formData,
|
|
||||||
"context_window",
|
|
||||||
"Context window",
|
|
||||||
);
|
|
||||||
const creditCost = getRequiredNumber(formData, "credit_cost", "Credit cost");
|
|
||||||
|
|
||||||
// Fetch provider to get default credentials
|
|
||||||
const providersResponse = await getV2ListLlmProviders({
|
|
||||||
include_models: false,
|
|
||||||
});
|
|
||||||
if (providersResponse.status !== 200) {
|
|
||||||
throw new Error("Failed to fetch providers");
|
|
||||||
}
|
|
||||||
const provider = providersResponse.data.providers.find(
|
|
||||||
(p) => p.id === providerId,
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!provider) {
|
|
||||||
throw new Error("Provider not found");
|
|
||||||
}
|
|
||||||
|
|
||||||
const payload: CreateLlmModelRequest = {
|
|
||||||
slug: String(formData.get("slug") || "").trim(),
|
|
||||||
display_name: String(formData.get("display_name") || "").trim(),
|
|
||||||
description: formData.get("description")
|
|
||||||
? String(formData.get("description"))
|
|
||||||
: undefined,
|
|
||||||
provider_id: providerId,
|
|
||||||
creator_id: creatorId ? String(creatorId) : undefined,
|
|
||||||
context_window: contextWindow,
|
|
||||||
max_output_tokens: formData.get("max_output_tokens")
|
|
||||||
? Number(formData.get("max_output_tokens"))
|
|
||||||
: undefined,
|
|
||||||
is_enabled: formData.getAll("is_enabled").includes("on"),
|
|
||||||
capabilities: {},
|
|
||||||
metadata: {},
|
|
||||||
costs: [
|
|
||||||
{
|
|
||||||
unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
|
|
||||||
credit_cost: creditCost,
|
|
||||||
credential_provider:
|
|
||||||
provider.default_credential_provider || provider.name,
|
|
||||||
credential_id: provider.default_credential_id || undefined,
|
|
||||||
credential_type: provider.default_credential_type || "api_key",
|
|
||||||
metadata: {},
|
|
||||||
},
|
|
||||||
],
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await postV2CreateLlmModel(payload);
|
|
||||||
if (response.status !== 200) {
|
|
||||||
throw new Error("Failed to create LLM model");
|
|
||||||
}
|
|
||||||
revalidatePath(ADMIN_LLM_PATH);
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function updateLlmModelAction(formData: FormData) {
  const modelId = getRequiredFormField(formData, "model_id", "Model id");
  const creatorId = formData.get("creator_id");

  const payload: UpdateLlmModelRequest = {
    display_name: formData.get("display_name")
      ? String(formData.get("display_name"))
      : undefined,
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    provider_id: formData.get("provider_id")
      ? String(formData.get("provider_id"))
      : undefined,
    creator_id: creatorId ? String(creatorId) : undefined,
    context_window: formData.get("context_window")
      ? Number(formData.get("context_window"))
      : undefined,
    max_output_tokens: formData.get("max_output_tokens")
      ? Number(formData.get("max_output_tokens"))
      : undefined,
    is_enabled: formData.has("is_enabled")
      ? formData.getAll("is_enabled").includes("on")
      : undefined,
    costs: formData.get("credit_cost")
      ? [
          {
            unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
            credit_cost: Number(formData.get("credit_cost")),
            credential_provider: String(
              formData.get("credential_provider") || "",
            ).trim(),
            credential_id: formData.get("credential_id")
              ? String(formData.get("credential_id"))
              : undefined,
            credential_type: formData.get("credential_type")
              ? String(formData.get("credential_type"))
              : undefined,
            metadata: {},
          },
        ]
      : undefined,
  };

  const response = await patchV2UpdateLlmModel(modelId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to update LLM model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

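// Enables or disables a model. When disabling, the form may carry an optional
// migration target slug, reason, and custom credit cost to forward to the API.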
export async function toggleLlmModelAction(formData: FormData): Promise<void> {
  const modelId = getRequiredFormField(formData, "model_id", "Model id");
  const shouldEnable = formData.get("is_enabled") === "true";
  const migrateToSlug = formData.get("migrate_to_slug");
  const migrationReason = formData.get("migration_reason");
  const customCreditCost = formData.get("custom_credit_cost");

  const payload: ToggleLlmModelRequest = {
    is_enabled: shouldEnable,
    migrate_to_slug: migrateToSlug ? String(migrateToSlug) : undefined,
    migration_reason: migrationReason ? String(migrationReason) : undefined,
    custom_credit_cost: customCreditCost ? Number(customCreditCost) : undefined,
  };

  const response = await patchV2ToggleLlmModelAvailability(modelId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to toggle LLM model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

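// Deletes a model and migrates any workflows that use it; a blank replacement
// slug is normalized to undefined.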
export async function deleteLlmModelAction(formData: FormData): Promise<void> {
  const modelId = getRequiredFormField(formData, "model_id", "Model id");
  const rawReplacement = formData.get("replacement_model_slug");
  const replacementModelSlug =
    rawReplacement && String(rawReplacement).trim()
      ? String(rawReplacement).trim()
      : undefined;

  const response = await deleteV2DeleteLlmModelAndMigrateWorkflows(modelId, {
    replacement_model_slug: replacementModelSlug,
  });
  if (response.status !== 200) {
    throw new Error("Failed to delete model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

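// Read-only helper used by the delete/disable dialogs to show how many
// workflow nodes currently reference a model.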
export async function fetchLlmModelUsage(
  modelId: string,
): Promise<LlmModelUsageResponse> {
  const response = await getV2GetModelUsageCount(modelId);
  if (response.status !== 200) {
    throw new Error("Failed to fetch model usage");
  }
  return response.data;
}

// =============================================================================
// Migration Actions
// =============================================================================

export async function fetchLlmMigrations(
  includeReverted: boolean = false,
): Promise<LlmMigrationsResponse> {
  const response = await getV2ListModelMigrations({
    include_reverted: includeReverted,
  });
  if (response.status !== 200) {
    throw new Error("Failed to fetch migrations");
  }
  return response.data;
}

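// Reverts a model migration by id; the second argument is an explicit null
// request body.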
export async function revertLlmMigrationAction(
  formData: FormData,
): Promise<void> {
  const migrationId = getRequiredFormField(
    formData,
    "migration_id",
    "Migration id",
  );

  const response = await postV2RevertAModelMigration(migrationId, null);
  if (response.status !== 200) {
    throw new Error("Failed to revert migration");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

// =============================================================================
// Creator Actions
// =============================================================================

export async function fetchLlmCreators(): Promise<LlmCreatorsResponse> {
  const response = await getV2ListModelCreators();
  if (response.status !== 200) {
    throw new Error("Failed to fetch creators");
  }
  return response.data;
}

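// Create and update share the same UpsertLlmCreatorRequest shape; optional
// URL fields are trimmed and omitted when blank.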
export async function createLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const payload: UpsertLlmCreatorRequest = {
    name: String(formData.get("name") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    website_url: formData.get("website_url")
      ? String(formData.get("website_url")).trim()
      : undefined,
    logo_url: formData.get("logo_url")
      ? String(formData.get("logo_url")).trim()
      : undefined,
    metadata: {},
  };

  const response = await postV2CreateModelCreator(payload);
  if (response.status !== 200) {
    throw new Error("Failed to create creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function updateLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");

  const payload: UpsertLlmCreatorRequest = {
    name: String(formData.get("name") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    website_url: formData.get("website_url")
      ? String(formData.get("website_url")).trim()
      : undefined,
    logo_url: formData.get("logo_url")
      ? String(formData.get("logo_url")).trim()
      : undefined,
    metadata: {},
  };

  const response = await patchV2UpdateModelCreator(creatorId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to update creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function deleteLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");

  const response = await deleteV2DeleteModelCreator(creatorId);
  if (response.status !== 200) {
    throw new Error("Failed to delete creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

// =============================================================================
// Recommended Model Actions
// =============================================================================

export async function setRecommendedModelAction(
  formData: FormData,
): Promise<void> {
  const modelId = getRequiredFormField(formData, "model_id", "Model id");

  const response = await postV2SetRecommendedModel({ model_id: modelId });
  if (response.status !== 200) {
    throw new Error("Failed to set recommended model");
  }

  revalidatePath(ADMIN_LLM_PATH);
}
@@ -1,147 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";

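// Modal for registering a new model creator; submits through the
// createLlmCreatorAction server action and refreshes the route on success.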
export function AddCreatorModal() {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to create creator");
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Add Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "512px" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Creator
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Add a new model creator (the organization that made/trained the
          model).
        </div>

        <form action={handleSubmit} className="space-y-4">
          <div className="grid gap-4 sm:grid-cols-2">
            <div className="space-y-2">
              <label
                htmlFor="name"
                className="text-sm font-medium text-foreground"
              >
                Name (slug) <span className="text-destructive">*</span>
              </label>
              <input
                id="name"
                required
                name="name"
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="openai"
              />
              <p className="text-xs text-muted-foreground">
                Lowercase identifier (e.g., openai, meta, anthropic)
              </p>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="display_name"
                className="text-sm font-medium text-foreground"
              >
                Display Name <span className="text-destructive">*</span>
              </label>
              <input
                id="display_name"
                required
                name="display_name"
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="OpenAI"
              />
            </div>
          </div>

          <div className="space-y-2">
            <label
              htmlFor="description"
              className="text-sm font-medium text-foreground"
            >
              Description
            </label>
            <textarea
              id="description"
              name="description"
              rows={2}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
              placeholder="Creator of GPT models..."
            />
          </div>

          <div className="space-y-2">
            <label
              htmlFor="website_url"
              className="text-sm font-medium text-foreground"
            >
              Website URL
            </label>
            <input
              id="website_url"
              name="website_url"
              type="url"
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
              placeholder="https://openai.com"
            />
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Add Creator"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,314 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { createLlmModelAction } from "../actions";
import { useRouter } from "next/navigation";

interface Props {
  providers: LlmProvider[];
  creators: LlmModelCreator[];
}

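// Modal for registering a new model slug, metadata, and pricing; the provider
// and creator lists come in as props.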
export function AddModelModal({ providers, creators }: Props) {
  const [open, setOpen] = useState(false);
  const [selectedCreatorId, setSelectedCreatorId] = useState("");
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmModelAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to create model");
    } finally {
      setIsSubmitting(false);
    }
  }

  // When provider changes, auto-select matching creator if one exists
  function handleProviderChange(providerId: string) {
    const provider = providers.find((p) => p.id === providerId);
    if (provider) {
      // Find creator with same name as provider (e.g., "openai" -> "openai")
      const matchingCreator = creators.find((c) => c.name === provider.name);
      if (matchingCreator) {
        setSelectedCreatorId(matchingCreator.id);
      } else {
        // No matching creator (e.g., OpenRouter hosts other creators' models)
        setSelectedCreatorId("");
      }
    }
  }

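  // The creator <select> below is controlled so handleProviderChange can
  // auto-select it; the provider <select> stays uncontrolled.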
  return (
    <Dialog
      title="Add Model"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Model
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Register a new model slug, metadata, and pricing.
        </div>

        <form action={handleSubmit} className="space-y-6">
          {/* Basic Information */}
          <div className="space-y-4">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Basic Information
              </h3>
              <p className="text-xs text-muted-foreground">
                Core model details
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="slug"
                  className="text-sm font-medium text-foreground"
                >
                  Model Slug <span className="text-destructive">*</span>
                </label>
                <input
                  id="slug"
                  required
                  name="slug"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="gpt-4.1-mini-2025-04-14"
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="display_name"
                  className="text-sm font-medium text-foreground"
                >
                  Display Name <span className="text-destructive">*</span>
                </label>
                <input
                  id="display_name"
                  required
                  name="display_name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="GPT 4.1 Mini"
                />
              </div>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="description"
                className="text-sm font-medium text-foreground"
              >
                Description
              </label>
              <textarea
                id="description"
                name="description"
                rows={3}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="Optional description..."
              />
            </div>
          </div>

          {/* Model Configuration */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Model Configuration
              </h3>
              <p className="text-xs text-muted-foreground">
                Model capabilities and limits
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="provider_id"
                  className="text-sm font-medium text-foreground"
                >
                  Provider <span className="text-destructive">*</span>
                </label>
                <select
                  id="provider_id"
                  required
                  name="provider_id"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  defaultValue=""
                  onChange={(e) => handleProviderChange(e.target.value)}
                >
                  <option value="" disabled>
                    Select provider
                  </option>
                  {providers.map((provider) => (
                    <option key={provider.id} value={provider.id}>
                      {provider.display_name} ({provider.name})
                    </option>
                  ))}
                </select>
                <p className="text-xs text-muted-foreground">
                  Who hosts/serves the model
                </p>
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="creator_id"
                  className="text-sm font-medium text-foreground"
                >
                  Creator
                </label>
                <select
                  id="creator_id"
                  name="creator_id"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  value={selectedCreatorId}
                  onChange={(e) => setSelectedCreatorId(e.target.value)}
                >
                  <option value="">No creator selected</option>
                  {creators.map((creator) => (
                    <option key={creator.id} value={creator.id}>
                      {creator.display_name} ({creator.name})
                    </option>
                  ))}
                </select>
                <p className="text-xs text-muted-foreground">
                  Who made/trained the model (e.g., OpenAI, Meta)
                </p>
              </div>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="context_window"
                  className="text-sm font-medium text-foreground"
                >
                  Context Window <span className="text-destructive">*</span>
                </label>
                <input
                  id="context_window"
                  required
                  type="number"
                  name="context_window"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="128000"
                  min={1}
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="max_output_tokens"
                  className="text-sm font-medium text-foreground"
                >
                  Max Output Tokens
                </label>
                <input
                  id="max_output_tokens"
                  type="number"
                  name="max_output_tokens"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="16384"
                  min={1}
                />
              </div>
            </div>
          </div>

          {/* Pricing */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">Pricing</h3>
              <p className="text-xs text-muted-foreground">
                Credit cost per run (credentials are managed via the provider)
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-1">
              <div className="space-y-2">
                <label
                  htmlFor="credit_cost"
                  className="text-sm font-medium text-foreground"
                >
                  Credit Cost <span className="text-destructive">*</span>
                </label>
                <input
                  id="credit_cost"
                  required
                  type="number"
                  name="credit_cost"
                  step="1"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="5"
                  min={0}
                />
              </div>
            </div>
            <p className="text-xs text-muted-foreground">
              Credit cost is always in platform credits. Credentials are
              inherited from the selected provider.
            </p>
          </div>

          {/* Enabled Toggle */}
          <div className="flex items-center gap-3 border-t border-border pt-6">
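            {/* Hidden "off" value ensures an unchecked box still submits a
                field; the server action treats only "on" as enabled */}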
            <input type="hidden" name="is_enabled" value="off" />
            <input
              id="is_enabled"
              type="checkbox"
              name="is_enabled"
              defaultChecked
              className="h-4 w-4 rounded border-input"
            />
            <label
              htmlFor="is_enabled"
              className="text-sm font-medium text-foreground"
            >
              Enabled by default
            </label>
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Save Model"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,268 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmProviderAction } from "../actions";
import { useRouter } from "next/navigation";

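// Modal for registering a new upstream provider; the credential provider name
// entered here must already exist in the backend credential store (see the
// setup notes rendered inside the dialog).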
export function AddProviderModal() {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmProviderAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(
        err instanceof Error ? err.message : "Failed to create provider",
      );
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Add Provider"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Provider
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Define a new upstream provider and default credential information.
        </div>

        {/* Setup Instructions */}
        <div className="mb-6 rounded-lg border border-primary/30 bg-primary/5 p-4">
          <div className="space-y-2">
            <h4 className="text-sm font-semibold text-foreground">
              Before Adding a Provider
            </h4>
            <p className="text-xs text-muted-foreground">
              To use a new provider, you must first configure its credentials in
              the backend:
            </p>
            <ol className="list-inside list-decimal space-y-1 text-xs text-muted-foreground">
              <li>
                Add the credential to{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  backend/integrations/credentials_store.py
                </code>{" "}
                with a UUID, provider name, and settings secret reference
              </li>
              <li>
                Add it to the{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  PROVIDER_CREDENTIALS
                </code>{" "}
                dictionary in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  backend/data/block_cost_config.py
                </code>
              </li>
              <li>
                Use the <strong>same provider name</strong> in the
                "Credential Provider" field below that matches the key
                in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  PROVIDER_CREDENTIALS
                </code>
              </li>
            </ol>
          </div>
        </div>

        <form action={handleSubmit} className="space-y-6">
          {/* Basic Information */}
          <div className="space-y-4">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Basic Information
              </h3>
              <p className="text-xs text-muted-foreground">
                Core provider details
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="name"
                  className="text-sm font-medium text-foreground"
                >
                  Provider Slug <span className="text-destructive">*</span>
                </label>
                <input
                  id="name"
                  required
                  name="name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="e.g. openai"
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="display_name"
                  className="text-sm font-medium text-foreground"
                >
                  Display Name <span className="text-destructive">*</span>
                </label>
                <input
                  id="display_name"
                  required
                  name="display_name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="OpenAI"
                />
              </div>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="description"
                className="text-sm font-medium text-foreground"
              >
                Description
              </label>
              <textarea
                id="description"
                name="description"
                rows={3}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="Optional description..."
              />
            </div>
          </div>

          {/* Default Credentials */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Default Credentials
              </h3>
              <p className="text-xs text-muted-foreground">
                Credential provider name that matches the key in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  PROVIDER_CREDENTIALS
                </code>
              </p>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="default_credential_provider"
                className="text-sm font-medium text-foreground"
              >
                Credential Provider <span className="text-destructive">*</span>
              </label>
              <input
                id="default_credential_provider"
                name="default_credential_provider"
                required
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="openai"
              />
              <p className="text-xs text-muted-foreground">
                <strong>Important:</strong> This must exactly match the key in
                the{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  PROVIDER_CREDENTIALS
                </code>{" "}
                dictionary in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  block_cost_config.py
                </code>
                . Common values: "openai", "anthropic",
                "groq", "open_router", etc.
              </p>
            </div>
          </div>

          {/* Capabilities */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Capabilities
              </h3>
              <p className="text-xs text-muted-foreground">
                Provider feature flags
              </p>
            </div>
            <div className="grid gap-3 sm:grid-cols-2">
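              {/* Each capability pairs a hidden "off" input with a checkbox so
                  unchecked flags still submit; tools and JSON output default
                  on, reasoning and parallel tool calls default off */}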
              {[
                { name: "supports_tools", label: "Supports tools" },
                { name: "supports_json_output", label: "Supports JSON output" },
                { name: "supports_reasoning", label: "Supports reasoning" },
                {
                  name: "supports_parallel_tool",
                  label: "Supports parallel tool calls",
                },
              ].map(({ name, label }) => (
                <div
                  key={name}
                  className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
                >
                  <input type="hidden" name={name} value="off" />
                  <input
                    id={name}
                    type="checkbox"
                    name={name}
                    defaultChecked={
                      name !== "supports_reasoning" &&
                      name !== "supports_parallel_tool"
                    }
                    className="h-4 w-4 rounded border-input"
                  />
                  <label
                    htmlFor={name}
                    className="text-sm font-medium text-foreground"
                  >
                    {label}
                  </label>
                </div>
              ))}
            </div>
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Save Provider"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,195 +0,0 @@
"use client";

import { useState } from "react";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { updateLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";
import { DeleteCreatorModal } from "./DeleteCreatorModal";

export function CreatorsTable({ creators }: { creators: LlmModelCreator[] }) {
  if (!creators.length) {
    return (
      <div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
        No creators registered yet.
      </div>
    );
  }

  return (
    <div className="rounded-lg border">
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Creator</TableHead>
            <TableHead>Description</TableHead>
            <TableHead>Website</TableHead>
            <TableHead>Actions</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {creators.map((creator) => (
            <TableRow key={creator.id}>
              <TableCell>
                <div className="font-medium">{creator.display_name}</div>
                <div className="text-xs text-muted-foreground">
                  {creator.name}
                </div>
              </TableCell>
              <TableCell>
                <span className="text-sm text-muted-foreground">
                  {creator.description || "—"}
                </span>
              </TableCell>
              <TableCell>
                {creator.website_url ? (
                  <a
                    href={creator.website_url}
                    target="_blank"
                    rel="noopener noreferrer"
                    className="text-sm text-primary hover:underline"
                  >
                    {(() => {
                      try {
                        return new URL(creator.website_url).hostname;
                      } catch {
                        return creator.website_url;
                      }
                    })()}
                  </a>
                ) : (
                  <span className="text-muted-foreground">—</span>
                )}
              </TableCell>
              <TableCell>
                <div className="flex items-center justify-end gap-2">
                  <EditCreatorModal creator={creator} />
                  <DeleteCreatorModal creator={creator} />
                </div>
              </TableCell>
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </div>
  );
}

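// Inline edit dialog for a single creator; posts the full upsert payload with
// the creator id carried in a hidden field.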
function EditCreatorModal({ creator }: { creator: LlmModelCreator }) {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await updateLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to update creator");
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Edit Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "512px" }}
    >
      <Dialog.Trigger>
        <Button variant="outline" size="small" className="min-w-0">
          Edit
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <form action={handleSubmit} className="space-y-4">
          <input type="hidden" name="creator_id" value={creator.id} />

          <div className="grid gap-4 sm:grid-cols-2">
            <div className="space-y-2">
              <label className="text-sm font-medium">Name (slug)</label>
              <input
                required
                name="name"
                defaultValue={creator.name}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
              />
            </div>
            <div className="space-y-2">
              <label className="text-sm font-medium">Display Name</label>
              <input
                required
                name="display_name"
                defaultValue={creator.display_name}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
              />
            </div>
          </div>

          <div className="space-y-2">
            <label className="text-sm font-medium">Description</label>
            <textarea
              name="description"
              rows={2}
              defaultValue={creator.description ?? ""}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
            />
          </div>

          <div className="space-y-2">
            <label className="text-sm font-medium">Website URL</label>
            <input
              name="website_url"
              type="url"
              defaultValue={creator.website_url ?? ""}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
            />
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Updating..." : "Update"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,107 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { deleteLlmCreatorAction } from "../actions";

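// Confirmation dialog for deleting a creator; deletion only clears the creator
// reference on models, so no replacement has to be chosen.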
export function DeleteCreatorModal({ creator }: { creator: LlmModelCreator }) {
  const [open, setOpen] = useState(false);
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to delete creator");
    } finally {
      setIsDeleting(false);
    }
  }

  return (
    <Dialog
      title="Delete Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "480px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0 text-destructive hover:bg-destructive/10"
        >
          Delete
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="space-y-4">
          <div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
            <div className="flex items-start gap-3">
              <div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
                ⚠️
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{creator.display_name}</span>{" "}
                  <span className="text-muted-foreground">
                    ({creator.name})
                  </span>
                </p>
                <p className="mt-2 text-muted-foreground">
                  Models using this creator will have their creator field
                  cleared. This is safe and won't affect model
                  functionality.
                </p>
              </div>
            </div>
          </div>

          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="creator_id" value={creator.id} />

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                onClick={() => {
                  setOpen(false);
                  setError(null);
                }}
                disabled={isDeleting}
                type="button"
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={isDeleting}
                className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
              >
                {isDeleting ? "Deleting..." : "Delete Creator"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,224 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { deleteLlmModelAction, fetchLlmModelUsage } from "../actions";

export function DeleteModelModal({
  model,
  availableModels,
}: {
  model: LlmModel;
  availableModels: LlmModel[];
}) {
  const router = useRouter();
  const [open, setOpen] = useState(false);
  const [selectedReplacement, setSelectedReplacement] = useState<string>("");
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [usageCount, setUsageCount] = useState<number | null>(null);
  const [usageLoading, setUsageLoading] = useState(false);
  const [usageError, setUsageError] = useState<string | null>(null);

  // Filter out the current model and disabled models from replacement options
  const replacementOptions = availableModels.filter(
    (m) => m.id !== model.id && m.is_enabled,
  );

  // Check if migration is required (has blocks using this model)
  const requiresMigration = usageCount !== null && usageCount > 0;

  async function fetchUsage() {
    setUsageLoading(true);
    setUsageError(null);
    try {
      const usage = await fetchLlmModelUsage(model.id);
      setUsageCount(usage.node_count);
    } catch (err) {
      console.error("Failed to fetch model usage:", err);
      setUsageError("Failed to load usage count");
      setUsageCount(null);
    } finally {
      setUsageLoading(false);
    }
  }

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmModelAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to delete model");
    } finally {
      setIsDeleting(false);
    }
  }

  // Determine if delete button should be enabled
  const canDelete =
    !isDeleting &&
    !usageLoading &&
    usageCount !== null &&
    (requiresMigration
      ? selectedReplacement && replacementOptions.length > 0
      : true);

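  // Opening the dialog resets local state and fetches live usage, so the
  // migration requirement above always reflects current data.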
  return (
    <Dialog
      title="Delete Model"
      controlled={{
        isOpen: open,
        set: async (isOpen) => {
          setOpen(isOpen);
          if (isOpen) {
            setUsageCount(null);
            setUsageError(null);
            setError(null);
            setSelectedReplacement("");
            await fetchUsage();
          }
        },
      }}
      styling={{ maxWidth: "600px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0 text-destructive hover:bg-destructive/10"
        >
          Delete
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          {requiresMigration
            ? "This action cannot be undone. All workflows using this model will be migrated to the replacement model you select."
            : "This action cannot be undone."}
        </div>

        <div className="space-y-4">
          <div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
            <div className="flex items-start gap-3">
              <div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
                ⚠️
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{model.display_name}</span>{" "}
                  <span className="text-muted-foreground">({model.slug})</span>
                </p>
                {usageLoading && (
                  <p className="mt-2 text-muted-foreground">
                    Loading usage count...
                  </p>
                )}
                {usageError && (
                  <p className="mt-2 text-destructive">{usageError}</p>
                )}
                {!usageLoading && !usageError && usageCount !== null && (
                  <p className="mt-2 font-semibold">
                    Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
                    currently use this model
                  </p>
                )}
                {requiresMigration && (
                  <p className="mt-2 text-muted-foreground">
                    All workflows currently using this model will be
                    automatically updated to use the replacement model you
                    choose below.
                  </p>
                )}
                {!usageLoading && usageCount === 0 && (
                  <p className="mt-2 text-muted-foreground">
                    No workflows are using this model. It can be safely deleted.
                  </p>
                )}
              </div>
            </div>
          </div>

          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="model_id" value={model.id} />
            <input
              type="hidden"
              name="replacement_model_slug"
              value={selectedReplacement}
            />

            {requiresMigration && (
              <label className="text-sm font-medium">
                <span className="mb-2 block">
                  Select Replacement Model{" "}
                  <span className="text-destructive">*</span>
                </span>
                <select
                  required
                  value={selectedReplacement}
                  onChange={(e) => setSelectedReplacement(e.target.value)}
                  className="w-full rounded border border-input bg-background p-2 text-sm"
                >
                  <option value="">-- Choose a replacement model --</option>
                  {replacementOptions.map((m) => (
                    <option key={m.id} value={m.slug}>
                      {m.display_name} ({m.slug})
                    </option>
                  ))}
                </select>
                {replacementOptions.length === 0 && (
                  <p className="mt-2 text-xs text-destructive">
                    No replacement models available. You must have at least one
                    other enabled model before deleting this one.
                  </p>
                )}
              </label>
            )}

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                type="button"
                onClick={() => {
                  setOpen(false);
                  setSelectedReplacement("");
                  setError(null);
                }}
                disabled={isDeleting}
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={!canDelete}
                className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
              >
                {isDeleting
                  ? "Deleting..."
                  : requiresMigration
                    ? "Delete and Migrate"
                    : "Delete"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,129 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { deleteLlmProviderAction } from "../actions";

export function DeleteProviderModal({ provider }: { provider: LlmProvider }) {
  const [open, setOpen] = useState(false);
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

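  // Providers that still own models cannot be deleted; the submit button below
  // stays disabled until the model list is empty.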
  const modelCount = provider.models?.length ?? 0;
  const hasModels = modelCount > 0;

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmProviderAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(
        err instanceof Error ? err.message : "Failed to delete provider",
      );
    } finally {
      setIsDeleting(false);
    }
  }

  return (
    <Dialog
      title="Delete Provider"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "480px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0 text-destructive hover:bg-destructive/10"
        >
          Delete
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="space-y-4">
          <div
            className={`rounded-lg border p-4 ${
              hasModels
                ? "border-destructive/30 bg-destructive/10"
                : "border-amber-500/30 bg-amber-500/10 dark:border-amber-400/30 dark:bg-amber-400/10"
            }`}
          >
            <div className="flex items-start gap-3">
              <div
                className={`flex-shrink-0 ${
                  hasModels
                    ? "text-destructive"
                    : "text-amber-600 dark:text-amber-400"
                }`}
              >
                {hasModels ? "🚫" : "⚠️"}
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{provider.display_name}</span>{" "}
                  <span className="text-muted-foreground">
                    ({provider.name})
                  </span>
                </p>
                {hasModels ? (
                  <p className="mt-2 text-destructive">
                    This provider has {modelCount} model(s). You must delete all
                    models before you can delete this provider.
                  </p>
                ) : (
                  <p className="mt-2 text-muted-foreground">
                    This provider has no models and can be safely deleted.
                  </p>
                )}
              </div>
            </div>
          </div>

          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="provider_id" value={provider.id} />

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                onClick={() => {
                  setOpen(false);
                  setError(null);
                }}
                disabled={isDeleting}
                type="button"
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={isDeleting || hasModels}
                className="bg-destructive text-destructive-foreground hover:bg-destructive/90 disabled:opacity-50"
              >
                {isDeleting ? "Deleting..." : "Delete Provider"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,288 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { toggleLlmModelAction, fetchLlmModelUsage } from "../actions";

export function DisableModelModal({
model,
availableModels,
}: {
model: LlmModel;
availableModels: LlmModel[];
}) {
const [open, setOpen] = useState(false);
const [isDisabling, setIsDisabling] = useState(false);
const [error, setError] = useState<string | null>(null);
const [usageCount, setUsageCount] = useState<number | null>(null);
const [selectedMigration, setSelectedMigration] = useState<string>("");
const [wantsMigration, setWantsMigration] = useState(false);
const [migrationReason, setMigrationReason] = useState("");
const [customCreditCost, setCustomCreditCost] = useState<string>("");

// Filter out the current model and disabled models from replacement options
const migrationOptions = availableModels.filter(
(m) => m.id !== model.id && m.is_enabled,
);

async function fetchUsage() {
try {
const usage = await fetchLlmModelUsage(model.id);
setUsageCount(usage.node_count);
} catch {
setUsageCount(null);
}
}

async function handleDisable(formData: FormData) {
setIsDisabling(true);
setError(null);
try {
await toggleLlmModelAction(formData);
setOpen(false);
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to disable model");
} finally {
setIsDisabling(false);
}
}

function resetState() {
setError(null);
setSelectedMigration("");
setWantsMigration(false);
setMigrationReason("");
setCustomCreditCost("");
}

const hasUsage = usageCount !== null && usageCount > 0;

return (
<Dialog
title="Disable Model"
controlled={{
isOpen: open,
set: async (isOpen) => {
setOpen(isOpen);
if (isOpen) {
setUsageCount(null);
resetState();
await fetchUsage();
}
},
}}
styling={{ maxWidth: "600px" }}
>
<Dialog.Trigger>
<Button
type="button"
variant="outline"
size="small"
className="min-w-0"
>
Disable
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Disabling a model will hide it from users when creating new workflows.
</div>

<div className="space-y-4">
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
<div className="flex items-start gap-3">
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
⚠️
</div>
<div className="text-sm text-foreground">
<p className="font-semibold">You are about to disable:</p>
<p className="mt-1">
<span className="font-medium">{model.display_name}</span>{" "}
<span className="text-muted-foreground">({model.slug})</span>
</p>
{usageCount === null ? (
<p className="mt-2 text-muted-foreground">
Loading usage data...
</p>
) : usageCount > 0 ? (
<p className="mt-2 font-semibold">
Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
currently use this model
</p>
) : (
<p className="mt-2 text-muted-foreground">
No workflows are currently using this model.
</p>
)}
</div>
</div>
</div>

{hasUsage && (
<div className="space-y-4 rounded-lg border border-border bg-muted/50 p-4">
<label className="flex items-start gap-3">
<input
type="checkbox"
checked={wantsMigration}
onChange={(e) => {
setWantsMigration(e.target.checked);
if (!e.target.checked) {
setSelectedMigration("");
}
}}
className="mt-1"
/>
<div className="text-sm">
<span className="font-medium">
Migrate existing workflows to another model
</span>
<p className="mt-1 text-muted-foreground">
Creates a revertible migration record. If unchecked,
existing workflows will use automatic fallback to an enabled
model from the same provider.
</p>
</div>
</label>

{wantsMigration && (
<div className="space-y-4 border-t border-border pt-4">
<label className="block text-sm font-medium">
<span className="mb-2 block">
Replacement Model{" "}
<span className="text-destructive">*</span>
</span>
<select
required
value={selectedMigration}
onChange={(e) => setSelectedMigration(e.target.value)}
className="w-full rounded border border-input bg-background p-2 text-sm"
>
<option value="">-- Choose a replacement model --</option>
{migrationOptions.map((m) => (
<option key={m.id} value={m.slug}>
{m.display_name} ({m.slug})
</option>
))}
</select>
{migrationOptions.length === 0 && (
<p className="mt-2 text-xs text-destructive">
No other enabled models available for migration.
</p>
)}
</label>

<label className="block text-sm font-medium">
<span className="mb-2 block">
Migration Reason{" "}
<span className="font-normal text-muted-foreground">
(optional)
</span>
</span>
<input
type="text"
value={migrationReason}
onChange={(e) => setMigrationReason(e.target.value)}
placeholder="e.g., Provider outage, Cost reduction"
className="w-full rounded border border-input bg-background p-2 text-sm"
/>
<p className="mt-1 text-xs text-muted-foreground">
Helps track why the migration was made
</p>
</label>

<label className="block text-sm font-medium">
<span className="mb-2 block">
Custom Credit Cost{" "}
<span className="font-normal text-muted-foreground">
(optional)
</span>
</span>
<input
type="number"
min="0"
value={customCreditCost}
onChange={(e) => setCustomCreditCost(e.target.value)}
placeholder="Leave blank to use target model's cost"
className="w-full rounded border border-input bg-background p-2 text-sm"
/>
<p className="mt-1 text-xs text-muted-foreground">
Override pricing for migrated workflows. When set, billing
will use this cost instead of the target model's
cost.
</p>
</label>
</div>
)}
</div>
)}

<form action={handleDisable} className="space-y-4">
<input type="hidden" name="model_id" value={model.id} />
<input type="hidden" name="is_enabled" value="false" />
{wantsMigration && selectedMigration && (
<>
<input
type="hidden"
name="migrate_to_slug"
value={selectedMigration}
/>
{migrationReason && (
<input
type="hidden"
name="migration_reason"
value={migrationReason}
/>
)}
{customCreditCost && (
<input
type="hidden"
name="custom_credit_cost"
value={customCreditCost}
/>
)}
</>
)}

{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}

<Dialog.Footer>
<Button
variant="ghost"
size="small"
onClick={() => {
setOpen(false);
resetState();
}}
disabled={isDisabling}
>
Cancel
</Button>
<Button
type="submit"
variant="primary"
size="small"
disabled={
isDisabling ||
(wantsMigration && !selectedMigration) ||
usageCount === null
}
>
{isDisabling
? "Disabling..."
: wantsMigration && selectedMigration
? "Disable & Migrate"
: "Disable Model"}
</Button>
</Dialog.Footer>
</form>
</div>
</Dialog.Content>
</Dialog>
);
}
@@ -1,223 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { updateLlmModelAction } from "../actions";

export function EditModelModal({
model,
providers,
creators,
}: {
model: LlmModel;
providers: LlmProvider[];
creators: LlmModelCreator[];
}) {
const router = useRouter();
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const cost = model.costs?.[0];
const provider = providers.find((p) => p.id === model.provider_id);

async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await updateLlmModelAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to update model");
} finally {
setIsSubmitting(false);
}
}

return (
<Dialog
title="Edit Model"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="outline" size="small" className="min-w-0">
Edit
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Update model metadata and pricing information.
</div>
{error && (
<div className="mb-4 rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<form action={handleSubmit} className="space-y-4">
<input type="hidden" name="model_id" value={model.id} />

<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Display Name
<input
required
name="display_name"
defaultValue={model.display_name}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
/>
</label>
<label className="text-sm font-medium">
Provider
<select
required
name="provider_id"
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
defaultValue={model.provider_id}
>
{providers.map((p) => (
<option key={p.id} value={p.id}>
{p.display_name} ({p.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Who hosts/serves the model
</span>
</label>
</div>

<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Creator
<select
name="creator_id"
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
defaultValue={model.creator_id ?? ""}
>
<option value="">No creator selected</option>
{creators.map((c) => (
<option key={c.id} value={c.id}>
{c.display_name} ({c.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Who made/trained the model (e.g., OpenAI, Meta)
</span>
</label>
</div>

<label className="text-sm font-medium">
Description
<textarea
name="description"
rows={2}
defaultValue={model.description ?? ""}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
placeholder="Optional description..."
/>
</label>

<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Context Window
<input
required
type="number"
name="context_window"
defaultValue={model.context_window}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={1}
/>
</label>
<label className="text-sm font-medium">
Max Output Tokens
<input
type="number"
name="max_output_tokens"
defaultValue={model.max_output_tokens ?? undefined}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={1}
/>
</label>
</div>

<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Credit Cost
<input
required
type="number"
name="credit_cost"
defaultValue={cost?.credit_cost ?? 0}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={0}
/>
<span className="text-xs text-muted-foreground">
Credits charged per run
</span>
</label>
<label className="text-sm font-medium">
Credential Provider
<select
required
name="credential_provider"
defaultValue={cost?.credential_provider ?? provider?.name ?? ""}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
>
<option value="" disabled>
Select provider
</option>
{providers.map((p) => (
<option key={p.id} value={p.name}>
{p.display_name} ({p.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Must match a key in PROVIDER_CREDENTIALS
</span>
</label>
</div>
{/* Hidden defaults for credential_type and unit */}
<input
type="hidden"
name="credential_type"
value={
cost?.credential_type ??
provider?.default_credential_type ??
"api_key"
}
/>
<input type="hidden" name="unit" value={cost?.unit ?? "RUN"} />

<Dialog.Footer>
<Button
type="button"
variant="ghost"
size="small"
onClick={() => setOpen(false)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Updating..." : "Update Model"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}
@@ -1,263 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { updateLlmProviderAction } from "../actions";
import { useRouter } from "next/navigation";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";

export function EditProviderModal({ provider }: { provider: LlmProvider }) {
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();

async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await updateLlmProviderAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to update provider",
);
} finally {
setIsSubmitting(false);
}
}

return (
<Dialog
title="Edit Provider"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="outline" size="small">
Edit
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Update provider configuration and capabilities.
</div>

<form action={handleSubmit} className="space-y-6">
<input type="hidden" name="provider_id" value={provider.id} />

{/* Basic Information */}
<div className="space-y-4">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Basic Information
</h3>
<p className="text-xs text-muted-foreground">
Core provider details
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="name"
className="text-sm font-medium text-foreground"
>
Provider Slug <span className="text-destructive">*</span>
</label>
<input
id="name"
required
name="name"
defaultValue={provider.name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="e.g. openai"
/>
</div>
<div className="space-y-2">
<label
htmlFor="display_name"
className="text-sm font-medium text-foreground"
>
Display Name <span className="text-destructive">*</span>
</label>
<input
id="display_name"
required
name="display_name"
defaultValue={provider.display_name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="OpenAI"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="description"
className="text-sm font-medium text-foreground"
>
Description
</label>
<textarea
id="description"
name="description"
rows={3}
defaultValue={provider.description ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional description..."
/>
</div>
</div>

{/* Default Credentials */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Default Credentials
</h3>
<p className="text-xs text-muted-foreground">
Credential provider name that matches the key in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
PROVIDER_CREDENTIALS
</code>
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="default_credential_provider"
className="text-sm font-medium text-foreground"
>
Credential Provider
</label>
<input
id="default_credential_provider"
name="default_credential_provider"
defaultValue={provider.default_credential_provider ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="openai"
/>
</div>
<div className="space-y-2">
<label
htmlFor="default_credential_id"
className="text-sm font-medium text-foreground"
>
Credential ID
</label>
<input
id="default_credential_id"
name="default_credential_id"
defaultValue={provider.default_credential_id ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional credential ID"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="default_credential_type"
className="text-sm font-medium text-foreground"
>
Credential Type
</label>
<input
id="default_credential_type"
name="default_credential_type"
defaultValue={provider.default_credential_type ?? "api_key"}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="api_key"
/>
</div>
</div>

{/* Capabilities */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Capabilities
</h3>
<p className="text-xs text-muted-foreground">
Provider feature flags
</p>
</div>
<div className="grid gap-3 sm:grid-cols-2">
{[
{
name: "supports_tools",
label: "Supports tools",
checked: provider.supports_tools,
},
{
name: "supports_json_output",
label: "Supports JSON output",
checked: provider.supports_json_output,
},
{
name: "supports_reasoning",
label: "Supports reasoning",
checked: provider.supports_reasoning,
},
{
name: "supports_parallel_tool",
label: "Supports parallel tool calls",
checked: provider.supports_parallel_tool,
},
].map(({ name, label, checked }) => (
<div
key={name}
className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
>
<input type="hidden" name={name} value="off" />
<input
id={name}
type="checkbox"
name={name}
defaultChecked={checked}
className="h-4 w-4 rounded border-input"
/>
<label
htmlFor={name}
className="text-sm font-medium text-foreground"
>
{label}
</label>
</div>
))}
</div>
</div>

{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}

<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Saving..." : "Save Changes"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}
@@ -1,131 +0,0 @@
"use client";

import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { ErrorBoundary } from "@/components/molecules/ErrorBoundary/ErrorBoundary";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { AddProviderModal } from "./AddProviderModal";
import { AddModelModal } from "./AddModelModal";
import { AddCreatorModal } from "./AddCreatorModal";
import { ProviderList } from "./ProviderList";
import { ModelsTable } from "./ModelsTable";
import { MigrationsTable } from "./MigrationsTable";
import { CreatorsTable } from "./CreatorsTable";
import { RecommendedModelSelector } from "./RecommendedModelSelector";

interface Props {
providers: LlmProvider[];
models: LlmModel[];
migrations: LlmModelMigration[];
creators: LlmModelCreator[];
}

function AdminErrorFallback() {
return (
<div className="mx-auto max-w-xl p-6">
<ErrorCard
responseError={{
message:
"An error occurred while loading the LLM Registry. Please refresh the page.",
}}
context="llm-registry"
onRetry={() => window.location.reload()}
/>
</div>
);
}

export function LlmRegistryDashboard({
providers,
models,
migrations,
creators,
}: Props) {
return (
<ErrorBoundary fallback={<AdminErrorFallback />} context="llm-registry">
<div className="mx-auto p-6">
<div className="flex flex-col gap-6">
{/* Header */}
<div>
<h1 className="text-3xl font-bold">LLM Registry</h1>
<p className="text-muted-foreground">
Manage providers, creators, models, and credit pricing
</p>
</div>

{/* Active Migrations Section - Only show if there are migrations */}
{migrations.length > 0 && (
<div className="rounded-lg border border-primary/30 bg-primary/5 p-6 shadow-sm">
<div className="mb-4">
<h2 className="text-xl font-semibold">Active Migrations</h2>
<p className="mt-1 text-sm text-muted-foreground">
These migrations can be reverted to restore workflows to their
original model
</p>
</div>
<MigrationsTable migrations={migrations} />
</div>
)}

{/* Providers & Creators Section - Side by Side */}
<div className="grid gap-6 lg:grid-cols-2">
{/* Providers */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Providers</h2>
<p className="mt-1 text-sm text-muted-foreground">
Who hosts/serves the models
</p>
</div>
<AddProviderModal />
</div>
<ProviderList providers={providers} />
</div>

{/* Creators */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Creators</h2>
<p className="mt-1 text-sm text-muted-foreground">
Who made/trained the models
</p>
</div>
<AddCreatorModal />
</div>
<CreatorsTable creators={creators} />
</div>
</div>

{/* Models Section */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Models</h2>
<p className="mt-1 text-sm text-muted-foreground">
Toggle availability, adjust context windows, and update credit
pricing
</p>
</div>
<AddModelModal providers={providers} creators={creators} />
</div>

{/* Recommended Model Selector */}
<div className="mb-6">
<RecommendedModelSelector models={models} />
</div>

<ModelsTable
models={models}
providers={providers}
creators={creators}
/>
</div>
</div>
</div>
</ErrorBoundary>
);
}
@@ -1,133 +0,0 @@
"use client";

import { useState } from "react";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import { Button } from "@/components/atoms/Button/Button";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import { revertLlmMigrationAction } from "../actions";

export function MigrationsTable({
migrations,
}: {
migrations: LlmModelMigration[];
}) {
if (!migrations.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No active migrations. Migrations are created when you disable a model
with the "Migrate existing workflows" option.
</div>
);
}

return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Migration</TableHead>
<TableHead>Reason</TableHead>
<TableHead>Nodes Affected</TableHead>
<TableHead>Custom Cost</TableHead>
<TableHead>Created</TableHead>
<TableHead className="text-right">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{migrations.map((migration) => (
<MigrationRow key={migration.id} migration={migration} />
))}
</TableBody>
</Table>
</div>
);
}

function MigrationRow({ migration }: { migration: LlmModelMigration }) {
const [isReverting, setIsReverting] = useState(false);
const [error, setError] = useState<string | null>(null);

async function handleRevert(formData: FormData) {
setIsReverting(true);
setError(null);
try {
await revertLlmMigrationAction(formData);
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to revert migration",
);
} finally {
setIsReverting(false);
}
}

const createdDate = new Date(migration.created_at);

return (
<>
<TableRow>
<TableCell>
<div className="text-sm">
<span className="font-medium">{migration.source_model_slug}</span>
<span className="mx-2 text-muted-foreground">→</span>
<span className="font-medium">{migration.target_model_slug}</span>
</div>
</TableCell>
<TableCell>
<div className="text-sm text-muted-foreground">
{migration.reason || "—"}
</div>
</TableCell>
<TableCell>
<div className="text-sm">{migration.node_count}</div>
</TableCell>
<TableCell>
<div className="text-sm">
{migration.custom_credit_cost !== null &&
migration.custom_credit_cost !== undefined
? `${migration.custom_credit_cost} credits`
: "—"}
</div>
</TableCell>
<TableCell>
<div className="text-sm text-muted-foreground">
{createdDate.toLocaleDateString()}{" "}
{createdDate.toLocaleTimeString([], {
hour: "2-digit",
minute: "2-digit",
})}
</div>
</TableCell>
<TableCell className="text-right">
<form action={handleRevert} className="inline">
<input type="hidden" name="migration_id" value={migration.id} />
<Button
type="submit"
variant="outline"
size="small"
disabled={isReverting}
>
{isReverting ? "Reverting..." : "Revert"}
</Button>
</form>
</TableCell>
</TableRow>
{error && (
<TableRow>
<TableCell colSpan={6}>
<div className="rounded border border-destructive/30 bg-destructive/10 p-2 text-sm text-destructive">
{error}
</div>
</TableCell>
</TableRow>
)}
</>
);
}
@@ -1,262 +0,0 @@
"use client";

import { useState, useEffect, useRef } from "react";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { toggleLlmModelAction } from "../actions";
import { DeleteModelModal } from "./DeleteModelModal";
import { DisableModelModal } from "./DisableModelModal";
import { EditModelModal } from "./EditModelModal";
import { Star, Spinner } from "@phosphor-icons/react";
import { getV2ListLlmModels } from "@/app/api/__generated__/endpoints/admin/admin";

const PAGE_SIZE = 50;

export function ModelsTable({
models: initialModels,
providers,
creators,
}: {
models: LlmModel[];
providers: LlmProvider[];
creators: LlmModelCreator[];
}) {
const [models, setModels] = useState<LlmModel[]>(initialModels);
const [currentPage, setCurrentPage] = useState(1);
const [hasMore, setHasMore] = useState(initialModels.length === PAGE_SIZE);
const [isLoading, setIsLoading] = useState(false);
const loadedPagesRef = useRef(1);

// Sync with parent when initialModels changes (e.g., after enable/disable)
// Re-fetch all loaded pages to preserve expanded state
useEffect(() => {
async function refetchAllPages() {
const pagesToLoad = loadedPagesRef.current;

if (pagesToLoad === 1) {
// Only first page loaded, just use initialModels
setModels(initialModels);
setHasMore(initialModels.length === PAGE_SIZE);
return;
}

// Re-fetch all pages we had loaded
const allModels: LlmModel[] = [...initialModels];
let lastPageHadFullResults = initialModels.length === PAGE_SIZE;

for (let page = 2; page <= pagesToLoad; page++) {
try {
const response = await getV2ListLlmModels({
page,
page_size: PAGE_SIZE,
});
if (response.status === 200) {
allModels.push(...response.data.models);
lastPageHadFullResults = response.data.models.length === PAGE_SIZE;
}
} catch (err) {
console.error(`Error refetching page ${page}:`, err);
break;
}
}

setModels(allModels);
setHasMore(lastPageHadFullResults);
}

refetchAllPages();
}, [initialModels]);

async function loadMore() {
if (isLoading) return;
setIsLoading(true);

try {
const nextPage = currentPage + 1;
const response = await getV2ListLlmModels({
page: nextPage,
page_size: PAGE_SIZE,
});

if (response.status === 200) {
setModels((prev) => [...prev, ...response.data.models]);
setCurrentPage(nextPage);
loadedPagesRef.current = nextPage;
setHasMore(response.data.models.length === PAGE_SIZE);
}
} catch (err) {
console.error("Error loading more models:", err);
} finally {
setIsLoading(false);
}
}
if (!models.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No models registered yet.
</div>
);
}

const providerLookup = new Map(
providers.map((provider) => [provider.id, provider]),
);

return (
<div>
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Model</TableHead>
<TableHead>Provider</TableHead>
<TableHead>Creator</TableHead>
<TableHead>Context Window</TableHead>
<TableHead>Max Output</TableHead>
<TableHead>Cost</TableHead>
<TableHead>Status</TableHead>
<TableHead>Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{models.map((model) => {
const cost = model.costs?.[0];
const provider = providerLookup.get(model.provider_id);
return (
<TableRow
key={model.id}
className={model.is_enabled ? "" : "opacity-60"}
>
<TableCell>
<div className="font-medium">{model.display_name}</div>
<div className="text-xs text-muted-foreground">
{model.slug}
</div>
</TableCell>
<TableCell>
{provider ? (
<>
<div>{provider.display_name}</div>
<div className="text-xs text-muted-foreground">
{provider.name}
</div>
</>
) : (
model.provider_id
)}
</TableCell>
<TableCell>
{model.creator ? (
<>
<div>{model.creator.display_name}</div>
<div className="text-xs text-muted-foreground">
{model.creator.name}
</div>
</>
) : (
<span className="text-muted-foreground">—</span>
)}
</TableCell>
<TableCell>{model.context_window.toLocaleString()}</TableCell>
<TableCell>
{model.max_output_tokens
? model.max_output_tokens.toLocaleString()
: "—"}
</TableCell>
<TableCell>
{cost ? (
<>
<div className="font-medium">
{cost.credit_cost} credits
</div>
<div className="text-xs text-muted-foreground">
{cost.credential_provider}
</div>
</>
) : (
"—"
)}
</TableCell>
<TableCell>
<div className="flex flex-col gap-1">
<span
className={`inline-flex rounded-full px-2.5 py-1 text-xs font-semibold ${
model.is_enabled
? "bg-primary/10 text-primary"
: "bg-muted text-muted-foreground"
}`}
>
{model.is_enabled ? "Enabled" : "Disabled"}
</span>
{model.is_recommended && (
<span className="inline-flex items-center gap-1 rounded-full bg-amber-500/10 px-2.5 py-1 text-xs font-semibold text-amber-600 dark:text-amber-400">
<Star size={12} weight="fill" />
Recommended
</span>
)}
</div>
</TableCell>
<TableCell>
<div className="flex items-center justify-end gap-2">
{model.is_enabled ? (
<DisableModelModal
model={model}
availableModels={models}
/>
) : (
<EnableModelButton modelId={model.id} />
)}
<EditModelModal
model={model}
providers={providers}
creators={creators}
/>
<DeleteModelModal model={model} availableModels={models} />
</div>
</TableCell>
</TableRow>
);
})}
</TableBody>
</Table>
</div>

{hasMore && (
<div className="mt-4 flex justify-center">
<Button onClick={loadMore} disabled={isLoading} variant="outline">
{isLoading ? (
<>
<Spinner className="mr-2 h-4 w-4 animate-spin" />
Loading...
</>
) : (
"Load More"
)}
</Button>
</div>
)}
</div>
);
}

function EnableModelButton({ modelId }: { modelId: string }) {
return (
<form action={toggleLlmModelAction} className="inline">
<input type="hidden" name="model_id" value={modelId} />
<input type="hidden" name="is_enabled" value="true" />
<Button type="submit" variant="outline" size="small" className="min-w-0">
Enable
</Button>
</form>
);
}
@@ -1,94 +0,0 @@
"use client";

import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { DeleteProviderModal } from "./DeleteProviderModal";
import { EditProviderModal } from "./EditProviderModal";

export function ProviderList({ providers }: { providers: LlmProvider[] }) {
if (!providers.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No providers configured yet.
</div>
);
}

return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Name</TableHead>
<TableHead>Display Name</TableHead>
<TableHead>Default Credential</TableHead>
<TableHead>Capabilities</TableHead>
<TableHead>Models</TableHead>
<TableHead className="w-[100px]">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{providers.map((provider) => (
<TableRow key={provider.id}>
<TableCell className="font-medium">{provider.name}</TableCell>
<TableCell>{provider.display_name}</TableCell>
<TableCell>
{provider.default_credential_provider
? `${provider.default_credential_provider} (${provider.default_credential_id ?? "id?"})`
: "—"}
</TableCell>
<TableCell className="text-sm text-muted-foreground">
<div className="flex flex-wrap gap-2">
{provider.supports_tools && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Tools
</span>
)}
{provider.supports_json_output && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
JSON
</span>
)}
{provider.supports_reasoning && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Reasoning
</span>
)}
{provider.supports_parallel_tool && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Parallel Tools
</span>
)}
</div>
</TableCell>
<TableCell className="text-sm">
<span
className={
(provider.models?.length ?? 0) > 0
? "text-foreground"
: "text-muted-foreground"
}
>
{provider.models?.length ?? 0}
</span>
</TableCell>
<TableCell>
<div className="flex gap-2">
<EditProviderModal provider={provider} />
<DeleteProviderModal provider={provider} />
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</div>
);
}
@@ -1,87 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { Button } from "@/components/atoms/Button/Button";
import { setRecommendedModelAction } from "../actions";
import { Star } from "@phosphor-icons/react";

export function RecommendedModelSelector({ models }: { models: LlmModel[] }) {
const router = useRouter();
const enabledModels = models.filter((m) => m.is_enabled);
const currentRecommended = models.find((m) => m.is_recommended);

const [selectedModelId, setSelectedModelId] = useState<string>(
currentRecommended?.id || "",
);
const [isSaving, setIsSaving] = useState(false);
const [error, setError] = useState<string | null>(null);

const hasChanges = selectedModelId !== (currentRecommended?.id || "");

async function handleSave() {
if (!selectedModelId) return;

setIsSaving(true);
setError(null);
try {
const formData = new FormData();
formData.set("model_id", selectedModelId);
await setRecommendedModelAction(formData);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to save");
} finally {
setIsSaving(false);
}
}

return (
<div className="rounded-lg border border-border bg-card p-4">
<div className="mb-3 flex items-center gap-2">
<Star size={20} weight="fill" className="text-amber-500" />
<h3 className="text-sm font-semibold">Recommended Model</h3>
</div>
<p className="mb-3 text-xs text-muted-foreground">
The recommended model is shown as the default suggestion in model
selection dropdowns throughout the platform.
</p>

<div className="flex items-center gap-3">
<select
value={selectedModelId}
onChange={(e) => setSelectedModelId(e.target.value)}
className="flex-1 rounded-md border border-input bg-background px-3 py-2 text-sm"
disabled={isSaving}
>
<option value="">-- Select a model --</option>
{enabledModels.map((model) => (
<option key={model.id} value={model.id}>
{model.display_name} ({model.slug})
</option>
))}
</select>

<Button
type="button"
variant="primary"
size="small"
onClick={handleSave}
disabled={!hasChanges || !selectedModelId || isSaving}
>
{isSaving ? "Saving..." : "Save"}
</Button>
</div>

{error && <p className="mt-2 text-xs text-destructive">{error}</p>}

{currentRecommended && !hasChanges && (
<p className="mt-2 text-xs text-muted-foreground">
Currently set to:{" "}
<span className="font-medium">{currentRecommended.display_name}</span>
</p>
)}
</div>
);
}
@@ -1,46 +0,0 @@
/**
* Server-side data fetching for LLM Registry page.
*/

import {
fetchLlmCreators,
fetchLlmMigrations,
fetchLlmModels,
fetchLlmProviders,
} from "./actions";

export async function getLlmRegistryPageData() {
// Fetch providers and models (required)
const [providersResponse, modelsResponse] = await Promise.all([
fetchLlmProviders(),
fetchLlmModels(),
]);

// Fetch migrations separately with fallback (table might not exist yet)
let migrations: Awaited<ReturnType<typeof fetchLlmMigrations>>["migrations"] =
[];
try {
const migrationsResponse = await fetchLlmMigrations(false);
migrations = migrationsResponse.migrations;
} catch {
// Migrations table might not exist yet - that's ok, just show empty list
console.warn("Could not fetch migrations - table may not exist yet");
}

// Fetch creators separately with fallback (table might not exist yet)
let creators: Awaited<ReturnType<typeof fetchLlmCreators>>["creators"] = [];
try {
const creatorsResponse = await fetchLlmCreators();
creators = creatorsResponse.creators;
} catch {
// Creators table might not exist yet - that's ok, just show empty list
console.warn("Could not fetch creators - table may not exist yet");
}

return {
providers: providersResponse.providers,
models: modelsResponse.models,
migrations,
creators,
};
}
@@ -1,14 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { getLlmRegistryPageData } from "./getLlmRegistryPage";
import { LlmRegistryDashboard } from "./components/LlmRegistryDashboard";

async function LlmRegistryPage() {
const data = await getLlmRegistryPageData();
return <LlmRegistryDashboard {...data} />;
}

export default async function AdminLlmRegistryPage() {
const withAdminAccess = await withRoleAccess(["admin"]);
const ProtectedLlmRegistryPage = await withAdminAccess(LlmRegistryPage);
return <ProtectedLlmRegistryPage />;
}
@@ -7,9 +7,8 @@ import { BlockCategoryResponse } from "@/app/api/__generated__/models/blockCateg
import { BlockResponse } from "@/app/api/__generated__/models/blockResponse";
import * as Sentry from "@sentry/nextjs";
import { getQueryClient } from "@/lib/react-query/queryClient";
-import { useState, useEffect } from "react";
+import { useState } from "react";
import { useToast } from "@/components/molecules/Toast/use-toast";
-import BackendApi from "@/lib/autogpt-server-api";

export const useAllBlockContent = () => {
  const { toast } = useToast();
@@ -94,32 +93,6 @@ export const useAllBlockContent = () => {
  const isErrorOnLoadingMore = (categoryName: string) =>
    errorLoadingCategories.has(categoryName);

-  // Listen for LLM registry refresh notifications
-  useEffect(() => {
-    const api = new BackendApi();
-    const queryClient = getQueryClient();
-
-    const handleNotification = (notification: any) => {
-      if (
-        notification?.type === "LLM_REGISTRY_REFRESH" ||
-        notification?.event === "registry_updated"
-      ) {
-        // Invalidate all block-related queries to force refresh
-        const categoriesQueryKey = getGetV2GetBuilderBlockCategoriesQueryKey();
-        queryClient.invalidateQueries({ queryKey: categoriesQueryKey });
-      }
-    };
-
-    const unsubscribe = api.onWebSocketMessage(
-      "notification",
-      handleNotification,
-    );
-
-    return () => {
-      unsubscribe();
-    };
-  }, []);
-
  return {
    data,
    isLoading,
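The deleted effect above wires a WebSocket notification to React Query cache invalidation. A minimal standalone sketch of that shape — not part of the diff; subscribeToRegistryRefresh and the onMessage parameter are invented for illustration, while the event names mirror the removed code:

import { QueryClient } from "@tanstack/react-query";

type RegistryNotification = { type?: string; event?: string };

function subscribeToRegistryRefresh(
  onMessage: (handler: (n: RegistryNotification) => void) => () => void,
  queryClient: QueryClient,
  queryKey: readonly unknown[],
): () => void {
  // Subscribe; the returned function deregisters the handler (cleanup).
  return onMessage((n) => {
    if (n?.type === "LLM_REGISTRY_REFRESH" || n?.event === "registry_updated") {
      // Force refetch of any cached queries under this key.
      queryClient.invalidateQueries({ queryKey });
    }
  });
}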
@@ -610,11 +610,8 @@ const NodeOneOfDiscriminatorField: FC<{

  return oneOfVariants
    .map((variant) => {
-      const discProperty = variant.properties?.[discriminatorProperty];
-      const variantDiscValue =
-        discProperty && "const" in discProperty
-          ? (discProperty.const as string)
-          : undefined; // NOTE: can discriminators only be strings?
+      const variantDiscValue = variant.properties?.[discriminatorProperty]
+        ?.const as string; // NOTE: can discriminators only be strings?

      return {
        value: variantDiscValue,
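The two versions differ in how they read a oneOf variant's discriminator: the removed version guards for a missing const before casting, while the restored version force-casts through optional chaining. A small sketch of the guarded lookup — not part of the diff; discriminatorOf is an invented name:

// discriminatorOf is an invented name; the shapes mirror the removed code.
type VariantSchema = { properties?: Record<string, { const?: unknown }> };

function discriminatorOf(
  variant: VariantSchema,
  discriminatorProperty: string,
): string | undefined {
  const prop = variant.properties?.[discriminatorProperty];
  // Guarded: a variant without a `const` yields undefined instead of a bogus cast.
  return prop && "const" in prop ? (prop.const as string) : undefined;
}

// discriminatorOf({ properties: { model: { const: "gpt-4o" } } }, "model") -> "gpt-4o"
// discriminatorOf({ properties: {} }, "model") -> undefined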
@@ -1127,47 +1124,9 @@ const NodeStringInput: FC<{
  displayName,
}) => {
  value ||= schema.default || "";
-
-  // Check if we have options with labels (e.g., LLM model picker)
-  const hasOptions = schema.options && schema.options.length > 0;
-  const hasEnum = schema.enum && schema.enum.length > 0;
-
-  // Helper to get display label for a value
-  const getDisplayLabel = (val: string) => {
-    if (hasOptions) {
-      const option = schema.options!.find((opt) => opt.value === val);
-      return option?.label || beautifyString(val);
-    }
-    return beautifyString(val);
-  };
-
  return (
    <div className={className}>
-      {hasOptions ? (
+      {schema.enum && schema.enum.length > 0 ? (
-        // Render options with proper labels (used by LLM model picker)
-        <Select
-          defaultValue={value}
-          onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
-        >
-          <SelectTrigger>
-            <SelectValue placeholder={schema.placeholder || displayName}>
-              {value ? getDisplayLabel(value) : undefined}
-            </SelectValue>
-          </SelectTrigger>
-          <SelectContent className="nodrag">
-            {schema.options!.map((option, index) => (
-              <SelectItem
-                key={index}
-                value={option.value}
-                title={option.description}
-              >
-                {option.label || beautifyString(option.value)}
-              </SelectItem>
-            ))}
-          </SelectContent>
-        </Select>
-      ) : hasEnum ? (
-        // Fallback to enum with beautified strings
        <Select
          defaultValue={value}
          onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
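The removed branch above rendered schema.options entries with human-readable labels, falling back to beautifyString for bare values. A small sketch of that label lookup — not part of the diff; the option values are made up:

// Option shape and label lookup mirroring the removed branch; values invented.
type Option = { value: string; label: string; description?: string };

const options: Option[] = [
  { value: "gpt-4o", label: "GPT-4o", description: "OpenAI flagship model" },
  { value: "claude-3-5-sonnet", label: "Claude 3.5 Sonnet" },
];

// Fall back to the raw value when no label is registered.
const labelFor = (val: string) =>
  options.find((opt) => opt.value === val)?.label ?? val;

// labelFor("gpt-4o") -> "GPT-4o"; labelFor("unknown-model") -> "unknown-model"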
@@ -1176,8 +1135,8 @@ const NodeStringInput: FC<{
          <SelectValue placeholder={schema.placeholder || displayName} />
        </SelectTrigger>
        <SelectContent className="nodrag">
-          {schema
-            .enum!.filter((option) => option)
+          {schema.enum
+            .filter((option) => option)
            .map((option, index) => (
              <SelectItem key={index} value={option}>
                {beautifyString(option)}
File diff suppressed because it is too large
@@ -1,123 +0,0 @@
-import * as React from "react";
-
-import { cn } from "@/lib/utils";
-
-const Table = React.forwardRef<
-  HTMLTableElement,
-  React.HTMLAttributes<HTMLTableElement>
->(({ className, ...props }, ref) => (
-  <div className="relative w-full overflow-auto">
-    <table
-      ref={ref}
-      className={cn("w-full caption-bottom text-sm", className)}
-      {...props}
-    />
-  </div>
-));
-Table.displayName = "Table";
-
-const TableHeader = React.forwardRef<
-  HTMLTableSectionElement,
-  React.HTMLAttributes<HTMLTableSectionElement>
->(({ className, ...props }, ref) => (
-  <thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
-));
-TableHeader.displayName = "TableHeader";
-
-const TableBody = React.forwardRef<
-  HTMLTableSectionElement,
-  React.HTMLAttributes<HTMLTableSectionElement>
->(({ className, ...props }, ref) => (
-  <tbody
-    ref={ref}
-    className={cn("[&_tr:last-child]:border-0", className)}
-    {...props}
-  />
-));
-TableBody.displayName = "TableBody";
-
-const TableFooter = React.forwardRef<
-  HTMLTableSectionElement,
-  React.HTMLAttributes<HTMLTableSectionElement>
->(({ className, ...props }, ref) => (
-  <tfoot
-    ref={ref}
-    className={cn(
-      "border-t bg-neutral-100/50 font-medium dark:bg-neutral-800/50 [&>tr]:last:border-b-0",
-      className,
-    )}
-    {...props}
-  />
-));
-TableFooter.displayName = "TableFooter";
-
-const TableRow = React.forwardRef<
-  HTMLTableRowElement,
-  React.HTMLAttributes<HTMLTableRowElement>
->(({ className, ...props }, ref) => (
-  <tr
-    ref={ref}
-    className={cn(
-      "border-b transition-colors data-[state=selected]:bg-neutral-100 hover:bg-neutral-100/50 dark:data-[state=selected]:bg-neutral-800 dark:hover:bg-neutral-800/50",
-      className,
-    )}
-    {...props}
-  />
-));
-TableRow.displayName = "TableRow";
-
-const TableHead = React.forwardRef<
-  HTMLTableCellElement,
-  React.ThHTMLAttributes<HTMLTableCellElement>
->(({ className, ...props }, ref) => (
-  <th
-    ref={ref}
-    className={cn(
-      "h-10 px-2 text-left align-middle font-medium text-neutral-500 dark:text-neutral-400 [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
-      className,
-    )}
-    {...props}
-  />
-));
-TableHead.displayName = "TableHead";
-
-const TableCell = React.forwardRef<
-  HTMLTableCellElement,
-  React.TdHTMLAttributes<HTMLTableCellElement>
->(({ className, ...props }, ref) => (
-  <td
-    ref={ref}
-    className={cn(
-      "p-2 align-middle [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
-      className,
-    )}
-    {...props}
-  />
-));
-TableCell.displayName = "TableCell";
-
-const TableCaption = React.forwardRef<
-  HTMLTableCaptionElement,
-  React.HTMLAttributes<HTMLTableCaptionElement>
->(({ className, ...props }, ref) => (
-  <caption
-    ref={ref}
-    className={cn(
-      "mt-4 text-sm text-neutral-500 dark:text-neutral-400",
-      className,
-    )}
-    {...props}
-  />
-));
-TableCaption.displayName = "TableCaption";
-
-export {
-  Table,
-  TableHeader,
-  TableBody,
-  TableFooter,
-  TableHead,
-  TableRow,
-  TableCell,
-  TableCaption,
-};
@@ -1,19 +1,8 @@
import { RJSFSchema } from "@rjsf/utils";

-/**
- * Options type for fields with label/value pairs (e.g., LLM model picker)
- */
-type SchemaOption = {
-  label: string;
-  value: string;
-  group?: string;
-  description?: string;
-};
-
/**
 * Pre-processes the input schema to ensure all properties have a type defined.
 * If a property doesn't have a type, it assigns a union of all supported JSON Schema types.
- * Also converts custom 'options' array to RJSF's enum/enumNames format.
 */

export function preprocessInputSchema(schema: RJSFSchema): RJSFSchema {
@@ -31,20 +20,6 @@ export function preprocessInputSchema(schema: RJSFSchema): RJSFSchema {
    if (property && typeof property === "object") {
      const processedProperty = { ...property };

-      // Convert custom 'options' array to RJSF's enum/enumNames format
-      // This enables proper label display for dropdowns like the LLM model picker
-      if (
-        (processedProperty as any).options &&
-        Array.isArray((processedProperty as any).options) &&
-        (processedProperty as any).options.length > 0
-      ) {
-        const options = (processedProperty as any).options as SchemaOption[];
-        processedProperty.enum = options.map((opt) => opt.value);
-        (processedProperty as any).enumNames = options.map(
-          (opt) => opt.label,
-        );
-      }
-
      // Only add type if no type is defined AND no anyOf/oneOf/allOf is present
      if (
        !processedProperty.type &&
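The removed block above translated a custom options array into RJSF's enum/enumNames pair so dropdown values get display labels. A before/after sketch of that transformation — not part of the diff; the model names are made up:

// Before/after of the removed options -> enum/enumNames conversion; values invented.
const before = {
  type: "string",
  options: [
    { label: "GPT-4o", value: "gpt-4o" },
    { label: "Claude 3.5 Sonnet", value: "claude-3-5-sonnet" },
  ],
};

const after = {
  ...before,
  enum: before.options.map((opt) => opt.value), // ["gpt-4o", "claude-3-5-sonnet"]
  enumNames: before.options.map((opt) => opt.label), // ["GPT-4o", "Claude 3.5 Sonnet"]
};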
@@ -77,45 +77,17 @@ export default function useAgentGraph(

  // Load available blocks & flows (stable - only loads once)
  useEffect(() => {
-    const loadBlocks = () => {
-      api
-        .getBlocks()
-        .then((blocks) => {
-          setAllBlocks(blocks);
-        })
-        .catch();
-    };
-
-    const loadFlows = () => {
-      api
-        .listGraphs()
-        .then((flows) => setAvailableFlows(flows))
-        .catch();
-    };
-
-    // Initial load
-    loadBlocks();
-    loadFlows();
-
-    // Listen for LLM registry refresh notifications to reload blocks
-    const deregisterRegistryRefresh = api.onWebSocketMessage(
-      "notification",
-      (notification) => {
-        if (
-          notification?.type === "LLM_REGISTRY_REFRESH" ||
-          notification?.event === "registry_updated"
-        ) {
-          console.log(
-            "Received LLM registry refresh notification, reloading blocks...",
-          );
-          loadBlocks();
-        }
-      },
-    );
-
-    return () => {
-      deregisterRegistryRefresh();
-    };
+    api
+      .getBlocks()
+      .then((blocks) => {
+        setAllBlocks(blocks);
+      })
+      .catch();
+
+    api
+      .listGraphs()
+      .then((flows) => setAvailableFlows(flows))
+      .catch();
  }, [api]);

  // Subscribe to execution events
@@ -186,7 +186,6 @@ export type BlockIOStringSubSchema = BlockIOSubSchemaMeta & {
  default?: string;
  format?: string;
  maxLength?: number;
-  options?: { value: string; label: string; description?: string }[];
};

export type BlockIONumberSubSchema = BlockIOSubSchemaMeta & {
@@ -285,20 +285,17 @@ export function fillObjectDefaultsFromSchema(
      // Apply simple default values
      obj[key] ??= propertySchema.default;
    } else if (
-      "type" in propertySchema &&
      propertySchema.type === "object" &&
      "properties" in propertySchema
    ) {
      // Recursively fill defaults for nested objects
      obj[key] = fillObjectDefaultsFromSchema(obj[key] ?? {}, propertySchema);
-    } else if ("type" in propertySchema && propertySchema.type === "array") {
+    } else if (propertySchema.type === "array") {
      obj[key] ??= [];
      // If the array items are objects, fill their defaults as well
      if (
        Array.isArray(obj[key]) &&
-        propertySchema.items &&
-        "type" in propertySchema.items &&
-        propertySchema.items.type === "object" &&
+        propertySchema.items?.type === "object" &&
        "properties" in propertySchema.items
      ) {
        for (const item of obj[key]) {