Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)

Compare commits: figure-out ... gitbook (4 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
|  | e7e118b5a8 |  |
|  | 92a7a7e6d6 |  |
|  | e16995347f |  |
|  | 234d3acb4c |  |
@@ -1,37 +0,0 @@
-{
-  "worktreeCopyPatterns": [
-    ".env*",
-    ".vscode/**",
-    ".auth/**",
-    ".claude/**",
-    "autogpt_platform/.env*",
-    "autogpt_platform/backend/.env*",
-    "autogpt_platform/frontend/.env*",
-    "autogpt_platform/frontend/.auth/**",
-    "autogpt_platform/db/docker/.env*"
-  ],
-  "worktreeCopyIgnores": [
-    "**/node_modules/**",
-    "**/dist/**",
-    "**/.git/**",
-    "**/Thumbs.db",
-    "**/.DS_Store",
-    "**/.next/**",
-    "**/__pycache__/**",
-    "**/.ruff_cache/**",
-    "**/.pytest_cache/**",
-    "**/*.pyc",
-    "**/playwright-report/**",
-    "**/logs/**",
-    "**/site/**"
-  ],
-  "worktreePathTemplate": "$BASE_PATH.worktree",
-  "postCreateCmd": [
-    "cd autogpt_platform/autogpt_libs && poetry install",
-    "cd autogpt_platform/backend && poetry install && poetry run prisma generate",
-    "cd autogpt_platform/frontend && pnpm install",
-    "cd docs && pip install -r requirements.txt"
-  ],
-  "terminalCommand": "code .",
-  "deleteBranchWithWorktree": false
-}
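For context, the deleted file configures a git-worktree helper: glob patterns to copy into a fresh worktree, ignore patterns to skip, and post-create commands to run. A stdlib-only sketch of how a helper might apply the copy patterns; the consuming tool is not part of this compare, so treat this as an assumption about its behavior:

```python
# Hypothetical consumer of the worktree config above (not the actual tool).
import fnmatch
import json
import shutil
from pathlib import Path

def copy_into_worktree(repo: Path, worktree: Path, config_path: Path) -> None:
    cfg = json.loads(config_path.read_text())
    ignores = cfg["worktreeCopyIgnores"]
    for pattern in cfg["worktreeCopyPatterns"]:
        # Treat a trailing "/**" as "everything under this directory".
        glob_pat = pattern[:-3] + "/**/*" if pattern.endswith("/**") else pattern
        for src in repo.glob(glob_pat):
            rel = src.relative_to(repo)
            if any(fnmatch.fnmatch(str(rel), ig) for ig in ignores):
                continue  # skip node_modules, .git, caches, etc.
            if src.is_file():
                dest = worktree / rel
                dest.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(src, dest)
```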
@@ -16,7 +16,6 @@
 !autogpt_platform/backend/poetry.lock
 !autogpt_platform/backend/README.md
 !autogpt_platform/backend/.env
-!autogpt_platform/backend/gen_prisma_types_stub.py
 
 # Platform - Market
 !autogpt_platform/market/market/
.github/workflows/claude-dependabot.yml (vendored; 2 changed lines)
@@ -74,7 +74,7 @@ jobs:
 
       - name: Generate Prisma Client
         working-directory: autogpt_platform/backend
-        run: poetry run prisma generate && poetry run gen-prisma-stub
+        run: poetry run prisma generate
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
       - name: Set up Node.js
.github/workflows/claude.yml (vendored; 2 changed lines)
@@ -90,7 +90,7 @@ jobs:
 
       - name: Generate Prisma Client
         working-directory: autogpt_platform/backend
-        run: poetry run prisma generate && poetry run gen-prisma-stub
+        run: poetry run prisma generate
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
       - name: Set up Node.js
.github/workflows/copilot-setup-steps.yml (vendored; 12 changed lines)
@@ -72,7 +72,7 @@ jobs:
 
       - name: Generate Prisma Client
         working-directory: autogpt_platform/backend
-        run: poetry run prisma generate && poetry run gen-prisma-stub
+        run: poetry run prisma generate
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
       - name: Set up Node.js
@@ -108,16 +108,6 @@ jobs:
       #   run: pnpm playwright install --with-deps chromium
 
       # Docker setup for development environment
-      - name: Free up disk space
-        run: |
-          # Remove large unused tools to free disk space for Docker builds
-          sudo rm -rf /usr/share/dotnet
-          sudo rm -rf /usr/local/lib/android
-          sudo rm -rf /opt/ghc
-          sudo rm -rf /opt/hostedtoolcache/CodeQL
-          sudo docker system prune -af
-          df -h
-
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
.github/workflows/docs-block-sync.yml (vendored; 74 lines removed)
@@ -1,74 +0,0 @@
-name: Block Documentation Sync Check
-
-on:
-  push:
-    branches: [master, dev]
-    paths:
-      - "autogpt_platform/backend/backend/blocks/**"
-      - "docs/content/platform/blocks/**"
-      - "autogpt_platform/backend/scripts/generate_block_docs.py"
-      - ".github/workflows/docs-block-sync.yml"
-  pull_request:
-    branches: [master, dev]
-    paths:
-      - "autogpt_platform/backend/backend/blocks/**"
-      - "docs/content/platform/blocks/**"
-      - "autogpt_platform/backend/scripts/generate_block_docs.py"
-      - ".github/workflows/docs-block-sync.yml"
-
-jobs:
-  check-docs-sync:
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Check block documentation is in sync
-        working-directory: autogpt_platform/backend
-        run: |
-          echo "Checking if block documentation is in sync with code..."
-          poetry run python scripts/generate_block_docs.py --check
-
-      - name: Show diff if out of sync
-        if: failure()
-        run: |
-          echo "::error::Block documentation is out of sync with code!"
-          echo ""
-          echo "To fix this, run the following command locally:"
-          echo "  cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
-          echo ""
-          echo "Then commit the updated documentation files."
-          echo ""
-          echo "Changes detected:"
-          git diff docs/content/platform/blocks/ || true
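The `--check` flag used by this workflow follows a common generate-and-compare pattern: rebuild the docs and fail if the result differs from what is committed. A stdlib-only sketch of that pattern; `render_docs` is a hypothetical stand-in, since `generate_block_docs.py` itself is not shown in this compare:

```python
# Sketch of a --check style docs generator (illustrative, not the real script).
import sys
from pathlib import Path

DOCS_DIR = Path("docs/content/platform/blocks")

def render_docs() -> dict[Path, str]:
    # Hypothetical stand-in for the real generator, which introspects blocks.
    return {DOCS_DIR / "example.md": "# Example Block\n"}

def main(check: bool) -> int:
    stale = []
    for path, content in render_docs().items():
        if check:
            # Compare what would be generated against what is on disk.
            if not path.exists() or path.read_text() != content:
                stale.append(path)
        else:
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(content)
    if stale:
        print(f"{len(stale)} doc file(s) out of sync", file=sys.stderr)
        return 1  # a non-zero exit fails the CI step above
    return 0

if __name__ == "__main__":
    sys.exit(main(check="--check" in sys.argv))
```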
.github/workflows/docs-claude-review.yml (vendored; 94 lines removed)
@@ -1,94 +0,0 @@
-name: Claude Block Docs Review
-
-on:
-  pull_request:
-    types: [opened, synchronize]
-    paths:
-      - "docs/content/platform/blocks/**"
-      - "autogpt_platform/backend/backend/blocks/**"
-
-jobs:
-  claude-review:
-    # Only run for PRs from members/collaborators
-    if: |
-      github.event.pull_request.author_association == 'OWNER' ||
-      github.event.pull_request.author_association == 'MEMBER' ||
-      github.event.pull_request.author_association == 'COLLABORATOR'
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-    permissions:
-      contents: read
-      pull-requests: write
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Run Claude Code Review
-        uses: anthropics/claude-code-action@v1
-        with:
-          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
-          claude_args: |
-            --allowedTools "Read,Glob,Grep,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*)"
-          prompt: |
-            You are reviewing a PR that modifies block documentation or block code for AutoGPT.
-
-            ## Your Task
-            Review the changes in this PR and provide constructive feedback. Focus on:
-
-            1. **Documentation Accuracy**: For any block code changes, verify that:
-               - Input/output tables in docs match the actual block schemas
-               - Description text accurately reflects what the block does
-               - Any new blocks have corresponding documentation
-
-            2. **Manual Content Quality**: Check manual sections (marked with `<!-- MANUAL: -->` markers):
-               - "How it works" sections should have clear technical explanations
-               - "Possible use case" sections should have practical, real-world examples
-               - Content should be helpful for users trying to understand the blocks
-
-            3. **Template Compliance**: Ensure docs follow the standard template:
-               - What it is (brief intro)
-               - What it does (description)
-               - How it works (technical explanation)
-               - Inputs table
-               - Outputs table
-               - Possible use case
-
-            4. **Cross-references**: Check that links and anchors are correct
-
-            ## Review Process
-            1. First, get the PR diff to see what changed: `gh pr diff ${{ github.event.pull_request.number }}`
-            2. Read any modified block files to understand the implementation
-            3. Read corresponding documentation files to verify accuracy
-            4. Provide your feedback as a PR comment
-
-            Be constructive and specific. If everything looks good, say so!
-            If there are issues, explain what's wrong and suggest how to fix it.
.github/workflows/docs-enhance.yml (vendored; 193 lines removed)
@@ -1,193 +0,0 @@
-name: Enhance Block Documentation
-
-on:
-  workflow_dispatch:
-    inputs:
-      block_pattern:
-        description: 'Block file pattern to enhance (e.g., "google/*.md" or "*" for all blocks)'
-        required: true
-        default: '*'
-        type: string
-      dry_run:
-        description: 'Dry run mode - show proposed changes without committing'
-        type: boolean
-        default: true
-      max_blocks:
-        description: 'Maximum number of blocks to process (0 for unlimited)'
-        type: number
-        default: 10
-
-jobs:
-  enhance-docs:
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    permissions:
-      contents: write
-      pull-requests: write
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Run Claude Enhancement
-        uses: anthropics/claude-code-action@v1
-        with:
-          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
-          claude_args: |
-            --allowedTools "Read,Edit,Glob,Grep,Write,Bash(git:*),Bash(gh:*),Bash(find:*),Bash(ls:*)"
-          prompt: |
-            You are enhancing block documentation for AutoGPT. Your task is to improve the MANUAL sections
-            of block documentation files by reading the actual block implementations and writing helpful content.
-
-            ## Configuration
-            - Block pattern: ${{ inputs.block_pattern }}
-            - Dry run: ${{ inputs.dry_run }}
-            - Max blocks to process: ${{ inputs.max_blocks }}
-
-            ## Your Task
-
-            1. **Find Documentation Files**
-               Find block documentation files matching the pattern in `docs/content/platform/blocks/`
-               Pattern: ${{ inputs.block_pattern }}
-
-               Use: `find docs/content/platform/blocks -name "*.md" -type f`
-
-            2. **For Each Documentation File** (up to ${{ inputs.max_blocks }} files):
-
-               a. Read the documentation file
-
-               b. Identify which block(s) it documents (look for the block class name)
-
-               c. Find and read the corresponding block implementation in `autogpt_platform/backend/backend/blocks/`
-
-               d. Improve the MANUAL sections:
-
-               **"How it works" section** (within `<!-- MANUAL: how_it_works -->` markers):
-               - Explain the technical flow of the block
-               - Describe what APIs or services it connects to
-               - Note any important configuration or prerequisites
-               - Keep it concise but informative (2-4 paragraphs)
-
-               **"Possible use case" section** (within `<!-- MANUAL: use_case -->` markers):
-               - Provide 2-3 practical, real-world examples
-               - Make them specific and actionable
-               - Show how this block could be used in an automation workflow
-
-            3. **Important Rules**
-               - ONLY modify content within `<!-- MANUAL: -->` and `<!-- END MANUAL -->` markers
-               - Do NOT modify auto-generated sections (inputs/outputs tables, descriptions)
-               - Keep content accurate based on the actual block implementation
-               - Write for users who may not be technical experts
-
-            4. **Output**
-               ${{ inputs.dry_run == true && 'DRY RUN MODE: Show proposed changes for each file but do NOT actually edit the files. Describe what you would change.' || 'LIVE MODE: Actually edit the files to improve the documentation.' }}
-
-            ## Example Improvements
-
-            **Before (How it works):**
-            ```
-            _Add technical explanation here._
-            ```
-
-            **After (How it works):**
-            ```
-            This block connects to the GitHub API to retrieve issue information. When executed,
-            it authenticates using your GitHub credentials and fetches issue details including
-            title, body, labels, and assignees.
-
-            The block requires a valid GitHub OAuth connection with repository access permissions.
-            It supports both public and private repositories you have access to.
-            ```
-
-            **Before (Possible use case):**
-            ```
-            _Add practical use case examples here._
-            ```
-
-            **After (Possible use case):**
-            ```
-            **Customer Support Automation**: Monitor a GitHub repository for new issues with
-            the "bug" label, then automatically create a ticket in your support system and
-            notify the on-call engineer via Slack.
-
-            **Release Notes Generation**: When a new release is published, gather all closed
-            issues since the last release and generate a summary for your changelog.
-            ```
-
-            Begin by finding and listing the documentation files to process.
-
-      - name: Create PR with enhanced documentation
-        if: ${{ inputs.dry_run == false }}
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          # Check if there are changes
-          if git diff --quiet docs/content/platform/blocks/; then
-            echo "No changes to commit"
-            exit 0
-          fi
-
-          # Configure git
-          git config user.name "github-actions[bot]"
-          git config user.email "github-actions[bot]@users.noreply.github.com"
-
-          # Create branch and commit
-          BRANCH_NAME="docs/enhance-blocks-$(date +%Y%m%d-%H%M%S)"
-          git checkout -b "$BRANCH_NAME"
-          git add docs/content/platform/blocks/
-          git commit -m "docs: enhance block documentation with LLM-generated content
-
-          Pattern: ${{ inputs.block_pattern }}
-          Max blocks: ${{ inputs.max_blocks }}
-
-          🤖 Generated with [Claude Code](https://claude.com/claude-code)
-
-          Co-Authored-By: Claude <noreply@anthropic.com>"
-
-          # Push and create PR
-          git push -u origin "$BRANCH_NAME"
-          gh pr create \
-            --title "docs: LLM-enhanced block documentation" \
-            --body "## Summary
-          This PR contains LLM-enhanced documentation for block files matching pattern: \`${{ inputs.block_pattern }}\`
-
-          The following manual sections were improved:
-          - **How it works**: Technical explanations based on block implementations
-          - **Possible use case**: Practical, real-world examples
-
-          ## Review Checklist
-          - [ ] Content is accurate based on block implementations
-          - [ ] Examples are practical and helpful
-          - [ ] No auto-generated sections were modified
-
-          ---
-          🤖 Generated with [Claude Code](https://claude.com/claude-code)" \
-            --base dev
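The prompt above asks the model to select files by `block_pattern`, stop after `max_blocks`, and touch only the `<!-- MANUAL: ... -->` regions. A stdlib-only sketch of that selection-and-splice logic, assuming the marker layout the workflow describes:

```python
# Illustrative selection and marker-splicing logic for the workflow above.
import fnmatch
import re
from pathlib import Path

MANUAL = re.compile(
    r"(<!-- MANUAL: (\w+) -->)(.*?)(<!-- END MANUAL -->)", re.DOTALL
)

def select_docs(pattern: str, max_blocks: int) -> list[Path]:
    files = sorted(Path("docs/content/platform/blocks").rglob("*.md"))
    picked = [f for f in files if fnmatch.fnmatch(str(f), f"*{pattern}")]
    return picked[:max_blocks] if max_blocks else picked  # 0 means unlimited

def replace_manual(text: str, section: str, new_body: str) -> str:
    """Rewrite one MANUAL section, leaving auto-generated content untouched."""
    def splice(m: re.Match) -> str:
        if m.group(2) != section:
            return m.group(0)
        return f"{m.group(1)}\n{new_body}\n{m.group(4)}"
    return MANUAL.sub(splice, text)
```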
.github/workflows/platform-backend-ci.yml (vendored; 2 changed lines)
@@ -134,7 +134,7 @@ jobs:
         run: poetry install
 
       - name: Generate Prisma Client
-        run: poetry run prisma generate && poetry run gen-prisma-stub
+        run: poetry run prisma generate
 
       - id: supabase
         name: Start Supabase
@@ -12,7 +12,6 @@ reset-db:
 	rm -rf db/docker/volumes/db/data
 	cd backend && poetry run prisma migrate deploy
 	cd backend && poetry run prisma generate
-	cd backend && poetry run gen-prisma-stub
 
 # View logs for core services
 logs-core:
@@ -34,7 +33,6 @@ init-env:
 migrate:
 	cd backend && poetry run prisma migrate deploy
 	cd backend && poetry run prisma generate
-	cd backend && poetry run gen-prisma-stub
 
 run-backend:
 	cd backend && poetry run app
@@ -48,8 +48,7 @@ RUN poetry install --no-ansi --no-root
 # Generate Prisma client
 COPY autogpt_platform/backend/schema.prisma ./
 COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
-COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
-RUN poetry run prisma generate && poetry run gen-prisma-stub
+RUN poetry run prisma generate
 
 FROM debian:13-slim AS server_dependencies
 
@@ -489,7 +489,7 @@ async def update_agent_version_in_library(
     agent_graph_version: int,
 ) -> library_model.LibraryAgent:
     """
-    Updates the agent version in the library for any agent owned by the user.
+    Updates the agent version in the library if useGraphIsActiveVersion is True.
 
     Args:
         user_id: Owner of the LibraryAgent.
@@ -498,31 +498,20 @@ async def update_agent_version_in_library(
 
     Raises:
         DatabaseError: If there's an error with the update.
-        NotFoundError: If no library agent is found for this user and agent.
     """
     logger.debug(
         f"Updating agent version in library for user #{user_id}, "
        f"agent #{agent_graph_id} v{agent_graph_version}"
     )
-    async with transaction() as tx:
-        library_agent = await prisma.models.LibraryAgent.prisma(tx).find_first_or_raise(
+    try:
+        library_agent = await prisma.models.LibraryAgent.prisma().find_first_or_raise(
             where={
                 "userId": user_id,
                 "agentGraphId": agent_graph_id,
                 "useGraphIsActiveVersion": True,
             },
         )
 
-        # Delete any conflicting LibraryAgent for the target version
-        await prisma.models.LibraryAgent.prisma(tx).delete_many(
-            where={
-                "userId": user_id,
-                "agentGraphId": agent_graph_id,
-                "agentGraphVersion": agent_graph_version,
-                "id": {"not": library_agent.id},
-            }
-        )
-
-        lib = await prisma.models.LibraryAgent.prisma(tx).update(
+        lib = await prisma.models.LibraryAgent.prisma().update(
             where={"id": library_agent.id},
             data={
                 "AgentGraph": {
@@ -536,13 +525,13 @@ async def update_agent_version_in_library(
             },
             include={"AgentGraph": True},
         )
-        if lib is None:
-            raise NotFoundError(f"Library agent {library_agent.id} not found")
-
-        return library_model.LibraryAgent.from_db(lib)
+        if lib is None:
+            raise NotFoundError(
+                f"Failed to update library agent for {agent_graph_id} v{agent_graph_version}"
+            )
+
+        return library_model.LibraryAgent.from_db(lib)
+    except prisma.errors.PrismaError as e:
+        logger.error(f"Database error updating agent version in library: {e}")
+        raise DatabaseError("Failed to update agent version in library") from e
 
 
 async def update_library_agent(
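Both sides of this change hinge on whether the related writes share a transaction. For reference, prisma-client-py exposes interactive transactions as an async context manager via `Prisma.tx()`; a minimal sketch of the pattern, with an illustrative schema rather than AutoGPT's (a generated client is required for this to run):

```python
# Minimal interactive-transaction sketch for prisma-client-py.
import asyncio
from prisma import Prisma

async def main() -> None:
    db = Prisma()
    await db.connect()
    # Both writes commit together or roll back together.
    async with db.tx() as tx:
        await tx.libraryagent.delete_many(
            where={"agentGraphId": "graph-1", "id": {"not": "agent-1"}}
        )
        await tx.libraryagent.update(
            where={"id": "agent-1"},
            data={"agentGraphVersion": 2},
        )
    await db.disconnect()

asyncio.run(main())
```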
@@ -836,7 +825,6 @@ async def add_store_agent_to_library(
             }
         },
         "isCreatedByUser": False,
-        "useGraphIsActiveVersion": False,
         "settings": SafeJson(
             _initialize_graph_settings(graph_model).model_dump()
         ),
@@ -48,7 +48,6 @@ class LibraryAgent(pydantic.BaseModel):
     id: str
     graph_id: str
     graph_version: int
-    owner_user_id: str  # ID of user who owns/created this agent graph
 
     image_url: str | None
 
@@ -164,7 +163,6 @@ class LibraryAgent(pydantic.BaseModel):
         id=agent.id,
         graph_id=agent.agentGraphId,
         graph_version=agent.agentGraphVersion,
-        owner_user_id=agent.userId,
         image_url=agent.imageUrl,
         creator_name=creator_name,
         creator_image_url=creator_image_url,
@@ -42,7 +42,6 @@ async def test_get_library_agents_success(
     id="test-agent-1",
     graph_id="test-agent-1",
     graph_version=1,
-    owner_user_id=test_user_id,
     name="Test Agent 1",
     description="Test Description 1",
     image_url=None,
@@ -65,7 +64,6 @@ async def test_get_library_agents_success(
     id="test-agent-2",
     graph_id="test-agent-2",
     graph_version=1,
-    owner_user_id=test_user_id,
     name="Test Agent 2",
     description="Test Description 2",
     image_url=None,
@@ -140,7 +138,6 @@ async def test_get_favorite_library_agents_success(
     id="test-agent-1",
     graph_id="test-agent-1",
     graph_version=1,
-    owner_user_id=test_user_id,
     name="Favorite Agent 1",
     description="Test Favorite Description 1",
     image_url=None,
@@ -208,7 +205,6 @@ def test_add_agent_to_library_success(
     id="test-library-agent-id",
     graph_id="test-agent-1",
     graph_version=1,
-    owner_user_id=test_user_id,
     name="Test Agent 1",
     description="Test Description 1",
     image_url=None,
@@ -614,7 +614,6 @@ async def get_store_submissions(
     submission_models = []
     for sub in submissions:
         submission_model = store_model.StoreSubmission(
-            listing_id=sub.listing_id,
             agent_id=sub.agent_id,
             agent_version=sub.agent_version,
             name=sub.name,
@@ -668,48 +667,35 @@ async def delete_store_submission(
     submission_id: str,
 ) -> bool:
     """
-    Delete a store submission version as the submitting user.
+    Delete a store listing submission as the submitting user.
 
     Args:
         user_id: ID of the authenticated user
-        submission_id: StoreListingVersion ID to delete
+        submission_id: ID of the submission to be deleted
 
     Returns:
-        bool: True if successfully deleted
+        bool: True if the submission was successfully deleted, False otherwise
     """
     logger.debug(f"Deleting store submission {submission_id} for user {user_id}")
 
     try:
-        # Find the submission version with ownership check
-        version = await prisma.models.StoreListingVersion.prisma().find_first(
-            where={"id": submission_id}, include={"StoreListing": True}
+        # Verify the submission belongs to this user
+        submission = await prisma.models.StoreListing.prisma().find_first(
+            where={"agentGraphId": submission_id, "owningUserId": user_id}
         )
 
-        if (
-            not version
-            or not version.StoreListing
-            or version.StoreListing.owningUserId != user_id
-        ):
-            raise store_exceptions.SubmissionNotFoundError("Submission not found")
-
-        # Prevent deletion of approved submissions
-        if version.submissionStatus == prisma.enums.SubmissionStatus.APPROVED:
-            raise store_exceptions.InvalidOperationError(
-                "Cannot delete approved submissions"
+        if not submission:
+            logger.warning(f"Submission not found for user {user_id}: {submission_id}")
+            raise store_exceptions.SubmissionNotFoundError(
+                f"Submission not found for this user. User ID: {user_id}, Submission ID: {submission_id}"
             )
 
-        # Delete the version
-        await prisma.models.StoreListingVersion.prisma().delete(
-            where={"id": version.id}
-        )
+        # Delete the submission
+        await prisma.models.StoreListing.prisma().delete(where={"id": submission.id})
 
-        # Clean up empty listing if this was the last version
-        remaining = await prisma.models.StoreListingVersion.prisma().count(
-            where={"storeListingId": version.storeListingId}
+        logger.debug(
+            f"Successfully deleted submission {submission_id} for user {user_id}"
         )
-        if remaining == 0:
-            await prisma.models.StoreListing.prisma().delete(
-                where={"id": version.storeListingId}
-            )
 
         return True
 
     except Exception as e:
@@ -773,15 +759,9 @@ async def create_store_submission(
         logger.warning(
             f"Agent not found for user {user_id}: {agent_id} v{agent_version}"
         )
-        # Provide more user-friendly error message when agent_id is empty
-        if not agent_id or agent_id.strip() == "":
-            raise store_exceptions.AgentNotFoundError(
-                "No agent selected. Please select an agent before submitting to the store."
-            )
-        else:
-            raise store_exceptions.AgentNotFoundError(
-                f"Agent not found for this user. User ID: {user_id}, Agent ID: {agent_id}, Version: {agent_version}"
-            )
+        raise store_exceptions.AgentNotFoundError(
+            f"Agent not found for this user. User ID: {user_id}, Agent ID: {agent_id}, Version: {agent_version}"
+        )
 
     # Check if listing already exists for this agent
     existing_listing = await prisma.models.StoreListing.prisma().find_first(
@@ -853,7 +833,6 @@ async def create_store_submission(
     logger.debug(f"Created store listing for agent {agent_id}")
     # Return submission details
     return store_model.StoreSubmission(
-        listing_id=listing.id,
         agent_id=agent_id,
         agent_version=agent_version,
         name=name,
@@ -965,56 +944,81 @@ async def edit_store_submission(
         # Currently we are not allowing user to update the agent associated with a submission
         # If we allow it in future, then we need a check here to verify the agent belongs to this user.
 
-        # Only allow editing of PENDING submissions
-        if current_version.submissionStatus != prisma.enums.SubmissionStatus.PENDING:
-            raise store_exceptions.InvalidOperationError(
-                f"Cannot edit a {current_version.submissionStatus.value.lower()} submission. Only pending submissions can be edited."
-            )
-
-        # For PENDING submissions, we can update the existing version
-        # Update the existing version
-        updated_version = await prisma.models.StoreListingVersion.prisma().update(
-            where={"id": store_listing_version_id},
-            data=prisma.types.StoreListingVersionUpdateInput(
-                name=name,
-                videoUrl=video_url,
-                agentOutputDemoUrl=agent_output_demo_url,
-                imageUrls=image_urls,
-                description=description,
-                categories=categories,
-                subHeading=sub_heading,
-                changesSummary=changes_summary,
-                recommendedScheduleCron=recommended_schedule_cron,
-                instructions=instructions,
-            ),
-        )
-
-        logger.debug(
-            f"Updated existing version {store_listing_version_id} for agent {current_version.agentGraphId}"
-        )
-
-        if not updated_version:
-            raise DatabaseError("Failed to update store listing version")
-        return store_model.StoreSubmission(
-            listing_id=current_version.StoreListing.id,
-            agent_id=current_version.agentGraphId,
-            agent_version=current_version.agentGraphVersion,
-            name=name,
-            sub_heading=sub_heading,
-            slug=current_version.StoreListing.slug,
-            description=description,
-            instructions=instructions,
-            image_urls=image_urls,
-            date_submitted=updated_version.submittedAt or updated_version.createdAt,
-            status=updated_version.submissionStatus,
-            runs=0,
-            rating=0.0,
-            store_listing_version_id=updated_version.id,
-            changes_summary=changes_summary,
-            video_url=video_url,
-            categories=categories,
-            version=updated_version.version,
-        )
+        # Check if we can edit this submission
+        if current_version.submissionStatus == prisma.enums.SubmissionStatus.REJECTED:
+            raise store_exceptions.InvalidOperationError(
+                "Cannot edit a rejected submission"
+            )
+
+        # For APPROVED submissions, we need to create a new version
+        if current_version.submissionStatus == prisma.enums.SubmissionStatus.APPROVED:
+            # Create a new version for the existing listing
+            return await create_store_version(
+                user_id=user_id,
+                agent_id=current_version.agentGraphId,
+                agent_version=current_version.agentGraphVersion,
+                store_listing_id=current_version.storeListingId,
+                name=name,
+                video_url=video_url,
+                agent_output_demo_url=agent_output_demo_url,
+                image_urls=image_urls,
+                description=description,
+                sub_heading=sub_heading,
+                categories=categories,
+                changes_summary=changes_summary,
+                recommended_schedule_cron=recommended_schedule_cron,
+                instructions=instructions,
+            )
+        elif current_version.submissionStatus == prisma.enums.SubmissionStatus.PENDING:
+            # Update the existing version
+            updated_version = await prisma.models.StoreListingVersion.prisma().update(
+                where={"id": store_listing_version_id},
+                data=prisma.types.StoreListingVersionUpdateInput(
+                    name=name,
+                    videoUrl=video_url,
+                    agentOutputDemoUrl=agent_output_demo_url,
+                    imageUrls=image_urls,
+                    description=description,
+                    categories=categories,
+                    subHeading=sub_heading,
+                    changesSummary=changes_summary,
+                    recommendedScheduleCron=recommended_schedule_cron,
+                    instructions=instructions,
+                ),
+            )
+
+            logger.debug(
+                f"Updated existing version {store_listing_version_id} for agent {current_version.agentGraphId}"
+            )
+
+            if not updated_version:
+                raise DatabaseError("Failed to update store listing version")
+            return store_model.StoreSubmission(
+                agent_id=current_version.agentGraphId,
+                agent_version=current_version.agentGraphVersion,
+                name=name,
+                sub_heading=sub_heading,
+                slug=current_version.StoreListing.slug,
+                description=description,
+                instructions=instructions,
+                image_urls=image_urls,
+                date_submitted=updated_version.submittedAt or updated_version.createdAt,
+                status=updated_version.submissionStatus,
+                runs=0,
+                rating=0.0,
+                store_listing_version_id=updated_version.id,
+                changes_summary=changes_summary,
+                video_url=video_url,
+                categories=categories,
+                version=updated_version.version,
+            )
+        else:
+            raise store_exceptions.InvalidOperationError(
+                f"Cannot edit submission with status: {current_version.submissionStatus}"
+            )
 
     except (
         store_exceptions.SubmissionNotFoundError,
@@ -1093,78 +1097,38 @@ async def create_store_version(
             f"Agent not found for this user. User ID: {user_id}, Agent ID: {agent_id}, Version: {agent_version}"
         )
 
-    # Check if there's already a PENDING submission for this agent (any version)
-    existing_pending_submission = (
-        await prisma.models.StoreListingVersion.prisma().find_first(
-            where=prisma.types.StoreListingVersionWhereInput(
-                storeListingId=store_listing_id,
-                agentGraphId=agent_id,
-                submissionStatus=prisma.enums.SubmissionStatus.PENDING,
-                isDeleted=False,
-            )
-        )
-    )
-
-    # Handle existing pending submission and create new one atomically
-    async with transaction() as tx:
-        # Get the latest version number first
-        latest_listing = await prisma.models.StoreListing.prisma(tx).find_first(
-            where=prisma.types.StoreListingWhereInput(
-                id=store_listing_id, owningUserId=user_id
-            ),
-            include={"Versions": {"order_by": {"version": "desc"}, "take": 1}},
-        )
-
-        if not latest_listing:
-            raise store_exceptions.ListingNotFoundError(
-                f"Store listing not found. User ID: {user_id}, Listing ID: {store_listing_id}"
-            )
-
-        latest_version = (
-            latest_listing.Versions[0] if latest_listing.Versions else None
-        )
-        next_version = (latest_version.version + 1) if latest_version else 1
-
-        # If there's an existing pending submission, delete it atomically before creating new one
-        if existing_pending_submission:
-            logger.info(
-                f"Found existing PENDING submission for agent {agent_id} (was v{existing_pending_submission.agentGraphVersion}, now v{agent_version}), replacing existing submission instead of creating duplicate"
-            )
-            await prisma.models.StoreListingVersion.prisma(tx).delete(
-                where={"id": existing_pending_submission.id}
-            )
-            logger.debug(
-                f"Deleted existing pending submission {existing_pending_submission.id}"
-            )
-
-        # Create a new version for the existing listing
-        new_version = await prisma.models.StoreListingVersion.prisma(tx).create(
-            data=prisma.types.StoreListingVersionCreateInput(
-                version=next_version,
-                agentGraphId=agent_id,
-                agentGraphVersion=agent_version,
-                name=name,
-                videoUrl=video_url,
-                agentOutputDemoUrl=agent_output_demo_url,
-                imageUrls=image_urls,
-                description=description,
-                instructions=instructions,
-                categories=categories,
-                subHeading=sub_heading,
-                submissionStatus=prisma.enums.SubmissionStatus.PENDING,
-                submittedAt=datetime.now(),
-                changesSummary=changes_summary,
-                recommendedScheduleCron=recommended_schedule_cron,
-                storeListingId=store_listing_id,
-            )
-        )
+    # Get the latest version number
+    latest_version = listing.Versions[0] if listing.Versions else None
+
+    next_version = (latest_version.version + 1) if latest_version else 1
+
+    # Create a new version for the existing listing
+    new_version = await prisma.models.StoreListingVersion.prisma().create(
+        data=prisma.types.StoreListingVersionCreateInput(
+            version=next_version,
+            agentGraphId=agent_id,
+            agentGraphVersion=agent_version,
+            name=name,
+            videoUrl=video_url,
+            agentOutputDemoUrl=agent_output_demo_url,
+            imageUrls=image_urls,
+            description=description,
+            instructions=instructions,
+            categories=categories,
+            subHeading=sub_heading,
+            submissionStatus=prisma.enums.SubmissionStatus.PENDING,
+            submittedAt=datetime.now(),
+            changesSummary=changes_summary,
+            recommendedScheduleCron=recommended_schedule_cron,
+            storeListingId=store_listing_id,
+        )
+    )
 
     logger.debug(
         f"Created new version for listing {store_listing_id} of agent {agent_id}"
     )
     # Return submission details
     return store_model.StoreSubmission(
-        listing_id=listing.id,
         agent_id=agent_id,
         agent_version=agent_version,
         name=name,
@@ -1744,12 +1708,15 @@ async def review_store_submission(
 
     # Convert to Pydantic model for consistency
     return store_model.StoreSubmission(
-        listing_id=(submission.StoreListing.id if submission.StoreListing else ""),
         agent_id=submission.agentGraphId,
         agent_version=submission.agentGraphVersion,
         name=submission.name,
         sub_heading=submission.subHeading,
-        slug=(submission.StoreListing.slug if submission.StoreListing else ""),
+        slug=(
+            submission.StoreListing.slug
+            if hasattr(submission, "storeListing") and submission.StoreListing
+            else ""
+        ),
         description=submission.description,
         instructions=submission.instructions,
         image_urls=submission.imageUrls or [],
@@ -1851,7 +1818,9 @@ async def get_admin_listings_with_versions(
     where = prisma.types.StoreListingWhereInput(**where_dict)
     include = prisma.types.StoreListingInclude(
         Versions=prisma.types.FindManyStoreListingVersionArgsFromStoreListing(
-            order_by={"version": "desc"}
+            order_by=prisma.types._StoreListingVersion_version_OrderByInput(
+                version="desc"
+            )
         ),
         OwningUser=True,
     )
@@ -1876,7 +1845,6 @@ async def get_admin_listings_with_versions(
     # If we have versions, turn them into StoreSubmission models
     for version in listing.Versions or []:
         version_model = store_model.StoreSubmission(
-            listing_id=listing.id,
             agent_id=version.agentGraphId,
             agent_version=version.agentGraphVersion,
             name=version.name,
@@ -110,7 +110,6 @@ class Profile(pydantic.BaseModel):
 
 
 class StoreSubmission(pydantic.BaseModel):
-    listing_id: str
     agent_id: str
     agent_version: int
     name: str
@@ -165,12 +164,8 @@ class StoreListingsWithVersionsResponse(pydantic.BaseModel):
 
 
 class StoreSubmissionRequest(pydantic.BaseModel):
-    agent_id: str = pydantic.Field(
-        ..., min_length=1, description="Agent ID cannot be empty"
-    )
-    agent_version: int = pydantic.Field(
-        ..., gt=0, description="Agent version must be greater than 0"
-    )
+    agent_id: str
+    agent_version: int
     slug: str
     name: str
     sub_heading: str
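The constrained fields on the minus side are plain pydantic validation; a self-contained sketch of the same `Field(min_length=..., gt=...)` pattern (the model name here is an illustrative stand-in):

```python
# Pydantic v2 sketch of the Field constraints shown above.
import pydantic

class SubmissionRequest(pydantic.BaseModel):
    agent_id: str = pydantic.Field(
        ..., min_length=1, description="Agent ID cannot be empty"
    )
    agent_version: int = pydantic.Field(
        ..., gt=0, description="Agent version must be greater than 0"
    )

SubmissionRequest(agent_id="agent-123", agent_version=1)  # passes
try:
    SubmissionRequest(agent_id="", agent_version=0)
except pydantic.ValidationError as exc:
    print(exc.error_count(), "validation errors")  # 2
```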
@@ -138,7 +138,6 @@ def test_creator_details():
 
 
 def test_store_submission():
     submission = store_model.StoreSubmission(
-        listing_id="listing123",
         agent_id="agent123",
         agent_version=1,
         sub_heading="Test subheading",
@@ -160,7 +159,6 @@ def test_store_submissions_response():
     response = store_model.StoreSubmissionsResponse(
         submissions=[
             store_model.StoreSubmission(
-                listing_id="listing123",
                 agent_id="agent123",
                 agent_version=1,
                 sub_heading="Test subheading",
@@ -521,7 +521,6 @@ def test_get_submissions_success(
     mocked_value = store_model.StoreSubmissionsResponse(
         submissions=[
             store_model.StoreSubmission(
-                listing_id="test-listing-id",
                 name="Test Agent",
                 description="Test agent description",
                 image_urls=["test.jpg"],
@@ -6,9 +6,6 @@ import hashlib
 import hmac
 import logging
 from enum import Enum
-from typing import cast
-
-from prisma.types import Serializable
 
 from backend.sdk import (
     BaseWebhooksManager,
@@ -87,9 +84,7 @@ class AirtableWebhookManager(BaseWebhooksManager):
         # update webhook config
         await update_webhook(
             webhook.id,
-            config=cast(
-                dict[str, Serializable], {"base_id": base_id, "cursor": response.cursor}
-            ),
+            config={"base_id": base_id, "cursor": response.cursor},
         )
 
         event_type = "notification"
@@ -81,7 +81,7 @@ class StoreValueBlock(Block):
     def __init__(self):
         super().__init__(
             id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
-            description="A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks.",
+            description="This block forwards an input value as output, allowing reuse without change.",
             categories={BlockCategory.BASIC},
             input_schema=StoreValueBlock.Input,
             output_schema=StoreValueBlock.Output,
@@ -111,7 +111,7 @@ class PrintToConsoleBlock(Block):
     def __init__(self):
         super().__init__(
             id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
-            description="A debugging block that outputs text to the console for monitoring and troubleshooting workflow execution.",
+            description="Print the given text to the console, this is used for a debugging purpose.",
             categories={BlockCategory.BASIC},
             input_schema=PrintToConsoleBlock.Input,
             output_schema=PrintToConsoleBlock.Output,
@@ -137,7 +137,7 @@ class NoteBlock(Block):
     def __init__(self):
         super().__init__(
             id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
-            description="A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes.",
+            description="This block is used to display a sticky note with the given text.",
             categories={BlockCategory.BASIC},
             input_schema=NoteBlock.Input,
             output_schema=NoteBlock.Output,
@@ -159,7 +159,7 @@ class FindInDictionaryBlock(Block):
     def __init__(self):
         super().__init__(
             id="0e50422c-6dee-4145-83d6-3a5a392f65de",
-            description="A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value.",
+            description="Lookup the given key in the input dictionary/object/list and return the value.",
             input_schema=FindInDictionaryBlock.Input,
             output_schema=FindInDictionaryBlock.Output,
             test_input=[
@@ -51,7 +51,7 @@ class GithubCommentBlock(Block):
     def __init__(self):
         super().__init__(
             id="a8db4d8d-db1c-4a25-a1b0-416a8c33602b",
-            description="A block that posts comments on GitHub issues or pull requests using the GitHub API.",
+            description="This block posts a comment on a specified GitHub issue or pull request.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubCommentBlock.Input,
             output_schema=GithubCommentBlock.Output,
@@ -151,7 +151,7 @@ class GithubUpdateCommentBlock(Block):
     def __init__(self):
         super().__init__(
             id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
-            description="A block that updates an existing comment on a GitHub issue or pull request.",
+            description="This block updates a comment on a specified GitHub issue or pull request.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubUpdateCommentBlock.Input,
             output_schema=GithubUpdateCommentBlock.Output,
@@ -249,7 +249,7 @@ class GithubListCommentsBlock(Block):
     def __init__(self):
         super().__init__(
             id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
-            description="A block that retrieves all comments from a GitHub issue or pull request, including comment metadata and content.",
+            description="This block lists all comments for a specified GitHub issue or pull request.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubListCommentsBlock.Input,
             output_schema=GithubListCommentsBlock.Output,
@@ -363,7 +363,7 @@ class GithubMakeIssueBlock(Block):
     def __init__(self):
         super().__init__(
             id="691dad47-f494-44c3-a1e8-05b7990f2dab",
-            description="A block that creates new issues on GitHub repositories with a title and body content.",
+            description="This block creates a new issue on a specified GitHub repository.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubMakeIssueBlock.Input,
             output_schema=GithubMakeIssueBlock.Output,
@@ -433,7 +433,7 @@ class GithubReadIssueBlock(Block):
     def __init__(self):
         super().__init__(
             id="6443c75d-032a-4772-9c08-230c707c8acc",
-            description="A block that retrieves information about a specific GitHub issue, including its title, body content, and creator.",
+            description="This block reads the body, title, and user of a specified GitHub issue.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubReadIssueBlock.Input,
             output_schema=GithubReadIssueBlock.Output,
@@ -510,7 +510,7 @@ class GithubListIssuesBlock(Block):
     def __init__(self):
         super().__init__(
             id="c215bfd7-0e57-4573-8f8c-f7d4963dcd74",
-            description="A block that retrieves a list of issues from a GitHub repository with their titles and URLs.",
+            description="This block lists all issues for a specified GitHub repository.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubListIssuesBlock.Input,
             output_schema=GithubListIssuesBlock.Output,
@@ -597,7 +597,7 @@ class GithubAddLabelBlock(Block):
     def __init__(self):
         super().__init__(
             id="98bd6b77-9506-43d5-b669-6b9733c4b1f1",
-            description="A block that adds a label to a GitHub issue or pull request for categorization and organization.",
+            description="This block adds a label to a specified GitHub issue or pull request.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubAddLabelBlock.Input,
             output_schema=GithubAddLabelBlock.Output,
@@ -657,7 +657,7 @@ class GithubRemoveLabelBlock(Block):
     def __init__(self):
         super().__init__(
            id="78f050c5-3e3a-48c0-9e5b-ef1ceca5589c",
-            description="A block that removes a label from a GitHub issue or pull request.",
+            description="This block removes a label from a specified GitHub issue or pull request.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubRemoveLabelBlock.Input,
             output_schema=GithubRemoveLabelBlock.Output,
@@ -720,7 +720,7 @@ class GithubAssignIssueBlock(Block):
     def __init__(self):
         super().__init__(
             id="90507c72-b0ff-413a-886a-23bbbd66f542",
-            description="A block that assigns a GitHub user to an issue for task ownership and tracking.",
+            description="This block assigns a user to a specified GitHub issue.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubAssignIssueBlock.Input,
             output_schema=GithubAssignIssueBlock.Output,
@@ -786,7 +786,7 @@ class GithubUnassignIssueBlock(Block):
     def __init__(self):
         super().__init__(
            id="d154002a-38f4-46c2-962d-2488f2b05ece",
-            description="A block that removes a user's assignment from a GitHub issue.",
+            description="This block unassigns a user from a specified GitHub issue.",
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubUnassignIssueBlock.Input,
             output_schema=GithubUnassignIssueBlock.Output,
@@ -353,7 +353,7 @@ class GmailReadBlock(GmailBase):
     def __init__(self):
         super().__init__(
             id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
-            description="A block that retrieves and reads emails from a Gmail account based on search criteria, returning detailed message information including subject, sender, body, and attachments.",
+            description="This block reads emails from Gmail.",
             categories={BlockCategory.COMMUNICATION},
             disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
             input_schema=GmailReadBlock.Input,
@@ -743,7 +743,7 @@ class GmailListLabelsBlock(GmailBase):
     def __init__(self):
         super().__init__(
             id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
-            description="A block that retrieves all labels (categories) from a Gmail account for organizing and categorizing emails.",
+            description="This block lists all labels in Gmail.",
             categories={BlockCategory.COMMUNICATION},
             input_schema=GmailListLabelsBlock.Input,
             output_schema=GmailListLabelsBlock.Output,
@@ -807,7 +807,7 @@ class GmailAddLabelBlock(GmailBase):
     def __init__(self):
         super().__init__(
             id="f884b2fb-04f4-4265-9658-14f433926ac9",
-            description="A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist.",
+            description="This block adds a label to a Gmail message.",
             categories={BlockCategory.COMMUNICATION},
             input_schema=GmailAddLabelBlock.Input,
             output_schema=GmailAddLabelBlock.Output,
@@ -893,7 +893,7 @@ class GmailRemoveLabelBlock(GmailBase):
     def __init__(self):
         super().__init__(
             id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
-            description="A block that removes a label from a specific email message in a Gmail account.",
+            description="This block removes a label from a Gmail message.",
             categories={BlockCategory.COMMUNICATION},
             input_schema=GmailRemoveLabelBlock.Input,
             output_schema=GmailRemoveLabelBlock.Output,
@@ -961,7 +961,7 @@ class GmailGetThreadBlock(GmailBase):
     def __init__(self):
         super().__init__(
            id="21a79166-9df7-4b5f-9f36-96f639d86112",
-            description="A block that retrieves an entire Gmail thread (email conversation) by ID, returning all messages with decoded bodies for reading complete conversations.",
+            description="Get a full Gmail thread by ID",
             categories={BlockCategory.COMMUNICATION},
             input_schema=GmailGetThreadBlock.Input,
             output_schema=GmailGetThreadBlock.Output,
@@ -282,7 +282,7 @@ class GoogleSheetsReadBlock(Block):
     def __init__(self):
         super().__init__(
             id="5724e902-3635-47e9-a108-aaa0263a4988",
-            description="A block that reads data from a Google Sheets spreadsheet using A1 notation range selection.",
+            description="This block reads data from a Google Sheets spreadsheet.",
             categories={BlockCategory.DATA},
             input_schema=GoogleSheetsReadBlock.Input,
             output_schema=GoogleSheetsReadBlock.Output,
@@ -409,7 +409,7 @@ class GoogleSheetsWriteBlock(Block):
     def __init__(self):
         super().__init__(
             id="d9291e87-301d-47a8-91fe-907fb55460e5",
-            description="A block that writes data to a Google Sheets spreadsheet at a specified A1 notation range.",
+            description="This block writes data to a Google Sheets spreadsheet.",
             categories={BlockCategory.DATA},
             input_schema=GoogleSheetsWriteBlock.Input,
             output_schema=GoogleSheetsWriteBlock.Output,
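Both Sheets blocks address cells in A1 notation (for example `Sheet1!A2:C10`). As an aside, the column arithmetic behind that notation is bijective base-26; a self-contained sketch, unrelated to AutoGPT's own code:

```python
# A1-notation column arithmetic (illustrative helper, stdlib only).
def col_to_index(col: str) -> int:
    """'A' -> 1, 'Z' -> 26, 'AA' -> 27 (base 26 with no zero digit)."""
    n = 0
    for ch in col.upper():
        n = n * 26 + (ord(ch) - ord("A") + 1)
    return n

def index_to_col(n: int) -> str:
    """1 -> 'A', 27 -> 'AA' (inverse of col_to_index)."""
    out = ""
    while n:
        n, rem = divmod(n - 1, 26)
        out = chr(ord("A") + rem) + out
    return out

assert col_to_index("AA") == 27
assert index_to_col(702) == "ZZ"
```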
@@ -1,184 +0,0 @@
|
||||
"""
|
||||
Shared helpers for Human-In-The-Loop (HITL) review functionality.
|
||||
Used by both the dedicated HumanInTheLoopBlock and blocks that require human review.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any, Optional
|
||||
|
||||
from prisma.enums import ReviewStatus
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.execution import ExecutionContext, ExecutionStatus
|
||||
from backend.data.human_review import ReviewResult
|
||||
from backend.executor.manager import async_update_node_execution_status
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReviewDecision(BaseModel):
|
||||
"""Result of a review decision."""
|
||||
|
||||
should_proceed: bool
|
||||
message: str
|
||||
review_result: ReviewResult
|
||||
|
||||
|
||||
class HITLReviewHelper:
|
||||
"""Helper class for Human-In-The-Loop review operations."""
|
||||
|
||||
@staticmethod
|
||||
async def get_or_create_human_review(**kwargs) -> Optional[ReviewResult]:
|
||||
"""Create or retrieve a human review from the database."""
|
||||
return await get_database_manager_async_client().get_or_create_human_review(
|
||||
**kwargs
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def update_node_execution_status(**kwargs) -> None:
|
||||
"""Update the execution status of a node."""
|
||||
await async_update_node_execution_status(
|
||||
db_client=get_database_manager_async_client(), **kwargs
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def update_review_processed_status(
|
||||
node_exec_id: str, processed: bool
|
||||
) -> None:
|
||||
"""Update the processed status of a review."""
|
||||
return await get_database_manager_async_client().update_review_processed_status(
|
||||
node_exec_id, processed
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def _handle_review_request(
|
||||
        input_data: Any,
        user_id: str,
        node_exec_id: str,
        graph_exec_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        block_name: str = "Block",
        editable: bool = False,
    ) -> Optional[ReviewResult]:
        """
        Handle a review request for a block that requires human review.

        Args:
            input_data: The input data to be reviewed
            user_id: ID of the user requesting the review
            node_exec_id: ID of the node execution
            graph_exec_id: ID of the graph execution
            graph_id: ID of the graph
            graph_version: Version of the graph
            execution_context: Current execution context
            block_name: Name of the block requesting review
            editable: Whether the reviewer can edit the data

        Returns:
            ReviewResult if review is complete, None if waiting for human input

        Raises:
            Exception: If review creation or status update fails
        """
        # Skip review if safe mode is disabled - return auto-approved result
        if not execution_context.safe_mode:
            logger.info(
                f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
            )
            return ReviewResult(
                data=input_data,
                status=ReviewStatus.APPROVED,
                message="Auto-approved (safe mode disabled)",
                processed=True,
                node_exec_id=node_exec_id,
            )

        result = await HITLReviewHelper.get_or_create_human_review(
            user_id=user_id,
            node_exec_id=node_exec_id,
            graph_exec_id=graph_exec_id,
            graph_id=graph_id,
            graph_version=graph_version,
            input_data=input_data,
            message=f"Review required for {block_name} execution",
            editable=editable,
        )

        if result is None:
            logger.info(
                f"Block {block_name} pausing execution for node {node_exec_id} - awaiting human review"
            )
            await HITLReviewHelper.update_node_execution_status(
                exec_id=node_exec_id,
                status=ExecutionStatus.REVIEW,
            )
            return None  # Signal that execution should pause

        # Mark review as processed if not already done
        if not result.processed:
            await HITLReviewHelper.update_review_processed_status(
                node_exec_id=node_exec_id, processed=True
            )

        return result

    @staticmethod
    async def handle_review_decision(
        input_data: Any,
        user_id: str,
        node_exec_id: str,
        graph_exec_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        block_name: str = "Block",
        editable: bool = False,
    ) -> Optional[ReviewDecision]:
        """
        Handle a review request and return the decision in a single call.

        Args:
            input_data: The input data to be reviewed
            user_id: ID of the user requesting the review
            node_exec_id: ID of the node execution
            graph_exec_id: ID of the graph execution
            graph_id: ID of the graph
            graph_version: Version of the graph
            execution_context: Current execution context
            block_name: Name of the block requesting review
            editable: Whether the reviewer can edit the data

        Returns:
            ReviewDecision if review is complete (approved/rejected),
            None if execution should pause (awaiting review)
        """
        review_result = await HITLReviewHelper._handle_review_request(
            input_data=input_data,
            user_id=user_id,
            node_exec_id=node_exec_id,
            graph_exec_id=graph_exec_id,
            graph_id=graph_id,
            graph_version=graph_version,
            execution_context=execution_context,
            block_name=block_name,
            editable=editable,
        )

        if review_result is None:
            # Still awaiting review - return None to pause execution
            return None

        # Review is complete, determine outcome
        should_proceed = review_result.status == ReviewStatus.APPROVED
        message = review_result.message or (
            "Execution approved by reviewer"
            if should_proceed
            else "Execution rejected by reviewer"
        )

        return ReviewDecision(
            should_proceed=should_proceed, message=message, review_result=review_result
        )
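
Taken together, `_handle_review_request` and `handle_review_decision` give a block a single awaitable that either returns a final decision or signals a pause. A minimal sketch of how a caller might consume this API, assuming a hypothetical block whose input model carries a `payload` field (that field name is not part of this diff):

    decision = await HITLReviewHelper.handle_review_decision(
        input_data=input_data.payload,  # hypothetical input field
        user_id=user_id,
        node_exec_id=node_exec_id,
        graph_exec_id=graph_exec_id,
        graph_id=graph_id,
        graph_version=graph_version,
        execution_context=execution_context,
        block_name="MyBlock",
        editable=False,
    )
    if decision is None:
        return  # node is parked in REVIEW status; execution resumes later
    if decision.should_proceed:
        ...  # use decision.review_result.data (possibly edited by the reviewer)
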
@@ -3,7 +3,6 @@ from typing import Any

from prisma.enums import ReviewStatus

from backend.blocks.helpers.review import HITLReviewHelper
from backend.data.block import (
    Block,
    BlockCategory,
@@ -12,9 +11,11 @@ from backend.data.block import (
    BlockSchemaOutput,
    BlockType,
)
from backend.data.execution import ExecutionContext
from backend.data.execution import ExecutionContext, ExecutionStatus
from backend.data.human_review import ReviewResult
from backend.data.model import SchemaField
from backend.executor.manager import async_update_node_execution_status
from backend.util.clients import get_database_manager_async_client

logger = logging.getLogger(__name__)

@@ -71,26 +72,32 @@ class HumanInTheLoopBlock(Block):
                ("approved_data", {"name": "John Doe", "age": 30}),
            ],
            test_mock={
                "handle_review_decision": lambda **kwargs: type(
                    "ReviewDecision",
                    (),
                    {
                        "should_proceed": True,
                        "message": "Test approval message",
                        "review_result": ReviewResult(
                            data={"name": "John Doe", "age": 30},
                            status=ReviewStatus.APPROVED,
                            message="",
                            processed=False,
                            node_exec_id="test-node-exec-id",
                        ),
                    },
                )(),
                "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
                    data={"name": "John Doe", "age": 30},
                    status=ReviewStatus.APPROVED,
                    message="",
                    processed=False,
                    node_exec_id="test-node-exec-id",
                ),
                "update_node_execution_status": lambda *_args, **_kwargs: None,
                "update_review_processed_status": lambda *_args, **_kwargs: None,
            },
        )

    async def handle_review_decision(self, **kwargs):
        return await HITLReviewHelper.handle_review_decision(**kwargs)

    async def get_or_create_human_review(self, **kwargs):
        return await get_database_manager_async_client().get_or_create_human_review(
            **kwargs
        )

    async def update_node_execution_status(self, **kwargs):
        return await async_update_node_execution_status(
            db_client=get_database_manager_async_client(), **kwargs
        )

    async def update_review_processed_status(self, node_exec_id: str, processed: bool):
        return await get_database_manager_async_client().update_review_processed_status(
            node_exec_id, processed
        )

    async def run(
        self,
@@ -102,7 +109,7 @@ class HumanInTheLoopBlock(Block):
        graph_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        **_kwargs,
        **kwargs,
    ) -> BlockOutput:
        if not execution_context.safe_mode:
            logger.info(
@@ -112,28 +119,48 @@ class HumanInTheLoopBlock(Block):
            yield "review_message", "Auto-approved (safe mode disabled)"
            return

        decision = await self.handle_review_decision(
            input_data=input_data.data,
            user_id=user_id,
            node_exec_id=node_exec_id,
            graph_exec_id=graph_exec_id,
            graph_id=graph_id,
            graph_version=graph_version,
            execution_context=execution_context,
            block_name=self.name,
            editable=input_data.editable,
        )
        try:
            result = await self.get_or_create_human_review(
                user_id=user_id,
                node_exec_id=node_exec_id,
                graph_exec_id=graph_exec_id,
                graph_id=graph_id,
                graph_version=graph_version,
                input_data=input_data.data,
                message=input_data.name,
                editable=input_data.editable,
            )
        except Exception as e:
            logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
            raise

        if decision is None:
            return
        if result is None:
            logger.info(
                f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
            )
            try:
                await self.update_node_execution_status(
                    exec_id=node_exec_id,
                    status=ExecutionStatus.REVIEW,
                )
                return
            except Exception as e:
                logger.error(
                    f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
                )
                raise

        status = decision.review_result.status
        if status == ReviewStatus.APPROVED:
            yield "approved_data", decision.review_result.data
        elif status == ReviewStatus.REJECTED:
            yield "rejected_data", decision.review_result.data
        else:
            raise RuntimeError(f"Unexpected review status: {status}")
        if not result.processed:
            await self.update_review_processed_status(
                node_exec_id=node_exec_id, processed=True
            )

        if decision.message:
            yield "review_message", decision.message
        if result.status == ReviewStatus.APPROVED:
            yield "approved_data", result.data
            if result.message:
                yield "review_message", result.message

        elif result.status == ReviewStatus.REJECTED:
            yield "rejected_data", result.data
            if result.message:
                yield "review_message", result.message

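The `test_mock` entries above work because the block routes every helper call through an overridable instance method. A rough illustration of the same substitution done by hand (the `SimpleNamespace` stand-in is illustrative, not the real `ReviewDecision` type):

    from types import SimpleNamespace

    async def fake_decision(**kwargs):
        # Canned approval that mimics the shape the run() method expects.
        return SimpleNamespace(
            should_proceed=True,
            message="auto-approved in test",
            review_result=SimpleNamespace(
                status=ReviewStatus.APPROVED, data=kwargs["input_data"]
            ),
        )

    block = HumanInTheLoopBlock()
    block.handle_review_decision = fake_decision  # bypasses HITLReviewHelper entirely
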
@@ -76,7 +76,7 @@ class AgentInputBlock(Block):
        super().__init__(
            **{
                "id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
                "description": "A block that accepts and processes user input values within a workflow, supporting various input types and validation.",
                "description": "Base block for user inputs.",
                "input_schema": AgentInputBlock.Input,
                "output_schema": AgentInputBlock.Output,
                "test_input": [
@@ -168,7 +168,7 @@ class AgentOutputBlock(Block):
    def __init__(self):
        super().__init__(
            id="363ae599-353e-4804-937e-b2ee3cef3da4",
            description="A block that records and formats workflow results for display to users, with optional Jinja2 template formatting support.",
            description="Stores the output of the graph for users to see.",
            input_schema=AgentOutputBlock.Input,
            output_schema=AgentOutputBlock.Output,
            test_input=[

@@ -854,7 +854,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
    def __init__(self):
        super().__init__(
            id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
            description="A block that generates structured JSON responses using a Large Language Model (LLM), with schema validation and format enforcement.",
            description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
            categories={BlockCategory.AI},
            input_schema=AIStructuredResponseGeneratorBlock.Input,
            output_schema=AIStructuredResponseGeneratorBlock.Output,
@@ -1265,7 +1265,7 @@ class AITextGeneratorBlock(AIBlockBase):
    def __init__(self):
        super().__init__(
            id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
            description="A block that produces text responses using a Large Language Model (LLM) based on customizable prompts and system instructions.",
            description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
            categories={BlockCategory.AI},
            input_schema=AITextGeneratorBlock.Input,
            output_schema=AITextGeneratorBlock.Output,
@@ -1361,7 +1361,7 @@ class AITextSummarizerBlock(AIBlockBase):
    def __init__(self):
        super().__init__(
            id="a0a69be1-4528-491c-a85a-a4ab6873e3f0",
            description="A block that summarizes long texts using a Large Language Model (LLM), with configurable focus topics and summary styles.",
            description="Utilize a Large Language Model (LLM) to summarize a long text.",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=AITextSummarizerBlock.Input,
            output_schema=AITextSummarizerBlock.Output,
@@ -1562,7 +1562,7 @@ class AIConversationBlock(AIBlockBase):
    def __init__(self):
        super().__init__(
            id="32a87eab-381e-4dd4-bdb8-4c47151be35a",
            description="A block that facilitates multi-turn conversations with a Large Language Model (LLM), maintaining context across message exchanges.",
            description="Advanced LLM call that takes a list of messages and sends them to the language model.",
            categories={BlockCategory.AI},
            input_schema=AIConversationBlock.Input,
            output_schema=AIConversationBlock.Output,
@@ -1682,7 +1682,7 @@ class AIListGeneratorBlock(AIBlockBase):
    def __init__(self):
        super().__init__(
            id="9c0b0450-d199-458b-a731-072189dd6593",
            description="A block that creates lists of items based on prompts using a Large Language Model (LLM), with optional source data for context.",
            description="Generate a list of values based on the given prompt using a Large Language Model (LLM).",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=AIListGeneratorBlock.Input,
            output_schema=AIListGeneratorBlock.Output,

File diff suppressed because it is too large
@@ -975,28 +975,10 @@ class SmartDecisionMakerBlock(Block):
        graph_version: int,
        execution_context: ExecutionContext,
        execution_processor: "ExecutionProcessor",
        nodes_to_skip: set[str] | None = None,
        **kwargs,
    ) -> BlockOutput:

        tool_functions = await self._create_tool_node_signatures(node_id)
        original_tool_count = len(tool_functions)

        # Filter out tools for nodes that should be skipped (e.g., missing optional credentials)
        if nodes_to_skip:
            tool_functions = [
                tf
                for tf in tool_functions
                if tf.get("function", {}).get("_sink_node_id") not in nodes_to_skip
            ]

            # Only raise error if we had tools but they were all filtered out
            if original_tool_count > 0 and not tool_functions:
                raise ValueError(
                    "No available tools to execute - all downstream nodes are unavailable "
                    "(possibly due to missing optional credentials)"
                )

        yield "tool_functions", json.dumps(tool_functions)

        conversation_history = input_data.conversation_history or []

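The filter keys off a `_sink_node_id` entry stored inside each tool-function dict. A small, self-contained illustration of the same semantics:

    tool_functions = [
        {"function": {"name": "send_email", "_sink_node_id": "node-a"}},
        {"function": {"name": "post_tweet", "_sink_node_id": "node-b"}},
    ]
    nodes_to_skip = {"node-b"}  # e.g., Twitter credentials not configured
    available = [
        tf for tf in tool_functions
        if tf.get("function", {}).get("_sink_node_id") not in nodes_to_skip
    ]
    assert [tf["function"]["name"] for tf in available] == ["send_email"]
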
@@ -50,8 +50,6 @@ from .model import (
logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from backend.data.execution import ExecutionContext

    from .graph import Link

app_config = Config()
@@ -474,7 +472,6 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
        self.block_type = block_type
        self.webhook_config = webhook_config
        self.execution_stats: NodeExecutionStats = NodeExecutionStats()
        self.requires_human_review: bool = False

        if self.webhook_config:
            if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -617,77 +614,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
                block_id=self.id,
            ) from ex

    async def is_block_exec_need_review(
        self,
        input_data: BlockInput,
        *,
        user_id: str,
        node_exec_id: str,
        graph_exec_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: "ExecutionContext",
        **kwargs,
    ) -> tuple[bool, BlockInput]:
        """
        Check if this block execution needs human review and handle the review process.

        Returns:
            Tuple of (should_pause, input_data_to_use)
            - should_pause: True if execution should be paused for review
            - input_data_to_use: The input data to use (may be modified by reviewer)
        """
        # Skip review if not required or safe mode is disabled
        if not self.requires_human_review or not execution_context.safe_mode:
            return False, input_data

        from backend.blocks.helpers.review import HITLReviewHelper

        # Handle the review request and get decision
        decision = await HITLReviewHelper.handle_review_decision(
            input_data=input_data,
            user_id=user_id,
            node_exec_id=node_exec_id,
            graph_exec_id=graph_exec_id,
            graph_id=graph_id,
            graph_version=graph_version,
            execution_context=execution_context,
            block_name=self.name,
            editable=True,
        )

        if decision is None:
            # We're awaiting review - pause execution
            return True, input_data

        if not decision.should_proceed:
            # Review was rejected, raise an error to stop execution
            raise BlockExecutionError(
                message=f"Block execution rejected by reviewer: {decision.message}",
                block_name=self.name,
                block_id=self.id,
            )

        # Review was approved - use the potentially modified data
        # ReviewResult.data must be a dict for block inputs
        reviewed_data = decision.review_result.data
        if not isinstance(reviewed_data, dict):
            raise BlockExecutionError(
                message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
                block_name=self.name,
                block_id=self.id,
            )
        return False, reviewed_data

    async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
        # Check for review requirement and get potentially modified input data
        should_pause, input_data = await self.is_block_exec_need_review(
            input_data, **kwargs
        )
        if should_pause:
            return

        # Validate the input data (original or reviewer-modified) once
        if error := self.input_schema.validate_data(input_data):
            raise BlockInputError(
                message=f"Unable to execute block with invalid input data: {error}",
@@ -695,7 +622,6 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
                block_id=self.id,
            )

        # Use the validated input data
        async for output_name, output_data in self.run(
            self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
            **kwargs,

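Because the gate lives in `_execute`, any block can opt into the review flow by setting the new flag; a hedged sketch (the block class and its constructor arguments are illustrative, not from this diff):

    class DangerousActionBlock(Block):
        def __init__(self):
            super().__init__(...)  # id, schemas, etc. elided
            # With safe_mode on, every execution of this block now passes
            # through is_block_exec_need_review before input validation.
            self.requires_human_review = True
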
@@ -383,7 +383,6 @@ class GraphExecutionWithNodes(GraphExecution):
        self,
        execution_context: ExecutionContext,
        compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
        nodes_to_skip: Optional[set[str]] = None,
    ):
        return GraphExecutionEntry(
            user_id=self.user_id,
@@ -391,7 +390,6 @@
            graph_version=self.graph_version or 0,
            graph_exec_id=self.id,
            nodes_input_masks=compiled_nodes_input_masks,
            nodes_to_skip=nodes_to_skip or set(),
            execution_context=execution_context,
        )

@@ -1147,8 +1145,6 @@ class GraphExecutionEntry(BaseModel):
    graph_id: str
    graph_version: int
    nodes_input_masks: Optional[NodesInputMasks] = None
    nodes_to_skip: set[str] = Field(default_factory=set)
    """Node IDs that should be skipped due to optional credentials not being configured."""
    execution_context: ExecutionContext = Field(default_factory=ExecutionContext)

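For illustration, the removed field rode along on the entry like any other Pydantic attribute; a minimal sketch, assuming the fields shown above are the only required ones (the real model may require more):

    entry = GraphExecutionEntry(
        user_id="user-1",
        graph_id="graph-1",
        graph_version=1,
        graph_exec_id="exec-1",
        nodes_to_skip={"node-with-unconfigured-optional-creds"},
    )
    assert "node-with-unconfigured-optional-creds" in entry.nodes_to_skip
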
@@ -94,15 +94,6 @@ class Node(BaseDbModel):
    input_links: list[Link] = []
    output_links: list[Link] = []

    @property
    def credentials_optional(self) -> bool:
        """
        Whether credentials are optional for this node.
        When True and credentials are not configured, the node will be skipped
        during execution rather than causing a validation error.
        """
        return self.metadata.get("credentials_optional", False)

    @property
    def block(self) -> AnyBlockSchema | "_UnknownBlockBase":
        """Get the block for this node. Returns UnknownBlock if block is deleted/missing."""
@@ -244,10 +235,7 @@ class BaseGraph(BaseDbModel):
        return any(
            node.block_id
            for node in self.nodes
            if (
                node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
                or node.block.requires_human_review
            )
            if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
        )

    @property
@@ -338,35 +326,7 @@ class Graph(BaseGraph):
    @computed_field
    @property
    def credentials_input_schema(self) -> dict[str, Any]:
        schema = self._credentials_input_schema.jsonschema()

        # Determine which credential fields are required based on credentials_optional metadata
        graph_credentials_inputs = self.aggregate_credentials_inputs()
        required_fields = []

        # Build a map of node_id -> node for quick lookup
        all_nodes = {node.id: node for node in self.nodes}
        for sub_graph in self.sub_graphs:
            for node in sub_graph.nodes:
                all_nodes[node.id] = node

        for field_key, (
            _field_info,
            node_field_pairs,
        ) in graph_credentials_inputs.items():
            # A field is required if ANY node using it has credentials_optional=False
            is_required = False
            for node_id, _field_name in node_field_pairs:
                node = all_nodes.get(node_id)
                if node and not node.credentials_optional:
                    is_required = True
                    break

            if is_required:
                required_fields.append(field_key)

        schema["required"] = required_fields
        return schema
        return self._credentials_input_schema.jsonschema()

    @property
    def _credentials_input_schema(self) -> type[BlockSchema]:

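The removed schema logic reduces to one rule: a credential field stays in the schema's `required` list unless every node consuming it is marked optional. The rule in isolation:

    node_field_pairs = [("n1", "credentials"), ("n2", "credentials")]
    credentials_optional = {"n1": True, "n2": False}  # n2 still needs them
    is_required = any(
        not credentials_optional[node_id] for node_id, _field in node_field_pairs
    )
    assert is_required  # one non-optional consumer keeps the field required
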
@@ -396,58 +396,3 @@ async def test_access_store_listing_graph(server: SpinTestServer):
        created_graph.id, created_graph.version, "3e53486c-cf57-477e-ba2a-cb02dc828e1b"
    )
    assert got_graph is not None


# ============================================================================
# Tests for Optional Credentials Feature
# ============================================================================


def test_node_credentials_optional_default():
    """Test that credentials_optional defaults to False when not set in metadata."""
    node = Node(
        id="test_node",
        block_id=StoreValueBlock().id,
        input_default={},
        metadata={},
    )
    assert node.credentials_optional is False


def test_node_credentials_optional_true():
    """Test that credentials_optional returns True when explicitly set."""
    node = Node(
        id="test_node",
        block_id=StoreValueBlock().id,
        input_default={},
        metadata={"credentials_optional": True},
    )
    assert node.credentials_optional is True


def test_node_credentials_optional_false():
    """Test that credentials_optional returns False when explicitly set to False."""
    node = Node(
        id="test_node",
        block_id=StoreValueBlock().id,
        input_default={},
        metadata={"credentials_optional": False},
    )
    assert node.credentials_optional is False


def test_node_credentials_optional_with_other_metadata():
    """Test that credentials_optional works correctly with other metadata present."""
    node = Node(
        id="test_node",
        block_id=StoreValueBlock().id,
        input_default={},
        metadata={
            "position": {"x": 100, "y": 200},
            "customized_name": "My Custom Node",
            "credentials_optional": True,
        },
    )
    assert node.credentials_optional is True
    assert node.metadata["position"] == {"x": 100, "y": 200}
    assert node.metadata["customized_name"] == "My Custom Node"

@@ -178,7 +178,6 @@ async def execute_node(
    execution_processor: "ExecutionProcessor",
    execution_stats: NodeExecutionStats | None = None,
    nodes_input_masks: Optional[NodesInputMasks] = None,
    nodes_to_skip: Optional[set[str]] = None,
) -> BlockOutput:
    """
    Execute a node in the graph. This will trigger a block execution on a node,
@@ -246,7 +245,6 @@
        "user_id": user_id,
        "execution_context": execution_context,
        "execution_processor": execution_processor,
        "nodes_to_skip": nodes_to_skip or set(),
    }

    # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
@@ -544,7 +542,6 @@ class ExecutionProcessor:
        node_exec_progress: NodeExecutionProgress,
        nodes_input_masks: Optional[NodesInputMasks],
        graph_stats_pair: tuple[GraphExecutionStats, threading.Lock],
        nodes_to_skip: Optional[set[str]] = None,
    ) -> NodeExecutionStats:
        log_metadata = LogMetadata(
            logger=_logger,
@@ -567,7 +564,6 @@
                db_client=db_client,
                log_metadata=log_metadata,
                nodes_input_masks=nodes_input_masks,
                nodes_to_skip=nodes_to_skip,
            )
            if isinstance(status, BaseException):
                raise status
@@ -613,7 +609,6 @@
        db_client: "DatabaseManagerAsyncClient",
        log_metadata: LogMetadata,
        nodes_input_masks: Optional[NodesInputMasks] = None,
        nodes_to_skip: Optional[set[str]] = None,
    ) -> ExecutionStatus:
        status = ExecutionStatus.RUNNING

@@ -650,7 +645,6 @@
            execution_processor=self,
            execution_stats=stats,
            nodes_input_masks=nodes_input_masks,
            nodes_to_skip=nodes_to_skip,
        ):
            await persist_output(output_name, output_data)

@@ -962,21 +956,6 @@

            queued_node_exec = execution_queue.get()

            # Check if this node should be skipped due to optional credentials
            if queued_node_exec.node_id in graph_exec.nodes_to_skip:
                log_metadata.info(
                    f"Skipping node execution {queued_node_exec.node_exec_id} "
                    f"for node {queued_node_exec.node_id} - optional credentials not configured"
                )
                # Mark the node as completed without executing
                # No outputs will be produced, so downstream nodes won't trigger
                update_node_execution_status(
                    db_client=db_client,
                    exec_id=queued_node_exec.node_exec_id,
                    status=ExecutionStatus.COMPLETED,
                )
                continue

            log_metadata.debug(
                f"Dispatching node execution {queued_node_exec.node_exec_id} "
                f"for node {queued_node_exec.node_id}",
@@ -1037,7 +1016,6 @@
                    execution_stats,
                    execution_stats_lock,
                ),
                nodes_to_skip=graph_exec.nodes_to_skip,
            ),
            self.node_execution_loop,
        )

@@ -239,19 +239,14 @@ async def _validate_node_input_credentials(
    graph: GraphModel,
    user_id: str,
    nodes_input_masks: Optional[NodesInputMasks] = None,
) -> tuple[dict[str, dict[str, str]], set[str]]:
) -> dict[str, dict[str, str]]:
    """
    Checks all credentials for all nodes of the graph and returns structured errors
    and a set of nodes that should be skipped due to optional missing credentials.
    Checks all credentials for all nodes of the graph and returns structured errors.

    Returns:
        tuple[
            dict[node_id, dict[field_name, error_message]]: Credential validation errors per node,
            set[node_id]: Nodes that should be skipped (optional credentials not configured)
        ]
        dict[node_id, dict[field_name, error_message]]: Credential validation errors per node
    """
    credential_errors: dict[str, dict[str, str]] = defaultdict(dict)
    nodes_to_skip: set[str] = set()

    for node in graph.nodes:
        block = node.block
@@ -261,46 +256,27 @@
        if not credentials_fields:
            continue

        # Track if any credential field is missing for this node
        has_missing_credentials = False

        for field_name, credentials_meta_type in credentials_fields.items():
            try:
                # Check nodes_input_masks first, then input_default
                field_value = None
                if (
                    nodes_input_masks
                    and (node_input_mask := nodes_input_masks.get(node.id))
                    and field_name in node_input_mask
                ):
                    field_value = node_input_mask[field_name]
                    credentials_meta = credentials_meta_type.model_validate(
                        node_input_mask[field_name]
                    )
                elif field_name in node.input_default:
                    # For optional credentials, don't use input_default - treat as missing
                    # This prevents stale credential IDs from failing validation
                    if node.credentials_optional:
                        field_value = None
                    else:
                        field_value = node.input_default[field_name]

                # Check if credentials are missing (None, empty, or not present)
                if field_value is None or (
                    isinstance(field_value, dict) and not field_value.get("id")
                ):
                    has_missing_credentials = True
                    # If node has credentials_optional flag, mark for skipping instead of error
                    if node.credentials_optional:
                        continue  # Don't add error, will be marked for skip after loop
                    else:
                        credential_errors[node.id][
                            field_name
                        ] = "These credentials are required"
                        continue

                credentials_meta = credentials_meta_type.model_validate(field_value)

                    credentials_meta = credentials_meta_type.model_validate(
                        node.input_default[field_name]
                    )
                else:
                    # Missing credentials
                    credential_errors[node.id][
                        field_name
                    ] = "These credentials are required"
                    continue
            except ValidationError as e:
                # Validation error means credentials were provided but invalid
                # This should always be an error, even if optional
                credential_errors[node.id][field_name] = f"Invalid credentials: {e}"
                continue

@@ -311,7 +287,6 @@
            )
        except Exception as e:
            # Handle any errors fetching credentials
            # If credentials were explicitly configured but unavailable, it's an error
            credential_errors[node.id][
                field_name
            ] = f"Credentials not available: {e}"
@@ -338,19 +313,7 @@
            ] = "Invalid credentials: type/provider mismatch"
            continue

        # If node has optional credentials and any are missing, mark for skipping
        # But only if there are no other errors for this node
        if (
            has_missing_credentials
            and node.credentials_optional
            and node.id not in credential_errors
        ):
            nodes_to_skip.add(node.id)
            logger.info(
                f"Node #{node.id} will be skipped: optional credentials not configured"
            )

    return credential_errors, nodes_to_skip
    return credential_errors

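The "missing" test in the removed code treats both absent values and id-less credential dicts as unconfigured; in isolation:

    def is_missing(field_value) -> bool:
        # Mirrors the check above: None, or a credentials dict without an "id".
        return field_value is None or (
            isinstance(field_value, dict) and not field_value.get("id")
        )

    assert is_missing(None)
    assert is_missing({"provider": "github"})  # no "id" -> unconfigured
    assert not is_missing({"id": "cred-123", "provider": "github"})
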
def make_node_credentials_input_map(
@@ -392,25 +355,21 @@ async def validate_graph_with_credentials(
    graph: GraphModel,
    user_id: str,
    nodes_input_masks: Optional[NodesInputMasks] = None,
) -> tuple[Mapping[str, Mapping[str, str]], set[str]]:
) -> Mapping[str, Mapping[str, str]]:
    """
    Validate graph including credentials and return structured errors per node,
    along with a set of nodes that should be skipped due to optional missing credentials.
    Validate graph including credentials and return structured errors per node.

    Returns:
        tuple[
            dict[node_id, dict[field_name, error_message]]: Validation errors per node,
            set[node_id]: Nodes that should be skipped (optional credentials not configured)
        ]
        dict[node_id, dict[field_name, error_message]]: Validation errors per node
    """
    # Get input validation errors
    node_input_errors = GraphModel.validate_graph_get_errors(
        graph, for_run=True, nodes_input_masks=nodes_input_masks
    )

    # Get credential input/availability/validation errors and nodes to skip
    node_credential_input_errors, nodes_to_skip = (
        await _validate_node_input_credentials(graph, user_id, nodes_input_masks)
    # Get credential input/availability/validation errors
    node_credential_input_errors = await _validate_node_input_credentials(
        graph, user_id, nodes_input_masks
    )

    # Merge credential errors with structural errors
@@ -419,7 +378,7 @@
        node_input_errors[node_id] = {}
        node_input_errors[node_id].update(field_errors)

    return node_input_errors, nodes_to_skip
    return node_input_errors


async def _construct_starting_node_execution_input(
@@ -427,7 +386,7 @@
    user_id: str,
    graph_inputs: BlockInput,
    nodes_input_masks: Optional[NodesInputMasks] = None,
) -> tuple[list[tuple[str, BlockInput]], set[str]]:
) -> list[tuple[str, BlockInput]]:
    """
    Validates and prepares the input data for executing a graph.
    This function checks the graph for starting nodes, validates the input data
@@ -441,14 +400,11 @@
        node_credentials_map: `dict[node_id, dict[input_name, CredentialsMetaInput]]`

    Returns:
        tuple[
            list[tuple[str, BlockInput]]: A list of tuples, each containing the node ID
                and the corresponding input data for that node.
            set[str]: Node IDs that should be skipped (optional credentials not configured)
        ]
        list[tuple[str, BlockInput]]: A list of tuples, each containing the node ID and
            the corresponding input data for that node.
    """
    # Use new validation function that includes credentials
    validation_errors, nodes_to_skip = await validate_graph_with_credentials(
    validation_errors = await validate_graph_with_credentials(
        graph, user_id, nodes_input_masks
    )
    n_error_nodes = len(validation_errors)
@@ -489,7 +445,7 @@
            "No starting nodes found for the graph, make sure an AgentInput or blocks with no inbound links are present as starting nodes."
        )

    return nodes_input, nodes_to_skip
    return nodes_input


async def validate_and_construct_node_execution_input(
@@ -500,7 +456,7 @@
    graph_credentials_inputs: Optional[Mapping[str, CredentialsMetaInput]] = None,
    nodes_input_masks: Optional[NodesInputMasks] = None,
    is_sub_graph: bool = False,
) -> tuple[GraphModel, list[tuple[str, BlockInput]], NodesInputMasks, set[str]]:
) -> tuple[GraphModel, list[tuple[str, BlockInput]], NodesInputMasks]:
    """
    Public wrapper that handles graph fetching, credential mapping, and validation+construction.
    This centralizes the logic used by both scheduler validation and actual execution.
@@ -517,7 +473,6 @@
        GraphModel: Full graph object for the given `graph_id`.
        list[tuple[node_id, BlockInput]]: Starting node IDs with corresponding inputs.
        dict[str, BlockInput]: Node input masks including all passed-in credentials.
        set[str]: Node IDs that should be skipped (optional credentials not configured).

    Raises:
        NotFoundError: If the graph is not found.
@@ -559,16 +514,14 @@
        nodes_input_masks or {},
    )

    starting_nodes_input, nodes_to_skip = (
        await _construct_starting_node_execution_input(
            graph=graph,
            user_id=user_id,
            graph_inputs=graph_inputs,
            nodes_input_masks=nodes_input_masks,
        )
    starting_nodes_input = await _construct_starting_node_execution_input(
        graph=graph,
        user_id=user_id,
        graph_inputs=graph_inputs,
        nodes_input_masks=nodes_input_masks,
    )

    return graph, starting_nodes_input, nodes_input_masks, nodes_to_skip
    return graph, starting_nodes_input, nodes_input_masks

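On the removed side, callers unpacked a four-tuple. A hedged sketch of that old calling convention (argument values are placeholders, and any keyword names beyond those visible in the hunks are assumptions):

    graph, starting_nodes_input, masks, nodes_to_skip = (
        await validate_and_construct_node_execution_input(
            graph_id="graph-1",
            user_id="user-1",
            graph_inputs={"topic": "example"},
        )
    )
    for node_id, node_input in starting_nodes_input:
        if node_id in nodes_to_skip:
            continue  # optional credentials unconfigured; node completes with no output
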
def _merge_nodes_input_masks(
@@ -826,9 +779,6 @@ async def add_graph_execution(

        # Use existing execution's compiled input masks
        compiled_nodes_input_masks = graph_exec.nodes_input_masks or {}
        # For resumed executions, nodes_to_skip was already determined at creation time
        # TODO: Consider storing nodes_to_skip in DB if we need to preserve it across resumes
        nodes_to_skip: set[str] = set()

        logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}")
    else:
@@ -837,7 +787,7 @@
        )

        # Create new execution
        graph, starting_nodes_input, compiled_nodes_input_masks, nodes_to_skip = (
        graph, starting_nodes_input, compiled_nodes_input_masks = (
            await validate_and_construct_node_execution_input(
                graph_id=graph_id,
                user_id=user_id,
@@ -886,7 +836,6 @@
    try:
        graph_exec_entry = graph_exec.to_graph_execution_entry(
            compiled_nodes_input_masks=compiled_nodes_input_masks,
            nodes_to_skip=nodes_to_skip,
            execution_context=execution_context,
        )
        logger.info(f"Publishing execution {graph_exec.id} to execution queue")

@@ -367,13 +367,10 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
    )

    # Setup mock returns
    # The function returns (graph, starting_nodes_input, compiled_nodes_input_masks, nodes_to_skip)
    nodes_to_skip: set[str] = set()
    mock_validate.return_value = (
        mock_graph,
        starting_nodes_input,
        compiled_nodes_input_masks,
        nodes_to_skip,
    )
    mock_prisma.is_connected.return_value = True
    mock_edb.create_graph_execution = mocker.AsyncMock(return_value=mock_graph_exec)
@@ -459,212 +456,3 @@
    # Both executions should succeed (though they create different objects)
    assert result1 == mock_graph_exec
    assert result2 == mock_graph_exec_2


# ============================================================================
# Tests for Optional Credentials Feature
# ============================================================================


@pytest.mark.asyncio
async def test_validate_node_input_credentials_returns_nodes_to_skip(
    mocker: MockerFixture,
):
    """
    Test that _validate_node_input_credentials returns nodes_to_skip set
    for nodes with credentials_optional=True and missing credentials.
    """
    from backend.executor.utils import _validate_node_input_credentials

    # Create a mock node with credentials_optional=True
    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-optional-creds"
    mock_node.credentials_optional = True
    mock_node.input_default = {}  # No credentials configured

    # Create a mock block with credentials field
    mock_block = mocker.MagicMock()
    mock_credentials_field_type = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {
        "credentials": mock_credentials_field_type
    }
    mock_node.block = mock_block

    # Create mock graph
    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Call the function
    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user-id",
        nodes_input_masks=None,
    )

    # Node should be in nodes_to_skip, not in errors
    assert mock_node.id in nodes_to_skip
    assert mock_node.id not in errors


@pytest.mark.asyncio
async def test_validate_node_input_credentials_required_missing_creds_error(
    mocker: MockerFixture,
):
    """
    Test that _validate_node_input_credentials returns errors
    for nodes with credentials_optional=False and missing credentials.
    """
    from backend.executor.utils import _validate_node_input_credentials

    # Create a mock node with credentials_optional=False (required)
    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-required-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {}  # No credentials configured

    # Create a mock block with credentials field
    mock_block = mocker.MagicMock()
    mock_credentials_field_type = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {
        "credentials": mock_credentials_field_type
    }
    mock_node.block = mock_block

    # Create mock graph
    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Call the function
    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user-id",
        nodes_input_masks=None,
    )

    # Node should be in errors, not in nodes_to_skip
    assert mock_node.id in errors
    assert "credentials" in errors[mock_node.id]
    assert "required" in errors[mock_node.id]["credentials"].lower()
    assert mock_node.id not in nodes_to_skip


@pytest.mark.asyncio
async def test_validate_graph_with_credentials_returns_nodes_to_skip(
    mocker: MockerFixture,
):
    """
    Test that validate_graph_with_credentials returns nodes_to_skip set
    from _validate_node_input_credentials.
    """
    from backend.executor.utils import validate_graph_with_credentials

    # Mock _validate_node_input_credentials to return specific values
    mock_validate = mocker.patch(
        "backend.executor.utils._validate_node_input_credentials"
    )
    expected_errors = {"node1": {"field": "error"}}
    expected_nodes_to_skip = {"node2", "node3"}
    mock_validate.return_value = (expected_errors, expected_nodes_to_skip)

    # Mock GraphModel with validate_graph_get_errors method
    mock_graph = mocker.MagicMock()
    mock_graph.validate_graph_get_errors.return_value = {}

    # Call the function
    errors, nodes_to_skip = await validate_graph_with_credentials(
        graph=mock_graph,
        user_id="test-user-id",
        nodes_input_masks=None,
    )

    # Verify nodes_to_skip is passed through
    assert nodes_to_skip == expected_nodes_to_skip
    assert "node1" in errors


@pytest.mark.asyncio
async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
    """
    Test that add_graph_execution properly passes nodes_to_skip
    to the graph execution entry.
    """
    from backend.data.execution import GraphExecutionWithNodes
    from backend.executor.utils import add_graph_execution

    # Mock data
    graph_id = "test-graph-id"
    user_id = "test-user-id"
    inputs = {"test_input": "test_value"}
    graph_version = 1

    # Mock the graph object
    mock_graph = mocker.MagicMock()
    mock_graph.version = graph_version

    # Starting nodes and masks
    starting_nodes_input = [("node1", {"input1": "value1"})]
    compiled_nodes_input_masks = {}
    nodes_to_skip = {"skipped-node-1", "skipped-node-2"}

    # Mock the graph execution object
    mock_graph_exec = mocker.MagicMock(spec=GraphExecutionWithNodes)
    mock_graph_exec.id = "execution-id-123"
    mock_graph_exec.node_executions = []

    # Track what's passed to to_graph_execution_entry
    captured_kwargs = {}

    def capture_to_entry(**kwargs):
        captured_kwargs.update(kwargs)
        return mocker.MagicMock()

    mock_graph_exec.to_graph_execution_entry.side_effect = capture_to_entry

    # Setup mocks
    mock_validate = mocker.patch(
        "backend.executor.utils.validate_and_construct_node_execution_input"
    )
    mock_edb = mocker.patch("backend.executor.utils.execution_db")
    mock_prisma = mocker.patch("backend.executor.utils.prisma")
    mock_udb = mocker.patch("backend.executor.utils.user_db")
    mock_gdb = mocker.patch("backend.executor.utils.graph_db")
    mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
    mock_get_event_bus = mocker.patch(
        "backend.executor.utils.get_async_execution_event_bus"
    )

    # Setup returns - include nodes_to_skip in the tuple
    mock_validate.return_value = (
        mock_graph,
        starting_nodes_input,
        compiled_nodes_input_masks,
        nodes_to_skip,  # This should be passed through
    )
    mock_prisma.is_connected.return_value = True
    mock_edb.create_graph_execution = mocker.AsyncMock(return_value=mock_graph_exec)
    mock_edb.update_graph_execution_stats = mocker.AsyncMock(
        return_value=mock_graph_exec
    )
    mock_edb.update_node_execution_status_batch = mocker.AsyncMock()

    mock_user = mocker.MagicMock()
    mock_user.timezone = "UTC"
    mock_settings = mocker.MagicMock()
    mock_settings.human_in_the_loop_safe_mode = True

    mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
    mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
    mock_get_queue.return_value = mocker.AsyncMock()
    mock_get_event_bus.return_value = mocker.MagicMock(publish=mocker.AsyncMock())

    # Call the function
    await add_graph_execution(
        graph_id=graph_id,
        user_id=user_id,
        inputs=inputs,
        graph_version=graph_version,
    )

    # Verify nodes_to_skip was passed to to_graph_execution_entry
    assert "nodes_to_skip" in captured_kwargs
    assert captured_kwargs["nodes_to_skip"] == nodes_to_skip

@@ -8,7 +8,6 @@ from .discord import DiscordOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
from .reddit import RedditOAuthHandler
from .twitter import TwitterOAuthHandler

if TYPE_CHECKING:
@@ -21,7 +20,6 @@ _ORIGINAL_HANDLERS = [
    GitHubOAuthHandler,
    GoogleOAuthHandler,
    NotionOAuthHandler,
    RedditOAuthHandler,
    TwitterOAuthHandler,
    TodoistOAuthHandler,
]

@@ -1,208 +0,0 @@
import time
import urllib.parse
from typing import ClassVar, Optional

from pydantic import SecretStr

from backend.data.model import OAuth2Credentials
from backend.integrations.oauth.base import BaseOAuthHandler
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.settings import Settings

settings = Settings()


class RedditOAuthHandler(BaseOAuthHandler):
    """
    Reddit OAuth 2.0 handler.

    Based on the documentation at:
    - https://github.com/reddit-archive/reddit/wiki/OAuth2

    Notes:
    - Reddit requires `duration=permanent` to get refresh tokens
    - Access tokens expire after 1 hour (3600 seconds)
    - Reddit requires HTTP Basic Auth for token requests
    - Reddit requires a unique User-Agent header
    """

    PROVIDER_NAME = ProviderName.REDDIT
    DEFAULT_SCOPES: ClassVar[list[str]] = [
        "identity",  # Get username, verify auth
        "read",  # Access posts and comments
        "submit",  # Submit new posts and comments
        "edit",  # Edit own posts and comments
        "history",  # Access user's post history
        "privatemessages",  # Access inbox and send private messages
        "flair",  # Access and set flair on posts/subreddits
    ]

    AUTHORIZE_URL = "https://www.reddit.com/api/v1/authorize"
    TOKEN_URL = "https://www.reddit.com/api/v1/access_token"
    USERNAME_URL = "https://oauth.reddit.com/api/v1/me"
    REVOKE_URL = "https://www.reddit.com/api/v1/revoke_token"

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        """Generate Reddit OAuth 2.0 authorization URL"""
        scopes = self.handle_default_scopes(scopes)

        params = {
            "response_type": "code",
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "scope": " ".join(scopes),
            "state": state,
            "duration": "permanent",  # Required for refresh tokens
        }

        return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"

    async def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        """Exchange authorization code for access tokens"""
        scopes = self.handle_default_scopes(scopes)

        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": settings.config.reddit_user_agent,
        }

        data = {
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": self.redirect_uri,
        }

        # Reddit requires HTTP Basic Auth for token requests
        auth = (self.client_id, self.client_secret)

        response = await Requests().post(
            self.TOKEN_URL, headers=headers, data=data, auth=auth
        )

        if not response.ok:
            error_text = response.text()
            raise ValueError(
                f"Reddit token exchange failed: {response.status} - {error_text}"
            )

        tokens = response.json()

        if "error" in tokens:
            raise ValueError(f"Reddit OAuth error: {tokens.get('error')}")

        username = await self._get_username(tokens["access_token"])

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=None,
            username=username,
            access_token=tokens["access_token"],
            refresh_token=tokens.get("refresh_token"),
            access_token_expires_at=int(time.time()) + tokens.get("expires_in", 3600),
            refresh_token_expires_at=None,  # Reddit refresh tokens don't expire
            scopes=scopes,
        )

    async def _get_username(self, access_token: str) -> str:
        """Get the username from the access token"""
        headers = {
            "Authorization": f"Bearer {access_token}",
            "User-Agent": settings.config.reddit_user_agent,
        }

        response = await Requests().get(self.USERNAME_URL, headers=headers)

        if not response.ok:
            raise ValueError(f"Failed to get Reddit username: {response.status}")

        data = response.json()
        return data.get("name", "unknown")

    async def _refresh_tokens(
        self, credentials: OAuth2Credentials
    ) -> OAuth2Credentials:
        """Refresh access tokens using refresh token"""
        if not credentials.refresh_token:
            raise ValueError("No refresh token available")

        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": settings.config.reddit_user_agent,
        }

        data = {
            "grant_type": "refresh_token",
            "refresh_token": credentials.refresh_token.get_secret_value(),
        }

        auth = (self.client_id, self.client_secret)

        response = await Requests().post(
            self.TOKEN_URL, headers=headers, data=data, auth=auth
        )

        if not response.ok:
            error_text = response.text()
            raise ValueError(
                f"Reddit token refresh failed: {response.status} - {error_text}"
            )

        tokens = response.json()

        if "error" in tokens:
            raise ValueError(f"Reddit OAuth error: {tokens.get('error')}")

        username = await self._get_username(tokens["access_token"])

        # Reddit may or may not return a new refresh token
        new_refresh_token = tokens.get("refresh_token")
        if new_refresh_token:
            refresh_token: SecretStr | None = SecretStr(new_refresh_token)
        elif credentials.refresh_token:
            # Keep the existing refresh token
            refresh_token = credentials.refresh_token
        else:
            refresh_token = None

        return OAuth2Credentials(
            id=credentials.id,
            provider=self.PROVIDER_NAME,
            title=credentials.title,
            username=username,
            access_token=tokens["access_token"],
            refresh_token=refresh_token,
            access_token_expires_at=int(time.time()) + tokens.get("expires_in", 3600),
            refresh_token_expires_at=None,
            scopes=credentials.scopes,
        )

    async def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        """Revoke the access token"""
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": settings.config.reddit_user_agent,
        }

        data = {
            "token": credentials.access_token.get_secret_value(),
            "token_type_hint": "access_token",
        }

        auth = (self.client_id, self.client_secret)

        response = await Requests().post(
            self.REVOKE_URL, headers=headers, data=data, auth=auth
        )

        # Reddit returns 204 No Content on successful revocation
        return response.ok
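
For reference, the removed handler followed the standard authorization-code flow; a condensed usage sketch (client credentials, the callback URL, and the received code are placeholders):

    handler = RedditOAuthHandler(
        client_id="...",
        client_secret="...",
        redirect_uri="https://example.com/oauth/callback",
    )
    # Empty scopes fall back to DEFAULT_SCOPES via handle_default_scopes().
    login_url = handler.get_login_url(scopes=[], state="opaque-state", code_challenge=None)
    # After the user approves, Reddit redirects back with ?code=...&state=...;
    # the code is then exchanged inside an async context:
    code = "..."  # from the callback query string
    creds = await handler.exchange_code_for_tokens(code, scopes=[], code_verifier=None)
    assert creds.refresh_token is not None  # thanks to duration=permanent
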
@@ -264,7 +264,7 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
    )

    reddit_user_agent: str = Field(
        default="web:AutoGPT:v0.6.0 (by /u/autogpt)",
        default="AutoGPT:1.0 (by /u/autogpt)",
        description="The user agent for the Reddit API",
    )

@@ -1,227 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate a lightweight stub for prisma/types.py that collapses all exported
|
||||
symbols to Any. This prevents Pyright from spending time/budget on Prisma's
|
||||
query DSL types while keeping runtime behavior unchanged.
|
||||
|
||||
Usage:
|
||||
poetry run gen-prisma-stub
|
||||
|
||||
This script automatically finds the prisma package location and generates
|
||||
the types.pyi stub file in the same directory as types.py.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import importlib.util
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Iterable, Set
|
||||
|
||||
|
||||
def _iter_assigned_names(target: ast.expr) -> Iterable[str]:
|
||||
"""Extract names from assignment targets (handles tuple unpacking)."""
|
||||
if isinstance(target, ast.Name):
|
||||
yield target.id
|
||||
elif isinstance(target, (ast.Tuple, ast.List)):
|
||||
for elt in target.elts:
|
||||
yield from _iter_assigned_names(elt)
|
||||
|
||||
|
||||
def _is_private(name: str) -> bool:
|
||||
"""Check if a name is private (starts with _ but not __)."""
|
||||
return name.startswith("_") and not name.startswith("__")
|
||||
|
||||
|
||||
def _is_safe_type_alias(node: ast.Assign) -> bool:
|
||||
"""Check if an assignment is a safe type alias that shouldn't be stubbed.
|
||||
|
||||
Safe types are:
|
||||
- Literal types (don't cause type budget issues)
|
||||
- Simple type references (SortMode, SortOrder, etc.)
|
||||
- TypeVar definitions
|
||||
"""
|
||||
if not node.value:
|
||||
return False
|
||||
|
||||
# Check if it's a Subscript (like Literal[...], Union[...], TypeVar[...])
|
||||
if isinstance(node.value, ast.Subscript):
|
||||
# Get the base type name
|
||||
if isinstance(node.value.value, ast.Name):
|
||||
base_name = node.value.value.id
|
||||
# Literal types are safe
|
||||
if base_name == "Literal":
|
||||
return True
|
||||
# TypeVar is safe
|
||||
if base_name == "TypeVar":
|
||||
return True
|
||||
elif isinstance(node.value.value, ast.Attribute):
|
||||
# Handle typing_extensions.Literal etc.
|
||||
if node.value.value.attr == "Literal":
|
||||
return True
|
||||
|
||||
# Check if it's a simple Name reference (like SortMode = _types.SortMode)
|
||||
if isinstance(node.value, ast.Attribute):
|
||||
return True
|
||||
|
||||
# Check if it's a Call (like TypeVar(...))
|
||||
if isinstance(node.value, ast.Call):
|
||||
if isinstance(node.value.func, ast.Name):
|
||||
if node.value.func.id == "TypeVar":
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def collect_top_level_symbols(
|
||||
tree: ast.Module, source_lines: list[str]
|
||||
) -> tuple[Set[str], Set[str], list[str], Set[str]]:
|
||||
"""Collect all top-level symbols from an AST module.
|
||||
|
||||
Returns:
|
||||
Tuple of (class_names, function_names, safe_variable_sources, unsafe_variable_names)
|
||||
safe_variable_sources contains the actual source code lines for safe variables
|
||||
"""
|
||||
classes: Set[str] = set()
|
||||
functions: Set[str] = set()
|
||||
safe_variable_sources: list[str] = []
|
||||
unsafe_variables: Set[str] = set()
|
||||
|
||||
for node in tree.body:
|
||||
if isinstance(node, ast.ClassDef):
|
||||
if not _is_private(node.name):
|
||||
classes.add(node.name)
|
||||
elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
|
||||
if not _is_private(node.name):
|
||||
functions.add(node.name)
|
||||
elif isinstance(node, ast.Assign):
|
||||
is_safe = _is_safe_type_alias(node)
|
||||
names = []
|
||||
for t in node.targets:
|
||||
for n in _iter_assigned_names(t):
|
||||
if not _is_private(n):
|
||||
names.append(n)
|
||||
if names:
|
||||
if is_safe:
|
||||
# Extract the source code for this assignment
|
||||
start_line = node.lineno - 1 # 0-indexed
|
||||
end_line = node.end_lineno if node.end_lineno else node.lineno
|
||||
source = "\n".join(source_lines[start_line:end_line])
|
||||
safe_variable_sources.append(source)
|
||||
else:
|
||||
unsafe_variables.update(names)
|
||||
elif isinstance(node, ast.AnnAssign) and node.target:
|
||||
# Annotated assignments are always stubbed
|
||||
for n in _iter_assigned_names(node.target):
|
||||
if not _is_private(n):
|
||||
unsafe_variables.add(n)
|
||||
|
||||
return classes, functions, safe_variable_sources, unsafe_variables
|
||||
|
||||
|
||||
def find_prisma_types_path() -> Path:
    """Find the prisma types.py file in the installed package."""
    spec = importlib.util.find_spec("prisma")
    if spec is None or spec.origin is None:
        raise RuntimeError("Could not find prisma package. Is it installed?")

    prisma_dir = Path(spec.origin).parent
    types_path = prisma_dir / "types.py"

    if not types_path.exists():
        raise RuntimeError(f"prisma/types.py not found at {types_path}")

    return types_path


def generate_stub(src_path: Path, stub_path: Path) -> int:
    """Generate the .pyi stub file from the source types.py."""
    code = src_path.read_text(encoding="utf-8", errors="ignore")
    source_lines = code.splitlines()
    tree = ast.parse(code, filename=str(src_path))
    classes, functions, safe_variable_sources, unsafe_variables = (
        collect_top_level_symbols(tree, source_lines)
    )

    header = """\
# -*- coding: utf-8 -*-
# Auto-generated stub file - DO NOT EDIT
# Generated by gen_prisma_types_stub.py
#
# This stub intentionally collapses complex Prisma query DSL types to Any.
# Prisma's generated types can explode Pyright's type inference budgets
# on large schemas. We collapse them to Any so the rest of the codebase
# can remain strongly typed while keeping runtime behavior unchanged.
#
# Safe types (Literal, TypeVar, simple references) are preserved from the
# original types.py to maintain proper type checking where possible.

from __future__ import annotations
from typing import Any
from typing_extensions import Literal

# Re-export commonly used typing constructs that may be imported from this module
from typing import TYPE_CHECKING, TypeVar, Generic, Union, Optional, List, Dict

# Base type alias for stubbed Prisma types - allows any dict structure
_PrismaDict = dict[str, Any]

"""

    lines = [header]

    # Include safe variable definitions (Literal types, TypeVars, etc.)
    lines.append("# Safe type definitions preserved from original types.py")
    for source in safe_variable_sources:
        lines.append(source)
    lines.append("")

    # Stub all classes and unsafe variables uniformly as dict[str, Any] aliases
    # This allows:
    # 1. Use in type annotations: x: SomeType
    # 2. Constructor calls: SomeType(...)
    # 3. Dict literal assignments: x: SomeType = {...}
    lines.append(
        "# Stubbed types (collapsed to dict[str, Any] to prevent type budget exhaustion)"
    )
    all_stubbed = sorted(classes | unsafe_variables)
    for name in all_stubbed:
        lines.append(f"{name} = _PrismaDict")

    lines.append("")

    # Stub functions
    for name in sorted(functions):
        lines.append(f"def {name}(*args: Any, **kwargs: Any) -> Any: ...")

    lines.append("")

    stub_path.write_text("\n".join(lines), encoding="utf-8")
    return (
        len(classes)
        + len(functions)
        + len(safe_variable_sources)
        + len(unsafe_variables)
    )


def main() -> None:
    """Main entry point."""
    try:
        types_path = find_prisma_types_path()
        stub_path = types_path.with_suffix(".pyi")

        print(f"Found prisma types.py at: {types_path}")
        print(f"Generating stub at: {stub_path}")

        num_symbols = generate_stub(types_path, stub_path)
        print(f"Generated {stub_path.name} with {num_symbols} Any-typed symbols")

    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
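For reference, a sketch of the kind of types.pyi this emits; the symbol names below are illustrative, not taken from a real Prisma schema:

# Safe type definitions preserved from original types.py
SortOrder = Literal["asc", "desc"]

# Stubbed types (collapsed to dict[str, Any] to prevent type budget exhaustion)
UserCreateInput = _PrismaDict
UserWhereInput = _PrismaDict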
@@ -25,9 +25,6 @@ def run(*command: str) -> None:


def lint():
    # Generate Prisma types stub before running pyright to prevent type budget exhaustion
    run("gen-prisma-stub")

    lint_step_args: list[list[str]] = [
        ["ruff", "check", *TARGET_DIRS, "--exit-zero"],
        ["ruff", "format", "--diff", "--check", LIBS_DIR],
@@ -52,6 +49,4 @@ def format():
    run("ruff", "format", LIBS_DIR)
    run("isort", "--profile", "black", BACKEND_DIR)
    run("black", BACKEND_DIR)
    # Generate Prisma types stub before running pyright to prevent type budget exhaustion
    run("gen-prisma-stub")
    run("pyright", *TARGET_DIRS)

@@ -117,7 +117,6 @@ lint = "linter:lint"
test = "run_tests:test"
load-store-agents = "test.load_store_agents:run"
export-api-schema = "backend.cli.generate_openapi_json:main"
gen-prisma-stub = "gen_prisma_types_stub:main"
oauth-tool = "backend.cli.oauth_tool:cli"

[tool.isort]
@@ -135,9 +134,6 @@ ignore_patterns = []
[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "session"
# Disable syrupy plugin to avoid conflict with pytest-snapshot
# Both provide --snapshot-update argument causing ArgumentError
addopts = "-p no:syrupy"
filterwarnings = [
    "ignore:'audioop' is deprecated:DeprecationWarning:discord.player",
    "ignore:invalid escape sequence:DeprecationWarning:tweepy.api",

@@ -1,746 +0,0 @@
#!/usr/bin/env python3
"""
Block Documentation Generator

Generates markdown documentation for all blocks from code introspection.
Preserves manually-written content between marker comments.

Usage:
    # Generate all docs
    poetry run python scripts/generate_block_docs.py

    # Check mode for CI (exits 1 if stale)
    poetry run python scripts/generate_block_docs.py --check

    # Migrate existing docs (add markers, preserve content)
    poetry run python scripts/generate_block_docs.py --migrate

    # Verbose output
    poetry run python scripts/generate_block_docs.py -v
"""

import argparse
import inspect
import logging
import re
import sys
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

# Add backend to path for imports
backend_dir = Path(__file__).parent.parent
sys.path.insert(0, str(backend_dir))

logger = logging.getLogger(__name__)

# Default output directory relative to repo root
DEFAULT_OUTPUT_DIR = (
    Path(__file__).parent.parent.parent.parent / "docs" / "platform" / "blocks"
)


@dataclass
class FieldDoc:
    """Documentation for a single input/output field."""

    name: str
    description: str
    type_str: str
    required: bool
    default: Any = None
    advanced: bool = False
    hidden: bool = False
    placeholder: str | None = None


@dataclass
class BlockDoc:
    """Documentation data extracted from a block."""

    id: str
    name: str
    class_name: str
    description: str
    categories: list[str]
    category_descriptions: dict[str, str]
    inputs: list[FieldDoc]
    outputs: list[FieldDoc]
    block_type: str
    source_file: str
    contributors: list[str] = field(default_factory=list)


# Category to human-readable name mapping
CATEGORY_DISPLAY_NAMES = {
    "AI": "AI and Language Models",
    "BASIC": "Basic Operations",
    "TEXT": "Text Processing",
    "SEARCH": "Search and Information Retrieval",
    "SOCIAL": "Social Media and Content",
    "DEVELOPER_TOOLS": "Developer Tools",
    "DATA": "Data Processing",
    "LOGIC": "Logic and Control Flow",
    "COMMUNICATION": "Communication",
    "INPUT": "Input/Output",
    "OUTPUT": "Input/Output",
    "MULTIMEDIA": "Media Generation",
    "PRODUCTIVITY": "Productivity",
    "HARDWARE": "Hardware",
    "AGENT": "Agent Integration",
    "CRM": "CRM Services",
    "SAFETY": "AI Safety",
    "ISSUE_TRACKING": "Issue Tracking",
    "MARKETING": "Marketing",
}

# Category to doc file mapping (for grouping related blocks)
CATEGORY_FILE_MAP = {
    "BASIC": "basic",
    "TEXT": "text",
    "AI": "llm",
    "SEARCH": "search",
    "DATA": "data",
    "LOGIC": "logic",
    "COMMUNICATION": "communication",
    "MULTIMEDIA": "multimedia",
    "PRODUCTIVITY": "productivity",
}


def class_name_to_display_name(class_name: str) -> str:
    """Convert BlockClassName to 'Block Class Name'."""
    # Remove 'Block' suffix
    name = class_name.replace("Block", "")
    # Insert space before capitals
    name = re.sub(r"([a-z])([A-Z])", r"\1 \2", name)
    # Handle consecutive capitals (e.g., 'HTTPRequest' -> 'HTTP Request')
    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1 \2", name)
    return name.strip()


def type_to_readable(type_schema: dict[str, Any]) -> str:
    """Convert JSON schema type to human-readable string."""
    if not isinstance(type_schema, dict):
        return str(type_schema) if type_schema else "Any"

    if "anyOf" in type_schema:
        # Union type - show options
        any_of = type_schema["anyOf"]
        if not isinstance(any_of, list):
            return "Any"
        options = []
        for opt in any_of:
            if isinstance(opt, dict) and opt.get("type") == "null":
                continue
            options.append(type_to_readable(opt))
        if len(options) == 1:
            return options[0]
        return " | ".join(options)

    if "allOf" in type_schema:
        all_of = type_schema["allOf"]
        if not isinstance(all_of, list) or not all_of:
            return "Any"
        return type_to_readable(all_of[0])

    schema_type = type_schema.get("type")

    if schema_type == "array":
        items = type_schema.get("items", {})
        item_type = type_to_readable(items)
        return f"List[{item_type}]"

    if schema_type == "object":
        if "additionalProperties" in type_schema:
            value_type = type_to_readable(type_schema["additionalProperties"])
            return f"Dict[str, {value_type}]"
        # Check if it's a specific model
        title = type_schema.get("title", "Object")
        return title

    if schema_type == "string":
        if "enum" in type_schema:
            return " | ".join(f'"{v}"' for v in type_schema["enum"][:3])
        if "format" in type_schema:
            return f"str ({type_schema['format']})"
        return "str"

    if schema_type == "integer":
        return "int"

    if schema_type == "number":
        return "float"

    if schema_type == "boolean":
        return "bool"

    if schema_type == "null":
        return "None"

    # Fallback
    return type_schema.get("title", schema_type or "Any")
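A few input/output pairs implied by the branches above; the schemas are invented for illustration:

assert type_to_readable({"type": "array", "items": {"type": "integer"}}) == "List[int]"
assert type_to_readable({"anyOf": [{"type": "string"}, {"type": "null"}]}) == "str"
assert type_to_readable({"type": "object", "additionalProperties": {"type": "number"}}) == "Dict[str, float]"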
def safe_get(d: Any, key: str, default: Any = None) -> Any:
    """Safely get a value from a dict-like object."""
    if isinstance(d, dict):
        return d.get(key, default)
    return default


def extract_block_doc(block_cls: type) -> BlockDoc:
    """Extract documentation data from a block class."""
    block = block_cls.create()

    # Get source file
    try:
        source_file = inspect.getfile(block_cls)
        # Make relative to blocks directory
        blocks_dir = Path(source_file).parent
        while blocks_dir.name != "blocks" and blocks_dir.parent != blocks_dir:
            blocks_dir = blocks_dir.parent
        source_file = str(Path(source_file).relative_to(blocks_dir.parent))
    except (TypeError, ValueError):
        source_file = "unknown"

    # Extract input fields
    input_schema = block.input_schema.jsonschema()
    input_properties = safe_get(input_schema, "properties", {})
    if not isinstance(input_properties, dict):
        input_properties = {}
    required_raw = safe_get(input_schema, "required", [])
    # Handle edge cases where required might not be a list
    if isinstance(required_raw, (list, set, tuple)):
        required_inputs = set(required_raw)
    else:
        required_inputs = set()

    inputs = []
    for field_name, field_schema in input_properties.items():
        if not isinstance(field_schema, dict):
            continue
        # Skip credentials fields in docs (they're auto-handled)
        if "credentials" in field_name.lower():
            continue

        inputs.append(
            FieldDoc(
                name=field_name,
                description=safe_get(field_schema, "description", ""),
                type_str=type_to_readable(field_schema),
                required=field_name in required_inputs,
                default=safe_get(field_schema, "default"),
                advanced=safe_get(field_schema, "advanced", False) or False,
                hidden=safe_get(field_schema, "hidden", False) or False,
                placeholder=safe_get(field_schema, "placeholder"),
            )
        )

    # Extract output fields
    output_schema = block.output_schema.jsonschema()
    output_properties = safe_get(output_schema, "properties", {})
    if not isinstance(output_properties, dict):
        output_properties = {}

    outputs = []
    for field_name, field_schema in output_properties.items():
        if not isinstance(field_schema, dict):
            continue
        outputs.append(
            FieldDoc(
                name=field_name,
                description=safe_get(field_schema, "description", ""),
                type_str=type_to_readable(field_schema),
                required=True,  # Outputs are always produced
                hidden=safe_get(field_schema, "hidden", False) or False,
            )
        )

    # Get category info (sort for deterministic ordering since it's a set)
    categories = []
    category_descriptions = {}
    for cat in sorted(block.categories, key=lambda c: c.name):
        categories.append(cat.name)
        category_descriptions[cat.name] = cat.value

    # Get contributors
    contributors = []
    for contrib in block.contributors:
        contributors.append(contrib.name if hasattr(contrib, "name") else str(contrib))

    return BlockDoc(
        id=block.id,
        name=class_name_to_display_name(block.name),
        class_name=block.name,
        description=block.description,
        categories=categories,
        category_descriptions=category_descriptions,
        inputs=inputs,
        outputs=outputs,
        block_type=block.block_type.value,
        source_file=source_file,
        contributors=contributors,
    )


def generate_anchor(name: str) -> str:
    """Generate markdown anchor from block name."""
    return name.lower().replace(" ", "-").replace("(", "").replace(")", "")


def extract_manual_content(existing_content: str) -> dict[str, str]:
    """Extract content between MANUAL markers from existing file."""
    manual_sections = {}

    # Pattern: <!-- MANUAL: section_name -->content<!-- END MANUAL -->
    pattern = r"<!-- MANUAL: (\w+) -->\s*(.*?)\s*<!-- END MANUAL -->"
    matches = re.findall(pattern, existing_content, re.DOTALL)

    for section_name, content in matches:
        manual_sections[section_name] = content.strip()

    return manual_sections
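A quick round-trip sketch of the marker format; the sample text is invented:

doc = "<!-- MANUAL: use_case -->\nUse it to summarize articles.\n<!-- END MANUAL -->"
assert extract_manual_content(doc) == {"use_case": "Use it to summarize articles."}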
def strip_markers(content: str) -> str:
    """Remove MANUAL markers from content."""
    # Remove opening markers
    content = re.sub(r"<!-- MANUAL: \w+ -->\s*", "", content)
    # Remove closing markers
    content = re.sub(r"\s*<!-- END MANUAL -->", "", content)
    return content.strip()


def extract_legacy_content(existing_content: str) -> dict[str, str]:
    """Extract content from legacy docs without markers (for migration)."""
    manual_sections = {}

    # Try to extract "How it works" section
    how_it_works_match = re.search(
        r"### How it works\s*\n(.*?)(?=\n### |\n## |\Z)", existing_content, re.DOTALL
    )
    if how_it_works_match:
        content = strip_markers(how_it_works_match.group(1).strip())
        if content and not content.startswith("|"):  # Not a table
            manual_sections["how_it_works"] = content

    # Try to extract "Possible use case" section
    use_case_match = re.search(
        r"### Possible use case\s*\n(.*?)(?=\n### |\n## |\n---|\Z)",
        existing_content,
        re.DOTALL,
    )
    if use_case_match:
        content = strip_markers(use_case_match.group(1).strip())
        if content:
            manual_sections["use_case"] = content

    return manual_sections


def generate_block_markdown(
    block: BlockDoc,
    manual_content: dict[str, str] | None = None,
    is_first_in_file: bool = True,
) -> str:
    """Generate markdown documentation for a single block."""
    manual_content = manual_content or {}
    lines = []

    # Block heading
    heading_level = "#" if is_first_in_file else "##"
    lines.append(f"{heading_level} {block.name}")
    lines.append("")

    # What it is (full description)
    lines.append("### What it is")
    lines.append(block.description or "No description available.")
    lines.append("")

    # How it works (manual section)
    lines.append("### How it works")
    how_it_works = manual_content.get(
        "how_it_works", "_Add technical explanation here._"
    )
    lines.append("<!-- MANUAL: how_it_works -->")
    lines.append(how_it_works)
    lines.append("<!-- END MANUAL -->")
    lines.append("")

    # Inputs table (auto-generated)
    visible_inputs = [f for f in block.inputs if not f.hidden]
    if visible_inputs:
        lines.append("### Inputs")
        lines.append("| Input | Description | Type | Required |")
        lines.append("|-------|-------------|------|----------|")
        for inp in visible_inputs:
            required = "Yes" if inp.required else "No"
            desc = inp.description or "-"
            # Escape pipes in description
            desc = desc.replace("|", "\\|")
            lines.append(f"| {inp.name} | {desc} | {inp.type_str} | {required} |")
        lines.append("")

    # Outputs table (auto-generated)
    visible_outputs = [f for f in block.outputs if not f.hidden]
    if visible_outputs:
        lines.append("### Outputs")
        lines.append("| Output | Description | Type |")
        lines.append("|--------|-------------|------|")
        for out in visible_outputs:
            desc = out.description or "-"
            desc = desc.replace("|", "\\|")
            lines.append(f"| {out.name} | {desc} | {out.type_str} |")
        lines.append("")

    # Possible use case (manual section)
    lines.append("### Possible use case")
    use_case = manual_content.get("use_case", "_Add practical use case examples here._")
    lines.append("<!-- MANUAL: use_case -->")
    lines.append(use_case)
    lines.append("<!-- END MANUAL -->")
    lines.append("")

    lines.append("---")
    lines.append("")

    return "\n".join(lines)
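Roughly, the skeleton each block gets; this is a sketch of the output shape implied by the appends above, not verbatim output:

md = generate_block_markdown(block, manual_content={}, is_first_in_file=True)
# Produces, in order: "# <Block Name>", "### What it is", "### How it works"
# (wrapped in MANUAL markers), optional "### Inputs"/"### Outputs" tables,
# "### Possible use case" (also wrapped in markers), then a "---" separator.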
def get_block_file_mapping(blocks: list[BlockDoc]) -> dict[str, list[BlockDoc]]:
    """
    Map blocks to their documentation files.

    Returns dict of {relative_file_path: [blocks]}
    """
    file_mapping = defaultdict(list)

    for block in blocks:
        # Determine file path based on source file or category
        source_path = Path(block.source_file)

        # If source is in a subdirectory (e.g., google/gmail.py), use that structure
        if len(source_path.parts) > 2:  # blocks/subdir/file.py
            subdir = source_path.parts[1]  # e.g., "google"
            # Use the Python filename as the md filename
            md_file = source_path.stem + ".md"  # e.g., "gmail.md"
            file_path = f"{subdir}/{md_file}"
        else:
            # Use category-based grouping for top-level blocks
            primary_category = block.categories[0] if block.categories else "BASIC"
            file_name = CATEGORY_FILE_MAP.get(primary_category, "misc")
            file_path = f"{file_name}.md"

        file_mapping[file_path].append(block)

    return dict(file_mapping)
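Illustrative routing, with invented file paths: a block under a provider subdirectory keeps that structure, while top-level blocks are grouped by primary category:

# blocks/google/gmail.py      -> "google/gmail.md"
# blocks/text_processing.py   -> "text.md" (primary category TEXT)
# unmapped primary categories -> "misc.md"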
def generate_overview_table(blocks: list[BlockDoc]) -> str:
    """Generate the overview table markdown (blocks.md)."""
    lines = []

    lines.append("# AutoGPT Blocks Overview")
    lines.append("")
    lines.append(
        'AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.'
    )
    lines.append("")
    lines.append('!!! info "Creating Your Own Blocks"')
    lines.append("    Want to create your own custom blocks? Check out our guides:")
    lines.append("    ")
    lines.append(
        "    - [Build your own Blocks](../new_blocks.md) - Step-by-step tutorial with examples"
    )
    lines.append(
        "    - [Block SDK Guide](../block-sdk-guide.md) - Advanced SDK patterns with OAuth, webhooks, and provider configuration"
    )
    lines.append("")
    lines.append(
        "Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation."
    )
    lines.append("")

    # Group blocks by category
    by_category = defaultdict(list)
    for block in blocks:
        primary_cat = block.categories[0] if block.categories else "BASIC"
        by_category[primary_cat].append(block)

    # Sort categories
    category_order = [
        "BASIC",
        "DATA",
        "TEXT",
        "AI",
        "SEARCH",
        "SOCIAL",
        "COMMUNICATION",
        "DEVELOPER_TOOLS",
        "MULTIMEDIA",
        "PRODUCTIVITY",
        "LOGIC",
        "INPUT",
        "OUTPUT",
        "AGENT",
        "CRM",
        "SAFETY",
        "ISSUE_TRACKING",
        "HARDWARE",
        "MARKETING",
    ]

    for category in category_order:
        if category not in by_category:
            continue

        cat_blocks = sorted(by_category[category], key=lambda b: b.name)
        display_name = CATEGORY_DISPLAY_NAMES.get(category, category)

        lines.append(f"## {display_name}")
        lines.append("| Block Name | Description |")
        lines.append("|------------|-------------|")

        for block in cat_blocks:
            # Determine link path
            file_mapping = get_block_file_mapping([block])
            file_path = list(file_mapping.keys())[0]
            anchor = generate_anchor(block.name)

            # Short description (first sentence)
            short_desc = (
                block.description.split(".")[0]
                if block.description
                else "No description"
            )
            short_desc = short_desc.replace("|", "\\|")

            lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")

        lines.append("")

    return "\n".join(lines)


def load_all_blocks_for_docs() -> list[BlockDoc]:
    """Load all blocks and extract documentation."""
    from backend.blocks import load_all_blocks

    block_classes = load_all_blocks()
    blocks = []

    for _block_id, block_cls in block_classes.items():
        try:
            block_doc = extract_block_doc(block_cls)
            blocks.append(block_doc)
        except Exception as e:
            logger.warning(f"Failed to extract docs for {block_cls.__name__}: {e}")

    return blocks


def write_block_docs(
    output_dir: Path,
    blocks: list[BlockDoc],
    migrate: bool = False,
    verbose: bool = False,
) -> dict[str, str]:
    """
    Write block documentation files.

    Returns dict of {file_path: content} for all generated files.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    file_mapping = get_block_file_mapping(blocks)
    generated_files = {}

    for file_path, file_blocks in file_mapping.items():
        full_path = output_dir / file_path

        # Create subdirectories if needed
        full_path.parent.mkdir(parents=True, exist_ok=True)

        # Load existing content for manual section preservation
        existing_content = ""
        if full_path.exists():
            existing_content = full_path.read_text()

        # Generate content for each block
        content_parts = []
        for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
            # Try to extract manual content
            if migrate:
                manual_content = extract_legacy_content(existing_content)
            else:
                # Extract manual content specific to this block
                # Look for content after the block heading
                block_pattern = (
                    rf"(?:^|\n)##? {re.escape(block.name)}\s*\n(.*?)(?=\n##? |\Z)"
                )
                block_match = re.search(block_pattern, existing_content, re.DOTALL)
                if block_match:
                    manual_content = extract_manual_content(block_match.group(1))
                else:
                    manual_content = {}

            content_parts.append(
                generate_block_markdown(
                    block,
                    manual_content,
                    is_first_in_file=(i == 0),
                )
            )

        full_content = "\n".join(content_parts)
        generated_files[str(file_path)] = full_content

        if verbose:
            print(f"  Writing {file_path} ({len(file_blocks)} blocks)")

        full_path.write_text(full_content)

    # Generate overview file
    overview_content = generate_overview_table(blocks)
    overview_path = output_dir / "blocks.md"
    generated_files["blocks.md"] = overview_content
    overview_path.write_text(overview_content)

    if verbose:
        print("  Writing blocks.md (overview)")

    return generated_files


def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
    """
    Check if generated docs match existing docs.

    Returns True if in sync, False otherwise.
    """
    output_dir = Path(output_dir)
    file_mapping = get_block_file_mapping(blocks)

    all_match = True

    for file_path, file_blocks in file_mapping.items():
        full_path = output_dir / file_path

        if not full_path.exists():
            print(f"MISSING: {file_path}")
            all_match = False
            continue

        existing_content = full_path.read_text()

        # Extract manual content from existing file
        manual_sections_by_block = {}
        for block in file_blocks:
            block_pattern = (
                rf"(?:^|\n)##? {re.escape(block.name)}\s*\n(.*?)(?=\n##? |\Z)"
            )
            block_match = re.search(block_pattern, existing_content, re.DOTALL)
            if block_match:
                manual_sections_by_block[block.name] = extract_manual_content(
                    block_match.group(1)
                )

        # Generate expected content
        content_parts = []
        for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
            manual_content = manual_sections_by_block.get(block.name, {})
            content_parts.append(
                generate_block_markdown(
                    block,
                    manual_content,
                    is_first_in_file=(i == 0),
                )
            )

        expected_content = "\n".join(content_parts)

        if existing_content.strip() != expected_content.strip():
            print(f"OUT OF SYNC: {file_path}")
            all_match = False

    # Check overview
    overview_path = output_dir / "blocks.md"
    if overview_path.exists():
        existing_overview = overview_path.read_text()
        expected_overview = generate_overview_table(blocks)
        if existing_overview.strip() != expected_overview.strip():
            print("OUT OF SYNC: blocks.md (overview)")
            all_match = False
    else:
        print("MISSING: blocks.md (overview)")
        all_match = False

    return all_match


def main():
    parser = argparse.ArgumentParser(
        description="Generate block documentation from code introspection"
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=DEFAULT_OUTPUT_DIR,
        help="Output directory for generated docs",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Check if docs are in sync (for CI), exit 1 if not",
    )
    parser.add_argument(
        "--migrate",
        action="store_true",
        help="Migrate existing docs (extract legacy manual content)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose output",
    )

    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="%(levelname)s: %(message)s",
    )

    print("Loading blocks...")
    blocks = load_all_blocks_for_docs()
    print(f"Found {len(blocks)} blocks")

    if args.check:
        print(f"Checking docs in {args.output_dir}...")
        in_sync = check_docs_in_sync(args.output_dir, blocks)
        if in_sync:
            print("All documentation is in sync!")
            sys.exit(0)
        else:
            print("\nDocumentation is out of sync!")
            print(
                "Run: cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
            )
            sys.exit(1)
    else:
        print(f"Generating docs to {args.output_dir}...")
        write_block_docs(
            args.output_dir,
            blocks,
            migrate=args.migrate,
            verbose=args.verbose,
        )
        print("Done!")


if __name__ == "__main__":
    main()
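A minimal sketch of driving the same check programmatically rather than via --check; the output path here is assumed:

from pathlib import Path

blocks = load_all_blocks_for_docs()
if not check_docs_in_sync(Path("docs/platform/blocks"), blocks):
    sys.exit(1)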
@@ -1,211 +0,0 @@
#!/usr/bin/env python3
"""
Migration script to preserve manual content from existing docs.

This script:
1. Reads all existing block documentation (from git HEAD)
2. Extracts manual content (How it works, Possible use case) by block name
3. Creates a JSON mapping of block_name -> manual_content
4. Generates new docs using current block structure while preserving manual content
"""

import json
import re
import subprocess
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from scripts.generate_block_docs import (
    generate_block_markdown,
    generate_overview_table,
    get_block_file_mapping,
    load_all_blocks_for_docs,
    strip_markers,
)


def get_git_file_content(file_path: str) -> str | None:
    """Get file content from git HEAD."""
    try:
        result = subprocess.run(
            ["git", "show", f"HEAD:{file_path}"],
            capture_output=True,
            text=True,
            cwd=Path(__file__).parent.parent.parent.parent,  # repo root
        )
        if result.returncode == 0:
            return result.stdout
        return None
    except Exception:
        return None
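For context, a sketch of what this wrapper amounts to; the doc path is invented for the example:

content = get_git_file_content("docs/content/platform/blocks/basic.md")
# Roughly equivalent to running, from the repo root:
#   git show HEAD:docs/content/platform/blocks/basic.md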
def extract_blocks_from_doc(content: str) -> dict[str, dict[str, str]]:
    """Extract all block sections and their manual content from a doc file."""
    blocks = {}

    # Find all block headings (# or ##)
    block_pattern = r"(?:^|\n)(##?) ([^\n]+)\n"
    matches = list(re.finditer(block_pattern, content))

    for i, match in enumerate(matches):
        block_name = match.group(2).strip()
        start = match.end()

        # Find end (next heading or end of file)
        if i + 1 < len(matches):
            end = matches[i + 1].start()
        else:
            end = len(content)

        block_content = content[start:end]

        # Extract manual sections
        manual_content = {}

        # How it works
        how_match = re.search(
            r"### How it works\s*\n(.*?)(?=\n### |\Z)", block_content, re.DOTALL
        )
        if how_match:
            text = strip_markers(how_match.group(1).strip())
            # Skip if it's just placeholder or a table
            if text and not text.startswith("|") and not text.startswith("_Add"):
                manual_content["how_it_works"] = text

        # Possible use case
        use_case_match = re.search(
            r"### Possible use case\s*\n(.*?)(?=\n### |\n## |\n---|\Z)",
            block_content,
            re.DOTALL,
        )
        if use_case_match:
            text = strip_markers(use_case_match.group(1).strip())
            if text and not text.startswith("_Add"):
                manual_content["use_case"] = text

        if manual_content:
            blocks[block_name] = manual_content

    return blocks


def collect_existing_manual_content() -> dict[str, dict[str, str]]:
    """Collect all manual content from existing git HEAD docs."""
    all_manual_content = {}

    # Find all existing md files via git
    result = subprocess.run(
        ["git", "ls-files", "docs/content/platform/blocks/"],
        capture_output=True,
        text=True,
        cwd=Path(__file__).parent.parent.parent.parent,
    )

    if result.returncode != 0:
        print("Failed to list git files")
        return {}

    for file_path in result.stdout.strip().split("\n"):
        if not file_path.endswith(".md"):
            continue
        if file_path.endswith("blocks.md"):  # Skip overview
            continue

        print(f"Processing: {file_path}")
        content = get_git_file_content(file_path)
        if content:
            blocks = extract_blocks_from_doc(content)
            for block_name, manual_content in blocks.items():
                if block_name in all_manual_content:
                    # Merge if already exists
                    all_manual_content[block_name].update(manual_content)
                else:
                    all_manual_content[block_name] = manual_content

    return all_manual_content


def run_migration():
    """Run the migration."""
    print("Step 1: Collecting existing manual content from git HEAD...")
    manual_content_cache = collect_existing_manual_content()

    print(f"\nFound manual content for {len(manual_content_cache)} blocks")

    # Show some examples
    for name, content in list(manual_content_cache.items())[:3]:
        print(f"  - {name}: {list(content.keys())}")

    # Save cache for reference
    cache_path = Path(__file__).parent / "manual_content_cache.json"
    with open(cache_path, "w") as f:
        json.dump(manual_content_cache, f, indent=2)
    print(f"\nSaved cache to {cache_path}")

    print("\nStep 2: Loading blocks from code...")
    blocks = load_all_blocks_for_docs()
    print(f"Found {len(blocks)} blocks")

    print("\nStep 3: Generating new documentation...")
    output_dir = (
        Path(__file__).parent.parent.parent.parent
        / "docs"
        / "content"
        / "platform"
        / "blocks"
    )

    file_mapping = get_block_file_mapping(blocks)

    # Track statistics
    preserved_count = 0
    missing_count = 0

    for file_path, file_blocks in file_mapping.items():
        full_path = output_dir / file_path
        full_path.parent.mkdir(parents=True, exist_ok=True)

        content_parts = []
        for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
            # Look up manual content by block name
            manual_content = manual_content_cache.get(block.name, {})

            if manual_content:
                preserved_count += 1
            else:
                # Try with class name
                manual_content = manual_content_cache.get(block.class_name, {})
                if not manual_content:
                    missing_count += 1

            content_parts.append(
                generate_block_markdown(
                    block,
                    manual_content,
                    is_first_in_file=(i == 0),
                )
            )

        full_content = "\n".join(content_parts)
        full_path.write_text(full_content)
        print(f"  Wrote {file_path} ({len(file_blocks)} blocks)")

    # Generate overview
    overview_content = generate_overview_table(blocks)
    overview_path = output_dir / "blocks.md"
    overview_path.write_text(overview_content)
    print("  Wrote blocks.md (overview)")

    print("\nMigration complete!")
    print(f"  - Blocks with preserved manual content: {preserved_count}")
    print(f"  - Blocks without manual content: {missing_count}")
    print(
        "\nYou can now run `poetry run python scripts/generate_block_docs.py --check` to verify"
    )


if __name__ == "__main__":
    run_migration()
@@ -1,233 +0,0 @@
#!/usr/bin/env python3
"""Tests for the block documentation generator."""
import pytest

from scripts.generate_block_docs import (
    class_name_to_display_name,
    extract_manual_content,
    generate_anchor,
    strip_markers,
    type_to_readable,
)


class TestClassNameToDisplayName:
    """Tests for class_name_to_display_name function."""

    def test_simple_block_name(self):
        assert class_name_to_display_name("PrintBlock") == "Print"

    def test_multi_word_block_name(self):
        assert class_name_to_display_name("GetWeatherBlock") == "Get Weather"

    def test_consecutive_capitals(self):
        assert class_name_to_display_name("HTTPRequestBlock") == "HTTP Request"

    def test_ai_prefix(self):
        assert class_name_to_display_name("AIConditionBlock") == "AI Condition"

    def test_no_block_suffix(self):
        assert class_name_to_display_name("SomeClass") == "Some Class"


class TestTypeToReadable:
    """Tests for type_to_readable function."""

    def test_string_type(self):
        assert type_to_readable({"type": "string"}) == "str"

    def test_integer_type(self):
        assert type_to_readable({"type": "integer"}) == "int"

    def test_number_type(self):
        assert type_to_readable({"type": "number"}) == "float"

    def test_boolean_type(self):
        assert type_to_readable({"type": "boolean"}) == "bool"

    def test_array_type(self):
        result = type_to_readable({"type": "array", "items": {"type": "string"}})
        assert result == "List[str]"

    def test_object_type(self):
        result = type_to_readable({"type": "object", "title": "MyModel"})
        assert result == "MyModel"

    def test_anyof_with_null(self):
        result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "null"}]})
        assert result == "str"

    def test_anyof_multiple_types(self):
        result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "integer"}]})
        assert result == "str | int"

    def test_enum_type(self):
        result = type_to_readable(
            {"type": "string", "enum": ["option1", "option2", "option3"]}
        )
        assert result == '"option1" | "option2" | "option3"'

    def test_none_input(self):
        assert type_to_readable(None) == "Any"

    def test_non_dict_input(self):
        assert type_to_readable("string") == "string"


class TestExtractManualContent:
    """Tests for extract_manual_content function."""

    def test_extract_how_it_works(self):
        content = """
### How it works
<!-- MANUAL: how_it_works -->
This is how it works.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {"how_it_works": "This is how it works."}

    def test_extract_use_case(self):
        content = """
### Possible use case
<!-- MANUAL: use_case -->
Example use case here.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {"use_case": "Example use case here."}

    def test_extract_multiple_sections(self):
        content = """
<!-- MANUAL: how_it_works -->
How it works content.
<!-- END MANUAL -->

<!-- MANUAL: use_case -->
Use case content.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {
            "how_it_works": "How it works content.",
            "use_case": "Use case content.",
        }

    def test_empty_content(self):
        result = extract_manual_content("")
        assert result == {}

    def test_no_markers(self):
        result = extract_manual_content("Some content without markers")
        assert result == {}


class TestStripMarkers:
    """Tests for strip_markers function."""

    def test_strip_opening_marker(self):
        content = "<!-- MANUAL: how_it_works -->\nContent here"
        result = strip_markers(content)
        assert result == "Content here"

    def test_strip_closing_marker(self):
        content = "Content here\n<!-- END MANUAL -->"
        result = strip_markers(content)
        assert result == "Content here"

    def test_strip_both_markers(self):
        content = "<!-- MANUAL: section -->\nContent here\n<!-- END MANUAL -->"
        result = strip_markers(content)
        assert result == "Content here"

    def test_no_markers(self):
        content = "Content without markers"
        result = strip_markers(content)
        assert result == "Content without markers"


class TestGenerateAnchor:
    """Tests for generate_anchor function."""

    def test_simple_name(self):
        assert generate_anchor("Print") == "print"

    def test_multi_word_name(self):
        assert generate_anchor("Get Weather") == "get-weather"

    def test_name_with_parentheses(self):
        assert generate_anchor("Something (Optional)") == "something-optional"

    def test_already_lowercase(self):
        assert generate_anchor("already lowercase") == "already-lowercase"


class TestIntegration:
    """Integration tests that require block loading."""

    def test_load_blocks(self):
        """Test that blocks can be loaded successfully."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import load_all_blocks_for_docs

        blocks = load_all_blocks_for_docs()
        assert len(blocks) > 0, "Should load at least one block"

    def test_block_doc_has_required_fields(self):
        """Test that extracted block docs have required fields."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import load_all_blocks_for_docs

        blocks = load_all_blocks_for_docs()
        block = blocks[0]

        assert hasattr(block, "id")
        assert hasattr(block, "name")
        assert hasattr(block, "description")
        assert hasattr(block, "categories")
        assert hasattr(block, "inputs")
        assert hasattr(block, "outputs")

    def test_file_mapping_is_deterministic(self):
        """Test that file mapping produces consistent results."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import (
            get_block_file_mapping,
            load_all_blocks_for_docs,
        )

        # Load blocks twice and compare mappings
        blocks1 = load_all_blocks_for_docs()
        blocks2 = load_all_blocks_for_docs()

        mapping1 = get_block_file_mapping(blocks1)
        mapping2 = get_block_file_mapping(blocks2)

        # Check same files are generated
        assert set(mapping1.keys()) == set(mapping2.keys())

        # Check same block counts per file
        for file_path in mapping1:
            assert len(mapping1[file_path]) == len(mapping2[file_path])


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
@@ -2,7 +2,6 @@
  "created_at": "2025-09-04T13:37:00",
  "credentials_input_schema": {
    "properties": {},
    "required": [],
    "title": "TestGraphCredentialsInputSchema",
    "type": "object"
  },

@@ -2,7 +2,6 @@
{
  "credentials_input_schema": {
    "properties": {},
    "required": [],
    "title": "TestGraphCredentialsInputSchema",
    "type": "object"
  },

@@ -4,7 +4,6 @@
  "id": "test-agent-1",
  "graph_id": "test-agent-1",
  "graph_version": 1,
  "owner_user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
  "image_url": null,
  "creator_name": "Test Creator",
  "creator_image_url": "",
@@ -42,7 +41,6 @@
  "id": "test-agent-2",
  "graph_id": "test-agent-2",
  "graph_version": 1,
  "owner_user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
  "image_url": null,
  "creator_name": "Test Creator",
  "creator_image_url": "",

@@ -1,7 +1,6 @@
{
  "submissions": [
    {
      "listing_id": "test-listing-id",
      "agent_id": "test-agent-id",
      "agent_version": 1,
      "name": "Test Agent",

@@ -37,7 +37,7 @@ services:
      context: ../
      dockerfile: autogpt_platform/backend/Dockerfile
      target: migrate
      command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
      command: ["sh", "-c", "poetry run prisma generate && poetry run prisma migrate deploy"]
    develop:
      watch:
        - path: ./

@@ -46,15 +46,14 @@
    "@radix-ui/react-scroll-area": "1.2.10",
    "@radix-ui/react-select": "2.2.6",
    "@radix-ui/react-separator": "1.1.7",
    "@radix-ui/react-slider": "1.3.6",
    "@radix-ui/react-slot": "1.2.3",
    "@radix-ui/react-switch": "1.2.6",
    "@radix-ui/react-tabs": "1.1.13",
    "@radix-ui/react-toast": "1.2.15",
    "@radix-ui/react-tooltip": "1.2.8",
    "@rjsf/core": "6.1.2",
    "@rjsf/utils": "6.1.2",
    "@rjsf/validator-ajv8": "6.1.2",
    "@rjsf/core": "5.24.13",
    "@rjsf/utils": "5.24.13",
    "@rjsf/validator-ajv8": "5.24.13",
    "@sentry/nextjs": "10.27.0",
    "@supabase/ssr": "0.7.0",
    "@supabase/supabase-js": "2.78.0",
@@ -92,6 +91,7 @@
    "react-currency-input-field": "4.0.3",
    "react-day-picker": "9.11.1",
    "react-dom": "18.3.1",
    "react-drag-drop-files": "2.4.0",
    "react-hook-form": "7.66.0",
    "react-icons": "5.5.0",
    "react-markdown": "9.0.3",

autogpt_platform/frontend/pnpm-lock.yaml: generated file, 3868 lines changed (diff suppressed because it is too large).
Two binary image files removed (before: 2.6 KiB and 16 KiB; not shown).
@@ -1,4 +1,4 @@
|
||||
import { OAuthPopupResultMessage } from "./types";
|
||||
import { OAuthPopupResultMessage } from "@/components/renderers/input-renderer/fields/CredentialField/models/OAuthCredentialModal/useOAuthCredentialModal";
|
||||
import { NextResponse } from "next/server";
|
||||
|
||||
// This route is intended to be used as the callback for integration OAuth flows,
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & (
|
||||
| {
|
||||
success: true;
|
||||
code: string;
|
||||
state: string;
|
||||
}
|
||||
| {
|
||||
success: false;
|
||||
message: string;
|
||||
}
|
||||
);
|
||||
@@ -5,7 +5,7 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { ClockIcon, PlayIcon } from "@phosphor-icons/react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
|
||||
import { FormRenderer } from "@/components/renderers/input-renderer/FormRenderer";
|
||||
import { useRunInputDialog } from "./useRunInputDialog";
|
||||
import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog";
|
||||
|
||||
@@ -66,7 +66,6 @@ export const RunInputDialog = ({
|
||||
formContext={{
|
||||
showHandles: false,
|
||||
size: "large",
|
||||
showOptionalToggle: false,
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
|
||||
import { useMemo, useState } from "react";
|
||||
import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
|
||||
import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers";
|
||||
import { isCredentialFieldSchema } from "@/components/renderers/input-renderer/fields/CredentialField/helpers";
|
||||
|
||||
export const useRunInputDialog = ({
|
||||
setIsOpen,
|
||||
@@ -66,7 +66,7 @@ export const useRunInputDialog = ({
|
||||
if (isCredentialFieldSchema(fieldSchema)) {
|
||||
dynamicUiSchema[fieldName] = {
|
||||
...dynamicUiSchema[fieldName],
|
||||
"ui:field": "custom/credential_field",
|
||||
"ui:field": "credentials",
|
||||
};
|
||||
}
|
||||
});
|
||||
@@ -76,18 +76,12 @@ export const useRunInputDialog = ({
|
||||
}, [credentialsSchema]);
|
||||
|
||||
const handleManualRun = async () => {
|
||||
// Filter out incomplete credentials (those without a valid id)
|
||||
// RJSF auto-populates const values (provider, type) but not id field
|
||||
const validCredentials = Object.fromEntries(
|
||||
Object.entries(credentialValues).filter(([_, cred]) => cred && cred.id),
|
||||
);
|
||||
|
||||
await executeGraph({
|
||||
graphId: flowID ?? "",
|
||||
graphVersion: flowVersion || null,
|
||||
data: {
|
||||
inputs: inputValues,
|
||||
credentials_inputs: validCredentials,
|
||||
credentials_inputs: credentialValues,
|
||||
source: "builder",
|
||||
},
|
||||
});
|
||||
|
||||
@@ -12,59 +12,16 @@ import {
|
||||
import { useDraftRecoveryPopup } from "./useDraftRecoveryPopup";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { AnimatePresence, motion } from "framer-motion";
|
||||
import { DraftDiff } from "@/lib/dexie/draft-utils";
|
||||
|
||||
interface DraftRecoveryPopupProps {
|
||||
isInitialLoadComplete: boolean;
|
||||
}
|
||||
|
||||
function formatDiffSummary(diff: DraftDiff | null): string {
|
||||
if (!diff) return "";
|
||||
|
||||
const parts: string[] = [];
|
||||
|
||||
// Node changes
|
||||
const nodeChanges: string[] = [];
|
||||
if (diff.nodes.added > 0) nodeChanges.push(`+${diff.nodes.added}`);
|
||||
if (diff.nodes.removed > 0) nodeChanges.push(`-${diff.nodes.removed}`);
|
||||
if (diff.nodes.modified > 0) nodeChanges.push(`~${diff.nodes.modified}`);
|
||||
|
||||
if (nodeChanges.length > 0) {
|
||||
parts.push(
|
||||
`${nodeChanges.join("/")} block${diff.nodes.added + diff.nodes.removed + diff.nodes.modified !== 1 ? "s" : ""}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Edge changes
|
||||
const edgeChanges: string[] = [];
|
||||
if (diff.edges.added > 0) edgeChanges.push(`+${diff.edges.added}`);
|
||||
if (diff.edges.removed > 0) edgeChanges.push(`-${diff.edges.removed}`);
|
||||
if (diff.edges.modified > 0) edgeChanges.push(`~${diff.edges.modified}`);
|
||||
|
||||
if (edgeChanges.length > 0) {
|
||||
parts.push(
|
||||
`${edgeChanges.join("/")} connection${diff.edges.added + diff.edges.removed + diff.edges.modified !== 1 ? "s" : ""}`,
|
||||
);
|
||||
}
|
||||
|
||||
return parts.join(", ");
|
||||
}
|
||||
|
||||
export function DraftRecoveryPopup({
|
||||
isInitialLoadComplete,
|
||||
}: DraftRecoveryPopupProps) {
|
||||
const {
|
||||
isOpen,
|
||||
popupRef,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
savedAt,
|
||||
onLoad,
|
||||
onDiscard,
|
||||
} = useDraftRecoveryPopup(isInitialLoadComplete);
|
||||
|
||||
const diffSummary = formatDiffSummary(diff);
|
||||
const { isOpen, popupRef, nodeCount, edgeCount, savedAt, onLoad, onDiscard } =
|
||||
useDraftRecoveryPopup(isInitialLoadComplete);
|
||||
|
||||
return (
|
||||
<AnimatePresence>
|
||||
@@ -115,9 +72,10 @@ export function DraftRecoveryPopup({
|
||||
variant="small"
|
||||
className="text-amber-700 dark:text-amber-400"
|
||||
>
|
||||
{diffSummary ||
|
||||
`${nodeCount} block${nodeCount !== 1 ? "s" : ""}, ${edgeCount} connection${edgeCount !== 1 ? "s" : ""}`}{" "}
|
||||
• {formatTimeAgo(new Date(savedAt).toISOString())}
|
||||
{nodeCount} block{nodeCount !== 1 ? "s" : ""}, {edgeCount}{" "}
|
||||
connection
|
||||
{edgeCount !== 1 ? "s" : ""} •{" "}
|
||||
{formatTimeAgo(new Date(savedAt).toISOString())}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ export const useDraftRecoveryPopup = (isInitialLoadComplete: boolean) => {
|
||||
savedAt,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
loadDraft: onLoad,
|
||||
discardDraft: onDiscard,
|
||||
} = useDraftManager(isInitialLoadComplete);
|
||||
@@ -55,7 +54,6 @@ export const useDraftRecoveryPopup = (isInitialLoadComplete: boolean) => {
|
||||
isOpen,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
savedAt,
|
||||
onLoad,
|
||||
onDiscard,
|
||||
|
||||
@@ -97,9 +97,6 @@ export const Flow = () => {
|
||||
onConnect={onConnect}
|
||||
onEdgesChange={onEdgesChange}
|
||||
onNodeDragStop={onNodeDragStop}
|
||||
onNodeContextMenu={(event) => {
|
||||
event.preventDefault();
|
||||
}}
|
||||
maxZoom={2}
|
||||
minZoom={0.1}
|
||||
onDragOver={onDragOver}
|
||||
|
||||
@@ -48,6 +48,8 @@ export const resolveCollisions: CollisionAlgorithm = (
|
||||
const width = (node.width ?? node.measured?.width ?? 0) + margin * 2;
|
||||
const height = (node.height ?? node.measured?.height ?? 0) + margin * 2;
|
||||
|
||||
console.log("width", width);
|
||||
console.log("height", height);
|
||||
const x = node.position.x - margin;
|
||||
const y = node.position.y - margin;
|
||||
|
||||
|
||||
@@ -7,12 +7,7 @@ import {
DraftData,
} from "@/services/builder-draft/draft-service";
import { BuilderDraft } from "@/lib/dexie/db";
import {
cleanNodes,
cleanEdges,
calculateDraftDiff,
DraftDiff,
} from "@/lib/dexie/draft-utils";
import { cleanNodes, cleanEdges } from "@/lib/dexie/draft-utils";
import { useNodeStore } from "../../../stores/nodeStore";
import { useEdgeStore } from "../../../stores/edgeStore";
import { useGraphStore } from "../../../stores/graphStore";
@@ -24,7 +19,6 @@ const AUTO_SAVE_INTERVAL_MS = 15000; // 15 seconds
interface DraftRecoveryState {
isOpen: boolean;
draft: BuilderDraft | null;
diff: DraftDiff | null;
}

/**
@@ -37,7 +31,6 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
const [state, setState] = useState<DraftRecoveryState>({
isOpen: false,
draft: null,
diff: null,
});

const [{ flowID, flowVersion }] = useQueryStates({
@@ -214,16 +207,9 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
);

if (isDifferent && (draft.nodes.length > 0 || draft.edges.length > 0)) {
const diff = calculateDraftDiff(
draft.nodes,
draft.edges,
currentNodes,
currentEdges,
);
setState({
isOpen: true,
draft,
diff,
});
} else {
await draftService.deleteDraft(effectiveFlowId);
@@ -245,7 +231,6 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
setState({
isOpen: false,
draft: null,
diff: null,
});
}, [flowID]);

@@ -257,10 +242,8 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
try {
useNodeStore.getState().setNodes(draft.nodes);
useEdgeStore.getState().setEdges(draft.edges);
draft.nodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});

// Restore nodeCounter to prevent ID conflicts when adding new nodes
if (draft.nodeCounter !== undefined) {
useNodeStore.setState({ nodeCounter: draft.nodeCounter });
}
@@ -284,7 +267,6 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
setState({
isOpen: false,
draft: null,
diff: null,
});
} catch (error) {
console.error("[DraftRecovery] Failed to load draft:", error);
@@ -293,7 +275,7 @@ export function useDraftManager(isInitialLoadComplete: boolean) {

const discardDraft = useCallback(async () => {
if (!state.draft) {
setState({ isOpen: false, draft: null, diff: null });
setState({ isOpen: false, draft: null });
return;
}

@@ -303,7 +285,7 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
console.error("[DraftRecovery] Failed to discard draft:", error);
}

setState({ isOpen: false, draft: null, diff: null });
setState({ isOpen: false, draft: null });
}, [state.draft]);

return {
@@ -312,7 +294,6 @@ export function useDraftManager(isInitialLoadComplete: boolean) {
savedAt: state.draft?.savedAt ?? 0,
nodeCount: state.draft?.nodes.length ?? 0,
edgeCount: state.draft?.edges.length ?? 0,
diff: state.diff,
loadDraft,
discardDraft,
};

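// A minimal consumer sketch for the useDraftManager change above (the
// component and markup are illustrative assumptions, not part of the diff):
// after this change the hook no longer exposes `diff`, so a recovery prompt
// only needs the draft metadata that is still returned.
function DraftRecoveryPrompt({ isInitialLoadComplete }: { isInitialLoadComplete: boolean }) {
  const { savedAt, nodeCount, edgeCount, loadDraft, discardDraft } =
    useDraftManager(isInitialLoadComplete);

  return (
    <div>
      <span>
        Draft saved {new Date(savedAt).toLocaleString()} ({nodeCount} nodes, {edgeCount} edges)
      </span>
      <button onClick={loadDraft}>Restore draft</button>
      <button onClick={discardDraft}>Discard</button>
    </div>
  );
}
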
@@ -121,14 +121,6 @@ export const useFlow = () => {
if (customNodes.length > 0) {
useNodeStore.getState().setNodes([]);
addNodes(customNodes);

// Sync hardcoded values with handle IDs.
// If a key–value field has a key without a value, the backend omits it from hardcoded values.
// But if a handleId exists for that key, it causes inconsistency.
// This ensures hardcoded values stay in sync with handle IDs.
customNodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});
}
}, [customNodes, addNodes]);

@@ -1,17 +1,12 @@
import {
Connection as RFConnection,
EdgeChange,
applyEdgeChanges,
} from "@xyflow/react";
import { Connection as RFConnection, EdgeChange } from "@xyflow/react";
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { useCallback } from "react";
import { useNodeStore } from "../../../stores/nodeStore";
import { CustomEdge } from "./CustomEdge";

export const useCustomEdge = () => {
const edges = useEdgeStore((s) => s.edges);
const addEdge = useEdgeStore((s) => s.addEdge);
const setEdges = useEdgeStore((s) => s.setEdges);
const removeEdge = useEdgeStore((s) => s.removeEdge);

const onConnect = useCallback(
(conn: RFConnection) => {
@@ -50,10 +45,14 @@ export const useCustomEdge = () => {
);

const onEdgesChange = useCallback(
(changes: EdgeChange<CustomEdge>[]) => {
setEdges(applyEdgeChanges(changes, edges));
(changes: EdgeChange[]) => {
changes.forEach((change) => {
if (change.type === "remove") {
removeEdge(change.id);
}
});
},
[edges, setEdges],
[removeEdge],
);

return { edges, onConnect, onEdgesChange };

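// A behavioral sketch of the onEdgesChange rewrite above (edge ids are
// illustrative): the new handler reacts only to "remove" changes and ignores
// other EdgeChange variants such as selection updates, instead of applying
// every change via applyEdgeChanges as before.
import type { EdgeChange } from "@xyflow/react";

const changes: EdgeChange[] = [
  { type: "remove", id: "edge-1" },
  { type: "select", id: "edge-2", selected: true },
];
// onEdgesChange(changes) calls removeEdge("edge-1") once; "edge-2" is left as-is.
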
@@ -1,32 +1,26 @@
import { CircleIcon } from "@phosphor-icons/react";
import { Handle, Position } from "@xyflow/react";
import { useEdgeStore } from "../../../stores/edgeStore";
import { cleanUpHandleId } from "@/components/renderers/InputRenderer/helpers";
import { cn } from "@/lib/utils";

const InputNodeHandle = ({
const NodeHandle = ({
handleId,
nodeId,
isConnected,
side,
}: {
handleId: string;
nodeId: string;
isConnected: boolean;
side: "left" | "right";
}) => {
const cleanedHandleId = cleanUpHandleId(handleId);
const isInputConnected = useEdgeStore((state) =>
state.isInputConnected(nodeId ?? "", cleanedHandleId),
);

return (
<Handle
type={"target"}
position={Position.Left}
id={cleanedHandleId}
className={"-ml-6 mr-2"}
type={side === "left" ? "target" : "source"}
position={side === "left" ? Position.Left : Position.Right}
id={handleId}
className={side === "left" ? "-ml-4 mr-2" : "-mr-2 ml-2"}
>
<div className="pointer-events-none">
<CircleIcon
size={16}
weight={isInputConnected ? "fill" : "duotone"}
weight={isConnected ? "fill" : "duotone"}
className={"text-gray-400 opacity-100"}
/>
</div>
@@ -34,35 +28,4 @@ const InputNodeHandle = ({
);
};

const OutputNodeHandle = ({
field_name,
nodeId,
hexColor,
}: {
field_name: string;
nodeId: string;
hexColor: string;
}) => {
const isOutputConnected = useEdgeStore((state) =>
state.isOutputConnected(nodeId, field_name),
);
return (
<Handle
type={"source"}
position={Position.Right}
id={field_name}
className={"-mr-2 ml-2"}
>
<div className="pointer-events-none">
<CircleIcon
size={16}
weight={"duotone"}
color={isOutputConnected ? hexColor : "gray"}
className={cn("text-gray-400 opacity-100")}
/>
</div>
</Handle>
);
};

export { InputNodeHandle, OutputNodeHandle };
export default NodeHandle;

@@ -1,4 +1,31 @@
// Here we are handling single level of nesting, if need more in future then i will update it
/**
* Handle ID Types for different input structures
*
* Examples:
* SIMPLE: "message"
* NESTED: "config.api_key"
* ARRAY: "items_$_0", "items_$_1"
* KEY_VALUE: "headers_#_Authorization", "params_#_limit"
*
* Note: All handle IDs are sanitized to remove spaces and special characters.
* Spaces become underscores, and special characters are removed.
* Example: "user name" becomes "user_name", "email@domain.com" becomes "emaildomaincom"
*/
export enum HandleIdType {
SIMPLE = "SIMPLE",
NESTED = "NESTED",
ARRAY = "ARRAY",
KEY_VALUE = "KEY_VALUE",
}

const fromRjsfId = (id: string): string => {
if (!id) return "";
const parts = id.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
return filtered.join("_") || "";
};

const sanitizeForHandleId = (str: string): string => {
if (!str) return "";
@@ -11,53 +38,51 @@ const sanitizeForHandleId = (str: string): string => {
.replace(/^_|_$/g, ""); // Remove leading/trailing underscores
};

const cleanTitleId = (id: string): string => {
if (!id) return "";

if (id.endsWith("_title")) {
id = id.slice(0, -6);
}
const parts = id.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
const filtered_id = filtered.join("_") || "";
return filtered_id;
};

export const generateHandleIdFromTitleId = (
export const generateHandleId = (
fieldKey: string,
{
isObjectProperty,
isAdditionalProperty,
isArrayItem,
}: {
isArrayItem?: boolean;
isObjectProperty?: boolean;
isAdditionalProperty?: boolean;
} = {
isArrayItem: false,
isObjectProperty: false,
isAdditionalProperty: false,
},
nestedValues: string[] = [],
type: HandleIdType = HandleIdType.SIMPLE,
): string => {
if (!fieldKey) return "";

const filteredKey = cleanTitleId(fieldKey);
if (isAdditionalProperty || isArrayItem) {
return filteredKey;
}
const cleanedKey = sanitizeForHandleId(filteredKey);
fieldKey = fromRjsfId(fieldKey);
fieldKey = sanitizeForHandleId(fieldKey);

if (isObjectProperty) {
// "config_api_key" -> "config.api_key"
const parts = cleanedKey.split("_");
if (parts.length >= 2) {
const baseName = parts[0];
const propertyName = parts.slice(1).join("_");
return `${baseName}.${propertyName}`;
}
if (type === HandleIdType.SIMPLE || nestedValues.length === 0) {
return fieldKey;
}

return cleanedKey;
const sanitizedNestedValues = nestedValues.map((value) =>
sanitizeForHandleId(value),
);

switch (type) {
case HandleIdType.NESTED:
return [fieldKey, ...sanitizedNestedValues].join(".");

case HandleIdType.ARRAY:
return [fieldKey, ...sanitizedNestedValues].join("_$_");

case HandleIdType.KEY_VALUE:
return [fieldKey, ...sanitizedNestedValues].join("_#_");

default:
return fieldKey;
}
};

export const parseKeyValueHandleId = (
handleId: string,
type: HandleIdType,
): string => {
if (type === HandleIdType.KEY_VALUE) {
return handleId.split("_#_")[1];
} else if (type === HandleIdType.ARRAY) {
return handleId.split("_$_")[1];
} else if (type === HandleIdType.NESTED) {
return handleId.split(".")[1];
} else if (type === HandleIdType.SIMPLE) {
return handleId.split("_")[1];
}
return "";
};

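// A usage sketch for the helpers above (field keys are illustrative, and it
// assumes the options-object parameter is retained with its defaults); the
// expected outputs follow the HandleIdType doc comment:
generateHandleId("message"); // "message" (SIMPLE)
generateHandleId("config", {}, ["api_key"], HandleIdType.NESTED); // "config.api_key"
generateHandleId("items", {}, ["0"], HandleIdType.ARRAY); // "items_$_0"
generateHandleId("headers", {}, ["Authorization"], HandleIdType.KEY_VALUE); // "headers_#_Authorization"
parseKeyValueHandleId("headers_#_Authorization", HandleIdType.KEY_VALUE); // "Authorization"
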
@@ -1,25 +1,24 @@
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
import { BlockCost } from "@/app/api/__generated__/models/blockCost";
import { BlockInfoCategoriesItem } from "@/app/api/__generated__/models/blockInfoCategoriesItem";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { NodeModelMetadata } from "@/app/api/__generated__/models/nodeModelMetadata";
import { preprocessInputSchema } from "@/components/renderers/InputRenderer/utils/input-schema-pre-processor";
import { cn } from "@/lib/utils";
import { RJSFSchema } from "@rjsf/utils";
import { NodeProps, Node as XYNode } from "@xyflow/react";
import React from "react";
import { Node as XYNode, NodeProps } from "@xyflow/react";
import { RJSFSchema } from "@rjsf/utils";
import { BlockUIType } from "../../../types";
import { FormCreator } from "../FormCreator";
import { OutputHandler } from "../OutputHandler";
import { AyrshareConnectButton } from "./components/AyrshareConnectButton";
import { NodeAdvancedToggle } from "./components/NodeAdvancedToggle";
import { NodeContainer } from "./components/NodeContainer";
import { NodeExecutionBadge } from "./components/NodeExecutionBadge";
import { NodeHeader } from "./components/NodeHeader";
import { NodeDataRenderer } from "./components/NodeOutput/NodeOutput";
import { NodeRightClickMenu } from "./components/NodeRightClickMenu";
import { StickyNoteBlock } from "./components/StickyNoteBlock";
import { BlockInfoCategoriesItem } from "@/app/api/__generated__/models/blockInfoCategoriesItem";
import { BlockCost } from "@/app/api/__generated__/models/blockCost";
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { NodeContainer } from "./components/NodeContainer";
import { NodeHeader } from "./components/NodeHeader";
import { FormCreator } from "../FormCreator";
import { preprocessInputSchema } from "@/components/renderers/input-renderer/utils/input-schema-pre-processor";
import { OutputHandler } from "../OutputHandler";
import { NodeAdvancedToggle } from "./components/NodeAdvancedToggle";
import { NodeDataRenderer } from "./components/NodeOutput/NodeOutput";
import { NodeExecutionBadge } from "./components/NodeExecutionBadge";
import { cn } from "@/lib/utils";
import { WebhookDisclaimer } from "./components/WebhookDisclaimer";
import { AyrshareConnectButton } from "./components/AyrshareConnectButton";
import { NodeModelMetadata } from "@/app/api/__generated__/models/nodeModelMetadata";

export type CustomNodeData = {
hardcodedValues: {
@@ -89,7 +88,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(

// Currently all blockTypes share a similar design, so the same component is used for all of them
// If some blockTypes ever need a drastically different design, we can create separate components for them
const node = (
return (
<NodeContainer selected={selected} nodeId={nodeId} hasErrors={hasErrors}>
<div className="rounded-xlarge bg-white">
<NodeHeader data={data} nodeId={nodeId} />
@@ -100,7 +99,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
nodeId={nodeId}
uiType={data.uiType}
className={cn(
"bg-white px-4",
"bg-white pr-6",
isWebhook && "pointer-events-none opacity-50",
)}
showHandles={showHandles}
@@ -118,15 +117,6 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
<NodeExecutionBadge nodeId={nodeId} />
</NodeContainer>
);

return (
<NodeRightClickMenu
nodeId={nodeId}
subGraphID={data.hardcodedValues?.graph_id}
>
{node}
</NodeRightClickMenu>
);
},
);

@@ -8,7 +8,7 @@ export const NodeAdvancedToggle = ({ nodeId }: { nodeId: string }) => {
);
const setShowAdvanced = useNodeStore((state) => state.setShowAdvanced);
return (
<div className="flex items-center justify-between gap-2 rounded-b-xlarge border-t border-zinc-200 bg-white px-5 py-3.5">
<div className="flex items-center justify-between gap-2 rounded-b-xlarge border-t border-slate-200/50 bg-white px-5 py-3.5">
<Text variant="body" className="font-medium text-slate-700">
Advanced
</Text>

@@ -22,7 +22,7 @@ export const NodeContainer = ({
return (
<div
className={cn(
"z-12 w-[350px] rounded-xlarge ring-1 ring-slate-200/60",
"z-12 max-w-[370px] rounded-xlarge ring-1 ring-slate-200/60",
selected && "shadow-lg ring-2 ring-slate-200",
status && nodeStyleBasedOnStatus[status],
hasErrors ? nodeStyleBasedOnStatus[AgentExecutionStatus.FAILED] : "",

@@ -1,31 +1,26 @@
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { Separator } from "@/components/__legacy__/ui/separator";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/molecules/DropdownMenu/DropdownMenu";
import {
SecondaryDropdownMenuContent,
SecondaryDropdownMenuItem,
SecondaryDropdownMenuSeparator,
} from "@/components/molecules/SecondaryMenu/SecondaryMenu";
import {
ArrowSquareOutIcon,
CopyIcon,
DotsThreeOutlineVerticalIcon,
TrashIcon,
} from "@phosphor-icons/react";
import { DotsThreeOutlineVerticalIcon } from "@phosphor-icons/react";
import { Copy, Trash2, ExternalLink } from "lucide-react";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import { useReactFlow } from "@xyflow/react";

type Props = {
export const NodeContextMenu = ({
nodeId,
subGraphID,
}: {
nodeId: string;
subGraphID?: string;
};

export const NodeContextMenu = ({ nodeId, subGraphID }: Props) => {
}) => {
const { deleteElements } = useReactFlow();

function handleCopy() {
const handleCopy = () => {
useNodeStore.setState((state) => ({
nodes: state.nodes.map((node) => ({
...node,
@@ -35,47 +30,47 @@ export const NodeContextMenu = ({ nodeId, subGraphID }: Props) => {

useCopyPasteStore.getState().copySelectedNodes();
useCopyPasteStore.getState().pasteNodes();
}
};

function handleDelete() {
const handleDelete = () => {
deleteElements({ nodes: [{ id: nodeId }] });
}
};

return (
<DropdownMenu>
<DropdownMenuTrigger className="py-2">
<DotsThreeOutlineVerticalIcon size={16} weight="fill" />
</DropdownMenuTrigger>
<SecondaryDropdownMenuContent side="right" align="start">
<SecondaryDropdownMenuItem onClick={handleCopy}>
<CopyIcon size={20} className="mr-2 dark:text-gray-100" />
<span className="dark:text-gray-100">Copy</span>
</SecondaryDropdownMenuItem>
<SecondaryDropdownMenuSeparator />
<DropdownMenuContent
side="right"
align="start"
className="rounded-xlarge"
>
<DropdownMenuItem onClick={handleCopy} className="hover:rounded-xlarge">
<Copy className="mr-2 h-4 w-4" />
Copy Node
</DropdownMenuItem>

{subGraphID && (
<>
<SecondaryDropdownMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
>
<ArrowSquareOutIcon
size={20}
className="mr-2 dark:text-gray-100"
/>
<span className="dark:text-gray-100">Open agent</span>
</SecondaryDropdownMenuItem>
<SecondaryDropdownMenuSeparator />
</>
<DropdownMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
className="hover:rounded-xlarge"
>
<ExternalLink className="mr-2 h-4 w-4" />
Open Agent
</DropdownMenuItem>
)}

<SecondaryDropdownMenuItem variant="destructive" onClick={handleDelete}>
<TrashIcon
size={20}
className="mr-2 text-red-500 dark:text-red-400"
/>
<span className="dark:text-red-400">Delete</span>
</SecondaryDropdownMenuItem>
</SecondaryDropdownMenuContent>
<Separator className="my-2" />

<DropdownMenuItem
onClick={handleDelete}
className="text-red-600 hover:rounded-xlarge"
>
<Trash2 className="mr-2 h-4 w-4" />
Delete
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
);
};

@@ -1,30 +1,29 @@
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { Text } from "@/components/atoms/Text/Text";
import { beautifyString, cn } from "@/lib/utils";
import { NodeCost } from "./NodeCost";
import { NodeBadges } from "./NodeBadges";
import { NodeContextMenu } from "./NodeContextMenu";
import { CustomNodeData } from "../CustomNode";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useState } from "react";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { beautifyString, cn } from "@/lib/utils";
import { useState } from "react";
import { CustomNodeData } from "../CustomNode";
import { NodeBadges } from "./NodeBadges";
import { NodeContextMenu } from "./NodeContextMenu";
import { NodeCost } from "./NodeCost";

type Props = {
export const NodeHeader = ({
data,
nodeId,
}: {
data: CustomNodeData;
nodeId: string;
};

export const NodeHeader = ({ data, nodeId }: Props) => {
}) => {
const updateNodeData = useNodeStore((state) => state.updateNodeData);
const title = (data.metadata?.customized_name as string) || data.title;
const [isEditingTitle, setIsEditingTitle] = useState(false);
const [editedTitle, setEditedTitle] = useState(
beautifyString(title).replace("Block", "").trim(),
);
const [editedTitle, setEditedTitle] = useState(title);

const handleTitleEdit = () => {
updateNodeData(nodeId, {
@@ -42,7 +41,7 @@ export const NodeHeader = ({ data, nodeId }: Props) => {
};

return (
<div className="flex h-auto flex-col gap-1 rounded-xlarge border-b border-zinc-200 bg-gradient-to-r from-slate-50/80 to-white/90 px-4 py-4 pt-3">
<div className="flex h-auto flex-col gap-1 rounded-xlarge border-b border-slate-200/50 bg-gradient-to-r from-slate-50/80 to-white/90 px-4 py-4 pt-3">
{/* Title row with context menu */}
<div className="flex items-start justify-between gap-2">
<div className="flex min-w-0 flex-1 items-center gap-2">
@@ -69,12 +68,12 @@ export const NodeHeader = ({ data, nodeId }: Props) => {
<TooltipTrigger asChild>
<div>
<Text variant="large-semibold" className="line-clamp-1">
{beautifyString(title).replace("Block", "").trim()}
{beautifyString(title)}
</Text>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{beautifyString(title).replace("Block", "").trim()}</p>
<p>{beautifyString(title)}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>

@@ -23,7 +23,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
}

return (
<div className="flex flex-col gap-3 rounded-b-xl border-t border-zinc-200 px-4 py-4">
<div className="flex flex-col gap-3 rounded-b-xl border-t border-slate-200/50 px-4 py-4">
<div className="flex items-center justify-between">
<Text variant="body-medium" className="!font-semibold text-slate-700">
Node Output

@@ -151,7 +151,7 @@ export const NodeDataViewer: FC<NodeDataViewerProps> = ({
</div>

<div className="flex justify-end pt-4">
{outputItems.length > 1 && (
{outputItems.length > 0 && (
<OutputActions
items={outputItems.map((item) => ({
value: item.value,

@@ -1,104 +0,0 @@
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
SecondaryMenuContent,
SecondaryMenuItem,
SecondaryMenuSeparator,
} from "@/components/molecules/SecondaryMenu/SecondaryMenu";
import { ArrowSquareOutIcon, CopyIcon, TrashIcon } from "@phosphor-icons/react";
import * as ContextMenu from "@radix-ui/react-context-menu";
import { useReactFlow } from "@xyflow/react";
import { useEffect, useRef } from "react";
import { CustomNode } from "../CustomNode";

type Props = {
nodeId: string;
subGraphID?: string;
children: React.ReactNode;
};

const DOUBLE_CLICK_TIMEOUT = 300;

export function NodeRightClickMenu({ nodeId, subGraphID, children }: Props) {
const { deleteElements } = useReactFlow<CustomNode>();
const lastRightClickTime = useRef<number>(0);
const containerRef = useRef<HTMLDivElement>(null);

function copyNode() {
useNodeStore.setState((state) => ({
nodes: state.nodes.map((node) => ({
...node,
selected: node.id === nodeId,
})),
}));

useCopyPasteStore.getState().copySelectedNodes();
useCopyPasteStore.getState().pasteNodes();
}

function deleteNode() {
deleteElements({ nodes: [{ id: nodeId }] });
}

useEffect(() => {
const container = containerRef.current;
if (!container) return;

function handleContextMenu(e: MouseEvent) {
const now = Date.now();
const timeSinceLastClick = now - lastRightClickTime.current;

if (timeSinceLastClick < DOUBLE_CLICK_TIMEOUT) {
e.stopImmediatePropagation();
lastRightClickTime.current = 0;
return;
}

lastRightClickTime.current = now;
}

container.addEventListener("contextmenu", handleContextMenu, true);

return () => {
container.removeEventListener("contextmenu", handleContextMenu, true);
};
}, []);

return (
<ContextMenu.Root>
<ContextMenu.Trigger asChild>
<div ref={containerRef}>{children}</div>
</ContextMenu.Trigger>
<SecondaryMenuContent>
<SecondaryMenuItem onSelect={copyNode}>
<CopyIcon size={20} className="mr-2 dark:text-gray-100" />
<span className="dark:text-gray-100">Copy</span>
</SecondaryMenuItem>
<SecondaryMenuSeparator />

{subGraphID && (
<>
<SecondaryMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
>
<ArrowSquareOutIcon
size={20}
className="mr-2 dark:text-gray-100"
/>
<span className="dark:text-gray-100">Open agent</span>
</SecondaryMenuItem>
<SecondaryMenuSeparator />
</>
)}

<SecondaryMenuItem variant="destructive" onSelect={deleteNode}>
<TrashIcon
size={20}
className="mr-2 text-red-500 dark:text-red-400"
/>
<span className="dark:text-red-400">Delete</span>
</SecondaryMenuItem>
</SecondaryMenuContent>
</ContextMenu.Root>
);
}
@@ -1,6 +1,6 @@
import { useMemo } from "react";
import { FormCreator } from "../../FormCreator";
import { preprocessInputSchema } from "@/components/renderers/InputRenderer/utils/input-schema-pre-processor";
import { preprocessInputSchema } from "@/components/renderers/input-renderer/utils/input-schema-pre-processor";
import { CustomNodeData } from "../CustomNode";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";

@@ -3,7 +3,7 @@ import React from "react";
import { uiSchema } from "./uiSchema";
import { useNodeStore } from "../../../stores/nodeStore";
import { BlockUIType } from "../../types";
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
import { FormRenderer } from "@/components/renderers/input-renderer/FormRenderer";

export const FormCreator = React.memo(
({

@@ -4,7 +4,7 @@ import { CaretDownIcon, InfoIcon } from "@phosphor-icons/react";
import { RJSFSchema } from "@rjsf/utils";
import { useState } from "react";

import { OutputNodeHandle } from "../handlers/NodeHandle";
import NodeHandle from "../handlers/NodeHandle";
import {
Tooltip,
TooltipContent,
@@ -13,6 +13,7 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { getTypeDisplayInfo } from "./helpers";
import { generateHandleId } from "../handlers/helpers";
import { BlockUIType } from "../../types";

export const OutputHandler = ({
@@ -28,73 +29,8 @@ export const OutputHandler = ({
const properties = outputSchema?.properties || {};
const [isOutputVisible, setIsOutputVisible] = useState(true);

const showHandles = uiType !== BlockUIType.OUTPUT;

const renderOutputHandles = (
schema: RJSFSchema,
keyPrefix: string = "",
titlePrefix: string = "",
): React.ReactNode[] => {
return Object.entries(schema).map(
([key, fieldSchema]: [string, RJSFSchema]) => {
const fullKey = keyPrefix ? `${keyPrefix}_#_${key}` : key;
const fieldTitle = titlePrefix + (fieldSchema?.title || key);

const isConnected = isOutputConnected(nodeId, fullKey);
const shouldShow = isConnected || isOutputVisible;
const { displayType, colorClass, hexColor } =
getTypeDisplayInfo(fieldSchema);

return shouldShow ? (
<div key={fullKey} className="flex flex-col items-end gap-2">
<div className="relative flex items-center gap-2">
{fieldSchema?.description && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<span
style={{ marginLeft: 6, cursor: "pointer" }}
aria-label="info"
tabIndex={0}
>
<InfoIcon />
</span>
</TooltipTrigger>
<TooltipContent>{fieldSchema?.description}</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
<Text variant="body" className="text-slate-700">
{fieldTitle}
</Text>
<Text variant="small" as="span" className={colorClass}>
({displayType})
</Text>

{showHandles && (
<OutputNodeHandle
field_name={fullKey}
nodeId={nodeId}
hexColor={hexColor}
/>
)}
</div>

{/* Recursively render nested properties */}
{fieldSchema?.properties &&
renderOutputHandles(
fieldSchema.properties,
fullKey,
`${fieldTitle}.`,
)}
</div>
) : null;
},
);
};

return (
<div className="flex flex-col items-end justify-between gap-2 rounded-b-xlarge border-t border-zinc-200 bg-white py-3.5">
<div className="flex flex-col items-end justify-between gap-2 rounded-b-xlarge border-t border-slate-200/50 bg-white py-3.5">
<Button
variant="ghost"
className="mr-4 h-fit min-w-0 p-0 hover:border-transparent hover:bg-transparent"
@@ -113,9 +49,50 @@ export const OutputHandler = ({
</Text>
</Button>

<div className="flex flex-col items-end gap-2">
{renderOutputHandles(properties)}
</div>
{
<div className="flex flex-col items-end gap-2">
{Object.entries(properties).map(([key, property]: [string, any]) => {
const isConnected = isOutputConnected(nodeId, key);
const shouldShow = isConnected || isOutputVisible;
const { displayType, colorClass } = getTypeDisplayInfo(property);

return shouldShow ? (
<div key={key} className="relative flex items-center gap-2">
{property?.description && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<span
style={{ marginLeft: 6, cursor: "pointer" }}
aria-label="info"
tabIndex={0}
>
<InfoIcon />
</span>
</TooltipTrigger>
<TooltipContent>{property?.description}</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
<Text variant="body" className="text-slate-700">
{property?.title || key}{" "}
</Text>
<Text variant="small" as="span" className={colorClass}>
({displayType})
</Text>

<NodeHandle
handleId={
uiType === BlockUIType.AGENT ? key : generateHandleId(key)
}
isConnected={isConnected}
side="right"
/>
</div>
) : null;
})}
</div>
}
</div>
);
};

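// A sketch of the handle-id selection in the render loop above (the key value
// is illustrative): AGENT blocks keep the raw schema key, while other blocks
// run it through generateHandleId, which sanitizes spaces per its doc comment.
const key = "result text";
const agentHandleId = key; // uiType === BlockUIType.AGENT -> "result text"
const defaultHandleId = generateHandleId(key); // other block types -> "result_text"
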
@@ -92,38 +92,14 @@ export const getTypeDisplayInfo = (schema: any) => {
if (schema?.type === "string" && schema?.format) {
const formatMap: Record<
string,
{ displayType: string; colorClass: string; hexColor: string }
{ displayType: string; colorClass: string }
> = {
file: {
displayType: "file",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
date: {
displayType: "date",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
time: {
displayType: "time",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
"date-time": {
displayType: "datetime",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
"long-text": {
displayType: "text",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
"short-text": {
displayType: "text",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
file: { displayType: "file", colorClass: "!text-green-500" },
date: { displayType: "date", colorClass: "!text-blue-500" },
time: { displayType: "time", colorClass: "!text-blue-500" },
"date-time": { displayType: "datetime", colorClass: "!text-blue-500" },
"long-text": { displayType: "text", colorClass: "!text-green-500" },
"short-text": { displayType: "text", colorClass: "!text-green-500" },
};

const formatInfo = formatMap[schema.format];
@@ -155,23 +131,10 @@ export const getTypeDisplayInfo = (schema: any) => {
any: "!text-gray-500",
};

const hexColorMap: Record<string, string> = {
string: "#22c55e",
number: "#3b82f6",
integer: "#3b82f6",
boolean: "#eab308",
object: "#a855f7",
array: "#6366f1",
null: "#6b7280",
any: "#6b7280",
};

const colorClass = colorMap[schema?.type] || "!text-gray-500";
const hexColor = hexColorMap[schema?.type] || "#6b7280";

return {
displayType,
colorClass,
hexColor,
};
};

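// A quick sketch of getTypeDisplayInfo after this change (the boolean entry
// is assumed from the elided colorMap): the helper now returns only
// displayType and colorClass, since hexColor and hexColorMap are removed.
getTypeDisplayInfo({ type: "string", format: "date-time" });
// -> { displayType: "datetime", colorClass: "!text-blue-500" }
getTypeDisplayInfo({ type: "boolean" });
// -> e.g. { displayType: "boolean", colorClass: "!text-yellow-500" }
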
@@ -1,6 +1,6 @@
export const uiSchema = {
credentials: {
"ui:field": "custom/credential_field",
"ui:field": "credentials",
provider: { "ui:widget": "hidden" },
type: { "ui:widget": "hidden" },
id: { "ui:autofocus": true },

@@ -1,57 +0,0 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { FilterChip } from "../FilterChip";
import { categories } from "./constants";
import { FilterSheet } from "../FilterSheet/FilterSheet";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const BlockMenuFilters = () => {
const {
filters,
addFilter,
removeFilter,
categoryCounts,
creators,
addCreator,
removeCreator,
} = useBlockMenuStore();

const handleFilterClick = (filter: GetV2BuilderSearchFilterAnyOfItem) => {
if (filters.includes(filter)) {
removeFilter(filter);
} else {
addFilter(filter);
}
};

const handleCreatorClick = (creator: string) => {
if (creators.includes(creator)) {
removeCreator(creator);
} else {
addCreator(creator);
}
};

return (
<div className="flex flex-wrap gap-2">
<FilterSheet categories={categories} />
{creators.length > 0 &&
creators.map((creator) => (
<FilterChip
key={creator}
name={"Created by " + creator.slice(0, 10) + "..."}
selected={creators.includes(creator)}
onClick={() => handleCreatorClick(creator)}
/>
))}
{categories.map((category) => (
<FilterChip
key={category.key}
name={category.name}
selected={filters.includes(category.key)}
onClick={() => handleFilterClick(category.key)}
number={categoryCounts[category.key] ?? 0}
/>
))}
</div>
);
};
@@ -1,15 +0,0 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
import { CategoryKey } from "./types";

export const categories: Array<{ key: CategoryKey; name: string }> = [
{ key: GetV2BuilderSearchFilterAnyOfItem.blocks, name: "Blocks" },
{
key: GetV2BuilderSearchFilterAnyOfItem.integrations,
name: "Integrations",
},
{
key: GetV2BuilderSearchFilterAnyOfItem.marketplace_agents,
name: "Marketplace agents",
},
{ key: GetV2BuilderSearchFilterAnyOfItem.my_agents, name: "My agents" },
];
@@ -1,26 +0,0 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export type DefaultStateType =
| "suggestion"
| "all_blocks"
| "input_blocks"
| "action_blocks"
| "output_blocks"
| "integrations"
| "marketplace_agents"
| "my_agents";

export type CategoryKey = GetV2BuilderSearchFilterAnyOfItem;

export interface Filters {
categories: {
blocks: boolean;
integrations: boolean;
marketplace_agents: boolean;
my_agents: boolean;
providers: boolean;
};
createdBy: string[];
}

export type CategoryCounts = Record<CategoryKey, number>;
@@ -1,14 +1,111 @@
import { Text } from "@/components/atoms/Text/Text";
import { useBlockMenuSearch } from "./useBlockMenuSearch";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { LoadingSpinner } from "@/components/__legacy__/ui/loading";
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { MarketplaceAgentBlock } from "../MarketplaceAgentBlock";
import { Block } from "../Block";
import { UGCAgentBlock } from "../UGCAgentBlock";
import { getSearchItemType } from "./helper";
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
import { blockMenuContainerStyle } from "../style";
import { BlockMenuFilters } from "../BlockMenuFilters/BlockMenuFilters";
import { BlockMenuSearchContent } from "../BlockMenuSearchContent/BlockMenuSearchContent";
import { cn } from "@/lib/utils";
import { NoSearchResult } from "../NoSearchResult";

export const BlockMenuSearch = () => {
const {
searchResults,
isFetchingNextPage,
fetchNextPage,
hasNextPage,
searchLoading,
handleAddLibraryAgent,
handleAddMarketplaceAgent,
addingLibraryAgentId,
addingMarketplaceAgentSlug,
} = useBlockMenuSearch();
const { searchQuery } = useBlockMenuStore();

if (searchLoading) {
return (
<div
className={cn(
blockMenuContainerStyle,
"flex items-center justify-center",
)}
>
<LoadingSpinner className="size-13" />
</div>
);
}

if (searchResults.length === 0) {
return <NoSearchResult />;
}

return (
<div className={blockMenuContainerStyle}>
<BlockMenuFilters />
<Text variant="body-medium">Search results</Text>
<BlockMenuSearchContent />
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
fetchNextPage={fetchNextPage}
hasNextPage={hasNextPage}
loader={<LoadingSpinner className="size-13" />}
className="space-y-2.5"
>
{searchResults.map((item: SearchResponseItemsItem, index: number) => {
const { type, data } = getSearchItemType(item);
// The backend supports only these 3 types right now; integration and AI agent types will be supported in follow-up PRs
switch (type) {
case "store_agent":
return (
<MarketplaceAgentBlock
key={index}
slug={data.slug}
highlightedText={searchQuery}
title={data.agent_name}
image_url={data.agent_image}
creator_name={data.creator}
number_of_runs={data.runs}
loading={addingMarketplaceAgentSlug === data.slug}
onClick={() =>
handleAddMarketplaceAgent({
creator_name: data.creator,
slug: data.slug,
})
}
/>
);
case "block":
return (
<Block
key={index}
title={data.name}
highlightedText={searchQuery}
description={data.description}
blockData={data}
/>
);

case "library_agent":
return (
<UGCAgentBlock
key={index}
title={data.name}
highlightedText={searchQuery}
image_url={data.image_url}
version={data.graph_version}
edited_time={data.updated_at}
isLoading={addingLibraryAgentId === data.id}
onClick={() => handleAddLibraryAgent(data)}
/>
);

default:
return null;
}
})}
</InfiniteScroll>
</div>
);
};

@@ -23,19 +23,9 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useToast } from "@/components/molecules/Toast/use-toast";
import * as Sentry from "@sentry/nextjs";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useBlockMenuSearchContent = () => {
const {
searchQuery,
searchId,
setSearchId,
filters,
setCreatorsList,
creators,
setCategoryCounts,
} = useBlockMenuStore();

export const useBlockMenuSearch = () => {
const { searchQuery, searchId, setSearchId } = useBlockMenuStore();
const { toast } = useToast();
const { addAgentToBuilder, addLibraryAgentToBuilder } =
useAddAgentToBuilder();
@@ -67,8 +57,6 @@ export const useBlockMenuSearchContent = () => {
page_size: 8,
search_query: searchQuery,
search_id: searchId,
filter: filters.length > 0 ? filters : undefined,
by_creator: creators.length > 0 ? creators : undefined,
},
{
query: { getNextPageParam: getPaginationNextPageNumber },
@@ -110,26 +98,6 @@ export const useBlockMenuSearchContent = () => {
}
}, [searchQueryData, searchId, setSearchId]);

// from all the results, we need to get all the unique creators
useEffect(() => {
if (!searchQueryData?.pages?.length) {
return;
}
const latestData = okData(searchQueryData.pages.at(-1));
setCategoryCounts(
(latestData?.total_items as Record<
GetV2BuilderSearchFilterAnyOfItem,
number
>) || {
blocks: 0,
integrations: 0,
marketplace_agents: 0,
my_agents: 0,
},
);
setCreatorsList(latestData?.items || []);
}, [searchQueryData]);

useEffect(() => {
if (searchId && !searchQuery) {
resetSearchSession();

@@ -1,108 +0,0 @@
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { getSearchItemType } from "./helper";
import { MarketplaceAgentBlock } from "../MarketplaceAgentBlock";
import { Block } from "../Block";
import { UGCAgentBlock } from "../UGCAgentBlock";
import { useBlockMenuSearchContent } from "./useBlockMenuSearchContent";
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { cn } from "@/lib/utils";
import { blockMenuContainerStyle } from "../style";
import { NoSearchResult } from "../NoSearchResult";

export const BlockMenuSearchContent = () => {
const {
searchResults,
isFetchingNextPage,
fetchNextPage,
hasNextPage,
searchLoading,
handleAddLibraryAgent,
handleAddMarketplaceAgent,
addingLibraryAgentId,
addingMarketplaceAgentSlug,
} = useBlockMenuSearchContent();

const { searchQuery } = useBlockMenuStore();

if (searchLoading) {
return (
<div
className={cn(
blockMenuContainerStyle,
"flex items-center justify-center",
)}
>
<LoadingSpinner className="size-13" />
</div>
);
}

if (searchResults.length === 0) {
return <NoSearchResult />;
}

return (
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
fetchNextPage={fetchNextPage}
hasNextPage={hasNextPage}
loader={<LoadingSpinner className="size-13" />}
className="space-y-2.5"
>
{searchResults.map((item: SearchResponseItemsItem, index: number) => {
const { type, data } = getSearchItemType(item);
// backend give support to these 3 types only [right now] - we need to give support to integration and ai agent types in follow up PRs
switch (type) {
case "store_agent":
return (
<MarketplaceAgentBlock
key={index}
slug={data.slug}
highlightedText={searchQuery}
title={data.agent_name}
image_url={data.agent_image}
creator_name={data.creator}
number_of_runs={data.runs}
loading={addingMarketplaceAgentSlug === data.slug}
onClick={() =>
handleAddMarketplaceAgent({
creator_name: data.creator,
slug: data.slug,
})
}
/>
);
case "block":
return (
<Block
key={index}
title={data.name}
highlightedText={searchQuery}
description={data.description}
blockData={data}
/>
);

case "library_agent":
return (
<UGCAgentBlock
key={index}
title={data.name}
highlightedText={searchQuery}
image_url={data.image_url}
version={data.graph_version}
edited_time={data.updated_at}
isLoading={addingLibraryAgentId === data.id}
onClick={() => handleAddLibraryAgent(data)}
/>
);

default:
return null;
}
})}
</InfiniteScroll>
);
};
@@ -1,9 +1,7 @@
import { Button } from "@/components/__legacy__/ui/button";
import { cn } from "@/lib/utils";
import { XIcon } from "@phosphor-icons/react";
import { AnimatePresence, motion } from "framer-motion";

import React, { ButtonHTMLAttributes, useState } from "react";
import { X } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
selected?: boolean;
@@ -18,51 +16,39 @@ export const FilterChip: React.FC<Props> = ({
className,
...rest
}) => {
const [isHovered, setIsHovered] = useState(false);
return (
<AnimatePresence mode="wait">
<Button
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
<Button
className={cn(
"group w-fit space-x-1 rounded-[1.5rem] border border-zinc-300 bg-transparent px-[0.625rem] py-[0.375rem] shadow-none transition-transform duration-300 ease-in-out",
"hover:border-violet-500 hover:bg-transparent focus:ring-0 disabled:cursor-not-allowed",
selected && "border-0 bg-violet-700 hover:border",
className,
)}
{...rest}
>
<span
className={cn(
"group w-fit space-x-1 rounded-[1.5rem] border border-zinc-300 bg-transparent px-[0.625rem] py-[0.375rem] shadow-none",
"hover:border-violet-500 hover:bg-transparent focus:ring-0 disabled:cursor-not-allowed",
selected && "border-0 bg-violet-700 hover:border",
className,
"font-sans text-sm font-medium leading-[1.375rem] text-zinc-600 group-hover:text-zinc-600 group-disabled:text-zinc-400",
selected && "text-zinc-50",
)}
{...rest}
>
<span
className={cn(
"font-sans text-sm font-medium leading-[1.375rem] text-zinc-600 group-hover:text-zinc-600 group-disabled:text-zinc-400",
selected && "text-zinc-50",
{name}
</span>
{selected && (
<>
<span className="flex h-4 w-4 items-center justify-center rounded-full bg-zinc-50 transition-all duration-300 ease-in-out group-hover:hidden">
<X
className="h-3 w-3 rounded-full text-violet-700"
strokeWidth={2}
/>
</span>
{number !== undefined && (
<span className="hidden h-[1.375rem] items-center rounded-[1.25rem] bg-violet-700 p-[0.375rem] text-zinc-50 transition-all duration-300 ease-in-out animate-in fade-in zoom-in group-hover:flex">
{number > 100 ? "100+" : number}
</span>
)}
>
{name}
</span>
{selected && !isHovered && (
<motion.span
initial={{ opacity: 0.5, scale: 0.5, filter: "blur(20px)" }}
animate={{ opacity: 1, scale: 1, filter: "blur(0px)" }}
exit={{ opacity: 0.5, scale: 0.5, filter: "blur(20px)" }}
transition={{ duration: 0.3, type: "spring", bounce: 0.2 }}
className="flex h-4 w-4 items-center justify-center rounded-full bg-zinc-50"
>
<XIcon size={12} weight="bold" className="text-violet-700" />
</motion.span>
)}
{number !== undefined && isHovered && (
<motion.span
initial={{ opacity: 0.5, scale: 0.5, filter: "blur(10px)" }}
animate={{ opacity: 1, scale: 1, filter: "blur(0px)" }}
exit={{ opacity: 0.5, scale: 0.5, filter: "blur(10px)" }}
transition={{ duration: 0.3, type: "spring", bounce: 0.2 }}
className="flex h-[1.375rem] items-center rounded-[1.25rem] bg-violet-700 p-[0.375rem] text-zinc-50"
>
{number > 100 ? "100+" : number}
</motion.span>
)}
</Button>
</AnimatePresence>
</>
)}
</Button>
);
};

@@ -1,156 +0,0 @@
import { FilterChip } from "../FilterChip";
import { cn } from "@/lib/utils";
import { CategoryKey } from "../BlockMenuFilters/types";
import { AnimatePresence, motion } from "framer-motion";
import { XIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Separator } from "@/components/__legacy__/ui/separator";
import { Checkbox } from "@/components/__legacy__/ui/checkbox";
import { useFilterSheet } from "./useFilterSheet";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";

export function FilterSheet({
categories,
}: {
categories: Array<{ key: CategoryKey; name: string }>;
}) {
const {
isOpen,
localCategories,
localCreators,
displayedCreatorsCount,
handleLocalCategoryChange,
handleToggleShowMoreCreators,
handleLocalCreatorChange,
handleClearFilters,
handleCloseButton,
handleApplyFilters,
hasLocalActiveFilters,
visibleCreators,
creators,
handleOpenFilters,
hasActiveFilters,
} = useFilterSheet();

return (
<div className="m-0 inline w-fit p-0">
<FilterChip
name={hasActiveFilters() ? "Edit filters" : "All filters"}
onClick={handleOpenFilters}
/>

<AnimatePresence>
{isOpen && (
<motion.div
className={cn(
"absolute bottom-2 left-2 top-2 z-20 w-3/4 max-w-[22.5rem] space-y-4 overflow-hidden rounded-[0.75rem] bg-white pb-4 shadow-[0_4px_12px_2px_rgba(0,0,0,0.1)]",
)}
initial={{ x: "-100%", filter: "blur(10px)" }}
animate={{ x: 0, filter: "blur(0px)" }}
exit={{ x: "-110%", filter: "blur(10px)" }}
transition={{ duration: 0.4, type: "spring", bounce: 0.2 }}
>
{/* Top section */}
<div className="flex items-center justify-between px-5 pt-4">
<Text variant="body">Filters</Text>
<Button
className="p-0"
variant="ghost"
size="icon"
onClick={handleCloseButton}
>
<XIcon size={20} />
</Button>
</div>

<Separator className="h-[1px] w-full text-zinc-300" />

{/* Category section */}
<div className="space-y-4 px-5">
<Text variant="large">Categories</Text>
<div className="space-y-2">
{categories.map((category) => (
<div
key={category.key}
className="flex items-center space-x-2"
>
<Checkbox
id={category.key}
checked={localCategories.includes(category.key)}
onCheckedChange={() =>
handleLocalCategoryChange(category.key)
}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={category.key}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{category.name}
</label>
</div>
))}
</div>
</div>

{/* Created by section */}
<div className="space-y-4 px-5">
<p className="font-sans text-base font-medium text-zinc-800">
Created by
</p>
<div className="space-y-2">
{visibleCreators.map((creator, i) => (
<div key={i} className="flex items-center space-x-2">
<Checkbox
id={`creator-${creator}`}
checked={localCreators.includes(creator)}
onCheckedChange={() => handleLocalCreatorChange(creator)}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={`creator-${creator}`}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{creator}
</label>
</div>
))}
</div>
{creators.length > INITIAL_CREATORS_TO_SHOW && (
<Button
variant={"link"}
className="m-0 p-0 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 underline hover:text-zinc-600"
onClick={handleToggleShowMoreCreators}
>
{displayedCreatorsCount < creators.length ? "More" : "Less"}
</Button>
)}
</div>

{/* Footer section */}
<div className="fixed bottom-0 flex w-full justify-between gap-3 border-t border-zinc-200 bg-white px-5 py-3">
<Button
size="small"
variant={"outline"}
onClick={handleClearFilters}
className="rounded-[8px] px-2 py-1.5"
>
Clear
</Button>

<Button
size="small"
onClick={handleApplyFilters}
disabled={!hasLocalActiveFilters()}
className="rounded-[8px] px-2 py-1.5"
>
Apply filters
</Button>
</div>
</motion.div>
)}
</AnimatePresence>
</div>
);
}
@@ -1 +0,0 @@
export const INITIAL_CREATORS_TO_SHOW = 5;
@@ -1,100 +0,0 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { useState } from "react";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useFilterSheet = () => {
const { filters, creators_list, creators, setFilters, setCreators } =
useBlockMenuStore();

const [isOpen, setIsOpen] = useState(false);
const [localCategories, setLocalCategories] =
useState<GetV2BuilderSearchFilterAnyOfItem[]>(filters);
const [localCreators, setLocalCreators] = useState<string[]>(creators);
const [displayedCreatorsCount, setDisplayedCreatorsCount] = useState(
INITIAL_CREATORS_TO_SHOW,
);

const handleLocalCategoryChange = (
category: GetV2BuilderSearchFilterAnyOfItem,
) => {
setLocalCategories((prev) => {
if (prev.includes(category)) {
return prev.filter((c) => c !== category);
}
return [...prev, category];
});
};

const hasActiveFilters = () => {
return filters.length > 0 || creators.length > 0;
};

const handleToggleShowMoreCreators = () => {
if (displayedCreatorsCount < creators.length) {
setDisplayedCreatorsCount(creators.length);
} else {
setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
}
};

const handleLocalCreatorChange = (creator: string) => {
setLocalCreators((prev) => {
if (prev.includes(creator)) {
return prev.filter((c) => c !== creator);
}
return [...prev, creator];
});
};

const handleClearFilters = () => {
setLocalCategories([]);
setLocalCreators([]);
setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
};

const handleCloseButton = () => {
setIsOpen(false);
setLocalCategories(filters);
setLocalCreators(creators);
setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
};

const handleApplyFilters = () => {
setFilters(localCategories);
setCreators(localCreators);
setIsOpen(false);
};

const handleOpenFilters = () => {
setIsOpen(true);
setLocalCategories(filters);
setLocalCreators(creators);
};

const hasLocalActiveFilters = () => {
return localCategories.length > 0 || localCreators.length > 0;
};

const visibleCreators = creators_list.slice(0, displayedCreatorsCount);

return {
creators,
isOpen,
setIsOpen,
localCategories,
localCreators,
displayedCreatorsCount,
setDisplayedCreatorsCount,
handleLocalCategoryChange,
handleToggleShowMoreCreators,
handleLocalCreatorChange,
handleClearFilters,
handleCloseButton,
handleOpenFilters,
handleApplyFilters,
hasLocalActiveFilters,
visibleCreators,
hasActiveFilters,
};
};
@@ -1,30 +1,12 @@
import { create } from "zustand";
import { DefaultStateType } from "../components/NewControlPanel/NewBlockMenu/types";
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { getSearchItemType } from "../components/NewControlPanel/NewBlockMenu/BlockMenuSearchContent/helper";
import { StoreAgent } from "@/app/api/__generated__/models/storeAgent";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

type BlockMenuStore = {
  searchQuery: string;
  searchId: string | undefined;
  defaultState: DefaultStateType;
  integration: string | undefined;
  filters: GetV2BuilderSearchFilterAnyOfItem[];
  creators: string[];
  creators_list: string[];
  categoryCounts: Record<GetV2BuilderSearchFilterAnyOfItem, number>;

  setCategoryCounts: (
    counts: Record<GetV2BuilderSearchFilterAnyOfItem, number>,
  ) => void;
  setCreatorsList: (searchData: SearchResponseItemsItem[]) => void;
  addCreator: (creator: string) => void;
  setCreators: (creators: string[]) => void;
  removeCreator: (creator: string) => void;
  addFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
  setFilters: (filters: GetV2BuilderSearchFilterAnyOfItem[]) => void;
  removeFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
  setSearchQuery: (query: string) => void;
  setSearchId: (id: string | undefined) => void;
  setDefaultState: (state: DefaultStateType) => void;
@@ -37,44 +19,11 @@ export const useBlockMenuStore = create<BlockMenuStore>((set) => ({
  searchId: undefined,
  defaultState: DefaultStateType.SUGGESTION,
  integration: undefined,
  filters: [],
  creators: [], // creator filters that are applied to the search results
  creators_list: [], // all creators that are available to filter by
  categoryCounts: {
    blocks: 0,
    integrations: 0,
    marketplace_agents: 0,
    my_agents: 0,
  },

  setCategoryCounts: (counts) => set({ categoryCounts: counts }),
  setCreatorsList: (searchData) => {
    const marketplaceAgents = searchData.filter((item) => {
      return getSearchItemType(item).type === "store_agent";
    }) as StoreAgent[];

    const newCreators = marketplaceAgents.map((agent) => agent.creator);

    set((state) => ({
      creators_list: Array.from(
        new Set([...state.creators_list, ...newCreators]),
      ),
    }));
  },
  setCreators: (creators) => set({ creators }),
  setFilters: (filters) => set({ filters }),
  setSearchQuery: (query) => set({ searchQuery: query }),
  setSearchId: (id) => set({ searchId: id }),
  setDefaultState: (state) => set({ defaultState: state }),
  setIntegration: (integration) => set({ integration }),
  addFilter: (filter) =>
    set((state) => ({ filters: [...state.filters, filter] })),
  removeFilter: (filter) =>
    set((state) => ({ filters: state.filters.filter((f) => f !== filter) })),
  addCreator: (creator) =>
    set((state) => ({ creators: [...state.creators, creator] })),
  removeCreator: (creator) =>
    set((state) => ({ creators: state.creators.filter((c) => c !== creator) })),
  reset: () =>
    set({
      searchQuery: "",

@@ -4,7 +4,6 @@ import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
import { customEdgeToLink, linkToCustomEdge } from "../components/helper";
import { MarkerType } from "@xyflow/react";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { cleanUpHandleId } from "@/components/renderers/InputRenderer/helpers";

type EdgeStore = {
  edges: CustomEdge[];
@@ -14,8 +13,6 @@ type EdgeStore = {
  removeEdge: (edgeId: string) => void;
  upsertMany: (edges: CustomEdge[]) => void;

  removeEdgesByHandlePrefix: (nodeId: string, handlePrefix: string) => void;

  getNodeEdges: (nodeId: string) => CustomEdge[];
  isInputConnected: (nodeId: string, handle: string) => boolean;
  isOutputConnected: (nodeId: string, handle: string) => boolean;
@@ -82,27 +79,11 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
    return { edges: Array.from(byKey.values()) };
  }),

  removeEdgesByHandlePrefix: (nodeId, handlePrefix) =>
    set((state) => ({
      edges: state.edges.filter(
        (e) =>
          !(
            e.target === nodeId &&
            e.targetHandle &&
            e.targetHandle.startsWith(handlePrefix)
          ),
      ),
    })),

  getNodeEdges: (nodeId) =>
    get().edges.filter((e) => e.source === nodeId || e.target === nodeId),

  isInputConnected: (nodeId, handle) => {
    const cleanedHandle = cleanUpHandleId(handle);
    return get().edges.some(
      (e) => e.target === nodeId && e.targetHandle === cleanedHandle,
    );
  },
  isInputConnected: (nodeId, handle) =>
    get().edges.some((e) => e.target === nodeId && e.targetHandle === handle),

  isOutputConnected: (nodeId, handle) =>
    get().edges.some((e) => e.source === nodeId && e.sourceHandle === handle),
@@ -124,15 +105,15 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
    targetNodeId: string,
    executionResult: NodeExecutionResult,
  ) => {
    set((state) => {
      let hasChanges = false;

      const newEdges = state.edges.map((edge) => {
    set((state) => ({
      edges: state.edges.map((edge) => {
        if (edge.target !== targetNodeId) {
          return edge;
        }

        const beadData = new Map(edge.data?.beadData ?? new Map());
        const beadData =
          edge.data?.beadData ??
          new Map<string, NodeExecutionResult["status"]>();

        const inputValue = edge.targetHandle
          ? executionResult.input_data[edge.targetHandle]
@@ -156,11 +137,6 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
          beadUp = beadDown + 1;
        }

        if (edge.data?.beadUp === beadUp && edge.data?.beadDown === beadDown) {
          return edge;
        }

        hasChanges = true;
        return {
          ...edge,
          data: {
@@ -170,10 +146,8 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
            beadData,
          },
        };
      });

      return hasChanges ? { edges: newEdges } : state;
    });
      }),
    }));
  },

  resetEdgeBeads: () => {

@@ -13,10 +13,6 @@ import { useHistoryStore } from "./historyStore";
import { useEdgeStore } from "./edgeStore";
import { BlockUIType } from "../components/types";
import { pruneEmptyValues } from "@/lib/utils";
import {
  ensurePathExists,
  parseHandleIdToPath,
} from "@/components/renderers/InputRenderer/helpers";

// Minimum movement (in pixels) required before logging position change to history
// Prevents spamming history with small movements when clicking on inputs inside blocks
@@ -66,11 +62,6 @@ type NodeStore = {
    errors: { [key: string]: string },
  ) => void;
  clearAllNodeErrors: () => void; // Add this

  syncHardcodedValuesWithHandleIds: (nodeId: string) => void;

  // Credentials optional helpers
  setCredentialsOptional: (nodeId: string, optional: boolean) => void;
};

export const useNodeStore = create<NodeStore>((set, get) => ({
@@ -229,9 +220,6 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
        ...(node.data.metadata?.customized_name !== undefined && {
          customized_name: node.data.metadata.customized_name,
        }),
        ...(node.data.metadata?.credentials_optional !== undefined && {
          credentials_optional: node.data.metadata.credentials_optional,
        }),
      },
    };
  },
@@ -317,61 +305,4 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
      })),
    }));
  },

  syncHardcodedValuesWithHandleIds: (nodeId: string) => {
    const node = get().nodes.find((n) => n.id === nodeId);
    if (!node) return;

    const handleIds = useEdgeStore.getState().getAllHandleIdsOfANode(nodeId);
    const additionalHandles = handleIds.filter((h) => h.includes("_#_"));

    if (additionalHandles.length === 0) return;

    const hardcodedValues = JSON.parse(
      JSON.stringify(node.data.hardcodedValues || {}),
    );

    let modified = false;

    additionalHandles.forEach((handleId) => {
      const segments = parseHandleIdToPath(handleId);
      if (ensurePathExists(hardcodedValues, segments)) {
        modified = true;
      }
    });

    if (modified) {
      set((state) => ({
        nodes: state.nodes.map((n) =>
          n.id === nodeId ? { ...n, data: { ...n.data, hardcodedValues } } : n,
        ),
      }));
    }
  },

  setCredentialsOptional: (nodeId: string, optional: boolean) => {
    set((state) => ({
      nodes: state.nodes.map((n) =>
        n.id === nodeId
          ? {
              ...n,
              data: {
                ...n.data,
                metadata: {
                  ...n.data.metadata,
                  credentials_optional: optional,
                },
              },
            }
          : n,
      ),
    }));

    const newState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    useHistoryStore.getState().pushState(newState);
  },
}));

@@ -34,7 +34,6 @@ type Props = {
  onSelectCredentials: (newValue?: CredentialsMetaInput) => void;
  onLoaded?: (loaded: boolean) => void;
  readOnly?: boolean;
  isOptional?: boolean;
  showTitle?: boolean;
};

@@ -46,7 +45,6 @@ export function CredentialsInput({
  siblingInputs,
  onLoaded,
  readOnly = false,
  isOptional = false,
  showTitle = true,
}: Props) {
  const hookData = useCredentialsInput({
@@ -56,7 +54,6 @@ export function CredentialsInput({
    siblingInputs,
    onLoaded,
    readOnly,
    isOptional,
  });

  if (!isLoaded(hookData)) {
@@ -97,14 +94,7 @@ export function CredentialsInput({
    <div className={cn("mb-6", className)}>
      {showTitle && (
        <div className="mb-2 flex items-center gap-2">
          <Text variant="large-medium">
            {displayName} credentials
            {isOptional && (
              <span className="ml-1 text-sm font-normal text-gray-500">
                (optional)
              </span>
            )}
          </Text>
          <Text variant="large-medium">{displayName} credentials</Text>
          {schema.description && (
            <InformationTooltip description={schema.description} />
          )}
@@ -113,16 +103,14 @@ export function CredentialsInput({

      {hasCredentialsToShow ? (
        <>
          {(credentialsToShow.length > 1 || isOptional) && !readOnly ? (
          {credentialsToShow.length > 1 && !readOnly ? (
            <CredentialsSelect
              credentials={credentialsToShow}
              provider={provider}
              displayName={displayName}
              selectedCredentials={selectedCredential}
              onSelectCredential={handleCredentialSelect}
              onClearCredential={() => onSelectCredential(undefined)}
              readOnly={readOnly}
              allowNone={isOptional}
            />
          ) : (
            <div className="mb-4 space-y-2">
@@ -155,7 +143,6 @@ export function CredentialsInput({
              size="small"
              onClick={handleActionButtonClick}
              className="w-fit"
              type="button"
            >
              {actionButtonText}
            </Button>
@@ -168,7 +155,6 @@ export function CredentialsInput({
              size="small"
              onClick={handleActionButtonClick}
              className="w-fit"
              type="button"
            >
              {actionButtonText}
            </Button>

@@ -23,9 +23,7 @@ interface Props {
  displayName: string;
  selectedCredentials?: CredentialsMetaInput;
  onSelectCredential: (credentialId: string) => void;
  onClearCredential?: () => void;
  readOnly?: boolean;
  allowNone?: boolean;
}

export function CredentialsSelect({
@@ -34,30 +32,20 @@ export function CredentialsSelect({
  displayName,
  selectedCredentials,
  onSelectCredential,
  onClearCredential,
  readOnly = false,
  allowNone = true,
}: Props) {
  // Auto-select first credential if none is selected (only if allowNone is false)
  // Auto-select first credential if none is selected
  useEffect(() => {
    if (!allowNone && !selectedCredentials && credentials.length > 0) {
    if (!selectedCredentials && credentials.length > 0) {
      onSelectCredential(credentials[0].id);
    }
  }, [allowNone, selectedCredentials, credentials, onSelectCredential]);

  const handleValueChange = (value: string) => {
    if (value === "__none__") {
      onClearCredential?.();
    } else {
      onSelectCredential(value);
    }
  };
  }, [selectedCredentials, credentials, onSelectCredential]);

  return (
    <div className="mb-4 w-full">
      <Select
        value={selectedCredentials?.id || (allowNone ? "__none__" : "")}
        onValueChange={handleValueChange}
        value={selectedCredentials?.id || ""}
        onValueChange={(value) => onSelectCredential(value)}
      >
        <SelectTrigger className="h-auto min-h-12 w-full rounded-medium border-zinc-200 p-0 pr-4 shadow-none">
          {selectedCredentials ? (
@@ -82,15 +70,6 @@ export function CredentialsSelect({
          )}
        </SelectTrigger>
        <SelectContent>
          {allowNone && (
            <SelectItem key="__none__" value="__none__">
              <div className="flex items-center gap-2">
                <Text variant="body" className="tracking-tight text-gray-500">
                  None (skip this credential)
                </Text>
              </div>
            </SelectItem>
          )}
          {credentials.map((credential) => (
            <SelectItem key={credential.id} value={credential.id}>
              <div className="flex items-center gap-2">

@@ -22,7 +22,6 @@ type Params = {
  siblingInputs?: Record<string, any>;
  onLoaded?: (loaded: boolean) => void;
  readOnly?: boolean;
  isOptional?: boolean;
};

export function useCredentialsInput({
@@ -32,7 +31,6 @@ export function useCredentialsInput({
  siblingInputs,
  onLoaded,
  readOnly = false,
  isOptional = false,
}: Params) {
  const [isAPICredentialsModalOpen, setAPICredentialsModalOpen] =
    useState(false);
@@ -101,20 +99,13 @@ export function useCredentialsInput({
      : null;
  }, [credentials]);

  // Auto-select the one available credential (only if not optional)
  // Auto-select the one available credential
  useEffect(() => {
    if (readOnly) return;
    if (isOptional) return; // Don't auto-select when credential is optional
    if (singleCredential && !selectedCredential) {
      onSelectCredential(singleCredential);
    }
  }, [
    singleCredential,
    selectedCredential,
    onSelectCredential,
    readOnly,
    isOptional,
  ]);
  }, [singleCredential, selectedCredential, onSelectCredential, readOnly]);

  if (
    !credentials ||

@@ -8,7 +8,6 @@ import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBann

export function ModalRunSection() {
  const {
    agent,
    defaultRunType,
    presetName,
    setPresetName,
@@ -25,11 +24,6 @@ export function ModalRunSection() {
  const inputFields = Object.entries(agentInputFields || {});
  const credentialFields = Object.entries(agentCredentialsInputFields || {});

  // Get the list of required credentials from the schema
  const requiredCredentials = new Set(
    (agent.credentials_input_schema?.required as string[]) || [],
  );

  return (
    <div className="flex flex-col gap-4">
      {defaultRunType === "automatic-trigger" ||
@@ -105,12 +99,14 @@ export function ModalRunSection() {
                schema={
                  { ...inputSubSchema, discriminator: undefined } as any
                }
                selectedCredentials={inputCredentials?.[key]}
                selectedCredentials={
                  (inputCredentials && inputCredentials[key]) ??
                  inputSubSchema.default
                }
                onSelectCredentials={(value) =>
                  setInputCredentialsValue(key, value)
                }
                siblingInputs={inputValues}
                isOptional={!requiredCredentials.has(key)}
              />
            ),
          )}

@@ -163,21 +163,15 @@ export function useAgentRunModal(
  }, [agentInputSchema.required, inputValues]);

  const [allCredentialsAreSet, missingCredentials] = useMemo(() => {
    // Only check required credentials from schema, not all properties
    // Credentials marked as optional in node metadata won't be in the required array
    const requiredCredentials = new Set(
      (agent.credentials_input_schema?.required as string[]) || [],
    const availableCredentials = new Set(Object.keys(inputCredentials));
    const allCredentials = new Set(
      Object.keys(agentCredentialsInputFields || {}) ?? [],
    );
    const missing = [...allCredentials].filter(
      (key) => !availableCredentials.has(key),
    );

    // Check if required credentials have valid id (not just key existence)
    // A credential is valid only if it has an id field set
    const missing = [...requiredCredentials].filter((key) => {
      const cred = inputCredentials[key];
      return !cred || !cred.id;
    });

    return [missing.length === 0, missing];
  }, [agent.credentials_input_schema, inputCredentials]);
  }, [agentCredentialsInputFields, inputCredentials]);

  const credentialsRequired = useMemo(
    () => Object.keys(agentCredentialsInputFields || {}).length > 0,
@@ -245,18 +239,12 @@ export function useAgentRunModal(
    });
  } else {
    // Manual execution
    // Filter out incomplete credentials (optional ones not selected)
    // Only send credentials that have a valid id field
    const validCredentials = Object.fromEntries(
      Object.entries(inputCredentials).filter(([_, cred]) => cred && cred.id),
    );

    executeGraphMutation.mutate({
      graphId: agent.graph_id,
      graphVersion: agent.graph_version,
      data: {
        inputs: inputValues,
        credentials_inputs: validCredentials,
        credentials_inputs: inputCredentials,
        source: "library",
      },
    });

Some files were not shown because too many files have changed in this diff.