Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-21 04:57:58 -05:00)

Compare commits: dev ... feature/vi (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f8d3893c16 |  |
|  | 1cfbc0dd08 |  |
|  | ff84643b48 |  |
|  | c19c3c834a |  |
|  | d0f7ba8cfd |  |
|  | 2a855f4bd0 |  |
|  | b93bb3b9f8 |  |
@@ -93,5 +93,5 @@ jobs:
 
             Error logs:
             ${{ toJSON(fromJSON(steps.failure_details.outputs.result).errorLogs) }}
-          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
           claude_args: "--allowedTools 'Edit,MultiEdit,Write,Read,Glob,Grep,LS,Bash(git:*),Bash(bun:*),Bash(npm:*),Bash(npx:*),Bash(gh:*)'"
.github/workflows/claude-dependabot.yml (vendored, 4 changed lines)
@@ -7,7 +7,7 @@
 # - Provide actionable recommendations for the development team
 #
 # Triggered on: Dependabot PRs (opened, synchronize)
-# Requirements: CLAUDE_CODE_OAUTH_TOKEN secret must be configured
+# Requirements: ANTHROPIC_API_KEY secret must be configured
 
 name: Claude Dependabot PR Review
 
@@ -308,7 +308,7 @@ jobs:
         id: claude_review
         uses: anthropics/claude-code-action@v1
         with:
-          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
             --allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
           prompt: |
.github/workflows/claude.yml (vendored, 2 changed lines)
@@ -323,7 +323,7 @@ jobs:
         id: claude
         uses: anthropics/claude-code-action@v1
         with:
-          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
           claude_args: |
             --allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*), Bash(gh pr edit:*)"
             --model opus
.github/workflows/docs-block-sync.yml (vendored, 78 changed lines, file deleted)
@@ -1,78 +0,0 @@
-name: Block Documentation Sync Check
-
-on:
-  push:
-    branches: [master, dev]
-    paths:
-      - "autogpt_platform/backend/backend/blocks/**"
-      - "docs/integrations/**"
-      - "autogpt_platform/backend/scripts/generate_block_docs.py"
-      - ".github/workflows/docs-block-sync.yml"
-  pull_request:
-    branches: [master, dev]
-    paths:
-      - "autogpt_platform/backend/backend/blocks/**"
-      - "docs/integrations/**"
-      - "autogpt_platform/backend/scripts/generate_block_docs.py"
-      - ".github/workflows/docs-block-sync.yml"
-
-jobs:
-  check-docs-sync:
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Check block documentation is in sync
-        working-directory: autogpt_platform/backend
-        run: |
-          echo "Checking if block documentation is in sync with code..."
-          poetry run python scripts/generate_block_docs.py --check
-
-      - name: Show diff if out of sync
-        if: failure()
-        working-directory: autogpt_platform/backend
-        run: |
-          echo "::error::Block documentation is out of sync with code!"
-          echo ""
-          echo "To fix this, run the following command locally:"
-          echo "  cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
-          echo ""
-          echo "Then commit the updated documentation files."
-          echo ""
-          echo "Regenerating docs to show diff..."
-          poetry run python scripts/generate_block_docs.py
-          echo ""
-          echo "Changes detected:"
-          git diff ../../docs/integrations/ || true
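The deleted workflow relied on the generator script's `--check` mode: regenerate the docs and fail the job if the output differs from what is committed. A minimal sketch of that pattern, with a stubbed `generate()` and hypothetical paths (the real logic lives in `scripts/generate_block_docs.py`):

```python
# Sketch of a docs-generator --check mode; paths and generate() are stand-ins.
import argparse
import pathlib
import sys


def generate() -> dict[str, str]:
    """Return {relative_path: content} for every generated doc page (stubbed)."""
    return {"integrations/example.md": "# Example block\n"}


def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("--check", action="store_true")
    args = parser.parse_args()
    root = pathlib.Path("docs")
    pages = generate()
    if args.check:
        # Fail (non-zero exit) if any page on disk differs from the regenerated one.
        stale = [
            rel for rel, content in pages.items()
            if not (root / rel).exists() or (root / rel).read_text() != content
        ]
        if stale:
            print(f"Out of sync: {stale}", file=sys.stderr)
            return 1
        return 0
    for rel, content in pages.items():
        (root / rel).parent.mkdir(parents=True, exist_ok=True)
        (root / rel).write_text(content)
    return 0


if __name__ == "__main__":
    sys.exit(main())
```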
.github/workflows/docs-claude-review.yml (vendored, 95 changed lines, file deleted)
@@ -1,95 +0,0 @@
-name: Claude Block Docs Review
-
-on:
-  pull_request:
-    types: [opened, synchronize]
-    paths:
-      - "docs/integrations/**"
-      - "autogpt_platform/backend/backend/blocks/**"
-
-jobs:
-  claude-review:
-    # Only run for PRs from members/collaborators
-    if: |
-      github.event.pull_request.author_association == 'OWNER' ||
-      github.event.pull_request.author_association == 'MEMBER' ||
-      github.event.pull_request.author_association == 'COLLABORATOR'
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-    permissions:
-      contents: read
-      pull-requests: write
-      id-token: write
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Run Claude Code Review
-        uses: anthropics/claude-code-action@v1
-        with:
-          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
-          claude_args: |
-            --allowedTools "Read,Glob,Grep,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*)"
-          prompt: |
-            You are reviewing a PR that modifies block documentation or block code for AutoGPT.
-
-            ## Your Task
-            Review the changes in this PR and provide constructive feedback. Focus on:
-
-            1. **Documentation Accuracy**: For any block code changes, verify that:
-               - Input/output tables in docs match the actual block schemas
-               - Description text accurately reflects what the block does
-               - Any new blocks have corresponding documentation
-
-            2. **Manual Content Quality**: Check manual sections (marked with `<!-- MANUAL: -->` markers):
-               - "How it works" sections should have clear technical explanations
-               - "Possible use case" sections should have practical, real-world examples
-               - Content should be helpful for users trying to understand the blocks
-
-            3. **Template Compliance**: Ensure docs follow the standard template:
-               - What it is (brief intro)
-               - What it does (description)
-               - How it works (technical explanation)
-               - Inputs table
-               - Outputs table
-               - Possible use case
-
-            4. **Cross-references**: Check that links and anchors are correct
-
-            ## Review Process
-            1. First, get the PR diff to see what changed: `gh pr diff ${{ github.event.pull_request.number }}`
-            2. Read any modified block files to understand the implementation
-            3. Read corresponding documentation files to verify accuracy
-            4. Provide your feedback as a PR comment
-
-            Be constructive and specific. If everything looks good, say so!
-            If there are issues, explain what's wrong and suggest how to fix it.
.github/workflows/docs-enhance.yml (vendored, 194 changed lines, file deleted)
@@ -1,194 +0,0 @@
-name: Enhance Block Documentation
-
-on:
-  workflow_dispatch:
-    inputs:
-      block_pattern:
-        description: 'Block file pattern to enhance (e.g., "google/*.md" or "*" for all blocks)'
-        required: true
-        default: '*'
-        type: string
-      dry_run:
-        description: 'Dry run mode - show proposed changes without committing'
-        type: boolean
-        default: true
-      max_blocks:
-        description: 'Maximum number of blocks to process (0 for unlimited)'
-        type: number
-        default: 10
-
-jobs:
-  enhance-docs:
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    permissions:
-      contents: write
-      pull-requests: write
-      id-token: write
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            poetry-${{ runner.os }}-
-
-      - name: Install Poetry
-        run: |
-          cd autogpt_platform/backend
-          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
-
-      - name: Install dependencies
-        working-directory: autogpt_platform/backend
-        run: |
-          poetry install --only main
-          poetry run prisma generate
-
-      - name: Run Claude Enhancement
-        uses: anthropics/claude-code-action@v1
-        with:
-          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
-          claude_args: |
-            --allowedTools "Read,Edit,Glob,Grep,Write,Bash(git:*),Bash(gh:*),Bash(find:*),Bash(ls:*)"
-          prompt: |
-            You are enhancing block documentation for AutoGPT. Your task is to improve the MANUAL sections
-            of block documentation files by reading the actual block implementations and writing helpful content.
-
-            ## Configuration
-            - Block pattern: ${{ inputs.block_pattern }}
-            - Dry run: ${{ inputs.dry_run }}
-            - Max blocks to process: ${{ inputs.max_blocks }}
-
-            ## Your Task
-
-            1. **Find Documentation Files**
-               Find block documentation files matching the pattern in `docs/integrations/`
-               Pattern: ${{ inputs.block_pattern }}
-
-               Use: `find docs/integrations -name "*.md" -type f`
-
-            2. **For Each Documentation File** (up to ${{ inputs.max_blocks }} files):
-
-               a. Read the documentation file
-
-               b. Identify which block(s) it documents (look for the block class name)
-
-               c. Find and read the corresponding block implementation in `autogpt_platform/backend/backend/blocks/`
-
-               d. Improve the MANUAL sections:
-
-               **"How it works" section** (within `<!-- MANUAL: how_it_works -->` markers):
-               - Explain the technical flow of the block
-               - Describe what APIs or services it connects to
-               - Note any important configuration or prerequisites
-               - Keep it concise but informative (2-4 paragraphs)
-
-               **"Possible use case" section** (within `<!-- MANUAL: use_case -->` markers):
-               - Provide 2-3 practical, real-world examples
-               - Make them specific and actionable
-               - Show how this block could be used in an automation workflow
-
-            3. **Important Rules**
-               - ONLY modify content within `<!-- MANUAL: -->` and `<!-- END MANUAL -->` markers
-               - Do NOT modify auto-generated sections (inputs/outputs tables, descriptions)
-               - Keep content accurate based on the actual block implementation
-               - Write for users who may not be technical experts
-
-            4. **Output**
-               ${{ inputs.dry_run == true && 'DRY RUN MODE: Show proposed changes for each file but do NOT actually edit the files. Describe what you would change.' || 'LIVE MODE: Actually edit the files to improve the documentation.' }}
-
-            ## Example Improvements
-
-            **Before (How it works):**
-            ```
-            _Add technical explanation here._
-            ```
-
-            **After (How it works):**
-            ```
-            This block connects to the GitHub API to retrieve issue information. When executed,
-            it authenticates using your GitHub credentials and fetches issue details including
-            title, body, labels, and assignees.
-
-            The block requires a valid GitHub OAuth connection with repository access permissions.
-            It supports both public and private repositories you have access to.
-            ```
-
-            **Before (Possible use case):**
-            ```
-            _Add practical use case examples here._
-            ```
-
-            **After (Possible use case):**
-            ```
-            **Customer Support Automation**: Monitor a GitHub repository for new issues with
-            the "bug" label, then automatically create a ticket in your support system and
-            notify the on-call engineer via Slack.
-
-            **Release Notes Generation**: When a new release is published, gather all closed
-            issues since the last release and generate a summary for your changelog.
-            ```
-
-            Begin by finding and listing the documentation files to process.
-
-      - name: Create PR with enhanced documentation
-        if: ${{ inputs.dry_run == false }}
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          # Check if there are changes
-          if git diff --quiet docs/integrations/; then
-            echo "No changes to commit"
-            exit 0
-          fi
-
-          # Configure git
-          git config user.name "github-actions[bot]"
-          git config user.email "github-actions[bot]@users.noreply.github.com"
-
-          # Create branch and commit
-          BRANCH_NAME="docs/enhance-blocks-$(date +%Y%m%d-%H%M%S)"
-          git checkout -b "$BRANCH_NAME"
-          git add docs/integrations/
-          git commit -m "docs: enhance block documentation with LLM-generated content
-
-          Pattern: ${{ inputs.block_pattern }}
-          Max blocks: ${{ inputs.max_blocks }}
-
-          🤖 Generated with [Claude Code](https://claude.com/claude-code)
-
-          Co-Authored-By: Claude <noreply@anthropic.com>"
-
-          # Push and create PR
-          git push -u origin "$BRANCH_NAME"
-          gh pr create \
-            --title "docs: LLM-enhanced block documentation" \
-            --body "## Summary
-          This PR contains LLM-enhanced documentation for block files matching pattern: \`${{ inputs.block_pattern }}\`
-
-          The following manual sections were improved:
-          - **How it works**: Technical explanations based on block implementations
-          - **Possible use case**: Practical, real-world examples
-
-          ## Review Checklist
-          - [ ] Content is accurate based on block implementations
-          - [ ] Examples are practical and helpful
-          - [ ] No auto-generated sections were modified
-
-          ---
-          🤖 Generated with [Claude Code](https://claude.com/claude-code)" \
-            --base dev
@@ -4,9 +4,14 @@ from collections.abc import AsyncGenerator
 from typing import Any
 
 import orjson
-from langfuse import get_client, propagate_attributes
-from langfuse.openai import openai  # type: ignore
-from openai import APIConnectionError, APIError, APIStatusError, RateLimitError
+from langfuse import Langfuse
+from openai import (
+    APIConnectionError,
+    APIError,
+    APIStatusError,
+    AsyncOpenAI,
+    RateLimitError,
+)
 from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam
 
 from backend.data.understanding import (
@@ -16,6 +21,7 @@ from backend.data.understanding import (
 from backend.util.exceptions import NotFoundError
 from backend.util.settings import Settings
 
+from . import db as chat_db
 from .config import ChatConfig
 from .model import (
     ChatMessage,
@@ -44,10 +50,10 @@ logger = logging.getLogger(__name__)
 
 config = ChatConfig()
 settings = Settings()
-client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
+client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
 
-langfuse = get_client()
+# Langfuse client (lazy initialization)
+_langfuse_client: Langfuse | None = None
 
 
 class LangfuseNotConfiguredError(Exception):
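Note the construction change: `langfuse.openai` is a drop-in wrapper that traces every OpenAI call automatically, while the plain `AsyncOpenAI` client traces nothing, which is why the hunks below add explicit observation management. A sketch of the two styles side by side (placeholder credentials, not the project's config):

```python
# Sketch only; api_key/base_url are placeholders, the real values come from ChatConfig.

# Old style: wrapped client, every completion call is traced automatically.
# from langfuse.openai import openai
# client = openai.AsyncOpenAI(api_key="sk-placeholder")

# New style: plain client; tracing is done manually around each call.
from openai import AsyncOpenAI

client = AsyncOpenAI(api_key="sk-placeholder", base_url="https://example.invalid/v1")
```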
@@ -63,6 +69,65 @@ def _is_langfuse_configured() -> bool:
     )
 
 
+def _get_langfuse_client() -> Langfuse:
+    """Get or create the Langfuse client for prompt management and tracing."""
+    global _langfuse_client
+    if _langfuse_client is None:
+        if not _is_langfuse_configured():
+            raise LangfuseNotConfiguredError(
+                "Langfuse is not configured. The chat feature requires Langfuse for prompt management. "
+                "Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables."
+            )
+        _langfuse_client = Langfuse(
+            public_key=settings.secrets.langfuse_public_key,
+            secret_key=settings.secrets.langfuse_secret_key,
+            host=settings.secrets.langfuse_host or "https://cloud.langfuse.com",
+        )
+    return _langfuse_client
+
+
+def _get_environment() -> str:
+    """Get the current environment name for Langfuse tagging."""
+    return settings.config.app_env.value
+
+
+def _get_langfuse_prompt() -> str:
+    """Fetch the latest production prompt from Langfuse.
+
+    Returns:
+        The compiled prompt text from Langfuse.
+
+    Raises:
+        Exception: If Langfuse is unavailable or prompt fetch fails.
+    """
+    try:
+        langfuse = _get_langfuse_client()
+        # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
+        prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
+        compiled = prompt.compile()
+        logger.info(
+            f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse "
+            f"(version: {prompt.version})"
+        )
+        return compiled
+    except Exception as e:
+        logger.error(f"Failed to fetch prompt from Langfuse: {e}")
+        raise
+
+
+async def _is_first_session(user_id: str) -> bool:
+    """Check if this is the user's first chat session.
+
+    Returns True if the user has 1 or fewer sessions (meaning this is their first).
+    """
+    try:
+        session_count = await chat_db.get_user_session_count(user_id)
+        return session_count <= 1
+    except Exception as e:
+        logger.warning(f"Failed to check session count for user {user_id}: {e}")
+        return False  # Default to non-onboarding if we can't check
+
+
 async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
     """Build the full system prompt including business understanding if available.
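A compact usage sketch of the lazy-singleton shape `_get_langfuse_client` follows: the module-level handle starts as `None`, configuration is validated once on first use, and every later call returns the same instance. The stub class below stands in for the real SDK:

```python
# Stubbed illustration of the lazy-client pattern; _StubClient is not the real SDK.
class _StubClient:
    pass


_client: _StubClient | None = None


def get_client(configured: bool = True) -> _StubClient:
    global _client
    if _client is None:
        if not configured:
            # Mirrors LangfuseNotConfiguredError in the real module.
            raise RuntimeError("client is not configured")
        _client = _StubClient()
    return _client


assert get_client() is get_client()  # one instance, created on first use
```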
@@ -74,6 +139,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
         Tuple of (compiled prompt string, Langfuse prompt object for tracing)
     """
 
+    langfuse = _get_langfuse_client()
+
     # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
     prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
@@ -91,7 +158,7 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
         context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
 
     compiled = prompt.compile(users_information=context)
-    return compiled, understanding
+    return compiled, prompt
 
 
 async def _generate_session_title(message: str) -> str | None:
@@ -150,7 +217,6 @@ async def assign_user_to_session(
 async def stream_chat_completion(
     session_id: str,
     message: str | None = None,
-    tool_call_response: str | None = None,
     is_user_message: bool = True,
     user_id: str | None = None,
     retry_count: int = 0,
@@ -190,6 +256,11 @@ async def stream_chat_completion(
         yield StreamFinish()
         return
 
+    # Langfuse observations will be created after session is loaded (need messages for input)
+    # Initialize to None so finally block can safely check and end them
+    trace = None
+    generation = None
+
     # Only fetch from Redis if session not provided (initial call)
     if session is None:
        session = await get_chat_session(session_id, user_id)
@@ -265,259 +336,297 @@ async def stream_chat_completion(
         asyncio.create_task(_update_title())
 
     # Build system prompt with business understanding
-    system_prompt, understanding = await _build_system_prompt(user_id)
+    system_prompt, langfuse_prompt = await _build_system_prompt(user_id)
+
+    # Build input messages including system prompt for complete Langfuse logging
+    trace_input_messages = [{"role": "system", "content": system_prompt}] + [
+        m.model_dump() for m in session.messages
+    ]
 
     # Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id)
     # Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes
-    input = message
-    if not message and tool_call_response:
-        input = tool_call_response
-
-    langfuse = get_client()
-    with langfuse.start_as_current_observation(
-        as_type="span",
-        name="user-copilot-request",
-        input=input,
-    ) as span:
-        with propagate_attributes(
-            session_id=session_id,
-            user_id=user_id,
-            tags=["copilot"],
-            metadata={
-                "users_information": format_understanding_for_prompt(understanding)[
-                    :200
-                ]  # langfuse only accepts upto to 200 chars
-            },
-        ):
-            # Initialize variables that will be used in finally block (must be defined before try)
-            assistant_response = ChatMessage(
-                role="assistant",
-                content="",
-            )
-            accumulated_tool_calls: list[dict[str, Any]] = []
-
-            # Wrap main logic in try/finally to ensure Langfuse observations are always ended
-            has_yielded_end = False
-            has_yielded_error = False
-            has_done_tool_call = False
-            has_received_text = False
-            text_streaming_ended = False
-            tool_response_messages: list[ChatMessage] = []
-            should_retry = False
-
-            # Generate unique IDs for AI SDK protocol
-            import uuid as uuid_module
-
-            message_id = str(uuid_module.uuid4())
-            text_block_id = str(uuid_module.uuid4())
-
-            # Yield message start
-            yield StreamStart(messageId=message_id)
-
-            try:
-                async for chunk in _stream_chat_chunks(
-                    session=session,
-                    tools=tools,
-                    system_prompt=system_prompt,
-                    text_block_id=text_block_id,
-                ):
-                    if isinstance(chunk, StreamTextStart):
-                        # Emit text-start before first text delta
-                        if not has_received_text:
-                            yield chunk
-                    elif isinstance(chunk, StreamTextDelta):
-                        delta = chunk.delta or ""
-                        assert assistant_response.content is not None
-                        assistant_response.content += delta
-                        has_received_text = True
-                        yield chunk
-                    elif isinstance(chunk, StreamTextEnd):
-                        # Emit text-end after text completes
-                        if has_received_text and not text_streaming_ended:
-                            text_streaming_ended = True
-                            if assistant_response.content:
-                                logger.warn(
-                                    f"StreamTextEnd: Attempting to set output {assistant_response.content}"
-                                )
-                                span.update_trace(output=assistant_response.content)
-                                span.update(output=assistant_response.content)
-                        yield chunk
-                    elif isinstance(chunk, StreamToolInputStart):
-                        # Emit text-end before first tool call, but only if we've received text
-                        if has_received_text and not text_streaming_ended:
-                            yield StreamTextEnd(id=text_block_id)
-                            text_streaming_ended = True
-                        yield chunk
-                    elif isinstance(chunk, StreamToolInputAvailable):
-                        # Accumulate tool calls in OpenAI format
-                        accumulated_tool_calls.append(
-                            {
-                                "id": chunk.toolCallId,
-                                "type": "function",
-                                "function": {
-                                    "name": chunk.toolName,
-                                    "arguments": orjson.dumps(chunk.input).decode(
-                                        "utf-8"
-                                    ),
-                                },
-                            }
-                        )
-                    elif isinstance(chunk, StreamToolOutputAvailable):
-                        result_content = (
-                            chunk.output
-                            if isinstance(chunk.output, str)
-                            else orjson.dumps(chunk.output).decode("utf-8")
-                        )
-                        tool_response_messages.append(
-                            ChatMessage(
-                                role="tool",
-                                content=result_content,
-                                tool_call_id=chunk.toolCallId,
-                            )
-                        )
-                        has_done_tool_call = True
-                        # Track if any tool execution failed
-                        if not chunk.success:
-                            logger.warning(
-                                f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed"
-                            )
-                        yield chunk
-                    elif isinstance(chunk, StreamFinish):
-                        if not has_done_tool_call:
-                            # Emit text-end before finish if we received text but haven't closed it
-                            if has_received_text and not text_streaming_ended:
-                                yield StreamTextEnd(id=text_block_id)
-                                text_streaming_ended = True
-                            has_yielded_end = True
-                            yield chunk
-                    elif isinstance(chunk, StreamError):
-                        has_yielded_error = True
-                    elif isinstance(chunk, StreamUsage):
-                        session.usage.append(
-                            Usage(
-                                prompt_tokens=chunk.promptTokens,
-                                completion_tokens=chunk.completionTokens,
-                                total_tokens=chunk.totalTokens,
-                            )
-                        )
-                    else:
-                        logger.error(
-                            f"Unknown chunk type: {type(chunk)}", exc_info=True
-                        )
-                if assistant_response.content:
-                    langfuse.update_current_trace(output=assistant_response.content)
-                    langfuse.update_current_span(output=assistant_response.content)
-                elif tool_response_messages:
-                    langfuse.update_current_trace(output=str(tool_response_messages))
-                    langfuse.update_current_span(output=str(tool_response_messages))
-
-            except Exception as e:
-                logger.error(f"Error during stream: {e!s}", exc_info=True)
-
-                # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
-                is_retryable = isinstance(
-                    e, (orjson.JSONDecodeError, KeyError, TypeError)
-                )
-
-                if is_retryable and retry_count < config.max_retries:
-                    logger.info(
-                        f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
-                    )
-                    should_retry = True
-                else:
-                    # Non-retryable error or max retries exceeded
-                    # Save any partial progress before reporting error
-                    messages_to_save: list[ChatMessage] = []
-
-                    # Add assistant message if it has content or tool calls
-                    if accumulated_tool_calls:
-                        assistant_response.tool_calls = accumulated_tool_calls
-                    if assistant_response.content or assistant_response.tool_calls:
-                        messages_to_save.append(assistant_response)
-
-                    # Add tool response messages after assistant message
-                    messages_to_save.extend(tool_response_messages)
-
-                    session.messages.extend(messages_to_save)
-                    await upsert_chat_session(session)
-
-                    if not has_yielded_error:
-                        error_message = str(e)
-                        if not is_retryable:
-                            error_message = f"Non-retryable error: {error_message}"
-                        elif retry_count >= config.max_retries:
-                            error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}"
-
-                        error_response = StreamError(errorText=error_message)
-                        yield error_response
-                    if not has_yielded_end:
-                        yield StreamFinish()
-                    return
-
-            # Handle retry outside of exception handler to avoid nesting
-            if should_retry and retry_count < config.max_retries:
-                logger.info(
-                    f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
-                )
-                async for chunk in stream_chat_completion(
-                    session_id=session.session_id,
-                    user_id=user_id,
-                    retry_count=retry_count + 1,
-                    session=session,
-                    context=context,
-                ):
-                    yield chunk
-                return  # Exit after retry to avoid double-saving in finally block
-
-            # Normal completion path - save session and handle tool call continuation
-            logger.info(
-                f"Normal completion path: session={session.session_id}, "
-                f"current message_count={len(session.messages)}"
-            )
-
-            # Build the messages list in the correct order
-            messages_to_save: list[ChatMessage] = []
-
-            # Add assistant message with tool_calls if any
-            if accumulated_tool_calls:
-                assistant_response.tool_calls = accumulated_tool_calls
-                logger.info(
-                    f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
-                )
-            if assistant_response.content or assistant_response.tool_calls:
-                messages_to_save.append(assistant_response)
-                logger.info(
-                    f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
-                )
-
-            # Add tool response messages after assistant message
-            messages_to_save.extend(tool_response_messages)
-            logger.info(
-                f"Saving {len(tool_response_messages)} tool response messages, "
-                f"total_to_save={len(messages_to_save)}"
-            )
-
-            session.messages.extend(messages_to_save)
-            logger.info(
-                f"Extended session messages, new message_count={len(session.messages)}"
-            )
-            await upsert_chat_session(session)
-
-            # If we did a tool call, stream the chat completion again to get the next response
-            if has_done_tool_call:
-                logger.info(
-                    "Tool call executed, streaming chat completion again to get assistant response"
-                )
-                async for chunk in stream_chat_completion(
-                    session_id=session.session_id,
-                    user_id=user_id,
-                    session=session,  # Pass session object to avoid Redis refetch
-                    context=context,
-                    tool_call_response=str(tool_response_messages),
-                ):
-                    yield chunk
+    try:
+        langfuse = _get_langfuse_client()
+        env = _get_environment()
+        trace = langfuse.start_observation(
+            name="chat_completion",
+            input={"messages": trace_input_messages},
+            metadata={
+                "environment": env,
+                "model": config.model,
+                "message_count": len(session.messages),
+                "prompt_name": langfuse_prompt.name if langfuse_prompt else None,
+                "prompt_version": langfuse_prompt.version if langfuse_prompt else None,
+            },
+        )
+        # Set trace-level attributes (session_id, user_id, tags)
+        trace.update_trace(
+            session_id=session_id,
+            user_id=user_id,
+            tags=[env, "copilot"],
+        )
+    except Exception as e:
+        logger.warning(f"Failed to create Langfuse trace: {e}")
+
+    # Initialize variables that will be used in finally block (must be defined before try)
+    assistant_response = ChatMessage(
+        role="assistant",
+        content="",
+    )
+    accumulated_tool_calls: list[dict[str, Any]] = []
+
+    # Wrap main logic in try/finally to ensure Langfuse observations are always ended
+    try:
+        has_yielded_end = False
+        has_yielded_error = False
+        has_done_tool_call = False
+        has_received_text = False
+        text_streaming_ended = False
+        tool_response_messages: list[ChatMessage] = []
+        should_retry = False
+
+        # Generate unique IDs for AI SDK protocol
+        import uuid as uuid_module
+
+        message_id = str(uuid_module.uuid4())
+        text_block_id = str(uuid_module.uuid4())
+
+        # Yield message start
+        yield StreamStart(messageId=message_id)
+
+        # Create Langfuse generation for each LLM call, linked to the prompt
+        # Using v3 SDK: start_observation with as_type="generation"
+        generation = (
+            trace.start_observation(
+                as_type="generation",
+                name="llm_call",
+                model=config.model,
+                input={"messages": trace_input_messages},
+                prompt=langfuse_prompt,
+            )
+            if trace
+            else None
+        )
+
+        try:
+            async for chunk in _stream_chat_chunks(
+                session=session,
+                tools=tools,
+                system_prompt=system_prompt,
+                text_block_id=text_block_id,
+            ):
+                if isinstance(chunk, StreamTextStart):
+                    # Emit text-start before first text delta
+                    if not has_received_text:
+                        yield chunk
+                elif isinstance(chunk, StreamTextDelta):
+                    delta = chunk.delta or ""
+                    assert assistant_response.content is not None
+                    assistant_response.content += delta
+                    has_received_text = True
+                    yield chunk
+                elif isinstance(chunk, StreamTextEnd):
+                    # Emit text-end after text completes
+                    if has_received_text and not text_streaming_ended:
+                        text_streaming_ended = True
+                    yield chunk
+                elif isinstance(chunk, StreamToolInputStart):
+                    # Emit text-end before first tool call, but only if we've received text
+                    if has_received_text and not text_streaming_ended:
+                        yield StreamTextEnd(id=text_block_id)
+                        text_streaming_ended = True
+                    yield chunk
+                elif isinstance(chunk, StreamToolInputAvailable):
+                    # Accumulate tool calls in OpenAI format
+                    accumulated_tool_calls.append(
+                        {
+                            "id": chunk.toolCallId,
+                            "type": "function",
+                            "function": {
+                                "name": chunk.toolName,
+                                "arguments": orjson.dumps(chunk.input).decode("utf-8"),
+                            },
+                        }
+                    )
+                elif isinstance(chunk, StreamToolOutputAvailable):
+                    result_content = (
+                        chunk.output
+                        if isinstance(chunk.output, str)
+                        else orjson.dumps(chunk.output).decode("utf-8")
+                    )
+                    tool_response_messages.append(
+                        ChatMessage(
+                            role="tool",
+                            content=result_content,
+                            tool_call_id=chunk.toolCallId,
+                        )
+                    )
+                    has_done_tool_call = True
+                    # Track if any tool execution failed
+                    if not chunk.success:
+                        logger.warning(
+                            f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed"
+                        )
+                    yield chunk
+                elif isinstance(chunk, StreamFinish):
+                    if not has_done_tool_call:
+                        # Emit text-end before finish if we received text but haven't closed it
+                        if has_received_text and not text_streaming_ended:
+                            yield StreamTextEnd(id=text_block_id)
+                            text_streaming_ended = True
+                        has_yielded_end = True
+                        yield chunk
+                elif isinstance(chunk, StreamError):
+                    has_yielded_error = True
+                elif isinstance(chunk, StreamUsage):
+                    session.usage.append(
+                        Usage(
+                            prompt_tokens=chunk.promptTokens,
+                            completion_tokens=chunk.completionTokens,
+                            total_tokens=chunk.totalTokens,
+                        )
+                    )
+                else:
+                    logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
+        except Exception as e:
+            logger.error(f"Error during stream: {e!s}", exc_info=True)
+
+            # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
+            is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError))
+
+            if is_retryable and retry_count < config.max_retries:
+                logger.info(
+                    f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
+                )
+                should_retry = True
+            else:
+                # Non-retryable error or max retries exceeded
+                # Save any partial progress before reporting error
+                messages_to_save: list[ChatMessage] = []
+
+                # Add assistant message if it has content or tool calls
+                if accumulated_tool_calls:
+                    assistant_response.tool_calls = accumulated_tool_calls
+                if assistant_response.content or assistant_response.tool_calls:
+                    messages_to_save.append(assistant_response)
+
+                # Add tool response messages after assistant message
+                messages_to_save.extend(tool_response_messages)
+
+                session.messages.extend(messages_to_save)
+                await upsert_chat_session(session)
+
+                if not has_yielded_error:
+                    error_message = str(e)
+                    if not is_retryable:
+                        error_message = f"Non-retryable error: {error_message}"
+                    elif retry_count >= config.max_retries:
+                        error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}"
+
+                    error_response = StreamError(errorText=error_message)
+                    yield error_response
+                if not has_yielded_end:
+                    yield StreamFinish()
+                return
+
+        # Handle retry outside of exception handler to avoid nesting
+        if should_retry and retry_count < config.max_retries:
+            logger.info(
+                f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
+            )
+            async for chunk in stream_chat_completion(
+                session_id=session.session_id,
+                user_id=user_id,
+                retry_count=retry_count + 1,
+                session=session,
+                context=context,
+            ):
+                yield chunk
+            return  # Exit after retry to avoid double-saving in finally block
+
+        # Normal completion path - save session and handle tool call continuation
+        logger.info(
+            f"Normal completion path: session={session.session_id}, "
+            f"current message_count={len(session.messages)}"
+        )
+
+        # Build the messages list in the correct order
+        messages_to_save: list[ChatMessage] = []
+
+        # Add assistant message with tool_calls if any
+        if accumulated_tool_calls:
+            assistant_response.tool_calls = accumulated_tool_calls
+            logger.info(
+                f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
+            )
+        if assistant_response.content or assistant_response.tool_calls:
+            messages_to_save.append(assistant_response)
+            logger.info(
+                f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
+            )
+
+        # Add tool response messages after assistant message
+        messages_to_save.extend(tool_response_messages)
+        logger.info(
+            f"Saving {len(tool_response_messages)} tool response messages, "
+            f"total_to_save={len(messages_to_save)}"
+        )
+
+        session.messages.extend(messages_to_save)
+        logger.info(
+            f"Extended session messages, new message_count={len(session.messages)}"
+        )
+        await upsert_chat_session(session)
+
+        # If we did a tool call, stream the chat completion again to get the next response
+        if has_done_tool_call:
+            logger.info(
+                "Tool call executed, streaming chat completion again to get assistant response"
+            )
+            async for chunk in stream_chat_completion(
+                session_id=session.session_id,
+                user_id=user_id,
+                session=session,  # Pass session object to avoid Redis refetch
+                context=context,
+            ):
+                yield chunk
+    finally:
+        # Always end Langfuse observations to prevent resource leaks
+        # Guard against None and catch errors to avoid masking original exceptions
+        if generation is not None:
+            try:
+                latest_usage = session.usage[-1] if session.usage else None
+                generation.update(
+                    model=config.model,
+                    output={
+                        "content": assistant_response.content,
+                        "tool_calls": accumulated_tool_calls or None,
+                    },
+                    usage_details=(
+                        {
+                            "input": latest_usage.prompt_tokens,
+                            "output": latest_usage.completion_tokens,
+                            "total": latest_usage.total_tokens,
+                        }
+                        if latest_usage
+                        else None
+                    ),
+                )
+                generation.end()
+            except Exception as e:
+                logger.warning(f"Failed to end Langfuse generation: {e}")
+
+        if trace is not None:
+            try:
+                if accumulated_tool_calls:
+                    trace.update_trace(output={"tool_calls": accumulated_tool_calls})
+                else:
+                    trace.update_trace(output={"response": assistant_response.content})
+                trace.end()
+            except Exception as e:
+                logger.warning(f"Failed to end Langfuse trace: {e}")
 
 
 # Retry configuration for OpenAI API calls
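The restructured function follows a create-early/end-always lifecycle for the Langfuse handles: both start as `None`, creation failures are downgraded to warnings, and a `finally` block ends whatever was actually created, so a mid-stream exception cannot leak an open observation. A stubbed sketch of that shape (the stub class is illustrative, not the SDK):

```python
# Stubbed sketch of the observation lifecycle used in the hunk above.
class StubObservation:
    def end(self) -> None:
        print("observation ended")


def run() -> None:
    trace: StubObservation | None = None  # safe default for the finally block
    try:
        trace = StubObservation()  # creation may fail; trace would then stay None
        raise RuntimeError("mid-stream failure")
    except RuntimeError:
        pass  # the real code logs the error and may retry
    finally:
        if trace is not None:
            try:
                trace.end()  # always runs, so no observation leaks
            except Exception:
                pass  # never mask the original exception


run()
```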
@@ -791,4 +900,5 @@ async def _yield_tool_call(
         session=session,
     )
 
+    logger.info(f"Yielding Tool execution response: {tool_execution_response}")
     yield tool_execution_response
@@ -30,7 +30,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
     "find_library_agent": FindLibraryAgentTool(),
     "run_agent": RunAgentTool(),
     "run_block": RunBlockTool(),
-    "view_agent_output": AgentOutputTool(),
+    "agent_output": AgentOutputTool(),
     "search_docs": SearchDocsTool(),
     "get_doc_page": GetDocPageTool(),
 }
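The key rename here pairs with the `name` property rename in the `AgentOutputTool` hunk further down: dispatch looks tools up by the name the model emits, so the registry key and the tool's `name` must match. A small sketch of the invariant with a stub tool class; deriving the dict from the property is one way to keep the two from drifting apart:

```python
# Stub sketch: the registry key must equal each tool's name property.
class AgentOutputTool:
    @property
    def name(self) -> str:
        return "agent_output"


# Building keys from the property keeps key and name in lockstep by construction.
TOOL_REGISTRY = {tool.name: tool for tool in [AgentOutputTool()]}

assert "agent_output" in TOOL_REGISTRY
```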
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 from backend.data.understanding import (
     BusinessUnderstandingInput,
@@ -61,7 +59,6 @@ and automations for the user's specific needs."""
         """Requires authentication to store user-specific data."""
         return True
 
-    @observe(as_type="tool", name="add_understanding")
     async def _execute(
         self,
         user_id: str | None,
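The remaining tool files in this compare all receive the same two-line removal: the `observe` import and the `@observe(as_type="tool", name=...)` decorator on `_execute`, so per-tool spans are no longer opened implicitly by the SDK. For reference, a minimal sketch of the removed pattern (illustrative tool and body; only the decorator usage is taken from the diff):

```python
# Sketch of the decorator pattern removed across these tool files.
from langfuse import observe  # the import each hunk deletes


class ExampleTool:
    @observe(as_type="tool", name="example_tool")  # old: automatic span per call
    async def _execute(self, user_id: str | None, **kwargs) -> dict:
        return {"ok": True}  # illustrative body only
```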
@@ -218,7 +218,6 @@ async def save_agent_to_library(
     library_agents = await library_db.create_library_agent(
         graph=created_graph,
         user_id=user_id,
-        sensitive_action_safe_mode=True,
         create_library_agents_for_sub_graphs=False,
     )
 
@@ -5,7 +5,6 @@ import re
 from datetime import datetime, timedelta, timezone
 from typing import Any
 
-from langfuse import observe
 from pydantic import BaseModel, field_validator
 
 from backend.api.features.chat.model import ChatSession
@@ -104,7 +103,7 @@ class AgentOutputTool(BaseTool):
 
     @property
     def name(self) -> str:
-        return "view_agent_output"
+        return "agent_output"
 
     @property
     def description(self) -> str:
@@ -329,7 +328,6 @@ class AgentOutputTool(BaseTool):
             total_executions=len(available_executions) if available_executions else 1,
         )
 
-    @observe(as_type="tool", name="view_agent_output")
     async def _execute(
         self,
         user_id: str | None,
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (
@@ -80,7 +78,6 @@ class CreateAgentTool(BaseTool):
             "required": ["description"],
         }
 
-    @observe(as_type="tool", name="create_agent")
     async def _execute(
         self,
         user_id: str | None,
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (
@@ -87,7 +85,6 @@ class EditAgentTool(BaseTool):
             "required": ["agent_id", "changes"],
         }
 
-    @observe(as_type="tool", name="edit_agent")
     async def _execute(
         self,
         user_id: str | None,
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents
@@ -37,7 +35,6 @@ class FindAgentTool(BaseTool):
             "required": ["query"],
         }
 
-    @observe(as_type="tool", name="find_agent")
     async def _execute(
         self, user_id: str | None, session: ChatSession, **kwargs
     ) -> ToolResponseBase:
@@ -1,7 +1,6 @@
 import logging
 from typing import Any
 
-from langfuse import observe
 from prisma.enums import ContentType
 
 from backend.api.features.chat.model import ChatSession
@@ -56,7 +55,6 @@ class FindBlockTool(BaseTool):
     def requires_auth(self) -> bool:
         return True
 
-    @observe(as_type="tool", name="find_block")
     async def _execute(
         self,
         user_id: str | None,
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents
@@ -43,7 +41,6 @@ class FindLibraryAgentTool(BaseTool):
     def requires_auth(self) -> bool:
         return True
 
-    @observe(as_type="tool", name="find_library_agent")
     async def _execute(
         self, user_id: str | None, session: ChatSession, **kwargs
     ) -> ToolResponseBase:
@@ -4,8 +4,6 @@ import logging
 from pathlib import Path
 from typing import Any
 
-from langfuse import observe
-
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool
 from backend.api.features.chat.tools.models import (
@@ -73,7 +71,6 @@ class GetDocPageTool(BaseTool):
         url_path = path.rsplit(".", 1)[0] if "." in path else path
         return f"{DOCS_BASE_URL}/{url_path}"
 
-    @observe(as_type="tool", name="get_doc_page")
     async def _execute(
         self,
         user_id: str | None,
@@ -3,7 +3,6 @@
 import logging
 from typing import Any
 
-from langfuse import observe
 from pydantic import BaseModel, Field, field_validator
 
 from backend.api.features.chat.config import ChatConfig
@@ -33,7 +32,7 @@ from .models import (
     UserReadiness,
 )
 from .utils import (
-    build_missing_credentials_from_graph,
+    check_user_has_required_credentials,
     extract_credentials_from_schema,
     fetch_graph_from_store_slug,
     get_or_create_library_agent,
@@ -155,7 +154,6 @@ class RunAgentTool(BaseTool):
|
|||||||
"""All operations require authentication."""
|
"""All operations require authentication."""
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@observe(as_type="tool", name="run_agent")
|
|
||||||
async def _execute(
|
async def _execute(
|
||||||
self,
|
self,
|
||||||
user_id: str | None,
|
user_id: str | None,
|
||||||
@@ -237,13 +235,15 @@ class RunAgentTool(BaseTool):
|
|||||||
# Return credentials needed response with input data info
|
# Return credentials needed response with input data info
|
||||||
# The UI handles credential setup automatically, so the message
|
# The UI handles credential setup automatically, so the message
|
||||||
# focuses on asking about input data
|
# focuses on asking about input data
|
||||||
requirements_creds_dict = build_missing_credentials_from_graph(
|
credentials = extract_credentials_from_schema(
|
||||||
graph, None
|
graph.credentials_input_schema
|
||||||
)
|
)
|
||||||
missing_credentials_dict = build_missing_credentials_from_graph(
|
missing_creds_check = await check_user_has_required_credentials(
|
||||||
graph, graph_credentials
|
user_id, credentials
|
||||||
)
|
)
|
||||||
requirements_creds_list = list(requirements_creds_dict.values())
|
missing_credentials_dict = {
|
||||||
|
c.id: c.model_dump() for c in missing_creds_check
|
||||||
|
}
|
||||||
|
|
||||||
return SetupRequirementsResponse(
|
return SetupRequirementsResponse(
|
||||||
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
|
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
|
||||||
@@ -257,7 +257,7 @@ class RunAgentTool(BaseTool):
|
|||||||
ready_to_run=False,
|
ready_to_run=False,
|
||||||
),
|
),
|
||||||
requirements={
|
requirements={
|
||||||
"credentials": requirements_creds_list,
|
"credentials": [c.model_dump() for c in credentials],
|
||||||
"inputs": self._get_inputs_list(graph.input_schema),
|
"inputs": self._get_inputs_list(graph.input_schema),
|
||||||
"execution_modes": self._get_execution_modes(graph),
|
"execution_modes": self._get_execution_modes(graph),
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ import logging
|
|||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from langfuse import observe
|
|
||||||
|
|
||||||
from backend.api.features.chat.model import ChatSession
|
from backend.api.features.chat.model import ChatSession
|
||||||
from backend.data.block import get_block
|
from backend.data.block import get_block
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
@@ -22,7 +20,6 @@ from .models import (
|
|||||||
ToolResponseBase,
|
ToolResponseBase,
|
||||||
UserReadiness,
|
UserReadiness,
|
||||||
)
|
)
|
||||||
from .utils import build_missing_credentials_from_field_info
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -130,7 +127,6 @@ class RunBlockTool(BaseTool):
|
|||||||
|
|
||||||
return matched_credentials, missing_credentials
|
return matched_credentials, missing_credentials
|
||||||
|
|
||||||
@observe(as_type="tool", name="run_block")
|
|
||||||
async def _execute(
|
async def _execute(
|
||||||
self,
|
self,
|
||||||
user_id: str | None,
|
user_id: str | None,
|
||||||
@@ -190,11 +186,7 @@ class RunBlockTool(BaseTool):
|
|||||||
|
|
||||||
if missing_credentials:
|
if missing_credentials:
|
||||||
# Return setup requirements response with missing credentials
|
# Return setup requirements response with missing credentials
|
||||||
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}
|
||||||
missing_creds_dict = build_missing_credentials_from_field_info(
|
|
||||||
credentials_fields_info, set(matched_credentials.keys())
|
|
||||||
)
|
|
||||||
missing_creds_list = list(missing_creds_dict.values())
|
|
||||||
|
|
||||||
return SetupRequirementsResponse(
|
return SetupRequirementsResponse(
|
||||||
message=(
|
message=(
|
||||||
@@ -211,7 +203,7 @@ class RunBlockTool(BaseTool):
|
|||||||
ready_to_run=False,
|
ready_to_run=False,
|
||||||
),
|
),
|
||||||
requirements={
|
requirements={
|
||||||
"credentials": missing_creds_list,
|
"credentials": [c.model_dump() for c in missing_credentials],
|
||||||
"inputs": self._get_inputs_list(block),
|
"inputs": self._get_inputs_list(block),
|
||||||
"execution_modes": ["immediate"],
|
"execution_modes": ["immediate"],
|
||||||
},
|
},
|
||||||
|
|||||||
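Note on the requirements payload shape: on the feature side, both tools serialize the pydantic credential models directly with model_dump() instead of going through the removed build_missing_credentials_from_* helpers. A minimal runnable sketch of the two shapes produced above, using a hypothetical stand-in model rather than the real CredentialsMetaInput from backend.data.model:

from pydantic import BaseModel


class CredMeta(BaseModel):
    # Hypothetical stand-in for the real credentials model
    id: str
    provider: str
    type: str


missing = [CredMeta(id="github_creds", provider="github", type="api_key")]

# Keyed mapping, as built for missing_credentials_dict:
as_dict = {c.id: c.model_dump() for c in missing}
# Plain list, as placed under requirements["credentials"]:
as_list = [c.model_dump() for c in missing]

assert as_dict["github_creds"]["provider"] == "github"
assert as_list[0]["type"] == "api_key"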
@@ -3,7 +3,6 @@
 import logging
 from typing import Any

-from langfuse import observe
 from prisma.enums import ContentType

 from backend.api.features.chat.model import ChatSession

@@ -88,7 +87,6 @@ class SearchDocsTool(BaseTool):
         url_path = path.rsplit(".", 1)[0] if "." in path else path
         return f"{DOCS_BASE_URL}/{url_path}"

-    @observe(as_type="tool", name="search_docs")
     async def _execute(
         self,
         user_id: str | None,

@@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
 from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
-from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
+from backend.data.model import CredentialsMetaInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.util.exceptions import NotFoundError

@@ -89,59 +89,6 @@ def extract_credentials_from_schema(
     return credentials


-def _serialize_missing_credential(
-    field_key: str, field_info: CredentialsFieldInfo
-) -> dict[str, Any]:
-    """
-    Convert credential field info into a serializable dict that preserves all supported
-    credential types (e.g., api_key + oauth2) so the UI can offer multiple options.
-    """
-    supported_types = sorted(field_info.supported_types)
-    provider = next(iter(field_info.provider), "unknown")
-    scopes = sorted(field_info.required_scopes or [])
-
-    return {
-        "id": field_key,
-        "title": field_key.replace("_", " ").title(),
-        "provider": provider,
-        "provider_name": provider.replace("_", " ").title(),
-        "type": supported_types[0] if supported_types else "api_key",
-        "types": supported_types,
-        "scopes": scopes,
-    }
-
-
-def build_missing_credentials_from_graph(
-    graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None
-) -> dict[str, Any]:
-    """
-    Build a missing_credentials mapping from a graph's aggregated credentials inputs,
-    preserving all supported credential types for each field.
-    """
-    matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
-    aggregated_fields = graph.aggregate_credentials_inputs()
-
-    return {
-        field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, (field_info, _node_fields) in aggregated_fields.items()
-        if field_key not in matched_keys
-    }
-
-
-def build_missing_credentials_from_field_info(
-    credential_fields: dict[str, CredentialsFieldInfo],
-    matched_keys: set[str],
-) -> dict[str, Any]:
-    """
-    Build missing_credentials mapping from a simple credentials field info dictionary.
-    """
-    return {
-        field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, field_info in credential_fields.items()
-        if field_key not in matched_keys
-    }
-
-
 def extract_credentials_as_dict(
     credentials_input_schema: dict[str, Any] | None,
 ) -> dict[str, CredentialsMetaInput]:
@@ -401,11 +401,27 @@ async def add_generated_agent_image(
     )


+def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
+    """
+    Initialize GraphSettings based on graph content.
+
+    Args:
+        graph: The graph to analyze
+
+    Returns:
+        GraphSettings with appropriate human_in_the_loop_safe_mode value
+    """
+    if graph.has_human_in_the_loop:
+        # Graph has HITL blocks - set safe mode to True by default
+        return GraphSettings(human_in_the_loop_safe_mode=True)
+    else:
+        # Graph has no HITL blocks - keep None
+        return GraphSettings(human_in_the_loop_safe_mode=None)
+
+
 async def create_library_agent(
     graph: graph_db.GraphModel,
     user_id: str,
-    hitl_safe_mode: bool = True,
-    sensitive_action_safe_mode: bool = False,
     create_library_agents_for_sub_graphs: bool = True,
 ) -> list[library_model.LibraryAgent]:
     """
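The helper above keeps human_in_the_loop_safe_mode tri-state on purpose: True when the graph contains HITL blocks, None (rather than False) when it does not, so "never initialized" stays distinguishable from "explicitly disabled". A minimal sketch of that distinction, assuming a pydantic-style GraphSettings with an optional boolean field:

from pydantic import BaseModel


class GraphSettings(BaseModel):
    # Simplified stand-in for the real GraphSettings model
    human_in_the_loop_safe_mode: bool | None = None


def init_settings(has_hitl: bool) -> GraphSettings:
    # True: HITL blocks present, reviews required by default.
    # None: no HITL blocks; the setting was never initialized.
    return GraphSettings(human_in_the_loop_safe_mode=True if has_hitl else None)


assert init_settings(True).human_in_the_loop_safe_mode is True
assert init_settings(False).human_in_the_loop_safe_mode is None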
@@ -414,8 +430,6 @@ async def create_library_agent(
     Args:
         agent: The agent/Graph to add to the library.
         user_id: The user to whom the agent will be added.
-        hitl_safe_mode: Whether HITL blocks require manual review (default True).
-        sensitive_action_safe_mode: Whether sensitive action blocks require review.
        create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well.

     Returns:

@@ -451,11 +465,7 @@ async def create_library_agent(
                     }
                 },
                 settings=SafeJson(
-                    GraphSettings.from_graph(
-                        graph_entry,
-                        hitl_safe_mode=hitl_safe_mode,
-                        sensitive_action_safe_mode=sensitive_action_safe_mode,
-                    ).model_dump()
+                    _initialize_graph_settings(graph_entry).model_dump()
                 ),
             ),
             include=library_agent_include(

@@ -617,6 +627,33 @@ async def update_library_agent(
         raise DatabaseError("Failed to update library agent") from e


+async def update_library_agent_settings(
+    user_id: str,
+    agent_id: str,
+    settings: GraphSettings,
+) -> library_model.LibraryAgent:
+    """
+    Updates the settings for a specific LibraryAgent.
+
+    Args:
+        user_id: The owner of the LibraryAgent.
+        agent_id: The ID of the LibraryAgent to update.
+        settings: New GraphSettings to apply.
+
+    Returns:
+        The updated LibraryAgent.
+
+    Raises:
+        NotFoundError: If the specified LibraryAgent does not exist.
+        DatabaseError: If there's an error in the update operation.
+    """
+    return await update_library_agent(
+        library_agent_id=agent_id,
+        user_id=user_id,
+        settings=settings,
+    )
+
+
 async def delete_library_agent(
     library_agent_id: str, user_id: str, soft_delete: bool = True
 ) -> None:
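update_library_agent_settings is a thin, intention-revealing wrapper that delegates to the generic update_library_agent. A runnable sketch of the pattern with in-memory stand-ins; the real functions issue Prisma updates, which is elided here:

import asyncio

from pydantic import BaseModel


class GraphSettings(BaseModel):  # stand-in for the real model
    human_in_the_loop_safe_mode: bool | None = None


_settings_store: dict[str, GraphSettings] = {"agent-1": GraphSettings()}


async def update_library_agent(
    library_agent_id: str, user_id: str, settings: GraphSettings
) -> GraphSettings:
    _settings_store[library_agent_id] = settings  # real code: DB update
    return settings


async def update_library_agent_settings(
    user_id: str, agent_id: str, settings: GraphSettings
) -> GraphSettings:
    # Delegates, keeping a narrow settings-only entry point for callers.
    return await update_library_agent(
        library_agent_id=agent_id, user_id=user_id, settings=settings
    )


async def main() -> None:
    updated = await update_library_agent_settings(
        "user-1", "agent-1", GraphSettings(human_in_the_loop_safe_mode=True)
    )
    assert updated.human_in_the_loop_safe_mode is True


asyncio.run(main())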
@@ -801,7 +838,7 @@ async def add_store_agent_to_library(
                 "isCreatedByUser": False,
                 "useGraphIsActiveVersion": False,
                 "settings": SafeJson(
-                    GraphSettings.from_graph(graph_model).model_dump()
+                    _initialize_graph_settings(graph_model).model_dump()
                 ),
             },
             include=library_agent_include(

@@ -1191,15 +1228,8 @@ async def fork_library_agent(
         )
         new_graph = await on_graph_activate(new_graph, user_id=user_id)

-        # Create a library agent for the new graph, preserving safe mode settings
-        return (
-            await create_library_agent(
-                new_graph,
-                user_id,
-                hitl_safe_mode=original_agent.settings.human_in_the_loop_safe_mode,
-                sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode,
-            )
-        )[0]
+        # Create a library agent for the new graph
+        return (await create_library_agent(new_graph, user_id))[0]
     except prisma.errors.PrismaError as e:
         logger.error(f"Database error cloning library agent: {e}")
         raise DatabaseError("Failed to fork library agent") from e

@@ -73,12 +73,6 @@ class LibraryAgent(pydantic.BaseModel):
     has_external_trigger: bool = pydantic.Field(
         description="Whether the agent has an external trigger (e.g. webhook) node"
     )
-    has_human_in_the_loop: bool = pydantic.Field(
-        description="Whether the agent has human-in-the-loop blocks"
-    )
-    has_sensitive_action: bool = pydantic.Field(
-        description="Whether the agent has sensitive action blocks"
-    )
     trigger_setup_info: Optional[GraphTriggerInfo] = None

     # Indicates whether there's a new output (based on recent runs)

@@ -186,8 +180,6 @@ class LibraryAgent(pydantic.BaseModel):
                 graph.credentials_input_schema if sub_graphs is not None else None
             ),
             has_external_trigger=graph.has_external_trigger,
-            has_human_in_the_loop=graph.has_human_in_the_loop,
-            has_sensitive_action=graph.has_sensitive_action,
             trigger_setup_info=graph.trigger_setup_info,
             new_output=new_output,
             can_access_graph=can_access_graph,

@@ -52,8 +52,6 @@ async def test_get_library_agents_success(
         output_schema={"type": "object", "properties": {}},
         credentials_input_schema={"type": "object", "properties": {}},
         has_external_trigger=False,
-        has_human_in_the_loop=False,
-        has_sensitive_action=False,
         status=library_model.LibraryAgentStatus.COMPLETED,
         recommended_schedule_cron=None,
         new_output=False,

@@ -77,8 +75,6 @@ async def test_get_library_agents_success(
         output_schema={"type": "object", "properties": {}},
         credentials_input_schema={"type": "object", "properties": {}},
         has_external_trigger=False,
-        has_human_in_the_loop=False,
-        has_sensitive_action=False,
         status=library_model.LibraryAgentStatus.COMPLETED,
         recommended_schedule_cron=None,
         new_output=False,

@@ -154,8 +150,6 @@ async def test_get_favorite_library_agents_success(
         output_schema={"type": "object", "properties": {}},
         credentials_input_schema={"type": "object", "properties": {}},
         has_external_trigger=False,
-        has_human_in_the_loop=False,
-        has_sensitive_action=False,
         status=library_model.LibraryAgentStatus.COMPLETED,
         recommended_schedule_cron=None,
         new_output=False,

@@ -224,8 +218,6 @@ def test_add_agent_to_library_success(
         output_schema={"type": "object", "properties": {}},
         credentials_input_schema={"type": "object", "properties": {}},
         has_external_trigger=False,
-        has_human_in_the_loop=False,
-        has_sensitive_action=False,
         status=library_model.LibraryAgentStatus.COMPLETED,
         new_output=False,
         can_access_graph=True,

@@ -154,16 +154,15 @@ async def store_content_embedding(

     # Upsert the embedding
     # WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
-    # Use {pgvector_schema}.vector for explicit pgvector type qualification
     await execute_raw_with_schema(
         """
         INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
             "id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt"
         )
-        VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::{pgvector_schema}.vector, $5, $6::jsonb, NOW(), NOW())
+        VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW())
         ON CONFLICT ("contentType", "contentId", "userId")
         DO UPDATE SET
-            "embedding" = $4::{pgvector_schema}.vector,
+            "embedding" = $4::vector,
             "searchableText" = $5,
             "metadata" = $6::jsonb,
             "updatedAt" = NOW()

@@ -178,6 +177,7 @@ async def store_content_embedding(
         searchable_text,
         metadata_json,
         client=client,
+        set_public_search_path=True,
     )

     logger.info(f"Stored embedding for {content_type}:{content_id}")

@@ -236,6 +236,7 @@ async def get_content_embedding(
         content_type,
         content_id,
         user_id,
+        set_public_search_path=True,
     )

     if result and len(result) > 0:
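The embeddings module drops explicit {pgvector_schema} qualification of the vector type and the <=> operator; instead, set_public_search_path=True asks the query helper to put public on the search path so the unqualified names resolve. Per the test change further down, this is issued as a separate SET search_path statement before the query. A sketch of the idea with asyncpg directly; the schema names and the wrapper behavior are assumptions, not the project's actual helper:

import asyncpg


async def fetch_similar(conn: asyncpg.Connection, embedding_str: str) -> list:
    # Assumed schema layout: app tables in "platform", pgvector in "public".
    await conn.execute("SET search_path TO platform, public")
    # With public on the search path, ::vector and <=> resolve unqualified.
    return await conn.fetch(
        f"""
        SELECT "contentId",
               1 - (embedding <=> '{embedding_str}'::vector) AS similarity
        FROM "UnifiedContentEmbedding"
        ORDER BY similarity DESC
        LIMIT $1
        """,
        10,
    )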
@@ -870,46 +871,31 @@ async def semantic_search(
     # Add content type parameters and build placeholders dynamically
     content_type_start_idx = len(params) + 1
     content_type_placeholders = ", ".join(
-        "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
+        f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
         for i in range(len(content_types))
     )
     params.extend([ct.value for ct in content_types])

-    # Build min_similarity param index before appending
-    min_similarity_idx = len(params) + 1
-    params.append(min_similarity)
-
-    # Use regular string (not f-string) for template to preserve {schema_prefix} and {schema} placeholders
-    # Use OPERATOR({pgvector_schema}.<=>) for explicit operator schema qualification
-    sql = (
-        """
+    sql = f"""
     SELECT
         "contentId" as content_id,
         "contentType" as content_type,
         "searchableText" as searchable_text,
         metadata,
-        1 - (embedding OPERATOR({pgvector_schema}.<=>) '"""
-        + embedding_str
-        + """'::{pgvector_schema}.vector) as similarity
-    FROM {schema_prefix}"UnifiedContentEmbedding"
-    WHERE "contentType" IN ("""
-        + content_type_placeholders
-        + """)
-"""
-        + user_filter
-        + """
-    AND 1 - (embedding OPERATOR({pgvector_schema}.<=>) '"""
-        + embedding_str
-        + """'::{pgvector_schema}.vector) >= $"""
-        + str(min_similarity_idx)
-        + """
+        1 - (embedding <=> '{embedding_str}'::vector) as similarity
+    FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
+    WHERE "contentType" IN ({content_type_placeholders})
+    {user_filter}
+    AND 1 - (embedding <=> '{embedding_str}'::vector) >= ${len(params) + 1}
     ORDER BY similarity DESC
     LIMIT $1
     """
-    )
+    params.append(min_similarity)

     try:
-        results = await query_raw_with_schema(sql, *params)
+        results = await query_raw_with_schema(
+            sql, *params, set_public_search_path=True
+        )
         return [
             {
                 "content_id": row["content_id"],

@@ -936,41 +922,31 @@ async def semantic_search(
     # Add content type parameters and build placeholders dynamically
     content_type_start_idx = len(params_lexical) + 1
     content_type_placeholders_lexical = ", ".join(
-        "$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
+        f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
         for i in range(len(content_types))
     )
     params_lexical.extend([ct.value for ct in content_types])

-    # Build query param index before appending
-    query_param_idx = len(params_lexical) + 1
-    params_lexical.append(f"%{query}%")
-
-    # Use regular string (not f-string) for template to preserve {schema_prefix} placeholders
-    sql_lexical = (
-        """
+    sql_lexical = f"""
     SELECT
         "contentId" as content_id,
         "contentType" as content_type,
         "searchableText" as searchable_text,
         metadata,
         0.0 as similarity
-    FROM {schema_prefix}"UnifiedContentEmbedding"
-    WHERE "contentType" IN ("""
-        + content_type_placeholders_lexical
-        + """)
-"""
-        + user_filter
-        + """
-    AND "searchableText" ILIKE $"""
-        + str(query_param_idx)
-        + """
+    FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
+    WHERE "contentType" IN ({content_type_placeholders_lexical})
+    {user_filter}
+    AND "searchableText" ILIKE ${len(params_lexical) + 1}
     ORDER BY "updatedAt" DESC
     LIMIT $1
     """
-    )
+    params_lexical.append(f"%{query}%")

     try:
-        results = await query_raw_with_schema(sql_lexical, *params_lexical)
+        results = await query_raw_with_schema(
+            sql_lexical, *params_lexical, set_public_search_path=True
+        )
         return [
             {
                 "content_id": row["content_id"],
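On brace escaping in the f-string templates above: each formatting pass consumes one level of doubling, so {{{{schema_prefix}}}} survives the f-string pass as {{schema_prefix}} and a subsequent str.format pass as {schema_prefix}. How many passes query_raw_with_schema applies is not visible in this diff; the sketch below only demonstrates the per-pass Python semantics:

name = "x"

# Pass 1: the f-string halves doubled braces and substitutes {name}.
stage1 = f'{{{{schema_prefix}}}}"ContentType" AND {name}'
assert stage1 == '{{schema_prefix}}"ContentType" AND x'

# Pass 2: str.format halves the remaining doubled braces (no substitution,
# because the braces were escaped at this level).
stage2 = stage1.format(schema_prefix="platform.")
assert stage2 == '{schema_prefix}"ContentType" AND x'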
@@ -155,14 +155,18 @@ async def test_store_embedding_success(mocker):
     )

     assert result is True
-    # execute_raw is called once for INSERT (no separate SET search_path needed)
-    assert mock_client.execute_raw.call_count == 1
+    # execute_raw is called twice: once for SET search_path, once for INSERT
+    assert mock_client.execute_raw.call_count == 2

-    # Verify the INSERT query with the actual data
-    call_args = mock_client.execute_raw.call_args_list[0][0]
-    assert "test-version-id" in call_args
-    assert "[0.1,0.2,0.3]" in call_args
-    assert None in call_args  # userId should be None for store agents
+    # First call: SET search_path
+    first_call_args = mock_client.execute_raw.call_args_list[0][0]
+    assert "SET search_path" in first_call_args[0]
+
+    # Second call: INSERT query with the actual data
+    second_call_args = mock_client.execute_raw.call_args_list[1][0]
+    assert "test-version-id" in second_call_args
+    assert "[0.1,0.2,0.3]" in second_call_args
+    assert None in second_call_args  # userId should be None for store agents


 @pytest.mark.asyncio(loop_scope="session")

@@ -12,7 +12,7 @@ from dataclasses import dataclass
 from typing import Any, Literal

 from prisma.enums import ContentType
-from rank_bm25 import BM25Okapi  # type: ignore[import-untyped]
+from rank_bm25 import BM25Okapi

 from backend.api.features.store.embeddings import (
     EMBEDDING_DIM,

@@ -295,7 +295,7 @@ async def unified_hybrid_search(
                 FROM {{schema_prefix}}"UnifiedContentEmbedding" uce
                 WHERE uce."contentType" = ANY({content_types_param}::{{schema_prefix}}"ContentType"[])
                 {user_filter}
-                ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector
+                ORDER BY uce.embedding <=> {embedding_param}::vector
                 LIMIT 200
             )
         ),

@@ -307,7 +307,7 @@ async def unified_hybrid_search(
             uce.metadata,
             uce."updatedAt" as updated_at,
             -- Semantic score: cosine similarity (1 - distance)
-            COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score,
+            COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
             -- Lexical score: ts_rank_cd
             COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
             -- Category match from metadata

@@ -363,7 +363,9 @@ async def unified_hybrid_search(
         LIMIT {limit_param} OFFSET {offset_param}
     """

-    results = await query_raw_with_schema(sql_query, *params)
+    results = await query_raw_with_schema(
+        sql_query, *params, set_public_search_path=True
+    )

     total = results[0]["total_count"] if results else 0
     # Apply BM25 reranking

@@ -583,7 +585,7 @@ async def hybrid_search(
                 WHERE uce."contentType" = 'STORE_AGENT'::{{schema_prefix}}"ContentType"
                   AND uce."userId" IS NULL
                   AND {where_clause}
-                ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector
+                ORDER BY uce.embedding <=> {embedding_param}::vector
                 LIMIT 200
             ) uce
         ),

@@ -605,7 +607,7 @@ async def hybrid_search(
             -- Searchable text for BM25 reranking
             COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
             -- Semantic score
-            COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score,
+            COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
             -- Lexical score (raw, will normalize)
             COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
             -- Category match

@@ -686,7 +688,9 @@ async def hybrid_search(
         LIMIT {limit_param} OFFSET {offset_param}
     """

-    results = await query_raw_with_schema(sql_query, *params)
+    results = await query_raw_with_schema(
+        sql_query, *params, set_public_search_path=True
+    )

     total = results[0]["total_count"] if results else 0


@@ -761,8 +761,10 @@ async def create_new_graph(
     graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
     graph.validate_graph(for_run=False)

+    # The return value of the create graph & library function is intentionally not used here,
+    # as the graph already valid and no sub-graphs are returned back.
     await graph_db.create_graph(graph, user_id=user_id)
-    await library_db.create_library_agent(graph, user_id)
+    await library_db.create_library_agent(graph, user_id=user_id)
     activated_graph = await on_graph_activate(graph, user_id=user_id)

     if create_graph.source == "builder":

@@ -886,19 +888,21 @@ async def set_graph_active_version(
 async def _update_library_agent_version_and_settings(
     user_id: str, agent_graph: graph_db.GraphModel
 ) -> library_model.LibraryAgent:
+    # Keep the library agent up to date with the new active version
     library = await library_db.update_agent_version_in_library(
         user_id, agent_graph.id, agent_graph.version
     )
-    updated_settings = GraphSettings.from_graph(
-        graph=agent_graph,
-        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
-        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
-    )
-    if updated_settings != library.settings:
-        library = await library_db.update_library_agent(
-            library_agent_id=library.id,
+    # If the graph has HITL node, initialize the setting if it's not already set.
+    if (
+        agent_graph.has_human_in_the_loop
+        and library.settings.human_in_the_loop_safe_mode is None
+    ):
+        await library_db.update_library_agent_settings(
             user_id=user_id,
-            settings=updated_settings,
+            agent_id=library.id,
+            settings=library.settings.model_copy(
+                update={"human_in_the_loop_safe_mode": True}
+            ),
         )
     return library

@@ -915,18 +919,21 @@ async def update_graph_settings(
     user_id: Annotated[str, Security(get_user_id)],
 ) -> GraphSettings:
     """Update graph settings for the user's library agent."""
+    # Get the library agent for this graph
     library_agent = await library_db.get_library_agent_by_graph_id(
         graph_id=graph_id, user_id=user_id
     )
     if not library_agent:
         raise HTTPException(404, f"Graph #{graph_id} not found in user's library")

-    updated_agent = await library_db.update_library_agent(
-        library_agent_id=library_agent.id,
+    # Update the library agent settings
+    updated_agent = await library_db.update_library_agent_settings(
         user_id=user_id,
+        agent_id=library_agent.id,
         settings=settings,
     )

+    # Return the updated settings
     return GraphSettings.model_validate(updated_agent.settings)

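The version-update path above only initializes human_in_the_loop_safe_mode when it is still None, and uses pydantic's model_copy(update=...) so the loaded settings object is not mutated in place. A runnable sketch of that guard with a simplified stand-in model:

from pydantic import BaseModel


class GraphSettings(BaseModel):  # simplified stand-in
    human_in_the_loop_safe_mode: bool | None = None


def init_if_unset(settings: GraphSettings, has_hitl: bool) -> GraphSettings:
    # Flip the flag only when HITL blocks exist AND the user never set it;
    # an explicit False (user opted out) is left untouched.
    if has_hitl and settings.human_in_the_loop_safe_mode is None:
        return settings.model_copy(update={"human_in_the_loop_safe_mode": True})
    return settings


assert init_if_unset(GraphSettings(), has_hitl=True).human_in_the_loop_safe_mode is True
opted_out = GraphSettings(human_in_the_loop_safe_mode=False)
assert init_if_unset(opted_out, has_hitl=True).human_in_the_loop_safe_mode is False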
@@ -174,7 +174,7 @@ class AIShortformVideoCreatorBlock(Block):
|
|||||||
)
|
)
|
||||||
frame_rate: int = SchemaField(description="Frame rate of the video", default=60)
|
frame_rate: int = SchemaField(description="Frame rate of the video", default=60)
|
||||||
generation_preset: GenerationPreset = SchemaField(
|
generation_preset: GenerationPreset = SchemaField(
|
||||||
description="Generation preset for visual style - only affects AI-generated visuals",
|
description="Generation preset for visual style - only effects AI generated visuals",
|
||||||
default=GenerationPreset.LEONARDO,
|
default=GenerationPreset.LEONARDO,
|
||||||
placeholder=GenerationPreset.LEONARDO,
|
placeholder=GenerationPreset.LEONARDO,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -381,7 +381,7 @@ Each range you add needs to be a string, with the upper and lower numbers of the
|
|||||||
organization_locations: Optional[list[str]] = SchemaField(
|
organization_locations: Optional[list[str]] = SchemaField(
|
||||||
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
||||||
|
|
||||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
|
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.
|
||||||
|
|
||||||
To exclude companies based on location, use the organization_not_locations parameter.
|
To exclude companies based on location, use the organization_not_locations parameter.
|
||||||
""",
|
""",
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ Each range you add needs to be a string, with the upper and lower numbers of the
|
|||||||
organization_locations: list[str] = SchemaField(
|
organization_locations: list[str] = SchemaField(
|
||||||
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
||||||
|
|
||||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
|
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.
|
||||||
|
|
||||||
To exclude companies based on location, use the organization_not_locations parameter.
|
To exclude companies based on location, use the organization_not_locations parameter.
|
||||||
""",
|
""",
|
||||||
|
|||||||
@@ -81,7 +81,7 @@ class StoreValueBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
|
id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
|
||||||
description="A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks.",
|
description="This block forwards an input value as output, allowing reuse without change.",
|
||||||
categories={BlockCategory.BASIC},
|
categories={BlockCategory.BASIC},
|
||||||
input_schema=StoreValueBlock.Input,
|
input_schema=StoreValueBlock.Input,
|
||||||
output_schema=StoreValueBlock.Output,
|
output_schema=StoreValueBlock.Output,
|
||||||
@@ -111,7 +111,7 @@ class PrintToConsoleBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
|
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
|
||||||
description="A debugging block that outputs text to the console for monitoring and troubleshooting workflow execution.",
|
description="Print the given text to the console, this is used for a debugging purpose.",
|
||||||
categories={BlockCategory.BASIC},
|
categories={BlockCategory.BASIC},
|
||||||
input_schema=PrintToConsoleBlock.Input,
|
input_schema=PrintToConsoleBlock.Input,
|
||||||
output_schema=PrintToConsoleBlock.Output,
|
output_schema=PrintToConsoleBlock.Output,
|
||||||
@@ -137,7 +137,7 @@ class NoteBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
|
id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
|
||||||
description="A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes.",
|
description="This block is used to display a sticky note with the given text.",
|
||||||
categories={BlockCategory.BASIC},
|
categories={BlockCategory.BASIC},
|
||||||
input_schema=NoteBlock.Input,
|
input_schema=NoteBlock.Input,
|
||||||
output_schema=NoteBlock.Output,
|
output_schema=NoteBlock.Output,
|
||||||
|
|||||||
@@ -159,7 +159,7 @@ class FindInDictionaryBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
|
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
|
||||||
description="A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value.",
|
description="Lookup the given key in the input dictionary/object/list and return the value.",
|
||||||
input_schema=FindInDictionaryBlock.Input,
|
input_schema=FindInDictionaryBlock.Input,
|
||||||
output_schema=FindInDictionaryBlock.Output,
|
output_schema=FindInDictionaryBlock.Output,
|
||||||
test_input=[
|
test_input=[
|
||||||
@@ -680,58 +680,3 @@ class ListIsEmptyBlock(Block):
|
|||||||
|
|
||||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
yield "is_empty", len(input_data.list) == 0
|
yield "is_empty", len(input_data.list) == 0
|
||||||
|
|
||||||
|
|
||||||
class ConcatenateListsBlock(Block):
|
|
||||||
class Input(BlockSchemaInput):
|
|
||||||
lists: List[List[Any]] = SchemaField(
|
|
||||||
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
|
|
||||||
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
|
|
||||||
)
|
|
||||||
|
|
||||||
class Output(BlockSchemaOutput):
|
|
||||||
concatenated_list: List[Any] = SchemaField(
|
|
||||||
description="The concatenated list containing all elements from all input lists in order."
|
|
||||||
)
|
|
||||||
error: str = SchemaField(
|
|
||||||
description="Error message if concatenation failed due to invalid input types."
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__(
|
|
||||||
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
|
|
||||||
description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.",
|
|
||||||
categories={BlockCategory.BASIC},
|
|
||||||
input_schema=ConcatenateListsBlock.Input,
|
|
||||||
output_schema=ConcatenateListsBlock.Output,
|
|
||||||
test_input=[
|
|
||||||
{"lists": [[1, 2, 3], [4, 5, 6]]},
|
|
||||||
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
|
|
||||||
{"lists": [[1, 2], []]},
|
|
||||||
{"lists": []},
|
|
||||||
],
|
|
||||||
test_output=[
|
|
||||||
("concatenated_list", [1, 2, 3, 4, 5, 6]),
|
|
||||||
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
|
|
||||||
("concatenated_list", [1, 2]),
|
|
||||||
("concatenated_list", []),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
|
||||||
concatenated = []
|
|
||||||
for idx, lst in enumerate(input_data.lists):
|
|
||||||
if lst is None:
|
|
||||||
# Skip None values to avoid errors
|
|
||||||
continue
|
|
||||||
if not isinstance(lst, list):
|
|
||||||
# Type validation: each item must be a list
|
|
||||||
# Strings are iterable and would cause extend() to iterate character-by-character
|
|
||||||
# Non-iterable types would raise TypeError
|
|
||||||
yield "error", (
|
|
||||||
f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
|
|
||||||
f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
|
|
||||||
)
|
|
||||||
return
|
|
||||||
concatenated.extend(lst)
|
|
||||||
yield "concatenated_list", concatenated
|
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ class GithubCommentBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="a8db4d8d-db1c-4a25-a1b0-416a8c33602b",
|
id="a8db4d8d-db1c-4a25-a1b0-416a8c33602b",
|
||||||
description="A block that posts comments on GitHub issues or pull requests using the GitHub API.",
|
description="This block posts a comment on a specified GitHub issue or pull request.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubCommentBlock.Input,
|
input_schema=GithubCommentBlock.Input,
|
||||||
output_schema=GithubCommentBlock.Output,
|
output_schema=GithubCommentBlock.Output,
|
||||||
@@ -151,7 +151,7 @@ class GithubUpdateCommentBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
|
id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
|
||||||
description="A block that updates an existing comment on a GitHub issue or pull request.",
|
description="This block updates a comment on a specified GitHub issue or pull request.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubUpdateCommentBlock.Input,
|
input_schema=GithubUpdateCommentBlock.Input,
|
||||||
output_schema=GithubUpdateCommentBlock.Output,
|
output_schema=GithubUpdateCommentBlock.Output,
|
||||||
@@ -249,7 +249,7 @@ class GithubListCommentsBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
|
id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
|
||||||
description="A block that retrieves all comments from a GitHub issue or pull request, including comment metadata and content.",
|
description="This block lists all comments for a specified GitHub issue or pull request.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubListCommentsBlock.Input,
|
input_schema=GithubListCommentsBlock.Input,
|
||||||
output_schema=GithubListCommentsBlock.Output,
|
output_schema=GithubListCommentsBlock.Output,
|
||||||
@@ -363,7 +363,7 @@ class GithubMakeIssueBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="691dad47-f494-44c3-a1e8-05b7990f2dab",
|
id="691dad47-f494-44c3-a1e8-05b7990f2dab",
|
||||||
description="A block that creates new issues on GitHub repositories with a title and body content.",
|
description="This block creates a new issue on a specified GitHub repository.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubMakeIssueBlock.Input,
|
input_schema=GithubMakeIssueBlock.Input,
|
||||||
output_schema=GithubMakeIssueBlock.Output,
|
output_schema=GithubMakeIssueBlock.Output,
|
||||||
@@ -433,7 +433,7 @@ class GithubReadIssueBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="6443c75d-032a-4772-9c08-230c707c8acc",
|
id="6443c75d-032a-4772-9c08-230c707c8acc",
|
||||||
description="A block that retrieves information about a specific GitHub issue, including its title, body content, and creator.",
|
description="This block reads the body, title, and user of a specified GitHub issue.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubReadIssueBlock.Input,
|
input_schema=GithubReadIssueBlock.Input,
|
||||||
output_schema=GithubReadIssueBlock.Output,
|
output_schema=GithubReadIssueBlock.Output,
|
||||||
@@ -510,7 +510,7 @@ class GithubListIssuesBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="c215bfd7-0e57-4573-8f8c-f7d4963dcd74",
|
id="c215bfd7-0e57-4573-8f8c-f7d4963dcd74",
|
||||||
description="A block that retrieves a list of issues from a GitHub repository with their titles and URLs.",
|
description="This block lists all issues for a specified GitHub repository.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubListIssuesBlock.Input,
|
input_schema=GithubListIssuesBlock.Input,
|
||||||
output_schema=GithubListIssuesBlock.Output,
|
output_schema=GithubListIssuesBlock.Output,
|
||||||
@@ -597,7 +597,7 @@ class GithubAddLabelBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="98bd6b77-9506-43d5-b669-6b9733c4b1f1",
|
id="98bd6b77-9506-43d5-b669-6b9733c4b1f1",
|
||||||
description="A block that adds a label to a GitHub issue or pull request for categorization and organization.",
|
description="This block adds a label to a specified GitHub issue or pull request.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubAddLabelBlock.Input,
|
input_schema=GithubAddLabelBlock.Input,
|
||||||
output_schema=GithubAddLabelBlock.Output,
|
output_schema=GithubAddLabelBlock.Output,
|
||||||
@@ -657,7 +657,7 @@ class GithubRemoveLabelBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="78f050c5-3e3a-48c0-9e5b-ef1ceca5589c",
|
id="78f050c5-3e3a-48c0-9e5b-ef1ceca5589c",
|
||||||
description="A block that removes a label from a GitHub issue or pull request.",
|
description="This block removes a label from a specified GitHub issue or pull request.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubRemoveLabelBlock.Input,
|
input_schema=GithubRemoveLabelBlock.Input,
|
||||||
output_schema=GithubRemoveLabelBlock.Output,
|
output_schema=GithubRemoveLabelBlock.Output,
|
||||||
@@ -720,7 +720,7 @@ class GithubAssignIssueBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="90507c72-b0ff-413a-886a-23bbbd66f542",
|
id="90507c72-b0ff-413a-886a-23bbbd66f542",
|
||||||
description="A block that assigns a GitHub user to an issue for task ownership and tracking.",
|
description="This block assigns a user to a specified GitHub issue.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubAssignIssueBlock.Input,
|
input_schema=GithubAssignIssueBlock.Input,
|
||||||
output_schema=GithubAssignIssueBlock.Output,
|
output_schema=GithubAssignIssueBlock.Output,
|
||||||
@@ -786,7 +786,7 @@ class GithubUnassignIssueBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="d154002a-38f4-46c2-962d-2488f2b05ece",
|
id="d154002a-38f4-46c2-962d-2488f2b05ece",
|
||||||
description="A block that removes a user's assignment from a GitHub issue.",
|
description="This block unassigns a user from a specified GitHub issue.",
|
||||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||||
input_schema=GithubUnassignIssueBlock.Input,
|
input_schema=GithubUnassignIssueBlock.Input,
|
||||||
output_schema=GithubUnassignIssueBlock.Output,
|
output_schema=GithubUnassignIssueBlock.Output,
|
||||||
|
|||||||
@@ -353,7 +353,7 @@ class GmailReadBlock(GmailBase):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
|
id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
|
||||||
description="A block that retrieves and reads emails from a Gmail account based on search criteria, returning detailed message information including subject, sender, body, and attachments.",
|
description="This block reads emails from Gmail.",
|
||||||
categories={BlockCategory.COMMUNICATION},
|
categories={BlockCategory.COMMUNICATION},
|
||||||
disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
|
disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
|
||||||
input_schema=GmailReadBlock.Input,
|
input_schema=GmailReadBlock.Input,
|
||||||
@@ -743,7 +743,7 @@ class GmailListLabelsBlock(GmailBase):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
|
id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
|
||||||
description="A block that retrieves all labels (categories) from a Gmail account for organizing and categorizing emails.",
|
description="This block lists all labels in Gmail.",
|
||||||
categories={BlockCategory.COMMUNICATION},
|
categories={BlockCategory.COMMUNICATION},
|
||||||
input_schema=GmailListLabelsBlock.Input,
|
input_schema=GmailListLabelsBlock.Input,
|
||||||
output_schema=GmailListLabelsBlock.Output,
|
output_schema=GmailListLabelsBlock.Output,
|
||||||
@@ -807,7 +807,7 @@ class GmailAddLabelBlock(GmailBase):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="f884b2fb-04f4-4265-9658-14f433926ac9",
|
id="f884b2fb-04f4-4265-9658-14f433926ac9",
|
||||||
description="A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist.",
|
description="This block adds a label to a Gmail message.",
|
||||||
categories={BlockCategory.COMMUNICATION},
|
categories={BlockCategory.COMMUNICATION},
|
||||||
input_schema=GmailAddLabelBlock.Input,
|
input_schema=GmailAddLabelBlock.Input,
|
||||||
output_schema=GmailAddLabelBlock.Output,
|
output_schema=GmailAddLabelBlock.Output,
|
||||||
@@ -893,7 +893,7 @@ class GmailRemoveLabelBlock(GmailBase):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
|
id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
|
||||||
description="A block that removes a label from a specific email message in a Gmail account.",
|
description="This block removes a label from a Gmail message.",
|
||||||
categories={BlockCategory.COMMUNICATION},
|
categories={BlockCategory.COMMUNICATION},
|
||||||
input_schema=GmailRemoveLabelBlock.Input,
|
input_schema=GmailRemoveLabelBlock.Input,
|
||||||
output_schema=GmailRemoveLabelBlock.Output,
|
output_schema=GmailRemoveLabelBlock.Output,
|
||||||
@@ -961,7 +961,7 @@ class GmailGetThreadBlock(GmailBase):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="21a79166-9df7-4b5f-9f36-96f639d86112",
|
id="21a79166-9df7-4b5f-9f36-96f639d86112",
|
||||||
description="A block that retrieves an entire Gmail thread (email conversation) by ID, returning all messages with decoded bodies for reading complete conversations.",
|
description="Get a full Gmail thread by ID",
|
||||||
categories={BlockCategory.COMMUNICATION},
|
categories={BlockCategory.COMMUNICATION},
|
||||||
input_schema=GmailGetThreadBlock.Input,
|
input_schema=GmailGetThreadBlock.Input,
|
||||||
output_schema=GmailGetThreadBlock.Output,
|
output_schema=GmailGetThreadBlock.Output,
|
||||||

@@ -282,7 +282,7 @@ class GoogleSheetsReadBlock(Block):
     def __init__(self):
         super().__init__(
             id="5724e902-3635-47e9-a108-aaa0263a4988",
-            description="A block that reads data from a Google Sheets spreadsheet using A1 notation range selection.",
+            description="This block reads data from a Google Sheets spreadsheet.",
             categories={BlockCategory.DATA},
             input_schema=GoogleSheetsReadBlock.Input,
             output_schema=GoogleSheetsReadBlock.Output,
@@ -409,7 +409,7 @@ class GoogleSheetsWriteBlock(Block):
     def __init__(self):
         super().__init__(
             id="d9291e87-301d-47a8-91fe-907fb55460e5",
-            description="A block that writes data to a Google Sheets spreadsheet at a specified A1 notation range.",
+            description="This block writes data to a Google Sheets spreadsheet.",
             categories={BlockCategory.DATA},
             input_schema=GoogleSheetsWriteBlock.Input,
             output_schema=GoogleSheetsWriteBlock.Output,

@@ -84,7 +84,7 @@ class HITLReviewHelper:
             Exception: If review creation or status update fails
         """
         # Skip review if safe mode is disabled - return auto-approved result
-        if not execution_context.human_in_the_loop_safe_mode:
+        if not execution_context.safe_mode:
             logger.info(
                 f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
             )

@@ -104,7 +104,7 @@ class HumanInTheLoopBlock(Block):
         execution_context: ExecutionContext,
         **_kwargs,
     ) -> BlockOutput:
-        if not execution_context.human_in_the_loop_safe_mode:
+        if not execution_context.safe_mode:
            logger.info(
                f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
            )

@@ -76,7 +76,7 @@ class AgentInputBlock(Block):
         super().__init__(
             **{
                 "id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
-                "description": "A block that accepts and processes user input values within a workflow, supporting various input types and validation.",
+                "description": "Base block for user inputs.",
                 "input_schema": AgentInputBlock.Input,
                 "output_schema": AgentInputBlock.Output,
                 "test_input": [
@@ -168,7 +168,7 @@ class AgentOutputBlock(Block):
     def __init__(self):
         super().__init__(
             id="363ae599-353e-4804-937e-b2ee3cef3da4",
-            description="A block that records and formats workflow results for display to users, with optional Jinja2 template formatting support.",
+            description="Stores the output of the graph for users to see.",
             input_schema=AgentOutputBlock.Input,
             output_schema=AgentOutputBlock.Output,
             test_input=[

@@ -854,7 +854,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
     def __init__(self):
         super().__init__(
             id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
-            description="A block that generates structured JSON responses using a Large Language Model (LLM), with schema validation and format enforcement.",
+            description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
             categories={BlockCategory.AI},
             input_schema=AIStructuredResponseGeneratorBlock.Input,
             output_schema=AIStructuredResponseGeneratorBlock.Output,
@@ -1265,7 +1265,7 @@ class AITextGeneratorBlock(AIBlockBase):
     def __init__(self):
         super().__init__(
             id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
-            description="A block that produces text responses using a Large Language Model (LLM) based on customizable prompts and system instructions.",
+            description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
             categories={BlockCategory.AI},
             input_schema=AITextGeneratorBlock.Input,
             output_schema=AITextGeneratorBlock.Output,
@@ -1361,7 +1361,7 @@ class AITextSummarizerBlock(AIBlockBase):
     def __init__(self):
         super().__init__(
             id="a0a69be1-4528-491c-a85a-a4ab6873e3f0",
-            description="A block that summarizes long texts using a Large Language Model (LLM), with configurable focus topics and summary styles.",
+            description="Utilize a Large Language Model (LLM) to summarize a long text.",
             categories={BlockCategory.AI, BlockCategory.TEXT},
             input_schema=AITextSummarizerBlock.Input,
             output_schema=AITextSummarizerBlock.Output,
@@ -1562,7 +1562,7 @@ class AIConversationBlock(AIBlockBase):
     def __init__(self):
         super().__init__(
             id="32a87eab-381e-4dd4-bdb8-4c47151be35a",
-            description="A block that facilitates multi-turn conversations with a Large Language Model (LLM), maintaining context across message exchanges.",
+            description="Advanced LLM call that takes a list of messages and sends them to the language model.",
             categories={BlockCategory.AI},
             input_schema=AIConversationBlock.Input,
             output_schema=AIConversationBlock.Output,
@@ -1682,7 +1682,7 @@ class AIListGeneratorBlock(AIBlockBase):
     def __init__(self):
         super().__init__(
             id="9c0b0450-d199-458b-a731-072189dd6593",
-            description="A block that creates lists of items based on prompts using a Large Language Model (LLM), with optional source data for context.",
+            description="Generate a list of values based on the given prompt using a Large Language Model (LLM).",
             categories={BlockCategory.AI, BlockCategory.TEXT},
             input_schema=AIListGeneratorBlock.Input,
             output_schema=AIListGeneratorBlock.Output,

@@ -46,7 +46,7 @@ class PublishToMediumBlock(Block):
     class Input(BlockSchemaInput):
         author_id: BlockSecret = SecretField(
             key="medium_author_id",
-            description="""The Medium AuthorID of the user. You can get this by calling the /me endpoint of the Medium API.\n\ncurl -H "Authorization: Bearer YOUR_ACCESS_TOKEN" https://api.medium.com/v1/me\n\nThe response will contain the authorId field.""",
+            description="""The Medium AuthorID of the user. You can get this by calling the /me endpoint of the Medium API.\n\ncurl -H "Authorization: Bearer YOUR_ACCESS_TOKEN" https://api.medium.com/v1/me" the response will contain the authorId field.""",
             placeholder="Enter the author's Medium AuthorID",
         )
         title: str = SchemaField(

@@ -50,7 +50,7 @@ class CreateTalkingAvatarVideoBlock(Block):
             description="The voice provider to use", default="microsoft"
         )
         voice_id: str = SchemaField(
-            description="The voice ID to use, see [available voice IDs](https://agpt.co/docs/platform/using-ai-services/d_id)",
+            description="The voice ID to use, get list of voices [here](https://docs.agpt.co/server/d_id)",
             default="en-US-JennyNeural",
         )
         presenter_id: str = SchemaField(

@@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation():

     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation():

     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}
     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests

@@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode():
     # Create a mock execution context

     mock_execution_context = ExecutionContext(
-        human_in_the_loop_safe_mode=False,
+        safe_mode=False,
     )

     # Create a mock execution processor for agent mode tests
@@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default():

     # Create execution context

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a mock execution processor for tests


@@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields():
     outputs = {}
     from backend.data.execution import ExecutionContext

-    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
+    mock_execution_context = ExecutionContext(safe_mode=False)
     mock_execution_processor = MagicMock()

     async for output_name, output_value in block.run(
@@ -609,9 +609,7 @@ async def test_validation_errors_dont_pollute_conversation():
     outputs = {}
     from backend.data.execution import ExecutionContext

-    mock_execution_context = ExecutionContext(
-        human_in_the_loop_safe_mode=False
-    )
+    mock_execution_context = ExecutionContext(safe_mode=False)

     # Create a proper mock execution processor for agent mode
     from collections import defaultdict

@@ -474,7 +474,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
         self.block_type = block_type
         self.webhook_config = webhook_config
         self.execution_stats: NodeExecutionStats = NodeExecutionStats()
-        self.is_sensitive_action: bool = False
+        self.requires_human_review: bool = False

         if self.webhook_config:
             if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -637,9 +637,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
             - should_pause: True if execution should be paused for review
             - input_data_to_use: The input data to use (may be modified by reviewer)
         """
-        if not (
-            self.is_sensitive_action and execution_context.sensitive_action_safe_mode
-        ):
+        # Skip review if not required or safe mode is disabled
+        if not self.requires_human_review or not execution_context.safe_mode:
            return False, input_data

        from backend.blocks.helpers.review import HITLReviewHelper
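
A minimal, self-contained sketch of the consolidated gate above (the attribute names `requires_human_review` and `safe_mode` come from the diff; the standalone helper function is invented for illustration and is not the project's method):

    def should_pause_for_review(requires_human_review: bool, safe_mode: bool) -> bool:
        # Old gate: is_sensitive_action and execution_context.sensitive_action_safe_mode
        # New gate: a single safe_mode flag guards every block that requires review
        return requires_human_review and safe_mode

    assert should_pause_for_review(True, True) is True
    assert should_pause_for_review(True, False) is False   # safe mode disabled
    assert should_pause_for_review(False, True) is False   # block does not require review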

@@ -38,6 +38,20 @@ POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
 if POOL_TIMEOUT:
     DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)

+# Add public schema to search_path for pgvector type access
+# The vector extension is in public schema, but search_path is determined by schema parameter
+# Extract the schema from DATABASE_URL or default to 'public' (matching get_database_schema())
+parsed_url = urlparse(DATABASE_URL)
+url_params = dict(parse_qsl(parsed_url.query))
+db_schema = url_params.get("schema", "public")
+# Build search_path, avoiding duplicates if db_schema is already 'public'
+search_path_schemas = list(
+    dict.fromkeys([db_schema, "public"])
+)  # Preserves order, removes duplicates
+search_path = ",".join(search_path_schemas)
+# This allows using ::vector without schema qualification
+DATABASE_URL = add_param(DATABASE_URL, "options", f"-c search_path={search_path}")
+
 HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None

 prisma = Prisma(
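
A runnable sketch of the search_path computation added above, using only the stdlib calls the diff itself uses (the example connection URLs are invented):

    from urllib.parse import urlparse, parse_qsl

    def build_search_path(database_url: str) -> str:
        # Extract the schema param, defaulting to "public" as the added code does
        url_params = dict(parse_qsl(urlparse(database_url).query))
        db_schema = url_params.get("schema", "public")
        # dict.fromkeys preserves order and drops the duplicate when the
        # configured schema is already "public"
        return ",".join(dict.fromkeys([db_schema, "public"]))

    print(build_search_path("postgresql://u:p@db:5432/app?schema=platform"))  # platform,public
    print(build_search_path("postgresql://u:p@db:5432/app"))                  # public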
@@ -113,48 +127,38 @@ async def _raw_with_schema(
     *args,
     execute: bool = False,
     client: Prisma | None = None,
+    set_public_search_path: bool = False,
 ) -> list[dict] | int:
     """Internal: Execute raw SQL with proper schema handling.

     Use query_raw_with_schema() or execute_raw_with_schema() instead.

-    Supports placeholders:
-    - {schema_prefix}: Table/type prefix (e.g., "platform".)
-    - {schema}: Raw schema name for application tables (e.g., platform)
-    - {pgvector_schema}: Schema where pgvector is installed (defaults to "public")
-
     Args:
-        query_template: SQL query with {schema_prefix}, {schema}, and/or {pgvector_schema} placeholders
+        query_template: SQL query with {schema_prefix} placeholder
         *args: Query parameters
         execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE.
         client: Optional Prisma client for transactions (only used when execute=True).
+        set_public_search_path: If True, sets search_path to include public schema.
+            Needed for pgvector types and other public schema objects.

     Returns:
         - list[dict] if execute=False (query results)
         - int if execute=True (number of affected rows)
-
-    Example with vector type:
-        await execute_raw_with_schema(
-            'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::{pgvector_schema}.vector)',
-            embedding_data
-        )
     """
     schema = get_database_schema()
     schema_prefix = f'"{schema}".' if schema != "public" else ""
-    # pgvector extension is typically installed in "public" schema
-    # On Supabase it may be in "extensions" but "public" is the common default
-    pgvector_schema = "public"
-
-    formatted_query = query_template.format(
-        schema_prefix=schema_prefix,
-        schema=schema,
-        pgvector_schema=pgvector_schema,
-    )
+    formatted_query = query_template.format(schema_prefix=schema_prefix)

     import prisma as prisma_module

     db_client = client if client else prisma_module.get_client()

+    # Set search_path to include public schema if requested
+    # Prisma doesn't support the 'options' connection parameter, so we set it per-session
+    # This is idempotent and safe to call multiple times
+    if set_public_search_path:
+        await db_client.execute_raw(f"SET search_path = {schema}, public")  # type: ignore
+
     if execute:
         result = await db_client.execute_raw(formatted_query, *args)  # type: ignore
     else:
@@ -163,12 +167,16 @@ async def _raw_with_schema(
     return result


-async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
+async def query_raw_with_schema(
+    query_template: str, *args, set_public_search_path: bool = False
+) -> list[dict]:
     """Execute raw SQL SELECT query with proper schema handling.

     Args:
-        query_template: SQL query with {schema_prefix} and/or {schema} placeholders
+        query_template: SQL query with {schema_prefix} placeholder
         *args: Query parameters
+        set_public_search_path: If True, sets search_path to include public schema.
+            Needed for pgvector types and other public schema objects.

     Returns:
         List of result rows as dictionaries
@@ -179,20 +187,23 @@ async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
             user_id
         )
     """
-    return await _raw_with_schema(query_template, *args, execute=False)  # type: ignore
+    return await _raw_with_schema(query_template, *args, execute=False, set_public_search_path=set_public_search_path)  # type: ignore


 async def execute_raw_with_schema(
     query_template: str,
     *args,
     client: Prisma | None = None,
+    set_public_search_path: bool = False,
 ) -> int:
     """Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling.

     Args:
-        query_template: SQL query with {schema_prefix} and/or {schema} placeholders
+        query_template: SQL query with {schema_prefix} placeholder
         *args: Query parameters
         client: Optional Prisma client for transactions
+        set_public_search_path: If True, sets search_path to include public schema.
+            Needed for pgvector types and other public schema objects.

     Returns:
         Number of affected rows
@@ -204,7 +215,7 @@ async def execute_raw_with_schema(
             client=tx  # Optional transaction client
         )
     """
-    return await _raw_with_schema(query_template, *args, execute=True, client=client)  # type: ignore
+    return await _raw_with_schema(query_template, *args, execute=True, client=client, set_public_search_path=set_public_search_path)  # type: ignore


 class BaseDbModel(BaseModel):
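
A hedged usage sketch of the new keyword ("Embedding" comes from the removed docstring example; the query itself is invented): with the public search_path set per-session, the unqualified ::vector cast resolves and the removed {pgvector_schema} placeholder is no longer needed.

    async def nearest_embeddings(query_vector: str) -> list[dict]:
        # {schema_prefix} is the documented placeholder; set_public_search_path=True
        # issues "SET search_path = <schema>, public" before the query runs
        return await query_raw_with_schema(
            'SELECT id FROM {schema_prefix}"Embedding" ORDER BY vec <-> $1::vector LIMIT 5',
            query_vector,
            set_public_search_path=True,
        )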

@@ -81,8 +81,7 @@ class ExecutionContext(BaseModel):
     This includes information needed by blocks, sub-graphs, and execution management.
     """

-    human_in_the_loop_safe_mode: bool = True
-    sensitive_action_safe_mode: bool = False
+    safe_mode: bool = True
     user_timezone: str = "UTC"
     root_execution_id: Optional[str] = None
     parent_execution_id: Optional[str] = None
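
The rename collapses the two per-context flags into one. A minimal sketch of the effect on call sites (mirrors the test updates above; the import path appears in the diff):

    from backend.data.execution import ExecutionContext

    # Before: ExecutionContext(human_in_the_loop_safe_mode=False, sensitive_action_safe_mode=False)
    ctx = ExecutionContext(safe_mode=False)
    assert ctx.user_timezone == "UTC"  # the remaining defaults are unchanged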

@@ -62,23 +62,7 @@ logger = logging.getLogger(__name__)


 class GraphSettings(BaseModel):
-    human_in_the_loop_safe_mode: bool = True
-    sensitive_action_safe_mode: bool = False
-
-    @classmethod
-    def from_graph(
-        cls,
-        graph: "GraphModel",
-        hitl_safe_mode: bool | None = None,
-        sensitive_action_safe_mode: bool = False,
-    ) -> "GraphSettings":
-        # Default to True if not explicitly set
-        if hitl_safe_mode is None:
-            hitl_safe_mode = True
-        return cls(
-            human_in_the_loop_safe_mode=hitl_safe_mode,
-            sensitive_action_safe_mode=sensitive_action_safe_mode,
-        )
+    human_in_the_loop_safe_mode: bool | None = None


 class Link(BaseDbModel):
@@ -260,14 +244,10 @@ class BaseGraph(BaseDbModel):
         return any(
             node.block_id
             for node in self.nodes
-            if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
-        )
-
-    @computed_field
-    @property
-    def has_sensitive_action(self) -> bool:
-        return any(
-            node.block_id for node in self.nodes if node.block.is_sensitive_action
+            if (
+                node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
+                or node.block.requires_human_review
+            )
         )

     @property

@@ -328,8 +328,6 @@ async def clear_business_understanding(user_id: str) -> bool:


 def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str:
     """Format business understanding as text for system prompt injection."""
-    if not understanding:
-        return ""
     sections = []

     # User info section

@@ -309,7 +309,7 @@ def ensure_embeddings_coverage():

     # Process in batches until no more missing embeddings
     while True:
-        result = db_client.backfill_missing_embeddings(batch_size=100)
+        result = db_client.backfill_missing_embeddings(batch_size=10)

         total_processed += result["processed"]
         total_success += result["success"]

@@ -873,8 +873,11 @@ async def add_graph_execution(
     settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)

     execution_context = ExecutionContext(
-        human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
-        sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
+        safe_mode=(
+            settings.human_in_the_loop_safe_mode
+            if settings.human_in_the_loop_safe_mode is not None
+            else True
+        ),
         user_timezone=(
             user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
         ),

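
The parenthesized conditional above is a tri-state fallback for the now-optional graph setting; an equivalent spelling, assuming `settings` is the GraphSettings instance fetched above:

    hitl = settings.human_in_the_loop_safe_mode   # bool | None after the change
    safe_mode = hitl if hitl is not None else True  # None (unset) defaults to safe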
@@ -386,7 +386,6 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
     mock_user.timezone = "UTC"
     mock_settings = mocker.MagicMock()
     mock_settings.human_in_the_loop_safe_mode = True
-    mock_settings.sensitive_action_safe_mode = False

     mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
     mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
@@ -652,7 +651,6 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
     mock_user.timezone = "UTC"
     mock_settings = mocker.MagicMock()
     mock_settings.human_in_the_loop_safe_mode = True
-    mock_settings.sensitive_action_safe_mode = False

     mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
     mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)

@@ -1,864 +0,0 @@
-#!/usr/bin/env python3
-"""
-Block Documentation Generator
-
-Generates markdown documentation for all blocks from code introspection.
-Preserves manually-written content between marker comments.
-
-Usage:
-    # Generate all docs
-    poetry run python scripts/generate_block_docs.py
-
-    # Check mode for CI (exits 1 if stale)
-    poetry run python scripts/generate_block_docs.py --check
-
-    # Verbose output
-    poetry run python scripts/generate_block_docs.py -v
-"""
-
-import argparse
-import inspect
-import logging
-import re
-import sys
-from collections import defaultdict
-from dataclasses import dataclass, field
-from pathlib import Path
-from typing import Any
-
-# Add backend to path for imports
-backend_dir = Path(__file__).parent.parent
-sys.path.insert(0, str(backend_dir))
-
-logger = logging.getLogger(__name__)
-
-# Default output directory relative to repo root
-DEFAULT_OUTPUT_DIR = (
-    Path(__file__).parent.parent.parent.parent / "docs" / "integrations"
-)
-
-
-@dataclass
-class FieldDoc:
-    """Documentation for a single input/output field."""
-
-    name: str
-    description: str
-    type_str: str
-    required: bool
-    default: Any = None
-    advanced: bool = False
-    hidden: bool = False
-    placeholder: str | None = None
-
-
-@dataclass
-class BlockDoc:
-    """Documentation data extracted from a block."""
-
-    id: str
-    name: str
-    class_name: str
-    description: str
-    categories: list[str]
-    category_descriptions: dict[str, str]
-    inputs: list[FieldDoc]
-    outputs: list[FieldDoc]
-    block_type: str
-    source_file: str
-    contributors: list[str] = field(default_factory=list)
-
-
-# Category to human-readable name mapping
-CATEGORY_DISPLAY_NAMES = {
-    "AI": "AI and Language Models",
-    "BASIC": "Basic Operations",
-    "TEXT": "Text Processing",
-    "SEARCH": "Search and Information Retrieval",
-    "SOCIAL": "Social Media and Content",
-    "DEVELOPER_TOOLS": "Developer Tools",
-    "DATA": "Data Processing",
-    "LOGIC": "Logic and Control Flow",
-    "COMMUNICATION": "Communication",
-    "INPUT": "Input/Output",
-    "OUTPUT": "Input/Output",
-    "MULTIMEDIA": "Media Generation",
-    "PRODUCTIVITY": "Productivity",
-    "HARDWARE": "Hardware",
-    "AGENT": "Agent Integration",
-    "CRM": "CRM Services",
-    "SAFETY": "AI Safety",
-    "ISSUE_TRACKING": "Issue Tracking",
-    "MARKETING": "Marketing",
-}
-
-# Category to doc file mapping (for grouping related blocks)
-CATEGORY_FILE_MAP = {
-    "BASIC": "basic",
-    "TEXT": "text",
-    "AI": "llm",
-    "SEARCH": "search",
-    "DATA": "data",
-    "LOGIC": "logic",
-    "COMMUNICATION": "communication",
-    "MULTIMEDIA": "multimedia",
-    "PRODUCTIVITY": "productivity",
-}
-
-
-def class_name_to_display_name(class_name: str) -> str:
-    """Convert BlockClassName to 'Block Class Name'."""
-    # Remove 'Block' suffix (only at the end, not all occurrences)
-    name = class_name.removesuffix("Block")
-    # Insert space before capitals
-    name = re.sub(r"([a-z])([A-Z])", r"\1 \2", name)
-    # Handle consecutive capitals (e.g., 'HTTPRequest' -> 'HTTP Request')
-    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1 \2", name)
-    return name.strip()
-
-
-def type_to_readable(type_schema: dict[str, Any] | Any) -> str:
-    """Convert JSON schema type to human-readable string."""
-    if not isinstance(type_schema, dict):
-        return str(type_schema) if type_schema else "Any"
-
-    if "anyOf" in type_schema:
-        # Union type - show options
-        any_of = type_schema["anyOf"]
-        if not isinstance(any_of, list):
-            return "Any"
-        options = []
-        for opt in any_of:
-            if isinstance(opt, dict) and opt.get("type") == "null":
-                continue
-            options.append(type_to_readable(opt))
-        if not options:
-            return "None"
-        if len(options) == 1:
-            return options[0]
-        return " | ".join(options)
-
-    if "allOf" in type_schema:
-        all_of = type_schema["allOf"]
-        if not isinstance(all_of, list) or not all_of:
-            return "Any"
-        return type_to_readable(all_of[0])
-
-    schema_type = type_schema.get("type")
-
-    if schema_type == "array":
-        items = type_schema.get("items", {})
-        item_type = type_to_readable(items)
-        return f"List[{item_type}]"
-
-    if schema_type == "object":
-        if "additionalProperties" in type_schema:
-            additional_props = type_schema["additionalProperties"]
-            # additionalProperties: true means any value type is allowed
-            if additional_props is True:
-                return "Dict[str, Any]"
-            value_type = type_to_readable(additional_props)
-            return f"Dict[str, {value_type}]"
-        # Check if it's a specific model
-        title = type_schema.get("title", "Object")
-        return title
-
-    if schema_type == "string":
-        if "enum" in type_schema:
-            return " | ".join(f'"{v}"' for v in type_schema["enum"])
-        if "format" in type_schema:
-            return f"str ({type_schema['format']})"
-        return "str"
-
-    if schema_type == "integer":
-        return "int"
-
-    if schema_type == "number":
-        return "float"
-
-    if schema_type == "boolean":
-        return "bool"
-
-    if schema_type == "null":
-        return "None"
-
-    # Fallback
-    return type_schema.get("title", schema_type or "Any")
-
-
-def safe_get(d: Any, key: str, default: Any = None) -> Any:
-    """Safely get a value from a dict-like object."""
-    if isinstance(d, dict):
-        return d.get(key, default)
-    return default
-
-
-def file_path_to_title(file_path: str) -> str:
-    """Convert file path to a readable title.
-
-    Examples:
-        "github/issues.md" -> "GitHub Issues"
-        "basic.md" -> "Basic"
-        "llm.md" -> "LLM"
-        "google/sheets.md" -> "Google Sheets"
-    """
-    # Special case replacements (applied after title casing)
-    TITLE_FIXES = {
-        "Llm": "LLM",
-        "Github": "GitHub",
-        "Api": "API",
-        "Ai": "AI",
-        "Oauth": "OAuth",
-        "Url": "URL",
-        "Ci": "CI",
-        "Pr": "PR",
-        "Gmb": "GMB",  # Google My Business
-        "Hubspot": "HubSpot",
-        "Linkedin": "LinkedIn",
-        "Tiktok": "TikTok",
-        "Youtube": "YouTube",
-    }
-
-    def apply_fixes(text: str) -> str:
-        # Split into words, fix each word, rejoin
-        words = text.split()
-        fixed_words = [TITLE_FIXES.get(word, word) for word in words]
-        return " ".join(fixed_words)
-
-    path = Path(file_path)
-    name = path.stem  # e.g., "issues" or "sheets"
-
-    # Get parent dir if exists
-    parent = path.parent.name if path.parent.name != "." else None
-
-    # Title case and apply fixes
-    if parent:
-        parent_title = apply_fixes(parent.replace("_", " ").title())
-        name_title = apply_fixes(name.replace("_", " ").title())
-        return f"{parent_title} {name_title}"
-    return apply_fixes(name.replace("_", " ").title())
-
-
-def extract_block_doc(block_cls: type) -> BlockDoc:
-    """Extract documentation data from a block class."""
-    block = block_cls.create()
-
-    # Get source file
-    try:
-        source_file = inspect.getfile(block_cls)
-        # Make relative to blocks directory
-        blocks_dir = Path(source_file).parent
-        while blocks_dir.name != "blocks" and blocks_dir.parent != blocks_dir:
-            blocks_dir = blocks_dir.parent
-        source_file = str(Path(source_file).relative_to(blocks_dir.parent))
-    except (TypeError, ValueError):
-        source_file = "unknown"
-
-    # Extract input fields
-    input_schema = block.input_schema.jsonschema()
-    input_properties = safe_get(input_schema, "properties", {})
-    if not isinstance(input_properties, dict):
-        input_properties = {}
-    required_raw = safe_get(input_schema, "required", [])
-    # Handle edge cases where required might not be a list
-    if isinstance(required_raw, (list, set, tuple)):
-        required_inputs = set(required_raw)
-    else:
-        required_inputs = set()
-
-    inputs = []
-    for field_name, field_schema in input_properties.items():
-        if not isinstance(field_schema, dict):
-            continue
-        # Skip credentials fields in docs (they're auto-handled)
-        if "credentials" in field_name.lower():
-            continue
-
-        inputs.append(
-            FieldDoc(
-                name=field_name,
-                description=safe_get(field_schema, "description", ""),
-                type_str=type_to_readable(field_schema),
-                required=field_name in required_inputs,
-                default=safe_get(field_schema, "default"),
-                advanced=safe_get(field_schema, "advanced", False) or False,
-                hidden=safe_get(field_schema, "hidden", False) or False,
-                placeholder=safe_get(field_schema, "placeholder"),
-            )
-        )
-
-    # Extract output fields
-    output_schema = block.output_schema.jsonschema()
-    output_properties = safe_get(output_schema, "properties", {})
-    if not isinstance(output_properties, dict):
-        output_properties = {}
-
-    outputs = []
-    for field_name, field_schema in output_properties.items():
-        if not isinstance(field_schema, dict):
-            continue
-        outputs.append(
-            FieldDoc(
-                name=field_name,
-                description=safe_get(field_schema, "description", ""),
-                type_str=type_to_readable(field_schema),
-                required=True,  # Outputs are always produced
-                hidden=safe_get(field_schema, "hidden", False) or False,
-            )
-        )
-
-    # Get category info (sort for deterministic ordering since it's a set)
-    categories = []
-    category_descriptions = {}
-    for cat in sorted(block.categories, key=lambda c: c.name):
-        categories.append(cat.name)
-        category_descriptions[cat.name] = cat.value
-
-    # Get contributors
-    contributors = []
-    for contrib in block.contributors:
-        contributors.append(contrib.name if hasattr(contrib, "name") else str(contrib))
-
-    return BlockDoc(
-        id=block.id,
-        name=class_name_to_display_name(block.name),
-        class_name=block.name,
-        description=block.description,
-        categories=categories,
-        category_descriptions=category_descriptions,
-        inputs=inputs,
-        outputs=outputs,
-        block_type=block.block_type.value,
-        source_file=source_file,
-        contributors=contributors,
-    )
-
-
-def generate_anchor(name: str) -> str:
-    """Generate markdown anchor from block name."""
-    return name.lower().replace(" ", "-").replace("(", "").replace(")", "")
-
-
-def extract_manual_content(existing_content: str) -> dict[str, str]:
-    """Extract content between MANUAL markers from existing file."""
-    manual_sections = {}
-
-    # Pattern: <!-- MANUAL: section_name -->content<!-- END MANUAL -->
-    pattern = r"<!-- MANUAL: (\w+) -->\s*(.*?)\s*<!-- END MANUAL -->"
-    matches = re.findall(pattern, existing_content, re.DOTALL)
-
-    for section_name, content in matches:
-        manual_sections[section_name] = content.strip()
-
-    return manual_sections
-
-
-def generate_block_markdown(
-    block: BlockDoc,
-    manual_content: dict[str, str] | None = None,
-) -> str:
-    """Generate markdown documentation for a single block."""
-    manual_content = manual_content or {}
-    lines = []
-
-    # All blocks use ## heading, sections use ### (consistent siblings)
-    lines.append(f"## {block.name}")
-    lines.append("")
-
-    # What it is (full description)
-    lines.append(f"### What it is")
-    lines.append(block.description or "No description available.")
-    lines.append("")
-
-    # How it works (manual section)
-    lines.append(f"### How it works")
-    how_it_works = manual_content.get(
-        "how_it_works", "_Add technical explanation here._"
-    )
-    lines.append("<!-- MANUAL: how_it_works -->")
-    lines.append(how_it_works)
-    lines.append("<!-- END MANUAL -->")
-    lines.append("")
-
-    # Inputs table (auto-generated)
-    visible_inputs = [f for f in block.inputs if not f.hidden]
-    if visible_inputs:
-        lines.append(f"### Inputs")
-        lines.append("")
-        lines.append("| Input | Description | Type | Required |")
-        lines.append("|-------|-------------|------|----------|")
-        for inp in visible_inputs:
-            required = "Yes" if inp.required else "No"
-            desc = inp.description or "-"
-            type_str = inp.type_str or "-"
-            # Normalize newlines and escape pipes for valid table syntax
-            desc = desc.replace("\n", " ").replace("|", "\\|")
-            type_str = type_str.replace("|", "\\|")
-            lines.append(f"| {inp.name} | {desc} | {type_str} | {required} |")
-        lines.append("")
-
-    # Outputs table (auto-generated)
-    visible_outputs = [f for f in block.outputs if not f.hidden]
-    if visible_outputs:
-        lines.append(f"### Outputs")
-        lines.append("")
-        lines.append("| Output | Description | Type |")
-        lines.append("|--------|-------------|------|")
-        for out in visible_outputs:
-            desc = out.description or "-"
-            type_str = out.type_str or "-"
-            # Normalize newlines and escape pipes for valid table syntax
-            desc = desc.replace("\n", " ").replace("|", "\\|")
-            type_str = type_str.replace("|", "\\|")
-            lines.append(f"| {out.name} | {desc} | {type_str} |")
-        lines.append("")
-
-    # Possible use case (manual section)
-    lines.append(f"### Possible use case")
-    use_case = manual_content.get("use_case", "_Add practical use case examples here._")
-    lines.append("<!-- MANUAL: use_case -->")
-    lines.append(use_case)
-    lines.append("<!-- END MANUAL -->")
-    lines.append("")
-
-    lines.append("---")
-    lines.append("")
-
-    return "\n".join(lines)
-
-
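
For reference, a sketch of what this deleted generator emitted for a block with no saved manual sections (the BlockDoc values are invented; FieldDoc/BlockDoc are the dataclasses defined above):

    doc = BlockDoc(
        id="00000000-0000-0000-0000-000000000000", name="Example",
        class_name="ExampleBlock", description="Does a thing.",
        categories=["BASIC"], category_descriptions={}, inputs=[], outputs=[],
        block_type="Standard", source_file="blocks/example.py",
    )
    print(generate_block_markdown(doc))
    # -> "## Example", "### What it is", "### How it works" with placeholder
    #    MANUAL markers, no Inputs/Outputs tables (both lists are empty),
    #    "### Possible use case", then a "---" separator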
-def get_block_file_mapping(blocks: list[BlockDoc]) -> dict[str, list[BlockDoc]]:
-    """
-    Map blocks to their documentation files.
-
-    Returns dict of {relative_file_path: [blocks]}
-    """
-    file_mapping = defaultdict(list)
-
-    for block in blocks:
-        # Determine file path based on source file or category
-        source_path = Path(block.source_file)
-
-        # If source is in a subdirectory (e.g., google/gmail.py), use that structure
-        if len(source_path.parts) > 2:  # blocks/subdir/file.py
-            subdir = source_path.parts[1]  # e.g., "google"
-            # Use the Python filename as the md filename
-            md_file = source_path.stem + ".md"  # e.g., "gmail.md"
-            file_path = f"{subdir}/{md_file}"
-        else:
-            # Use category-based grouping for top-level blocks
-            primary_category = block.categories[0] if block.categories else "BASIC"
-            file_name = CATEGORY_FILE_MAP.get(primary_category, "misc")
-            file_path = f"{file_name}.md"
-
-        file_mapping[file_path].append(block)
-
-    return dict(file_mapping)
-
-
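
A worked example of the mapping rule (the block values are invented): a source file in a provider subdirectory keeps that structure, while a top-level source falls back to its primary category's file.

    b1 = BlockDoc(id="1", name="Gmail Read", class_name="GmailReadBlock",
                  description="", categories=["COMMUNICATION"], category_descriptions={},
                  inputs=[], outputs=[], block_type="Standard",
                  source_file="blocks/google/gmail.py")
    b2 = BlockDoc(id="2", name="Match Text Pattern", class_name="MatchTextPatternBlock",
                  description="", categories=["TEXT"], category_descriptions={},
                  inputs=[], outputs=[], block_type="Standard",
                  source_file="blocks/text.py")
    assert get_block_file_mapping([b1, b2]) == {
        "google/gmail.md": [b1],  # subdirectory structure preserved
        "text.md": [b2],          # top-level block grouped by primary category
    }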
-def generate_overview_table(blocks: list[BlockDoc]) -> str:
-    """Generate the overview table markdown (blocks.md)."""
-    lines = []
-
-    lines.append("# AutoGPT Blocks Overview")
-    lines.append("")
-    lines.append(
-        'AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.'
-    )
-    lines.append("")
-    lines.append('!!! info "Creating Your Own Blocks"')
-    lines.append("    Want to create your own custom blocks? Check out our guides:")
-    lines.append("    ")
-    lines.append(
-        "    - [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples"
-    )
-    lines.append(
-        "    - [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration"
-    )
-    lines.append("")
-    lines.append(
-        "Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation."
-    )
-    lines.append("")
-
-    # Group blocks by category
-    by_category = defaultdict(list)
-    for block in blocks:
-        primary_cat = block.categories[0] if block.categories else "BASIC"
-        by_category[primary_cat].append(block)
-
-    # Sort categories
-    category_order = [
-        "BASIC",
-        "DATA",
-        "TEXT",
-        "AI",
-        "SEARCH",
-        "SOCIAL",
-        "COMMUNICATION",
-        "DEVELOPER_TOOLS",
-        "MULTIMEDIA",
-        "PRODUCTIVITY",
-        "LOGIC",
-        "INPUT",
-        "OUTPUT",
-        "AGENT",
-        "CRM",
-        "SAFETY",
-        "ISSUE_TRACKING",
-        "HARDWARE",
-        "MARKETING",
-    ]
-
-    # Track emitted display names to avoid duplicate headers
-    # (e.g., INPUT and OUTPUT both map to "Input/Output")
-    emitted_display_names: set[str] = set()
-
-    for category in category_order:
-        if category not in by_category:
-            continue
-
-        display_name = CATEGORY_DISPLAY_NAMES.get(category, category)
-
-        # Collect all blocks for this display name (may span multiple categories)
-        if display_name in emitted_display_names:
-            # Already emitted header, just add rows to existing table
-            # Find the position before the last empty line and insert rows
-            cat_blocks = sorted(by_category[category], key=lambda b: b.name)
-            # Remove the trailing empty line, add rows, then re-add empty line
-            lines.pop()
-            for block in cat_blocks:
-                file_mapping = get_block_file_mapping([block])
-                file_path = list(file_mapping.keys())[0]
-                anchor = generate_anchor(block.name)
-                short_desc = (
-                    block.description.split(".")[0]
-                    if block.description
-                    else "No description"
-                )
-                short_desc = short_desc.replace("\n", " ").replace("|", "\\|")
-                lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")
-            lines.append("")
-            continue
-
-        emitted_display_names.add(display_name)
-        cat_blocks = sorted(by_category[category], key=lambda b: b.name)
-
-        lines.append(f"## {display_name}")
-        lines.append("")
-        lines.append("| Block Name | Description |")
-        lines.append("|------------|-------------|")
-
-        for block in cat_blocks:
-            # Determine link path
-            file_mapping = get_block_file_mapping([block])
-            file_path = list(file_mapping.keys())[0]
-            anchor = generate_anchor(block.name)
-
-            # Short description (first sentence)
-            short_desc = (
-                block.description.split(".")[0]
-                if block.description
-                else "No description"
-            )
-            short_desc = short_desc.replace("\n", " ").replace("|", "\\|")
-
-            lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")
-
-        lines.append("")
-
-    return "\n".join(lines)
-
-
-def load_all_blocks_for_docs() -> list[BlockDoc]:
-    """Load all blocks and extract documentation."""
-    from backend.blocks import load_all_blocks
-
-    block_classes = load_all_blocks()
-    blocks = []
-
-    for _block_id, block_cls in block_classes.items():
-        try:
-            block_doc = extract_block_doc(block_cls)
-            blocks.append(block_doc)
-        except Exception as e:
-            logger.warning(f"Failed to extract docs for {block_cls.__name__}: {e}")
-
-    return blocks
-
-
-def write_block_docs(
-    output_dir: Path,
-    blocks: list[BlockDoc],
-    verbose: bool = False,
-) -> dict[str, str]:
-    """
-    Write block documentation files.
-
-    Returns dict of {file_path: content} for all generated files.
-    """
-    output_dir = Path(output_dir)
-    output_dir.mkdir(parents=True, exist_ok=True)
-
-    file_mapping = get_block_file_mapping(blocks)
-    generated_files = {}
-
-    for file_path, file_blocks in file_mapping.items():
-        full_path = output_dir / file_path
-
-        # Create subdirectories if needed
-        full_path.parent.mkdir(parents=True, exist_ok=True)
-
-        # Load existing content for manual section preservation
-        existing_content = ""
-        if full_path.exists():
-            existing_content = full_path.read_text()
-
-        # Always generate title from file path (with fixes applied)
-        file_title = file_path_to_title(file_path)
-
-        # Extract existing file description if present (preserve manual content)
-        file_header_pattern = (
-            r"^# .+?\n<!-- MANUAL: file_description -->\n(.*?)\n<!-- END MANUAL -->"
-        )
-        file_header_match = re.search(file_header_pattern, existing_content, re.DOTALL)
-
-        if file_header_match:
-            file_description = file_header_match.group(1)
-        else:
-            file_description = "_Add a description of this category of blocks._"
-
-        # Generate file header
-        file_header = f"# {file_title}\n"
-        file_header += "<!-- MANUAL: file_description -->\n"
-        file_header += f"{file_description}\n"
-        file_header += "<!-- END MANUAL -->\n"
-
-        # Generate content for each block
-        content_parts = []
-        for block in sorted(file_blocks, key=lambda b: b.name):
-            # Extract manual content specific to this block
-            # Match block heading (h2) and capture until --- separator
-            block_pattern = rf"(?:^|\n)## {re.escape(block.name)}\s*\n(.*?)(?=\n---|\Z)"
-            block_match = re.search(block_pattern, existing_content, re.DOTALL)
-            if block_match:
-                manual_content = extract_manual_content(block_match.group(1))
-            else:
-                manual_content = {}
-
-            content_parts.append(
-                generate_block_markdown(
-                    block,
-                    manual_content,
-                )
-            )
-
-        full_content = file_header + "\n" + "\n".join(content_parts)
-        generated_files[str(file_path)] = full_content
-
-        if verbose:
-            print(f"  Writing {file_path} ({len(file_blocks)} blocks)")
-
-        full_path.write_text(full_content)
-
-    # Generate overview file
-    overview_content = generate_overview_table(blocks)
-    overview_path = output_dir / "README.md"
-    generated_files["README.md"] = overview_content
-    overview_path.write_text(overview_content)
-
-    if verbose:
-        print("  Writing README.md (overview)")
-
-    return generated_files
-
-
def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
    """
    Check if generated docs match existing docs.

    Returns True if in sync, False otherwise.
    """
    output_dir = Path(output_dir)
    file_mapping = get_block_file_mapping(blocks)

    all_match = True
    out_of_sync_details: list[tuple[str, list[str]]] = []

    for file_path, file_blocks in file_mapping.items():
        full_path = output_dir / file_path

        if not full_path.exists():
            block_names = [b.name for b in sorted(file_blocks, key=lambda b: b.name)]
            print(f"MISSING: {file_path}")
            print(f"  Blocks: {', '.join(block_names)}")
            out_of_sync_details.append((file_path, block_names))
            all_match = False
            continue

        existing_content = full_path.read_text()

        # Always generate title from file path (with fixes applied)
        file_title = file_path_to_title(file_path)

        # Extract existing file description if present (preserve manual content)
        file_header_pattern = (
            r"^# .+?\n<!-- MANUAL: file_description -->\n(.*?)\n<!-- END MANUAL -->"
        )
        file_header_match = re.search(file_header_pattern, existing_content, re.DOTALL)

        if file_header_match:
            file_description = file_header_match.group(1)
        else:
            file_description = "_Add a description of this category of blocks._"

        # Generate expected file header
        file_header = f"# {file_title}\n"
        file_header += "<!-- MANUAL: file_description -->\n"
        file_header += f"{file_description}\n"
        file_header += "<!-- END MANUAL -->\n"

        # Extract manual content from existing file
        manual_sections_by_block = {}
        for block in file_blocks:
            block_pattern = rf"(?:^|\n)## {re.escape(block.name)}\s*\n(.*?)(?=\n---|\Z)"
            block_match = re.search(block_pattern, existing_content, re.DOTALL)
            if block_match:
                manual_sections_by_block[block.name] = extract_manual_content(
                    block_match.group(1)
                )

        # Generate expected content and check each block individually
        content_parts = []
        mismatched_blocks = []
        for block in sorted(file_blocks, key=lambda b: b.name):
            manual_content = manual_sections_by_block.get(block.name, {})
            expected_block_content = generate_block_markdown(
                block,
                manual_content,
            )
            content_parts.append(expected_block_content)

            # Check if this specific block's section exists and matches
            # Include the --- separator to match generate_block_markdown output
            block_pattern = rf"(?:^|\n)(## {re.escape(block.name)}\s*\n.*?\n---\n)"
            block_match = re.search(block_pattern, existing_content, re.DOTALL)
            if not block_match:
                mismatched_blocks.append(f"{block.name} (missing)")
            elif block_match.group(1).strip() != expected_block_content.strip():
                mismatched_blocks.append(block.name)

        expected_content = file_header + "\n" + "\n".join(content_parts)

        if existing_content.strip() != expected_content.strip():
            print(f"OUT OF SYNC: {file_path}")
            if mismatched_blocks:
                print(f"  Affected blocks: {', '.join(mismatched_blocks)}")
            out_of_sync_details.append((file_path, mismatched_blocks))
            all_match = False

    # Check overview
    overview_path = output_dir / "README.md"
    if overview_path.exists():
        existing_overview = overview_path.read_text()
        expected_overview = generate_overview_table(blocks)
        if existing_overview.strip() != expected_overview.strip():
            print("OUT OF SYNC: README.md (overview)")
            print("  The blocks overview table needs regeneration")
            out_of_sync_details.append(("README.md", ["overview table"]))
            all_match = False
    else:
        print("MISSING: README.md (overview)")
        out_of_sync_details.append(("README.md", ["overview table"]))
        all_match = False

    # Check for unfilled manual sections
    unfilled_patterns = [
        "_Add a description of this category of blocks._",
        "_Add technical explanation here._",
        "_Add practical use case examples here._",
    ]
    files_with_unfilled = []
    for file_path in file_mapping.keys():
        full_path = output_dir / file_path
        if full_path.exists():
            content = full_path.read_text()
            unfilled_count = sum(1 for p in unfilled_patterns if p in content)
            if unfilled_count > 0:
                files_with_unfilled.append((file_path, unfilled_count))

    if files_with_unfilled:
        print("\nWARNING: Files with unfilled manual sections:")
        for file_path, count in sorted(files_with_unfilled):
            print(f"  {file_path}: {count} unfilled section(s)")
        print(
            f"\nTotal: {len(files_with_unfilled)} files with unfilled manual sections"
        )

    return all_match

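# Typical CI usage of the check above (the invocation matches what main() below
# prints when docs drift):
#
#   poetry run python scripts/generate_block_docs.py --check
#
# The process exits 0 when every generated file matches and 1 otherwise, so the
# command can gate a docs-sync workflow step directly.
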
def main():
    parser = argparse.ArgumentParser(
        description="Generate block documentation from code introspection"
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=DEFAULT_OUTPUT_DIR,
        help="Output directory for generated docs",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Check if docs are in sync (for CI), exit 1 if not",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose output",
    )

    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="%(levelname)s: %(message)s",
    )

    print("Loading blocks...")
    blocks = load_all_blocks_for_docs()
    print(f"Found {len(blocks)} blocks")

    if args.check:
        print(f"Checking docs in {args.output_dir}...")
        in_sync = check_docs_in_sync(args.output_dir, blocks)
        if in_sync:
            print("All documentation is in sync!")
            sys.exit(0)
        else:
            print("\n" + "=" * 60)
            print("Documentation is out of sync!")
            print("=" * 60)
            print("\nTo fix this, run one of the following:")
            print("\n  Option 1 - Run locally:")
            print(
                "    cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
            )
            print("\n  Option 2 - Ask Claude Code to run it:")
            print('    "Run the block docs generator script to sync documentation"')
            print("\n" + "=" * 60)
            sys.exit(1)
    else:
        print(f"Generating docs to {args.output_dir}...")
        write_block_docs(
            args.output_dir,
            blocks,
            verbose=args.verbose,
        )
        print("Done!")


if __name__ == "__main__":
    main()

@@ -1,208 +0,0 @@
#!/usr/bin/env python3
"""Tests for the block documentation generator."""
import pytest

from scripts.generate_block_docs import (
    class_name_to_display_name,
    extract_manual_content,
    generate_anchor,
    type_to_readable,
)


class TestClassNameToDisplayName:
    """Tests for class_name_to_display_name function."""

    def test_simple_block_name(self):
        assert class_name_to_display_name("PrintBlock") == "Print"

    def test_multi_word_block_name(self):
        assert class_name_to_display_name("GetWeatherBlock") == "Get Weather"

    def test_consecutive_capitals(self):
        assert class_name_to_display_name("HTTPRequestBlock") == "HTTP Request"

    def test_ai_prefix(self):
        assert class_name_to_display_name("AIConditionBlock") == "AI Condition"

    def test_no_block_suffix(self):
        assert class_name_to_display_name("SomeClass") == "Some Class"


class TestTypeToReadable:
    """Tests for type_to_readable function."""

    def test_string_type(self):
        assert type_to_readable({"type": "string"}) == "str"

    def test_integer_type(self):
        assert type_to_readable({"type": "integer"}) == "int"

    def test_number_type(self):
        assert type_to_readable({"type": "number"}) == "float"

    def test_boolean_type(self):
        assert type_to_readable({"type": "boolean"}) == "bool"

    def test_array_type(self):
        result = type_to_readable({"type": "array", "items": {"type": "string"}})
        assert result == "List[str]"

    def test_object_type(self):
        result = type_to_readable({"type": "object", "title": "MyModel"})
        assert result == "MyModel"

    def test_anyof_with_null(self):
        result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "null"}]})
        assert result == "str"

    def test_anyof_multiple_types(self):
        result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "integer"}]})
        assert result == "str | int"

    def test_enum_type(self):
        result = type_to_readable(
            {"type": "string", "enum": ["option1", "option2", "option3"]}
        )
        assert result == '"option1" | "option2" | "option3"'

    def test_none_input(self):
        assert type_to_readable(None) == "Any"

    def test_non_dict_input(self):
        assert type_to_readable("string") == "string"


class TestExtractManualContent:
    """Tests for extract_manual_content function."""

    def test_extract_how_it_works(self):
        content = """
### How it works
<!-- MANUAL: how_it_works -->
This is how it works.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {"how_it_works": "This is how it works."}

    def test_extract_use_case(self):
        content = """
### Possible use case
<!-- MANUAL: use_case -->
Example use case here.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {"use_case": "Example use case here."}

    def test_extract_multiple_sections(self):
        content = """
<!-- MANUAL: how_it_works -->
How it works content.
<!-- END MANUAL -->

<!-- MANUAL: use_case -->
Use case content.
<!-- END MANUAL -->
"""
        result = extract_manual_content(content)
        assert result == {
            "how_it_works": "How it works content.",
            "use_case": "Use case content.",
        }

    def test_empty_content(self):
        result = extract_manual_content("")
        assert result == {}

    def test_no_markers(self):
        result = extract_manual_content("Some content without markers")
        assert result == {}


class TestGenerateAnchor:
    """Tests for generate_anchor function."""

    def test_simple_name(self):
        assert generate_anchor("Print") == "print"

    def test_multi_word_name(self):
        assert generate_anchor("Get Weather") == "get-weather"

    def test_name_with_parentheses(self):
        assert generate_anchor("Something (Optional)") == "something-optional"

    def test_already_lowercase(self):
        assert generate_anchor("already lowercase") == "already-lowercase"


class TestIntegration:
    """Integration tests that require block loading."""

    def test_load_blocks(self):
        """Test that blocks can be loaded successfully."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import load_all_blocks_for_docs

        blocks = load_all_blocks_for_docs()
        assert len(blocks) > 0, "Should load at least one block"

    def test_block_doc_has_required_fields(self):
        """Test that extracted block docs have required fields."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import load_all_blocks_for_docs

        blocks = load_all_blocks_for_docs()
        block = blocks[0]

        assert hasattr(block, "id")
        assert hasattr(block, "name")
        assert hasattr(block, "description")
        assert hasattr(block, "categories")
        assert hasattr(block, "inputs")
        assert hasattr(block, "outputs")

    def test_file_mapping_is_deterministic(self):
        """Test that file mapping produces consistent results."""
        import logging
        import sys
        from pathlib import Path

        logging.disable(logging.CRITICAL)
        sys.path.insert(0, str(Path(__file__).parent.parent))

        from scripts.generate_block_docs import (
            get_block_file_mapping,
            load_all_blocks_for_docs,
        )

        # Load blocks twice and compare mappings
        blocks1 = load_all_blocks_for_docs()
        blocks2 = load_all_blocks_for_docs()

        mapping1 = get_block_file_mapping(blocks1)
        mapping2 = get_block_file_mapping(blocks2)

        # Check same files are generated
        assert set(mapping1.keys()) == set(mapping2.keys())

        # Check same block counts per file
        for file_path in mapping1:
            assert len(mapping1[file_path]) == len(mapping2[file_path])


if __name__ == "__main__":
    pytest.main([__file__, "-v"])

@@ -11,7 +11,6 @@
   "forked_from_version": null,
   "has_external_trigger": false,
   "has_human_in_the_loop": false,
-  "has_sensitive_action": false,
   "id": "graph-123",
   "input_schema": {
     "properties": {},
@@ -11,7 +11,6 @@
   "forked_from_version": null,
   "has_external_trigger": false,
   "has_human_in_the_loop": false,
-  "has_sensitive_action": false,
   "id": "graph-123",
   "input_schema": {
     "properties": {},
@@ -27,8 +27,6 @@
     "properties": {}
   },
   "has_external_trigger": false,
-  "has_human_in_the_loop": false,
-  "has_sensitive_action": false,
   "trigger_setup_info": null,
   "new_output": false,
   "can_access_graph": true,
@@ -36,8 +34,7 @@
   "is_favorite": false,
   "recommended_schedule_cron": null,
   "settings": {
-    "human_in_the_loop_safe_mode": true,
-    "sensitive_action_safe_mode": false
+    "human_in_the_loop_safe_mode": null
   },
   "marketplace_listing": null
 },
@@ -68,8 +65,6 @@
     "properties": {}
   },
   "has_external_trigger": false,
-  "has_human_in_the_loop": false,
-  "has_sensitive_action": false,
   "trigger_setup_info": null,
   "new_output": false,
   "can_access_graph": false,
@@ -77,8 +72,7 @@
   "is_favorite": false,
   "recommended_schedule_cron": null,
   "settings": {
-    "human_in_the_loop_safe_mode": true,
-    "sensitive_action_safe_mode": false
+    "human_in_the_loop_safe_mode": null
   },
   "marketplace_listing": null
 }
@@ -5,11 +5,10 @@ import {
   TooltipContent,
   TooltipTrigger,
 } from "@/components/atoms/Tooltip/BaseTooltip";
-import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react";
+import { PlayIcon, StopIcon } from "@phosphor-icons/react";
 import { useShallow } from "zustand/react/shallow";
 import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
 import { useRunGraph } from "./useRunGraph";
-import { cn } from "@/lib/utils";

 export const RunGraph = ({ flowID }: { flowID: string | null }) => {
   const {
@@ -25,31 +24,6 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
     useShallow((state) => state.isGraphRunning),
   );

-  const isLoading = isExecutingGraph || isTerminatingGraph || isSaving;
-
-  // Determine which icon to show with proper animation
-  const renderIcon = () => {
-    const iconClass = cn(
-      "size-4 transition-transform duration-200 ease-out",
-      !isLoading && "group-hover:scale-110",
-    );
-
-    if (isLoading) {
-      return (
-        <CircleNotchIcon
-          className={cn(iconClass, "animate-spin")}
-          weight="bold"
-        />
-      );
-    }
-
-    if (isGraphRunning) {
-      return <StopIcon className={iconClass} weight="fill" />;
-    }
-
-    return <PlayIcon className={iconClass} weight="fill" />;
-  };
-
   return (
     <>
       <Tooltip>
@@ -59,18 +33,18 @@
             variant={isGraphRunning ? "destructive" : "primary"}
             data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"}
             onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
-            disabled={!flowID || isLoading}
-            className="group"
+            disabled={!flowID || isExecutingGraph || isTerminatingGraph}
+            loading={isExecutingGraph || isTerminatingGraph || isSaving}
           >
-            {renderIcon()}
+            {!isGraphRunning ? (
+              <PlayIcon className="size-4" />
+            ) : (
+              <StopIcon className="size-4" />
+            )}
           </Button>
         </TooltipTrigger>
         <TooltipContent>
-          {isLoading
-            ? "Processing..."
-            : isGraphRunning
-              ? "Stop agent"
-              : "Run agent"}
+          {isGraphRunning ? "Stop agent" : "Run agent"}
         </TooltipContent>
       </Tooltip>
       <RunInputDialog
@@ -10,7 +10,6 @@ import { useRunInputDialog } from "./useRunInputDialog";
 import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog";
 import { useTutorialStore } from "@/app/(platform)/build/stores/tutorialStore";
 import { useEffect } from "react";
-import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";

 export const RunInputDialog = ({
   isOpen,
@@ -24,17 +23,19 @@
   const hasInputs = useGraphStore((state) => state.hasInputs);
   const hasCredentials = useGraphStore((state) => state.hasCredentials);
   const inputSchema = useGraphStore((state) => state.inputSchema);
+  const credentialsSchema = useGraphStore(
+    (state) => state.credentialsInputSchema,
+  );

   const {
-    credentialFields,
-    requiredCredentials,
+    credentialsUiSchema,
     handleManualRun,
     handleInputChange,
     openCronSchedulerDialog,
     setOpenCronSchedulerDialog,
     inputValues,
     credentialValues,
-    handleCredentialFieldChange,
+    handleCredentialChange,
     isExecutingGraph,
   } = useRunInputDialog({ setIsOpen });
@@ -61,67 +62,67 @@
         isOpen,
         set: setIsOpen,
       }}
-      styling={{ maxWidth: "700px", minWidth: "700px" }}
+      styling={{ maxWidth: "600px", minWidth: "600px" }}
     >
       <Dialog.Content>
-        <div
-          className="grid grid-cols-[1fr_auto] gap-10 p-1"
-          data-id="run-input-dialog-content"
-        >
-          <div className="space-y-6">
-            {/* Credentials Section */}
-            {hasCredentials() && credentialFields.length > 0 && (
-              <div data-id="run-input-credentials-section">
-                <div className="mb-4">
-                  <Text variant="h4" className="text-gray-900">
-                    Credentials
-                  </Text>
-                </div>
-                <div className="px-2" data-id="run-input-credentials-form">
-                  <CredentialsGroupedView
-                    credentialFields={credentialFields}
-                    requiredCredentials={requiredCredentials}
-                    inputCredentials={credentialValues}
-                    inputValues={inputValues}
-                    onCredentialChange={handleCredentialFieldChange}
-                  />
-                </div>
-              </div>
-            )}
-
-            {/* Inputs Section */}
-            {hasInputs() && (
-              <div data-id="run-input-inputs-section">
-                <div className="mb-4">
-                  <Text variant="h4" className="text-gray-900">
-                    Inputs
-                  </Text>
-                </div>
-                <div data-id="run-input-inputs-form">
-                  <FormRenderer
-                    jsonSchema={inputSchema as RJSFSchema}
-                    handleChange={(v) => handleInputChange(v.formData)}
-                    uiSchema={uiSchema}
-                    initialValues={{}}
-                    formContext={{
-                      showHandles: false,
-                      size: "large",
-                    }}
-                  />
-                </div>
-              </div>
-            )}
-          </div>
-          <div
-            className="flex flex-col items-end justify-start"
+        <div className="space-y-6 p-1" data-id="run-input-dialog-content">
+          {/* Credentials Section */}
+          {hasCredentials() && (
+            <div data-id="run-input-credentials-section">
+              <div className="mb-4">
+                <Text variant="h4" className="text-gray-900">
+                  Credentials
+                </Text>
+              </div>
+              <div className="px-2" data-id="run-input-credentials-form">
+                <FormRenderer
+                  jsonSchema={credentialsSchema as RJSFSchema}
+                  handleChange={(v) => handleCredentialChange(v.formData)}
+                  uiSchema={credentialsUiSchema}
+                  initialValues={{}}
+                  formContext={{
+                    showHandles: false,
+                    size: "large",
+                    showOptionalToggle: false,
+                  }}
+                />
+              </div>
+            </div>
+          )}
+
+          {/* Inputs Section */}
+          {hasInputs() && (
+            <div data-id="run-input-inputs-section">
+              <div className="mb-4">
+                <Text variant="h4" className="text-gray-900">
+                  Inputs
+                </Text>
+              </div>
+              <div data-id="run-input-inputs-form">
+                <FormRenderer
+                  jsonSchema={inputSchema as RJSFSchema}
+                  handleChange={(v) => handleInputChange(v.formData)}
+                  uiSchema={uiSchema}
+                  initialValues={{}}
+                  formContext={{
+                    showHandles: false,
+                    size: "large",
+                  }}
+                />
+              </div>
+            </div>
+          )}
+
+          {/* Action Button */}
+          <div
+            className="flex justify-end pt-2"
             data-id="run-input-actions-section"
           >
             {purpose === "run" && (
               <Button
                 variant="primary"
                 size="large"
-                className="group h-fit min-w-0 gap-2 px-10"
+                className="group h-fit min-w-0 gap-2"
                 onClick={handleManualRun}
                 loading={isExecutingGraph}
                 data-id="run-input-manual-run-button"
@@ -136,7 +137,7 @@
               <Button
                 variant="primary"
                 size="large"
-                className="group h-fit min-w-0 gap-2 px-10"
+                className="group h-fit min-w-0 gap-2"
                 onClick={() => setOpenCronSchedulerDialog(true)}
                 data-id="run-input-schedule-button"
               >
@@ -7,11 +7,12 @@ import {
   GraphExecutionMeta,
 } from "@/lib/autogpt-server-api";
 import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
-import { useCallback, useMemo, useState } from "react";
+import { useMemo, useState } from "react";
+import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
+import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers";
 import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
 import { useToast } from "@/components/molecules/Toast/use-toast";
 import { useReactFlow } from "@xyflow/react";
-import type { CredentialField } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";

 export const useRunInputDialog = ({
   setIsOpen,
@@ -119,32 +120,27 @@
     },
   });

-  // Convert credentials schema to credential fields array for CredentialsGroupedView
-  const credentialFields: CredentialField[] = useMemo(() => {
-    if (!credentialsSchema?.properties) return [];
-    return Object.entries(credentialsSchema.properties);
-  }, [credentialsSchema]);
-
-  // Get required credentials as a Set
-  const requiredCredentials = useMemo(() => {
-    return new Set<string>(credentialsSchema?.required || []);
-  }, [credentialsSchema]);
-
-  // Handler for individual credential changes
-  const handleCredentialFieldChange = useCallback(
-    (key: string, value?: CredentialsMetaInput) => {
-      setCredentialValues((prev) => {
-        if (value) {
-          return { ...prev, [key]: value };
-        } else {
-          const next = { ...prev };
-          delete next[key];
-          return next;
-        }
-      });
-    },
-    [],
-  );
+  // We are rendering the credentials field differently compared to other fields.
+  // In the node, we have the field name as "credential" - so our library catches it and renders it differently.
+  // But here we have a different name, something like `Firecrawl credentials`, so here we are telling the library that this field is a credential field type.
+
+  const credentialsUiSchema = useMemo(() => {
+    const dynamicUiSchema: any = { ...uiSchema };
+
+    if (credentialsSchema?.properties) {
+      Object.keys(credentialsSchema.properties).forEach((fieldName) => {
+        const fieldSchema = credentialsSchema.properties[fieldName];
+        if (isCredentialFieldSchema(fieldSchema)) {
+          dynamicUiSchema[fieldName] = {
+            ...dynamicUiSchema[fieldName],
+            "ui:field": "custom/credential_field",
+          };
+        }
+      });
+    }
+    return dynamicUiSchema;
+  }, [credentialsSchema]);

   const handleManualRun = async () => {
     // Filter out incomplete credentials (those without a valid id)
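A minimal sketch of the uiSchema shape the memo above produces, assuming a single
credential field named after the comment's `Firecrawl credentials` example (the
field name here is illustrative, not taken from the repo):

  // Hypothetical output of credentialsUiSchema for one credential field:
  const exampleUiSchema = {
    ...uiSchema,
    "Firecrawl credentials": {
      "ui:field": "custom/credential_field", // routes this field to the credential renderer
    },
  };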
@@ -177,14 +173,12 @@
   };

   return {
-    credentialFields,
-    requiredCredentials,
+    credentialsUiSchema,
     inputValues,
     credentialValues,
     isExecutingGraph,
     handleInputChange,
     handleCredentialChange,
-    handleCredentialFieldChange,
     handleManualRun,
     openCronSchedulerDialog,
     setOpenCronSchedulerDialog,
@@ -18,118 +18,69 @@ interface Props {
   fullWidth?: boolean;
 }

-interface SafeModeButtonProps {
-  isEnabled: boolean;
-  label: string;
-  tooltipEnabled: string;
-  tooltipDisabled: string;
-  onToggle: () => void;
-  isPending: boolean;
-  fullWidth?: boolean;
-}
-
-function SafeModeButton({
-  isEnabled,
-  label,
-  tooltipEnabled,
-  tooltipDisabled,
-  onToggle,
-  isPending,
-  fullWidth = false,
-}: SafeModeButtonProps) {
-  return (
-    <Tooltip delayDuration={100}>
-      <TooltipTrigger asChild>
-        <Button
-          variant={isEnabled ? "primary" : "outline"}
-          size="small"
-          onClick={onToggle}
-          disabled={isPending}
-          className={cn("justify-start", fullWidth ? "w-full" : "")}
-        >
-          {isEnabled ? (
-            <>
-              <ShieldCheckIcon weight="bold" size={16} />
-              <Text variant="body" className="text-zinc-200">
-                {label}: ON
-              </Text>
-            </>
-          ) : (
-            <>
-              <ShieldIcon weight="bold" size={16} />
-              <Text variant="body" className="text-zinc-600">
-                {label}: OFF
-              </Text>
-            </>
-          )}
-        </Button>
-      </TooltipTrigger>
-      <TooltipContent>
-        <div className="text-center">
-          <div className="font-medium">
-            {label}: {isEnabled ? "ON" : "OFF"}
-          </div>
-          <div className="mt-1 text-xs text-muted-foreground">
-            {isEnabled ? tooltipEnabled : tooltipDisabled}
-          </div>
-        </div>
-      </TooltipContent>
-    </Tooltip>
-  );
-}
-
 export function FloatingSafeModeToggle({
   graph,
   className,
   fullWidth = false,
 }: Props) {
   const {
-    currentHITLSafeMode,
-    showHITLToggle,
-    isHITLStateUndetermined,
-    handleHITLToggle,
-    currentSensitiveActionSafeMode,
-    showSensitiveActionToggle,
-    handleSensitiveActionToggle,
+    currentSafeMode,
     isPending,
     shouldShowToggle,
+    isStateUndetermined,
+    handleToggle,
   } = useAgentSafeMode(graph);

-  if (!shouldShowToggle || isPending) {
-    return null;
-  }
-
-  const showHITL = showHITLToggle && !isHITLStateUndetermined;
-  const showSensitive = showSensitiveActionToggle;
-
-  if (!showHITL && !showSensitive) {
+  if (!shouldShowToggle || isStateUndetermined || isPending) {
     return null;
   }

   return (
-    <div className={cn("fixed z-50 flex flex-col gap-2", className)}>
-      {showHITL && (
-        <SafeModeButton
-          isEnabled={currentHITLSafeMode}
-          label="Human in the loop block approval"
-          tooltipEnabled="The agent will pause at human-in-the-loop blocks and wait for your approval"
-          tooltipDisabled="Human in the loop blocks will proceed automatically"
-          onToggle={handleHITLToggle}
-          isPending={isPending}
-          fullWidth={fullWidth}
-        />
-      )}
-      {showSensitive && (
-        <SafeModeButton
-          isEnabled={currentSensitiveActionSafeMode}
-          label="Sensitive actions blocks approval"
-          tooltipEnabled="The agent will pause at sensitive action blocks and wait for your approval"
-          tooltipDisabled="Sensitive action blocks will proceed automatically"
-          onToggle={handleSensitiveActionToggle}
-          isPending={isPending}
-          fullWidth={fullWidth}
-        />
-      )}
+    <div className={cn("fixed z-50", className)}>
+      <Tooltip delayDuration={100}>
+        <TooltipTrigger asChild>
+          <Button
+            variant={currentSafeMode! ? "primary" : "outline"}
+            key={graph.id}
+            size="small"
+            title={
+              currentSafeMode!
+                ? "Safe Mode: ON. Human in the loop blocks require manual review"
+                : "Safe Mode: OFF. Human in the loop blocks proceed automatically"
+            }
+            onClick={handleToggle}
+            className={cn(fullWidth ? "w-full" : "")}
+          >
+            {currentSafeMode! ? (
+              <>
+                <ShieldCheckIcon weight="bold" size={16} />
+                <Text variant="body" className="text-zinc-200">
+                  Safe Mode: ON
+                </Text>
+              </>
+            ) : (
+              <>
+                <ShieldIcon weight="bold" size={16} />
+                <Text variant="body" className="text-zinc-600">
+                  Safe Mode: OFF
+                </Text>
+              </>
+            )}
+          </Button>
+        </TooltipTrigger>
+        <TooltipContent>
+          <div className="text-center">
+            <div className="font-medium">
+              Safe Mode: {currentSafeMode! ? "ON" : "OFF"}
+            </div>
+            <div className="mt-1 text-xs text-muted-foreground">
+              {currentSafeMode!
+                ? "Human in the loop blocks require manual review"
+                : "Human in the loop blocks proceed automatically"}
+            </div>
+          </div>
+        </TooltipContent>
+      </Tooltip>
     </div>
   );
 }
@@ -53,14 +53,14 @@ export const CustomControls = memo(
     const controls = [
       {
         id: "zoom-in-button",
-        icon: <PlusIcon className="size-3.5 text-zinc-600" />,
+        icon: <PlusIcon className="size-4" />,
         label: "Zoom In",
         onClick: () => zoomIn(),
         className: "h-10 w-10 border-none",
       },
       {
         id: "zoom-out-button",
-        icon: <MinusIcon className="size-3.5 text-zinc-600" />,
+        icon: <MinusIcon className="size-4" />,
         label: "Zoom Out",
         onClick: () => zoomOut(),
         className: "h-10 w-10 border-none",
@@ -68,9 +68,9 @@
       {
         id: "tutorial-button",
         icon: isTutorialLoading ? (
-          <CircleNotchIcon className="size-3.5 animate-spin text-zinc-600" />
+          <CircleNotchIcon className="size-4 animate-spin" />
         ) : (
-          <ChalkboardIcon className="size-3.5 text-zinc-600" />
+          <ChalkboardIcon className="size-4" />
         ),
         label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial",
         onClick: handleTutorialClick,
@@ -79,7 +79,7 @@
       },
       {
         id: "fit-view-button",
-        icon: <FrameCornersIcon className="size-3.5 text-zinc-600" />,
+        icon: <FrameCornersIcon className="size-4" />,
         label: "Fit View",
         onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }),
         className: "h-10 w-10 border-none",
@@ -87,9 +87,9 @@
       {
         id: "lock-button",
         icon: !isLocked ? (
-          <LockOpenIcon className="size-3.5 text-zinc-600" />
+          <LockOpenIcon className="size-4" />
         ) : (
-          <LockIcon className="size-3.5 text-zinc-600" />
+          <LockIcon className="size-4" />
         ),
         label: "Toggle Lock",
         onClick: () => setIsLocked(!isLocked),
@@ -139,6 +139,14 @@ export const useFlow = () => {
       useNodeStore.getState().setNodes([]);
       useNodeStore.getState().clearResolutionState();
       addNodes(customNodes);
+
+      // Sync hardcoded values with handle IDs.
+      // If a key–value field has a key without a value, the backend omits it from hardcoded values.
+      // But if a handleId exists for that key, it causes inconsistency.
+      // This ensures hardcoded values stay in sync with handle IDs.
+      customNodes.forEach((node) => {
+        useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
+      });
     }
   }, [customNodes, addNodes]);
@@ -150,14 +158,6 @@
     }
   }, [graph?.links, addLinks]);

-  useEffect(() => {
-    if (customNodes.length > 0 && graph?.links) {
-      customNodes.forEach((node) => {
-        useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
-      });
-    }
-  }, [customNodes, graph?.links]);
-
   // update node execution status in nodes
   useEffect(() => {
     if (
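A small sketch of the inconsistency the sync comment above describes; the node
shape and field names here are assumptions for illustration, not the repo's
actual store types:

  // Hypothetical node state: the backend dropped the value-less "Authorization"
  // key from hardcodedValues, but a handle for it still exists on the node.
  const node = {
    data: {
      hardcodedValues: { headers: {} },
      handleIds: ["headers_#_Authorization"],
    },
  };
  // syncHardcodedValuesWithHandleIds(node.id) re-creates the missing key so the
  // two stay consistent, e.g. hardcodedValues.headers.Authorization === "".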
@@ -19,8 +19,6 @@ export type CustomEdgeData = {
   beadUp?: number;
   beadDown?: number;
   beadData?: Map<string, NodeExecutionResult["status"]>;
-  edgeColorClass?: string;
-  edgeHexColor?: string;
 };

 export type CustomEdge = XYEdge<CustomEdgeData, "custom">;
@@ -38,6 +36,7 @@ const CustomEdge = ({
   selected,
 }: EdgeProps<CustomEdge>) => {
   const removeConnection = useEdgeStore((state) => state.removeEdge);
+  // Subscribe to the brokenEdgeIDs map and check if this edge is broken across any node
   const isBroken = useNodeStore((state) => state.isEdgeBroken(id));
   const [isHovered, setIsHovered] = useState(false);
@@ -53,7 +52,6 @@
   const isStatic = data?.isStatic ?? false;
   const beadUp = data?.beadUp ?? 0;
   const beadDown = data?.beadDown ?? 0;
-  const edgeColorClass = data?.edgeColorClass;

   const handleRemoveEdge = () => {
     removeConnection(id);
@@ -72,9 +70,7 @@
             ? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]"
             : selected
               ? "stroke-zinc-800"
-              : edgeColorClass
-                ? cn(edgeColorClass, "opacity-70 hover:opacity-100")
-                : "stroke-zinc-500/50 hover:stroke-zinc-500",
+              : "stroke-zinc-500/50 hover:stroke-zinc-500",
         )}
       />
       <JSBeads
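One plausible shape for the isEdgeBroken selector the comment above subscribes
to; this is a sketch under assumptions, not the repo's actual nodeStore code:

  // Hypothetical store slice: brokenEdgeIDs maps nodeId -> broken edge ids,
  // and an edge counts as broken if any node reports it.
  isEdgeBroken: (edgeId: string): boolean => {
    for (const ids of get().brokenEdgeIDs.values()) {
      if (ids.has(edgeId)) return true;
    }
    return false;
  },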
@@ -8,7 +8,6 @@ import { useCallback } from "react";
 import { useNodeStore } from "../../../stores/nodeStore";
 import { useHistoryStore } from "../../../stores/historyStore";
 import { CustomEdge } from "./CustomEdge";
-import { getEdgeColorFromOutputType } from "../nodes/helpers";

 export const useCustomEdge = () => {
   const edges = useEdgeStore((s) => s.edges);
@@ -35,13 +34,8 @@
       if (exists) return;

       const nodes = useNodeStore.getState().nodes;
-      const sourceNode = nodes.find((n) => n.id === conn.source);
-      const isStatic = sourceNode?.data?.staticOutput;
-
-      const { colorClass, hexColor } = getEdgeColorFromOutputType(
-        sourceNode?.data?.outputSchema,
-        conn.sourceHandle,
-      );
+      const isStatic = nodes.find((n) => n.id === conn.source)?.data
+        ?.staticOutput;

       addEdge({
         source: conn.source,
@@ -50,8 +44,6 @@
         targetHandle: conn.targetHandle,
         data: {
           isStatic,
-          edgeColorClass: colorClass,
-          edgeHexColor: hexColor,
         },
       });
     },
@@ -1,21 +1,22 @@
 import { Button } from "@/components/atoms/Button/Button";
 import { Text } from "@/components/atoms/Text/Text";
-import {
-  Accordion,
-  AccordionContent,
-  AccordionItem,
-  AccordionTrigger,
-} from "@/components/molecules/Accordion/Accordion";
 import { beautifyString, cn } from "@/lib/utils";
-import { CopyIcon, CheckIcon } from "@phosphor-icons/react";
+import { CaretDownIcon, CopyIcon, CheckIcon } from "@phosphor-icons/react";
 import { NodeDataViewer } from "./components/NodeDataViewer/NodeDataViewer";
 import { ContentRenderer } from "./components/ContentRenderer";
 import { useNodeOutput } from "./useNodeOutput";
 import { ViewMoreData } from "./components/ViewMoreData";

 export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
-  const { outputData, copiedKey, handleCopy, executionResultId, inputData } =
-    useNodeOutput(nodeId);
+  const {
+    outputData,
+    isExpanded,
+    setIsExpanded,
+    copiedKey,
+    handleCopy,
+    executionResultId,
+    inputData,
+  } = useNodeOutput(nodeId);

   if (Object.keys(outputData).length === 0) {
     return null;
@@ -24,117 +25,122 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
   return (
     <div
       data-tutorial-id={`node-output`}
-      className="rounded-b-xl border-t border-zinc-200 px-4 py-2"
+      className="flex flex-col gap-3 rounded-b-xl border-t border-zinc-200 px-4 py-4"
     >
-      <Accordion type="single" collapsible defaultValue="node-output">
-        <AccordionItem value="node-output" className="border-none">
-          <AccordionTrigger className="py-2 hover:no-underline">
-            <Text
-              variant="body-medium"
-              className="!font-semibold text-slate-700"
-            >
-              Node Output
-            </Text>
-          </AccordionTrigger>
-          <AccordionContent className="pt-2">
+      <div className="flex items-center justify-between">
+        <Text variant="body-medium" className="!font-semibold text-slate-700">
+          Node Output
+        </Text>
+        <Button
+          variant="ghost"
+          size="small"
+          onClick={() => setIsExpanded(!isExpanded)}
+          className="h-fit min-w-0 p-1 text-slate-600 hover:text-slate-900"
+        >
+          <CaretDownIcon
+            size={16}
+            weight="bold"
+            className={`transition-transform ${isExpanded ? "rotate-180" : ""}`}
+          />
+        </Button>
+      </div>

+      {isExpanded && (
+        <>
           <div className="flex max-w-[350px] flex-col gap-4">
             <div className="space-y-2">
               <Text variant="small-medium">Input</Text>

               <ContentRenderer value={inputData} shortContent={false} />

               <div className="mt-1 flex justify-end gap-1">
                 <NodeDataViewer
                   data={inputData}
                   pinName="Input"
                   execId={executionResultId}
                 />
                 <Button
                   variant="secondary"
                   size="small"
                   onClick={() => handleCopy("input", inputData)}
                   className={cn(
                     "h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
                     copiedKey === "input" &&
                       "border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
                   )}
                 >
                   {copiedKey === "input" ? (
                     <CheckIcon size={12} className="text-green-600" />
                   ) : (
                     <CopyIcon size={12} />
                   )}
                 </Button>
               </div>
             </div>

             {Object.entries(outputData)
               .slice(0, 2)
               .map(([key, value]) => (
                 <div key={key} className="flex flex-col gap-2">
                   <div className="flex items-center gap-2">
                     <Text
                       variant="small-medium"
                       className="!font-semibold text-slate-600"
                     >
                       Pin:
                     </Text>
                     <Text variant="small" className="text-slate-700">
                       {beautifyString(key)}
                     </Text>
                   </div>
                   <div className="w-full space-y-2">
                     <Text
                       variant="small"
                       className="!font-semibold text-slate-600"
                     >
                       Data:
                     </Text>
                     <div className="relative space-y-2">
                       {value.map((item, index) => (
                         <div key={index}>
                           <ContentRenderer value={item} shortContent={true} />
                         </div>
                       ))}

                       <div className="mt-1 flex justify-end gap-1">
                         <NodeDataViewer
                           data={value}
                           pinName={key}
                           execId={executionResultId}
                         />
                         <Button
                           variant="secondary"
                           size="small"
                           onClick={() => handleCopy(key, value)}
                           className={cn(
                             "h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
                             copiedKey === key &&
                               "border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
                           )}
                         >
                           {copiedKey === key ? (
                             <CheckIcon size={12} className="text-green-600" />
                           ) : (
                             <CopyIcon size={12} />
                           )}
                         </Button>
                       </div>
                     </div>
                   </div>
                 </div>
               ))}
           </div>

-          {Object.keys(outputData).length > 2 && (
-            <ViewMoreData
-              outputData={outputData}
-              execId={executionResultId}
-            />
-          )}
-        </AccordionContent>
-      </AccordionItem>
-      </Accordion>
+          {Object.keys(outputData).length > 2 && (
+            <ViewMoreData outputData={outputData} execId={executionResultId} />
+          )}
+        </>
+      )}
     </div>
   );
 };
@@ -4,6 +4,7 @@ import { useShallow } from "zustand/react/shallow";
 import { useState } from "react";

 export const useNodeOutput = (nodeId: string) => {
+  const [isExpanded, setIsExpanded] = useState(true);
   const [copiedKey, setCopiedKey] = useState<string | null>(null);
   const { toast } = useToast();
@@ -36,10 +37,13 @@
     }
   };
   return {
-    outputData,
-    inputData,
-    copiedKey,
-    handleCopy,
+    outputData: outputData,
+    inputData: inputData,
+    isExpanded: isExpanded,
+    setIsExpanded: setIsExpanded,
+    copiedKey: copiedKey,
+    setCopiedKey: setCopiedKey,
+    handleCopy: handleCopy,
     executionResultId: nodeExecutionResult?.node_exec_id,
   };
 };
@@ -187,38 +187,3 @@ export const getTypeDisplayInfo = (schema: any) => {
     hexColor,
   };
 };
-
-export function getEdgeColorFromOutputType(
-  outputSchema: RJSFSchema | undefined,
-  sourceHandle: string,
-): { colorClass: string; hexColor: string } {
-  const defaultColor = {
-    colorClass: "stroke-zinc-500/50",
-    hexColor: "#6b7280",
-  };
-
-  if (!outputSchema?.properties) return defaultColor;
-
-  const properties = outputSchema.properties as Record<string, unknown>;
-  const handleParts = sourceHandle.split("_#_");
-  let currentSchema: Record<string, unknown> = properties;
-
-  for (let i = 0; i < handleParts.length; i++) {
-    const part = handleParts[i];
-    const fieldSchema = currentSchema[part] as Record<string, unknown>;
-    if (!fieldSchema) return defaultColor;
-
-    if (i === handleParts.length - 1) {
-      const { hexColor, colorClass } = getTypeDisplayInfo(fieldSchema);
-      return { colorClass: colorClass.replace("!text-", "stroke-"), hexColor };
-    }
-
-    if (fieldSchema.properties) {
-      currentSchema = fieldSchema.properties as Record<string, unknown>;
-    } else {
-      return defaultColor;
-    }
-  }
-
-  return defaultColor;
-}
@@ -1,32 +1,7 @@
-type IconOptions = {
-  size?: number;
-  color?: string;
-};
-
-const DEFAULT_SIZE = 16;
-const DEFAULT_COLOR = "#52525b"; // zinc-600
-
-const iconPaths = {
-  ClickIcon: `M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z`,
-  Keyboard: `M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z`,
-  Drag: `M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z`,
-};
-
-function createIcon(path: string, options: IconOptions = {}): string {
-  const size = options.size ?? DEFAULT_SIZE;
-  const color = options.color ?? DEFAULT_COLOR;
-  return `<svg xmlns="http://www.w3.org/2000/svg" width="${size}" height="${size}" fill="${color}" viewBox="0 0 256 256"><path d="${path}"></path></svg>`;
-}
-
+// These are SVG Phosphor icons
 export const ICONS = {
-  ClickIcon: createIcon(iconPaths.ClickIcon),
-  Keyboard: createIcon(iconPaths.Keyboard),
+  ClickIcon: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z"></path></svg>`,
+  Keyboard: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z"></path></svg>`,
|
||||||
Keyboard: createIcon(iconPaths.Keyboard),
|
Keyboard: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z"></path></svg>`,
|
||||||
Drag: createIcon(iconPaths.Drag),
|
Drag: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z"></path></svg>`,
|
||||||
};
|
};
|
||||||
|
|
||||||
export function getIcon(
|
|
||||||
name: keyof typeof iconPaths,
|
|
||||||
options?: IconOptions,
|
|
||||||
): string {
|
|
||||||
return createIcon(iconPaths[name], options);
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -11,7 +11,6 @@ import {
 } from "./helpers";
 import { useNodeStore } from "../../../stores/nodeStore";
 import { useEdgeStore } from "../../../stores/edgeStore";
-import { useTutorialStore } from "../../../stores/tutorialStore";

 let isTutorialLoading = false;
 let tutorialLoadingCallback: ((loading: boolean) => void) | null = null;
@@ -61,14 +60,12 @@ export const startTutorial = async () => {
     handleTutorialComplete();
     removeTutorialStyles();
     clearPrefetchedBlocks();
-    useTutorialStore.getState().setIsTutorialRunning(false);
   });

   tour.on("cancel", () => {
     handleTutorialCancel(tour);
     removeTutorialStyles();
     clearPrefetchedBlocks();
-    useTutorialStore.getState().setIsTutorialRunning(false);
   });

   for (const step of tour.steps) {
@@ -61,18 +61,12 @@ export const convertNodesPlusBlockInfoIntoCustomNodes = (
   return customNode;
 };

-const isToolSourceName = (sourceName: string): boolean =>
-  sourceName.startsWith("tools_^_");
-
-const cleanupSourceName = (sourceName: string): string =>
-  isToolSourceName(sourceName) ? "tools" : sourceName;
-
 export const linkToCustomEdge = (link: Link): CustomEdge => ({
   id: link.id ?? "",
   type: "custom" as const,
   source: link.source_id,
   target: link.sink_id,
-  sourceHandle: cleanupSourceName(link.source_name),
+  sourceHandle: link.source_name,
   targetHandle: link.sink_name,
   data: {
     isStatic: link.is_static,
@@ -267,34 +267,23 @@ export function extractCredentialsNeeded(
     | undefined;
   if (missingCreds && Object.keys(missingCreds).length > 0) {
     const agentName = (setupInfo?.agent_name as string) || "this block";
-    const credentials = Object.values(missingCreds).map((credInfo) => {
-      // Normalize to array at boundary - prefer 'types' array, fall back to single 'type'
-      const typesArray = credInfo.types as
-        | Array<"api_key" | "oauth2" | "user_password" | "host_scoped">
-        | undefined;
-      const singleType =
-        (credInfo.type as
-          | "api_key"
-          | "oauth2"
-          | "user_password"
-          | "host_scoped"
-          | undefined) || "api_key";
-      const credentialTypes =
-        typesArray && typesArray.length > 0 ? typesArray : [singleType];
-
-      return {
-        provider: (credInfo.provider as string) || "unknown",
-        providerName:
-          (credInfo.provider_name as string) ||
-          (credInfo.provider as string) ||
-          "Unknown Provider",
-        credentialTypes,
-        title:
-          (credInfo.title as string) ||
-          `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
-        scopes: credInfo.scopes as string[] | undefined,
-      };
-    });
+    const credentials = Object.values(missingCreds).map((credInfo) => ({
+      provider: (credInfo.provider as string) || "unknown",
+      providerName:
+        (credInfo.provider_name as string) ||
+        (credInfo.provider as string) ||
+        "Unknown Provider",
+      credentialType:
+        (credInfo.type as
+          | "api_key"
+          | "oauth2"
+          | "user_password"
+          | "host_scoped") || "api_key",
+      title:
+        (credInfo.title as string) ||
+        `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
+      scopes: credInfo.scopes as string[] | undefined,
+    }));
     return {
       type: "credentials_needed",
       toolName,
@@ -369,14 +358,11 @@ export function extractInputsNeeded(
   credentials.forEach((cred) => {
     const id = cred.id as string;
     if (id) {
-      const credentialTypes = Array.isArray(cred.types)
-        ? cred.types
-        : [(cred.type as string) || "api_key"];
       credentialsSchema[id] = {
         type: "object",
         properties: {},
         credentials_provider: [cred.provider as string],
-        credentials_types: credentialTypes,
+        credentials_types: [(cred.type as string) || "api_key"],
         credentials_scopes: cred.scopes as string[] | undefined,
       };
     }
@@ -9,9 +9,7 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
 export interface CredentialInfo {
   provider: string;
   providerName: string;
-  credentialTypes: Array<
-    "api_key" | "oauth2" | "user_password" | "host_scoped"
-  >;
+  credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
   title: string;
   scopes?: string[];
 }
@@ -32,7 +30,7 @@ function createSchemaFromCredentialInfo(
   type: "object",
   properties: {},
   credentials_provider: [credential.provider],
-  credentials_types: credential.credentialTypes,
+  credentials_types: [credential.credentialType],
   credentials_scopes: credential.scopes,
   discriminator: undefined,
   discriminator_mapping: undefined,
@@ -41,9 +41,7 @@ export type ChatMessageData =
   credentials: Array<{
     provider: string;
     providerName: string;
-    credentialTypes: Array<
-      "api_key" | "oauth2" | "user_password" | "host_scoped"
-    >;
+    credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
     title: string;
     scopes?: string[];
   }>;
@@ -31,18 +31,10 @@ export function AgentSettingsModal({
     }
   }

-  const {
-    currentHITLSafeMode,
-    showHITLToggle,
-    handleHITLToggle,
-    currentSensitiveActionSafeMode,
-    showSensitiveActionToggle,
-    handleSensitiveActionToggle,
-    isPending,
-    shouldShowToggle,
-  } = useAgentSafeMode(agent);
+  const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
+    useAgentSafeMode(agent);

-  if (!shouldShowToggle) return null;
+  if (!hasHITLBlocks) return null;

   return (
     <Dialog
@@ -65,48 +57,23 @@ export function AgentSettingsModal({
       )}
       <Dialog.Content>
         <div className="space-y-6">
-          {showHITLToggle && (
-            <div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
-              <div className="flex w-full items-start justify-between gap-4">
-                <div className="flex-1">
-                  <Text variant="large-semibold">
-                    Human-in-the-loop approval
-                  </Text>
-                  <Text variant="large" className="mt-1 text-zinc-900">
-                    The agent will pause at human-in-the-loop blocks and wait
-                    for your review before continuing
-                  </Text>
-                </div>
-                <Switch
-                  checked={currentHITLSafeMode || false}
-                  onCheckedChange={handleHITLToggle}
-                  disabled={isPending}
-                  className="mt-1"
-                />
-              </div>
-            </div>
-          )}
-          {showSensitiveActionToggle && (
-            <div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
-              <div className="flex w-full items-start justify-between gap-4">
-                <div className="flex-1">
-                  <Text variant="large-semibold">
-                    Sensitive action approval
-                  </Text>
-                  <Text variant="large" className="mt-1 text-zinc-900">
-                    The agent will pause at sensitive action blocks and wait for
-                    your review before continuing
-                  </Text>
-                </div>
-                <Switch
-                  checked={currentSensitiveActionSafeMode}
-                  onCheckedChange={handleSensitiveActionToggle}
-                  disabled={isPending}
-                  className="mt-1"
-                />
-              </div>
-            </div>
-          )}
+          <div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
+            <div className="flex w-full items-start justify-between gap-4">
+              <div className="flex-1">
+                <Text variant="large-semibold">Require human approval</Text>
+                <Text variant="large" className="mt-1 text-zinc-900">
+                  The agent will pause and wait for your review before
+                  continuing
+                </Text>
+              </div>
+              <Switch
+                checked={currentSafeMode || false}
+                onCheckedChange={handleToggle}
+                disabled={isPending}
+                className="mt-1"
+              />
+            </div>
+          </div>
         </div>
       </Dialog.Content>
     </Dialog>
@@ -5,37 +5,30 @@ import {
   AccordionItem,
   AccordionTrigger,
 } from "@/components/molecules/Accordion/Accordion";
-import {
-  CredentialsMetaInput,
-  CredentialsType,
-} from "@/lib/autogpt-server-api/types";
 import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
-import { SlidersHorizontalIcon } from "@phosphor-icons/react";
+import { SlidersHorizontal } from "@phosphor-icons/react";
 import { useContext, useEffect, useMemo, useRef } from "react";
+import { useRunAgentModalContext } from "../../context";
 import {
   areSystemCredentialProvidersLoading,
   CredentialField,
   findSavedCredentialByProviderAndType,
   hasMissingRequiredSystemCredentials,
   splitCredentialFieldsBySystem,
-} from "./helpers";
+} from "../helpers";

 type Props = {
   credentialFields: CredentialField[];
   requiredCredentials: Set<string>;
-  inputCredentials: Record<string, CredentialsMetaInput | undefined>;
-  inputValues: Record<string, any>;
-  onCredentialChange: (key: string, value?: CredentialsMetaInput) => void;
 };

 export function CredentialsGroupedView({
   credentialFields,
   requiredCredentials,
-  inputCredentials,
-  inputValues,
-  onCredentialChange,
 }: Props) {
   const allProviders = useContext(CredentialsProvidersContext);
+  const { inputCredentials, setInputCredentialsValue, inputValues } =
+    useRunAgentModalContext();

   const { userCredentialFields, systemCredentialFields } = useMemo(
     () =>
@@ -94,11 +87,11 @@ export function CredentialsGroupedView({
   );

   if (savedCredential) {
-    onCredentialChange(key, {
+    setInputCredentialsValue(key, {
       id: savedCredential.id,
       provider: savedCredential.provider,
-      type: savedCredential.type as CredentialsType,
-      title: savedCredential.title,
+      type: savedCredential.type,
+      title: (savedCredential as { title?: string }).title,
     });
   }
 }
@@ -110,7 +103,7 @@ export function CredentialsGroupedView({
   systemCredentialFields,
   requiredCredentials,
   inputCredentials,
-  onCredentialChange,
+  setInputCredentialsValue,
   isLoadingProviders,
 ]);

@@ -130,7 +123,7 @@ export function CredentialsGroupedView({
   }
   selectedCredentials={selectedCred}
   onSelectCredentials={(value) => {
-    onCredentialChange(key, value);
+    setInputCredentialsValue(key, value);
   }}
   siblingInputs={inputValues}
   isOptional={!requiredCredentials.has(key)}
@@ -150,8 +143,7 @@ export function CredentialsGroupedView({
   <AccordionItem value="system-credentials" className="border-none">
     <AccordionTrigger className="py-2 text-sm text-muted-foreground hover:no-underline">
       <div className="flex items-center gap-1">
-        <SlidersHorizontalIcon size={16} weight="bold" /> System
-        credentials
+        <SlidersHorizontal size={16} weight="bold" /> System credentials
         {hasMissingSystemCredentials && (
           <span className="text-destructive">(missing)</span>
         )}
@@ -171,7 +163,7 @@ export function CredentialsGroupedView({
   }
   selectedCredentials={selectedCred}
   onSelectCredentials={(value) => {
-    onCredentialChange(key, value);
+    setInputCredentialsValue(key, value);
   }}
   siblingInputs={inputValues}
   isOptional={!requiredCredentials.has(key)}
@@ -1,9 +1,9 @@
 import { Input } from "@/components/atoms/Input/Input";
-import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
 import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
 import { useMemo } from "react";
 import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs";
 import { useRunAgentModalContext } from "../../context";
+import { CredentialsGroupedView } from "../CredentialsGroupedView/CredentialsGroupedView";
 import { ModalSection } from "../ModalSection/ModalSection";
 import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner";

@@ -19,8 +19,6 @@ export function ModalRunSection() {
   setInputValue,
   agentInputFields,
   agentCredentialsInputFields,
-  inputCredentials,
-  setInputCredentialsValue,
 } = useRunAgentModalContext();

 const inputFields = Object.entries(agentInputFields || {});
@@ -104,9 +102,6 @@ export function ModalRunSection() {
   <CredentialsGroupedView
     credentialFields={credentialFields}
     requiredCredentials={requiredCredentials}
-    inputCredentials={inputCredentials}
-    inputValues={inputValues}
-    onCredentialChange={setInputCredentialsValue}
   />
 </ModalSection>
 ) : null}
@@ -1,5 +1,5 @@
 import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider";
-import { getSystemCredentials } from "../../helpers";
+import { getSystemCredentials } from "../../../../../../../../../../../components/contextual/CredentialsInput/helpers";

 export type CredentialField = [string, any];

@@ -5,112 +5,48 @@ import { Graph } from "@/lib/autogpt-server-api/types";
 import { cn } from "@/lib/utils";
 import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react";
 import { useAgentSafeMode } from "@/hooks/useAgentSafeMode";
-import {
-  Tooltip,
-  TooltipContent,
-  TooltipTrigger,
-} from "@/components/atoms/Tooltip/BaseTooltip";

 interface Props {
   graph: GraphModel | LibraryAgent | Graph;
   className?: string;
+  fullWidth?: boolean;
 }

-interface SafeModeIconButtonProps {
-  isEnabled: boolean;
-  label: string;
-  tooltipEnabled: string;
-  tooltipDisabled: string;
-  onToggle: () => void;
-  isPending: boolean;
-}
-
-function SafeModeIconButton({
-  isEnabled,
-  label,
-  tooltipEnabled,
-  tooltipDisabled,
-  onToggle,
-  isPending,
-}: SafeModeIconButtonProps) {
-  return (
-    <Tooltip delayDuration={100}>
-      <TooltipTrigger asChild>
-        <Button
-          variant="icon"
-          size="icon"
-          aria-label={`${label}: ${isEnabled ? "ON" : "OFF"}. ${isEnabled ? tooltipEnabled : tooltipDisabled}`}
-          onClick={onToggle}
-          disabled={isPending}
-          className={cn(isPending ? "opacity-0" : "opacity-100")}
-        >
-          {isEnabled ? (
-            <ShieldCheckIcon weight="bold" size={16} />
-          ) : (
-            <ShieldIcon weight="bold" size={16} />
-          )}
-        </Button>
-      </TooltipTrigger>
-      <TooltipContent>
-        <div className="text-center">
-          <div className="font-medium">
-            {label}: {isEnabled ? "ON" : "OFF"}
-          </div>
-          <div className="mt-1 text-xs text-muted-foreground">
-            {isEnabled ? tooltipEnabled : tooltipDisabled}
-          </div>
-        </div>
-      </TooltipContent>
-    </Tooltip>
-  );
-}
-
-export function SafeModeToggle({ graph, className }: Props) {
+export function SafeModeToggle({ graph }: Props) {
   const {
-    currentHITLSafeMode,
-    showHITLToggle,
-    isHITLStateUndetermined,
-    handleHITLToggle,
-    currentSensitiveActionSafeMode,
-    showSensitiveActionToggle,
-    handleSensitiveActionToggle,
+    currentSafeMode,
     isPending,
     shouldShowToggle,
+    isStateUndetermined,
+    handleToggle,
   } = useAgentSafeMode(graph);

-  if (!shouldShowToggle || isHITLStateUndetermined) {
-    return null;
-  }
-
-  const showHITL = showHITLToggle && !isHITLStateUndetermined;
-  const showSensitive = showSensitiveActionToggle;
-
-  if (!showHITL && !showSensitive) {
+  if (!shouldShowToggle || isStateUndetermined) {
     return null;
   }

   return (
-    <div className={cn("flex gap-1", className)}>
-      {showHITL && (
-        <SafeModeIconButton
-          isEnabled={currentHITLSafeMode}
-          label="Human-in-the-loop"
-          tooltipEnabled="The agent will pause at human-in-the-loop blocks and wait for your approval"
-          tooltipDisabled="Human-in-the-loop blocks will proceed automatically"
-          onToggle={handleHITLToggle}
-          isPending={isPending}
-        />
-      )}
-      {showSensitive && (
-        <SafeModeIconButton
-          isEnabled={currentSensitiveActionSafeMode}
-          label="Sensitive actions"
-          tooltipEnabled="The agent will pause at sensitive action blocks and wait for your approval"
-          tooltipDisabled="Sensitive action blocks will proceed automatically"
-          onToggle={handleSensitiveActionToggle}
-          isPending={isPending}
-        />
-      )}
-    </div>
+    <Button
+      variant="icon"
+      key={graph.id}
+      size="icon"
+      aria-label={
+        currentSafeMode!
+          ? "Safe Mode: ON. Human in the loop blocks require manual review"
+          : "Safe Mode: OFF. Human in the loop blocks proceed automatically"
+      }
+      onClick={handleToggle}
+      className={cn(isPending ? "opacity-0" : "opacity-100")}
+    >
+      {currentSafeMode! ? (
+        <>
+          <ShieldCheckIcon weight="bold" size={16} />
+        </>
+      ) : (
+        <>
+          <ShieldIcon weight="bold" size={16} />
+        </>
+      )}
+    </Button>
   );
 }
@@ -13,16 +13,8 @@ interface Props {
 }

 export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
-  const {
-    currentHITLSafeMode,
-    showHITLToggle,
-    handleHITLToggle,
-    currentSensitiveActionSafeMode,
-    showSensitiveActionToggle,
-    handleSensitiveActionToggle,
-    isPending,
-    shouldShowToggle,
-  } = useAgentSafeMode(agent);
+  const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
+    useAgentSafeMode(agent);

   return (
     <SelectedViewLayout agent={agent}>
@@ -42,51 +34,24 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
       </div>

       <div className={`${AGENT_LIBRARY_SECTION_PADDING_X} space-y-6`}>
-        {shouldShowToggle ? (
-          <>
-            {showHITLToggle && (
-              <div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
-                <div className="flex w-full items-start justify-between gap-4">
-                  <div className="flex-1">
-                    <Text variant="large-semibold">
-                      Human-in-the-loop approval
-                    </Text>
-                    <Text variant="large" className="mt-1 text-zinc-900">
-                      The agent will pause at human-in-the-loop blocks and
-                      wait for your review before continuing
-                    </Text>
-                  </div>
-                  <Switch
-                    checked={currentHITLSafeMode || false}
-                    onCheckedChange={handleHITLToggle}
-                    disabled={isPending}
-                    className="mt-1"
-                  />
-                </div>
-              </div>
-            )}
-            {showSensitiveActionToggle && (
-              <div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
-                <div className="flex w-full items-start justify-between gap-4">
-                  <div className="flex-1">
-                    <Text variant="large-semibold">
-                      Sensitive action approval
-                    </Text>
-                    <Text variant="large" className="mt-1 text-zinc-900">
-                      The agent will pause at sensitive action blocks and wait
-                      for your review before continuing
-                    </Text>
-                  </div>
-                  <Switch
-                    checked={currentSensitiveActionSafeMode}
-                    onCheckedChange={handleSensitiveActionToggle}
-                    disabled={isPending}
-                    className="mt-1"
-                  />
-                </div>
-              </div>
-            )}
-          </>
+        {hasHITLBlocks ? (
+          <div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
+            <div className="flex w-full items-start justify-between gap-4">
+              <div className="flex-1">
+                <Text variant="large-semibold">Require human approval</Text>
+                <Text variant="large" className="mt-1 text-zinc-900">
+                  The agent will pause and wait for your review before
+                  continuing
+                </Text>
+              </div>
+              <Switch
+                checked={currentSafeMode || false}
+                onCheckedChange={handleToggle}
+                disabled={isPending}
+                className="mt-1"
+              />
+            </div>
+          </div>
         ) : (
           <div className="rounded-xl border border-zinc-100 bg-white p-6">
             <Text variant="body" className="text-muted-foreground">
@@ -2,7 +2,6 @@
 import { Button } from "@/components/atoms/Button/Button";
 import { FileInput } from "@/components/atoms/FileInput/FileInput";
 import { Input } from "@/components/atoms/Input/Input";
-import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
 import { Dialog } from "@/components/molecules/Dialog/Dialog";
 import {
   Form,
@@ -121,7 +120,7 @@ export default function LibraryUploadAgentDialog() {
 >
   {isUploading ? (
     <div className="flex items-center gap-2">
-      <LoadingSpinner size="small" className="text-white" />
+      <div className="h-4 w-4 animate-spin rounded-full border-b-2 border-t-2 border-white"></div>
       <span>Uploading...</span>
     </div>
   ) : (
@@ -6383,11 +6383,6 @@
       "title": "Has Human In The Loop",
       "readOnly": true
     },
-    "has_sensitive_action": {
-      "type": "boolean",
-      "title": "Has Sensitive Action",
-      "readOnly": true
-    },
     "trigger_setup_info": {
       "anyOf": [
         { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -6404,7 +6399,6 @@
     "output_schema",
     "has_external_trigger",
     "has_human_in_the_loop",
-    "has_sensitive_action",
     "trigger_setup_info"
   ],
   "title": "BaseGraph"
@@ -7635,11 +7629,6 @@
       "title": "Has Human In The Loop",
       "readOnly": true
     },
-    "has_sensitive_action": {
-      "type": "boolean",
-      "title": "Has Sensitive Action",
-      "readOnly": true
-    },
     "trigger_setup_info": {
       "anyOf": [
         { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -7663,7 +7652,6 @@
     "output_schema",
     "has_external_trigger",
     "has_human_in_the_loop",
-    "has_sensitive_action",
     "trigger_setup_info",
     "credentials_input_schema"
   ],
@@ -7742,11 +7730,6 @@
       "title": "Has Human In The Loop",
       "readOnly": true
     },
-    "has_sensitive_action": {
-      "type": "boolean",
-      "title": "Has Sensitive Action",
-      "readOnly": true
-    },
     "trigger_setup_info": {
       "anyOf": [
         { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -7771,7 +7754,6 @@
     "output_schema",
     "has_external_trigger",
     "has_human_in_the_loop",
-    "has_sensitive_action",
     "trigger_setup_info",
     "credentials_input_schema"
   ],
@@ -7780,14 +7762,8 @@
   "GraphSettings": {
     "properties": {
       "human_in_the_loop_safe_mode": {
-        "type": "boolean",
-        "title": "Human In The Loop Safe Mode",
-        "default": true
-      },
-      "sensitive_action_safe_mode": {
-        "type": "boolean",
-        "title": "Sensitive Action Safe Mode",
-        "default": false
+        "anyOf": [{ "type": "boolean" }, { "type": "null" }],
+        "title": "Human In The Loop Safe Mode"
       }
     },
     "type": "object",
@@ -7945,16 +7921,6 @@
     "title": "Has External Trigger",
     "description": "Whether the agent has an external trigger (e.g. webhook) node"
   },
-  "has_human_in_the_loop": {
-    "type": "boolean",
-    "title": "Has Human In The Loop",
-    "description": "Whether the agent has human-in-the-loop blocks"
-  },
-  "has_sensitive_action": {
-    "type": "boolean",
-    "title": "Has Sensitive Action",
-    "description": "Whether the agent has sensitive action blocks"
-  },
   "trigger_setup_info": {
     "anyOf": [
       { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -8001,8 +7967,6 @@
     "output_schema",
     "credentials_input_schema",
     "has_external_trigger",
-    "has_human_in_the_loop",
-    "has_sensitive_action",
     "new_output",
     "can_access_graph",
     "is_latest_version",
@@ -35,13 +35,12 @@ export const CredentialFieldTitle = (props: {
   uiOptions,
 );

-const provider = getCredentialProviderFromSchema(
-  useNodeStore.getState().getHardCodedValues(nodeId),
-  schema as BlockIOCredentialsSubSchema,
-);
-const credentialProvider = provider
-  ? `${toDisplayName(provider)} credential`
-  : "credential";
+const credentialProvider = toDisplayName(
+  getCredentialProviderFromSchema(
+    useNodeStore.getState().getHardCodedValues(nodeId),
+    schema as BlockIOCredentialsSubSchema,
+  ) ?? "",
+);

 const updatedUiSchema = updateUiOption(uiSchema, {
   showHandles: false,
@@ -20,15 +20,11 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean {
   if ("has_human_in_the_loop" in graph) {
     return !!graph.has_human_in_the_loop;
   }
-  return false;
-}
-
-function hasSensitiveActionBlocks(
-  graph: GraphModel | LibraryAgent | Graph,
-): boolean {
-  if ("has_sensitive_action" in graph) {
-    return !!graph.has_sensitive_action;
+  if (isLibraryAgent(graph)) {
+    return graph.settings?.human_in_the_loop_safe_mode !== null;
   }

   return false;
 }

@@ -44,9 +40,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {

   const graphId = getGraphId(graph);
   const isAgent = isLibraryAgent(graph);
-  const showHITLToggle = hasHITLBlocks(graph);
-  const showSensitiveActionToggle = hasSensitiveActionBlocks(graph);
-  const shouldShowToggle = showHITLToggle || showSensitiveActionToggle;
+  const shouldShowToggle = hasHITLBlocks(graph);

   const { mutateAsync: updateGraphSettings, isPending } =
     usePatchV1UpdateGraphSettings();
@@ -62,37 +56,27 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
     },
   );

-  const [localHITLSafeMode, setLocalHITLSafeMode] = useState<boolean>(true);
-  const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] =
-    useState<boolean>(false);
-  const [isLocalStateLoaded, setIsLocalStateLoaded] = useState<boolean>(false);
+  const [localSafeMode, setLocalSafeMode] = useState<boolean | null>(null);

   useEffect(() => {
     if (!isAgent && libraryAgent) {
-      setLocalHITLSafeMode(
-        libraryAgent.settings?.human_in_the_loop_safe_mode ?? true,
-      );
-      setLocalSensitiveActionSafeMode(
-        libraryAgent.settings?.sensitive_action_safe_mode ?? false,
-      );
-      setIsLocalStateLoaded(true);
+      const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode;
+      if (backendValue !== undefined) {
+        setLocalSafeMode(backendValue);
+      }
     }
   }, [isAgent, libraryAgent]);

-  const currentHITLSafeMode = isAgent
-    ? (graph.settings?.human_in_the_loop_safe_mode ?? true)
-    : localHITLSafeMode;
+  const currentSafeMode = isAgent
+    ? graph.settings?.human_in_the_loop_safe_mode
+    : localSafeMode;

-  const currentSensitiveActionSafeMode = isAgent
-    ? (graph.settings?.sensitive_action_safe_mode ?? false)
-    : localSensitiveActionSafeMode;
+  const isStateUndetermined = isAgent
+    ? graph.settings?.human_in_the_loop_safe_mode == null
+    : isLoading || localSafeMode === null;

-  const isHITLStateUndetermined = isAgent
-    ? false
-    : isLoading || !isLocalStateLoaded;
-
-  const handleHITLToggle = useCallback(async () => {
-    const newSafeMode = !currentHITLSafeMode;
+  const handleToggle = useCallback(async () => {
+    const newSafeMode = !currentSafeMode;

     try {
       await updateGraphSettings({
@@ -101,7 +85,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
       });

       if (!isAgent) {
-        setLocalHITLSafeMode(newSafeMode);
+        setLocalSafeMode(newSafeMode);
       }

       if (isAgent) {
@@ -117,62 +101,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
       queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });

       toast({
-        title: `HITL safe mode ${newSafeMode ? "enabled" : "disabled"}`,
+        title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`,
         description: newSafeMode
           ? "Human-in-the-loop blocks will require manual review"
           : "Human-in-the-loop blocks will proceed automatically",
         duration: 2000,
       });
     } catch (error) {
-      handleToggleError(error, isAgent, toast);
-    }
-  }, [
-    currentHITLSafeMode,
-    graphId,
-    isAgent,
-    graph.id,
-    updateGraphSettings,
-    queryClient,
-    toast,
-  ]);
-
-  const handleSensitiveActionToggle = useCallback(async () => {
-    const newSafeMode = !currentSensitiveActionSafeMode;
-
-    try {
-      await updateGraphSettings({
-        graphId,
-        data: { sensitive_action_safe_mode: newSafeMode },
-      });
-
-      if (!isAgent) {
-        setLocalSensitiveActionSafeMode(newSafeMode);
-      }
-
-      if (isAgent) {
-        queryClient.invalidateQueries({
-          queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString())
-            .queryKey,
-        });
-      }
-
-      queryClient.invalidateQueries({
-        queryKey: ["v1", "graphs", graphId, "executions"],
-      });
-      queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
-
-      toast({
-        title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`,
-        description: newSafeMode
-          ? "Sensitive action blocks will require manual review"
-          : "Sensitive action blocks will proceed automatically",
-        duration: 2000,
-      });
-    } catch (error) {
-      handleToggleError(error, isAgent, toast);
+      const isNotFoundError =
+        error instanceof Error &&
+        (error.message.includes("404") || error.message.includes("not found"));
+
+      if (!isAgent && isNotFoundError) {
+        toast({
+          title: "Safe mode not available",
+          description:
+            "To configure safe mode, please save this graph to your library first.",
+          variant: "destructive",
+        });
+      } else {
+        toast({
+          title: "Failed to update safe mode",
+          description:
+            error instanceof Error
+              ? error.message
+              : "An unexpected error occurred.",
+          variant: "destructive",
+        });
+      }
     }
   }, [
-    currentSensitiveActionSafeMode,
+    currentSafeMode,
     graphId,
     isAgent,
     graph.id,
@@ -182,53 +141,11 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
   ]);

   return {
-    // HITL safe mode
-    currentHITLSafeMode,
-    showHITLToggle,
-    isHITLStateUndetermined,
-    handleHITLToggle,
-
-    // Sensitive action safe mode
-    currentSensitiveActionSafeMode,
-    showSensitiveActionToggle,
-    handleSensitiveActionToggle,
-
-    // General
+    currentSafeMode,
     isPending,
     shouldShowToggle,
-
-    // Backwards compatibility
-    currentSafeMode: currentHITLSafeMode,
-    isStateUndetermined: isHITLStateUndetermined,
-    handleToggle: handleHITLToggle,
-    hasHITLBlocks: showHITLToggle,
+    isStateUndetermined,
+    handleToggle,
+    hasHITLBlocks: shouldShowToggle,
   };
 }
-
-function handleToggleError(
-  error: unknown,
-  isAgent: boolean,
-  toast: ReturnType<typeof useToast>["toast"],
-) {
-  const isNotFoundError =
-    error instanceof Error &&
-    (error.message.includes("404") || error.message.includes("not found"));
-
-  if (!isAgent && isNotFoundError) {
-    toast({
-      title: "Safe mode not available",
-      description:
-        "To configure safe mode, please save this graph to your library first.",
-      variant: "destructive",
-    });
-  } else {
-    toast({
-      title: "Failed to update safe mode",
-      description:
-        error instanceof Error
-          ? error.message
-          : "An unexpected error occurred.",
-      variant: "destructive",
-    });
-  }
-}
@@ -5,7 +5,7 @@ import isEqual from "lodash/isEqual";
 export function cleanNode(node: CustomNode) {
   return {
     id: node.id,
-    // Note: position is intentionally excluded to prevent draft saves when dragging nodes
+    position: node.position,
     data: {
       hardcodedValues: node.data.hardcodedValues,
       title: node.data.title,
@@ -1 +1,28 @@
-# Video editing blocks
+"""Video editing blocks for AutoGPT Platform.
+
+This module provides blocks for:
+- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
+- Clipping/trimming video segments
+- Concatenating multiple videos
+- Adding text overlays
+- Adding AI-generated narration
+
+Dependencies:
+- yt-dlp: For video downloading
+- moviepy: For video editing operations
+- requests: For API calls (narration block)
+"""
+
+from .download import VideoDownloadBlock
+from .clip import VideoClipBlock
+from .concat import VideoConcatBlock
+from .text_overlay import VideoTextOverlayBlock
+from .narration import VideoNarrationBlock
+
+__all__ = [
+    "VideoClipBlock",
+    "VideoConcatBlock",
+    "VideoDownloadBlock",
+    "VideoNarrationBlock",
+    "VideoTextOverlayBlock",
+]
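A minimal sketch of how one of these blocks might be driven outside the platform. The file path and the harness around run() are assumptions for illustration, not part of the diff; run() is an async generator that yields (output_name, value) pairs.

import asyncio

from backend.blocks.video import VideoClipBlock


async def main() -> None:
    block = VideoClipBlock()
    outputs = {}
    # Collect every (name, value) pair the block yields.
    async for name, value in block.run(
        block.Input(video_in="/tmp/test.mp4", start_time=0.0, end_time=10.0)  # hypothetical file
    ):
        outputs[name] = value
    print(outputs["video_out"], outputs["duration"])


asyncio.run(main())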
backend/blocks/video/clip.py (new file, 93 lines)
@@ -0,0 +1,93 @@
"""
VideoClipBlock - Extract a segment from a video file
"""
import uuid

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        video_in: str = SchemaField(
            description="Input video (URL, data URI, or file path)",
            json_schema_extra={"format": "file"}
        )
        start_time: float = SchemaField(
            description="Start time in seconds",
            ge=0.0
        )
        end_time: float = SchemaField(
            description="End time in seconds",
            ge=0.0
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Clipped video file",
            json_schema_extra={"format": "file"}
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="b2c3d4e5-f6a7-8901-bcde-f23456789012",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "start_time": 0.0, "end_time": 10.0},
            test_output=[("video_out", str), ("duration", float)],
            test_mock={"_clip_video": lambda *args: ("/tmp/clip.mp4", 10.0)}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Validate time range
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id)
            )

        try:
            from moviepy.video.io.VideoFileClip import VideoFileClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        clip = None
        subclip = None
        try:
            clip = VideoFileClip(input_data.video_in)
            subclip = clip.subclip(input_data.start_time, input_data.end_time)

            output_path = f"/tmp/clip_{uuid.uuid4()}.{input_data.output_format}"
            subclip.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "duration", subclip.duration

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            if subclip:
                subclip.close()
            if clip:
                clip.close()
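A hedged test sketch for the time-range validation above, assuming pytest with pytest-asyncio and that Input can be constructed directly; the file path is illustrative.

import pytest

from backend.blocks.video.clip import VideoClipBlock
from backend.util.exceptions import BlockExecutionError


@pytest.mark.asyncio
async def test_rejects_inverted_time_range():
    block = VideoClipBlock()
    bad_input = block.Input(video_in="/tmp/test.mp4", start_time=10.0, end_time=5.0)
    with pytest.raises(BlockExecutionError):
        # Drain the async generator so the validation code actually runs.
        async for _ in block.run(bad_input):
            pass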
backend/blocks/video/concat.py (new file, 123 lines)
@@ -0,0 +1,123 @@
|
"""
|
||||||
|
VideoConcatBlock - Concatenate multiple video clips into one
|
||||||
|
"""
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from backend.data.block import Block, BlockCategory, BlockOutput
|
||||||
|
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
|
||||||
|
|
||||||
|
class VideoConcatBlock(Block):
|
||||||
|
"""Merge multiple video clips into one continuous video."""
|
||||||
|
|
    class Input(BlockSchemaInput):
        videos: list[str] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: str = SchemaField(
            description="Transition between clips",
            default="none",
            enum=["none", "crossfade", "fade_black"]
        )
        transition_duration: float = SchemaField(
            description="Transition duration in seconds",
            default=0.5,
            advanced=True
        )
        output_format: str = SchemaField(
            description="Output format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Concatenated video file",
            json_schema_extra={"format": "file"}
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="c3d4e5f6-a7b8-9012-cdef-345678901234",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"videos": ["/tmp/a.mp4", "/tmp/b.mp4"]},
            test_output=[("video_out", str), ("total_duration", float)],
            test_mock={"_concat_videos": lambda *args: ("/tmp/concat.mp4", 20.0)}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            from moviepy.editor import VideoFileClip, concatenate_videoclips
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id)
            )

        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips one by one to handle partial failures
            for v in input_data.videos:
                clips.append(VideoFileClip(v))

            if input_data.transition == "crossfade":
                # Apply crossfade between clips using crossfadein/crossfadeout
                transition_dur = input_data.transition_duration
                for i, clip in enumerate(clips):
                    if i > 0:
                        clip = clip.crossfadein(transition_dur)
                    if i < len(clips) - 1:
                        clip = clip.crossfadeout(transition_dur)
                    faded_clips.append(clip)
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_dur
                )
            elif input_data.transition == "fade_black":
                # Fade to black between clips
                for clip in clips:
                    faded = clip.fadein(input_data.transition_duration).fadeout(
                        input_data.transition_duration
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            output_path = f"/tmp/concat_{uuid.uuid4()}.{input_data.output_format}"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "total_duration", final.duration

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()
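The crossfade branch above relies on moviepy's compose-mode concatenation: a negative `padding` makes each clip start before the previous one ends, and `crossfadein` blends the overlapping frames. A minimal sketch of that behavior, assuming moviepy 1.x and synthetic `ColorClip` inputs:

```python
# Minimal sketch of compose-mode crossfade concatenation (assumes moviepy 1.x).
from moviepy.editor import ColorClip, concatenate_videoclips

t = 0.5  # transition duration, as in transition_duration above
a = ColorClip((320, 240), color=(255, 0, 0), duration=4)
b = ColorClip((320, 240), color=(0, 0, 255), duration=4).crossfadein(t)

merged = concatenate_videoclips(
    [a, b],
    method="compose",  # composite frames so the overlapping region blends
    padding=-t,        # negative padding starts b t seconds before a ends
)
# The clips overlap by t, so the result is 4 + 4 - 0.5 = 7.5 seconds.
assert abs(merged.duration - 7.5) < 1e-6
```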
102
backend/blocks/video/download.py
Normal file
@@ -0,0 +1,102 @@
"""
VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=..."
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference",
            default="720p"
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format",
            default="mp4",
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_file: str = SchemaField(
            description="Path or data URI of downloaded video",
            json_schema_extra={"format": "file"}
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="a1b2c3d4-e5f6-7890-abcd-ef1234567890",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ", "quality": "480p"},
            test_output=[("video_file", str), ("duration", float), ("title", str), ("source_url", str)],
            test_mock={"_download_video": lambda *args: ("/tmp/video.mp4", 212.0, "Test Video")}
        )

    def _get_format_string(self, quality: str) -> str:
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best"
        }
        return formats.get(quality, formats["720p"])

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            import yt_dlp
        except ImportError as e:
            raise BlockExecutionError(
                message="yt-dlp is not installed. Please install it with: pip install yt-dlp",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        video_id = str(uuid.uuid4())[:8]
        output_template = f"/tmp/{video_id}.%(ext)s"

        ydl_opts = {
            "format": self._get_format_string(input_data.quality),
            "outtmpl": output_template,
            "merge_output_format": input_data.output_format,
            "quiet": True,
            "no_warnings": True,
        }

        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(input_data.url, download=True)
                video_path = ydl.prepare_filename(info)

                # Handle format conversion in filename
                if not video_path.endswith(f".{input_data.output_format}"):
                    video_path = video_path.rsplit(".", 1)[0] + f".{input_data.output_format}"

                yield "video_file", video_path
                yield "duration", info.get("duration") or 0.0
                yield "title", info.get("title") or "Unknown"
                yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
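`_get_format_string` maps the quality presets onto yt-dlp format selectors: `height<=N` filters candidate streams and `+` merges separate video and audio tracks. A quick way to see which streams a selector resolves to without downloading anything (a sketch; the URL is just the test fixture above):

```python
# Inspect what the "480p" selector resolves to, metadata only (download=False).
import yt_dlp

opts = {
    "format": "bestvideo[height<=480]+bestaudio/best[height<=480]",
    "quiet": True,
}
with yt_dlp.YoutubeDL(opts) as ydl:
    info = ydl.extract_info("https://www.youtube.com/watch?v=dQw4w9WgXcQ", download=False)
    # requested_formats is set when video and audio come from separate streams
    for f in info.get("requested_formats") or [info]:
        print(f.get("format_id"), f.get("height"), f.get("vcodec"), f.get("acodec"))
```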
167
backend/blocks/video/narration.py
Normal file
@@ -0,0 +1,167 @@
"""
VideoNarrationBlock - Generate AI voice narration and add to video
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField, CredentialsMetaInput, APIKeyCredentials
from backend.integrations.providers import ProviderName
from backend.util.exceptions import BlockExecutionError


class VideoNarrationBlock(Block):
    """Generate AI narration and add to video."""

    class Input(BlockSchemaInput):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.ELEVENLABS], Literal["api_key"]
        ] = SchemaField(
            description="ElevenLabs API key for voice synthesis"
        )
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"}
        )
        script: str = SchemaField(
            description="Narration script text"
        )
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID",
            default="21m00Tcm4TlvDq8ikWAM"  # Rachel
        )
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio",
            default="ducking"
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with narration",
            json_schema_extra={"format": "file"}
        )
        audio_file: str = SchemaField(
            description="Generated audio file",
            json_schema_extra={"format": "file"}
        )

    def __init__(self):
        super().__init__(
            id="e5f6a7b8-c9d0-1234-ef56-789012345678",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": {"provider": "elevenlabs", "id": "test", "type": "api_key"}
            },
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={"_generate_narration": lambda *args: ("/tmp/narrated.mp4", "/tmp/audio.mp3")}
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs
    ) -> BlockOutput:
        try:
            import requests
            from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
        except ImportError as e:
            raise BlockExecutionError(
                message=f"Missing dependency: {e}. Install moviepy and requests.",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        video = None
        final = None
        narration = None
        try:
            # Generate narration via ElevenLabs
            response = requests.post(
                f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}",
                headers={
                    "xi-api-key": credentials.api_key.get_secret_value(),
                    "Content-Type": "application/json"
                },
                json={
                    "text": input_data.script,
                    "model_id": "eleven_monolingual_v1"
                },
                timeout=120
            )
            response.raise_for_status()

            audio_path = f"/tmp/narration_{uuid.uuid4()}.mp3"
            with open(audio_path, "wb") as f:
                f.write(response.content)

            # Combine with video
            video = VideoFileClip(input_data.video_in)
            narration = AudioFileClip(audio_path)
            narration = narration.volumex(input_data.narration_volume)

            if input_data.mix_mode == "replace":
                final_audio = narration
            elif input_data.mix_mode == "mix":
                if video.audio:
                    original = video.audio.volumex(input_data.original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - lower original volume more when narration plays
                if video.audio:
                    # Apply stronger attenuation for ducking effect
                    ducking_volume = input_data.original_volume * 0.3
                    original = video.audio.volumex(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.set_audio(final_audio)

            output_path = f"/tmp/narrated_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path
            yield "audio_file", audio_path

        except requests.exceptions.RequestException as e:
            raise BlockExecutionError(
                message=f"ElevenLabs API error: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            if narration:
                narration.close()
            if final:
                final.close()
            if video:
                video.close()
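The ducking branch approximates sidechain compression by scaling the original track to `original_volume * 0.3` for the whole clip rather than only while narration plays. A sketch of the same mix in isolation (moviepy 1.x assumed; file paths are placeholders):

```python
# Duck the original soundtrack under a narration track (assumes moviepy 1.x).
from moviepy.editor import AudioFileClip, CompositeAudioClip, VideoFileClip

video = VideoFileClip("/tmp/input.mp4")          # placeholder path
narration = AudioFileClip("/tmp/narration.mp3")  # placeholder path

ducked = video.audio.volumex(0.3 * 0.3)  # original_volume (0.3) * ducking factor (0.3)
mixed = CompositeAudioClip([ducked, narration.volumex(1.0)])

video.set_audio(mixed).write_videofile("/tmp/ducked.mp4", logger=None)
```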
149
backend/blocks/video/text_overlay.py
Normal file
@@ -0,0 +1,149 @@
"""
VideoTextOverlayBlock - Add text overlay to video
"""
import uuid
from typing import Literal

from backend.data.block import Block, BlockCategory, BlockOutput
from backend.data.block import BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError


class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: str = SchemaField(
            description="Input video file",
            json_schema_extra={"format": "file"}
        )
        text: str = SchemaField(
            description="Text to overlay on video"
        )
        position: Literal[
            "top", "center", "bottom",
            "top-left", "top-right",
            "bottom-left", "bottom-right"
        ] = SchemaField(
            description="Position of text on screen",
            default="bottom"
        )
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True
        )
        font_size: int = SchemaField(
            description="Font size",
            default=48,
            ge=12,
            le=200,
            advanced=True
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)",
            default="white",
            advanced=True
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Video with text overlay",
            json_schema_extra={"format": "file"}
        )

    def __init__(self):
        super().__init__(
            id="d4e5f6a7-b8c9-0123-def4-567890123456",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={"_add_text": lambda *args: "/tmp/overlay.mp4"}
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
        except ImportError as e:
            raise BlockExecutionError(
                message="moviepy is not installed. Please install it with: pip install moviepy",
                block_name=self.name,
                block_id=str(self.id)
            ) from e

        # Validate time range if both are provided
        if (input_data.start_time is not None and
                input_data.end_time is not None and
                input_data.end_time <= input_data.start_time):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id)
            )

        video = None
        final = None
        txt_clip = None
        try:
            video = VideoFileClip(input_data.video_in)

            txt_clip = TextClip(
                input_data.text,
                fontsize=input_data.font_size,
                color=input_data.font_color,
                # Fall back to a transparent background when no color is set;
                # TextClip does not accept None here.
                bg_color=input_data.bg_color or "transparent",
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.set_position(pos_map[input_data.position])

            # Set timing (set_duration alone is enough; it fixes the end time too)
            start = input_data.start_time or 0
            end = input_data.end_time or video.duration
            duration = max(0, end - start)
            txt_clip = txt_clip.set_start(start).set_duration(duration)

            final = CompositeVideoClip([video, txt_clip])

            output_path = f"/tmp/overlay_{uuid.uuid4()}.mp4"
            final.write_videofile(output_path, logger=None)

            yield "video_out", output_path

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id)
            ) from e
        finally:
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()
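`TextClip` renders through ImageMagick, which must be installed on the host for the overlay block to work; positions are the same `(horizontal, vertical)` tuples produced by `pos_map`. A standalone sketch of the compositing above (moviepy 1.x assumed; paths are placeholders):

```python
# Overlay a timed caption on a video (moviepy 1.x; requires ImageMagick).
from moviepy.editor import CompositeVideoClip, TextClip, VideoFileClip

video = VideoFileClip("/tmp/input.mp4")  # placeholder path
caption = (
    TextClip("Hello World", fontsize=48, color="white", bg_color="black")
    .set_position(("center", "bottom"))  # same tuple form as pos_map above
    .set_start(1.0)
    .set_duration(3.0)                   # visible from t=1s to t=4s
)
CompositeVideoClip([video, caption]).write_videofile("/tmp/captioned.mp4", logger=None)
```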
@@ -1,44 +0,0 @@
# Documentation Guidelines

## Block Documentation Manual Sections

When updating manual sections (`<!-- MANUAL: ... -->`) in block documentation files (e.g., `docs/integrations/basic.md`), follow these formats:

### How It Works Section

Provide a technical explanation of how the block functions:

- Describe the processing logic in 1-2 paragraphs
- Mention any validation, error handling, or edge cases
- Use code examples with backticks when helpful (e.g., `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`)

Example:

```markdown
<!-- MANUAL: how_it_works -->
The block iterates through each list in the input and extends a result list with all elements from each one. It processes lists in order, so `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`.

The block includes validation to ensure each item is actually a list. If a non-list value is encountered, the block outputs an error message instead of proceeding.
<!-- END MANUAL -->
```

### Use Case Section

Provide 3 practical use cases in this format:

- **Bold Heading**: Short one-sentence description

Example:

```markdown
<!-- MANUAL: use_case -->
**Paginated API Merging**: Combine results from multiple API pages into a single list for batch processing or display.

**Parallel Task Aggregation**: Merge outputs from parallel workflow branches that each produce a list of results.

**Multi-Source Data Collection**: Combine data collected from different sources (like multiple RSS feeds or API endpoints) into one unified list.
<!-- END MANUAL -->
```

### Style Guidelines

- Keep descriptions concise and action-oriented
- Focus on practical, real-world scenarios
- Use consistent terminology with other blocks
- Avoid overly technical jargon unless necessary
@@ -1,559 +0,0 @@
# AutoGPT Blocks Overview

AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.

!!! info "Creating Your Own Blocks"
    Want to create your own custom blocks? Check out our guides:

    - [Build your own Blocks](https://docs.agpt.co/platform/new_blocks/) - Step-by-step tutorial with examples
    - [Block SDK Guide](https://docs.agpt.co/platform/block-sdk-guide/) - Advanced SDK patterns with OAuth, webhooks, and provider configuration

Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation.

## Basic Operations

| Block Name | Description |
|------------|-------------|
| [Add Memory](basic.md#add-memory) | Add new memories to Mem0 with user segmentation |
| [Add To Dictionary](basic.md#add-to-dictionary) | Adds a new key-value pair to a dictionary |
| [Add To Library From Store](system/library_operations.md#add-to-library-from-store) | Add an agent from the store to your personal library |
| [Add To List](basic.md#add-to-list) | Adds a new entry to a list |
| [Agent Date Input](basic.md#agent-date-input) | Block for date input |
| [Agent Dropdown Input](basic.md#agent-dropdown-input) | Block for dropdown text selection |
| [Agent File Input](basic.md#agent-file-input) | Block for file upload input (string path for example) |
| [Agent Google Drive File Input](basic.md#agent-google-drive-file-input) | Block for selecting a file from Google Drive |
| [Agent Input](basic.md#agent-input) | A block that accepts and processes user input values within a workflow, supporting various input types and validation |
| [Agent Long Text Input](basic.md#agent-long-text-input) | Block for long text input (multi-line) |
| [Agent Number Input](basic.md#agent-number-input) | Block for number input |
| [Agent Output](basic.md#agent-output) | A block that records and formats workflow results for display to users, with optional Jinja2 template formatting support |
| [Agent Short Text Input](basic.md#agent-short-text-input) | Block for short text input (single-line) |
| [Agent Table Input](basic.md#agent-table-input) | Block for table data input with customizable headers |
| [Agent Time Input](basic.md#agent-time-input) | Block for time input |
| [Agent Toggle Input](basic.md#agent-toggle-input) | Block for boolean toggle input |
| [Block Installation](basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
| [Concatenate Lists](basic.md#concatenate-lists) | Concatenates multiple lists into a single list |
| [Dictionary Is Empty](basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
| [File Store](basic.md#file-store) | Stores the input file in the temporary directory |
| [Find In Dictionary](basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
| [Find In List](basic.md#find-in-list) | Finds the index of the value in the list |
| [Get All Memories](basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
| [Get Latest Memory](basic.md#get-latest-memory) | Retrieve the latest memory from Mem0 with optional key filtering |
| [Get List Item](basic.md#get-list-item) | Returns the element at the given index |
| [Get Store Agent Details](system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [Linear Search Issues](linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [List Is Empty](basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
| [Print To Console](basic.md#print-to-console) | A debugging block that outputs text to the console for monitoring and troubleshooting workflow execution |
| [Remove From Dictionary](basic.md#remove-from-dictionary) | Removes a key-value pair from a dictionary |
| [Remove From List](basic.md#remove-from-list) | Removes an item from a list by value or index |
| [Replace Dictionary Value](basic.md#replace-dictionary-value) | Replaces the value for a specified key in a dictionary |
| [Replace List Item](basic.md#replace-list-item) | Replaces an item at the specified index |
| [Reverse List Order](basic.md#reverse-list-order) | Reverses the order of elements in a list |
| [Search Memory](basic.md#search-memory) | Search memories in Mem0 by user |
| [Search Store Agents](system/store_operations.md#search-store-agents) | Search for agents in the store |
| [Slant3D Cancel Order](slant3d/order.md#slant3d-cancel-order) | Cancel an existing order |
| [Slant3D Create Order](slant3d/order.md#slant3d-create-order) | Create a new print order |
| [Slant3D Estimate Order](slant3d/order.md#slant3d-estimate-order) | Get order cost estimate |
| [Slant3D Estimate Shipping](slant3d/order.md#slant3d-estimate-shipping) | Get shipping cost estimate |
| [Slant3D Filament](slant3d/filament.md#slant3d-filament) | Get list of available filaments |
| [Slant3D Get Orders](slant3d/order.md#slant3d-get-orders) | Get all orders for the account |
| [Slant3D Slicer](slant3d/slicing.md#slant3d-slicer) | Slice a 3D model file and get pricing information |
| [Slant3D Tracking](slant3d/order.md#slant3d-tracking) | Track order status and shipping |
| [Store Value](basic.md#store-value) | A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks |
| [Universal Type Converter](basic.md#universal-type-converter) | This block is used to convert a value to a universal type |
| [XML Parser](basic.md#xml-parser) | Parses XML using gravitasml to tokenize and converts it to a dict |
## Data Processing

| Block Name | Description |
|------------|-------------|
| [Airtable Create Base](airtable/bases.md#airtable-create-base) | Create or find a base in Airtable |
| [Airtable Create Field](airtable/schema.md#airtable-create-field) | Add a new field to an Airtable table |
| [Airtable Create Records](airtable/records.md#airtable-create-records) | Create records in an Airtable table |
| [Airtable Create Table](airtable/schema.md#airtable-create-table) | Create a new table in an Airtable base |
| [Airtable Delete Records](airtable/records.md#airtable-delete-records) | Delete records from an Airtable table |
| [Airtable Get Record](airtable/records.md#airtable-get-record) | Get a single record from Airtable |
| [Airtable List Bases](airtable/bases.md#airtable-list-bases) | List all bases in Airtable |
| [Airtable List Records](airtable/records.md#airtable-list-records) | List records from an Airtable table |
| [Airtable List Schema](airtable/schema.md#airtable-list-schema) | Get the complete schema of an Airtable base |
| [Airtable Update Field](airtable/schema.md#airtable-update-field) | Update field properties in an Airtable table |
| [Airtable Update Records](airtable/records.md#airtable-update-records) | Update records in an Airtable table |
| [Airtable Update Table](airtable/schema.md#airtable-update-table) | Update table properties |
| [Airtable Webhook Trigger](airtable/triggers.md#airtable-webhook-trigger) | Starts a flow whenever Airtable emits a webhook event |
| [Baas Bot Delete Recording](baas/bots.md#baas-bot-delete-recording) | Permanently delete a meeting's recorded data |
| [Baas Bot Fetch Meeting Data](baas/bots.md#baas-bot-fetch-meeting-data) | Retrieve recorded meeting data |
| [Create Dictionary](data.md#create-dictionary) | Creates a dictionary with the specified key-value pairs |
| [Create List](data.md#create-list) | Creates a list with the specified values |
| [Data For Seo Keyword Suggestions](dataforseo/keyword_suggestions.md#data-for-seo-keyword-suggestions) | Get keyword suggestions from DataForSEO Labs Google API |
| [Data For Seo Related Keywords](dataforseo/related_keywords.md#data-for-seo-related-keywords) | Get related keywords from DataForSEO Labs Google API |
| [Exa Create Import](exa/websets_import_export.md#exa-create-import) | Import CSV data to use with websets for targeted searches |
| [Exa Delete Import](exa/websets_import_export.md#exa-delete-import) | Delete an import |
| [Exa Export Webset](exa/websets_import_export.md#exa-export-webset) | Export webset data in JSON, CSV, or JSON Lines format |
| [Exa Get Import](exa/websets_import_export.md#exa-get-import) | Get the status and details of an import |
| [Exa Get New Items](exa/websets_items.md#exa-get-new-items) | Get items added since a cursor - enables incremental processing without reprocessing |
| [Exa List Imports](exa/websets_import_export.md#exa-list-imports) | List all imports with pagination support |
| [File Read](data.md#file-read) | Reads a file and returns its content as a string, with optional chunking by delimiter and size limits |
| [Google Calendar Read Events](google/calendar.md#google-calendar-read-events) | Retrieves upcoming events from a Google Calendar with filtering options |
| [Google Docs Append Markdown](google/docs.md#google-docs-append-markdown) | Append Markdown content to the end of a Google Doc with full formatting - ideal for LLM/AI output |
| [Google Docs Append Plain Text](google/docs.md#google-docs-append-plain-text) | Append plain text to the end of a Google Doc (no formatting applied) |
| [Google Docs Create](google/docs.md#google-docs-create) | Create a new Google Doc |
| [Google Docs Delete Content](google/docs.md#google-docs-delete-content) | Delete a range of content from a Google Doc |
| [Google Docs Export](google/docs.md#google-docs-export) | Export a Google Doc to PDF, Word, text, or other formats |
| [Google Docs Find Replace Plain Text](google/docs.md#google-docs-find-replace-plain-text) | Find and replace plain text in a Google Doc (no formatting applied to replacement) |
| [Google Docs Format Text](google/docs.md#google-docs-format-text) | Apply formatting (bold, italic, color, etc.) |
| [Google Docs Get Metadata](google/docs.md#google-docs-get-metadata) | Get metadata about a Google Doc |
| [Google Docs Get Structure](google/docs.md#google-docs-get-structure) | Get document structure with index positions for precise editing operations |
| [Google Docs Insert Markdown At](google/docs.md#google-docs-insert-markdown-at) | Insert formatted Markdown at a specific position in a Google Doc - ideal for LLM/AI output |
| [Google Docs Insert Page Break](google/docs.md#google-docs-insert-page-break) | Insert a page break into a Google Doc |
| [Google Docs Insert Plain Text](google/docs.md#google-docs-insert-plain-text) | Insert plain text at a specific position in a Google Doc (no formatting applied) |
| [Google Docs Insert Table](google/docs.md#google-docs-insert-table) | Insert a table into a Google Doc, optionally with content and Markdown formatting |
| [Google Docs Read](google/docs.md#google-docs-read) | Read text content from a Google Doc |
| [Google Docs Replace All With Markdown](google/docs.md#google-docs-replace-all-with-markdown) | Replace entire Google Doc content with formatted Markdown - ideal for LLM/AI output |
| [Google Docs Replace Content With Markdown](google/docs.md#google-docs-replace-content-with-markdown) | Find text and replace it with formatted Markdown - ideal for LLM/AI output and templates |
| [Google Docs Replace Range With Markdown](google/docs.md#google-docs-replace-range-with-markdown) | Replace a specific index range in a Google Doc with formatted Markdown - ideal for LLM/AI output |
| [Google Docs Set Public Access](google/docs.md#google-docs-set-public-access) | Make a Google Doc public or private |
| [Google Docs Share](google/docs.md#google-docs-share) | Share a Google Doc with specific users |
| [Google Sheets Add Column](google/sheets.md#google-sheets-add-column) | Add a new column with a header |
| [Google Sheets Add Dropdown](google/sheets.md#google-sheets-add-dropdown) | Add a dropdown list (data validation) to cells |
| [Google Sheets Add Note](google/sheets.md#google-sheets-add-note) | Add a note to a cell in a Google Sheet |
| [Google Sheets Append Row](google/sheets.md#google-sheets-append-row) | Append or Add a single row to the end of a Google Sheet |
| [Google Sheets Batch Operations](google/sheets.md#google-sheets-batch-operations) | This block performs multiple operations on a Google Sheets spreadsheet in a single batch request |
| [Google Sheets Clear](google/sheets.md#google-sheets-clear) | This block clears data from a specified range in a Google Sheets spreadsheet |
| [Google Sheets Copy To Spreadsheet](google/sheets.md#google-sheets-copy-to-spreadsheet) | Copy a sheet from one spreadsheet to another |
| [Google Sheets Create Named Range](google/sheets.md#google-sheets-create-named-range) | Create a named range to reference cells by name instead of A1 notation |
| [Google Sheets Create Spreadsheet](google/sheets.md#google-sheets-create-spreadsheet) | This block creates a new Google Sheets spreadsheet with specified sheets |
| [Google Sheets Delete Column](google/sheets.md#google-sheets-delete-column) | Delete a column by header name or column letter |
| [Google Sheets Delete Rows](google/sheets.md#google-sheets-delete-rows) | Delete specific rows from a Google Sheet by their row indices |
| [Google Sheets Export Csv](google/sheets.md#google-sheets-export-csv) | Export a Google Sheet as CSV data |
| [Google Sheets Filter Rows](google/sheets.md#google-sheets-filter-rows) | Filter rows in a Google Sheet based on a column condition |
| [Google Sheets Find](google/sheets.md#google-sheets-find) | Find text in a Google Sheets spreadsheet |
| [Google Sheets Find Replace](google/sheets.md#google-sheets-find-replace) | This block finds and replaces text in a Google Sheets spreadsheet |
| [Google Sheets Format](google/sheets.md#google-sheets-format) | Format a range in a Google Sheet (sheet optional) |
| [Google Sheets Get Column](google/sheets.md#google-sheets-get-column) | Extract all values from a specific column |
| [Google Sheets Get Notes](google/sheets.md#google-sheets-get-notes) | Get notes from cells in a Google Sheet |
| [Google Sheets Get Row](google/sheets.md#google-sheets-get-row) | Get a specific row by its index |
| [Google Sheets Get Row Count](google/sheets.md#google-sheets-get-row-count) | Get row count and dimensions of a Google Sheet |
| [Google Sheets Get Unique Values](google/sheets.md#google-sheets-get-unique-values) | Get unique values from a column |
| [Google Sheets Import Csv](google/sheets.md#google-sheets-import-csv) | Import CSV data into a Google Sheet |
| [Google Sheets Insert Row](google/sheets.md#google-sheets-insert-row) | Insert a single row at a specific position |
| [Google Sheets List Named Ranges](google/sheets.md#google-sheets-list-named-ranges) | List all named ranges in a spreadsheet |
| [Google Sheets Lookup Row](google/sheets.md#google-sheets-lookup-row) | Look up a row by finding a value in a specific column |
| [Google Sheets Manage Sheet](google/sheets.md#google-sheets-manage-sheet) | Create, delete, or copy sheets (sheet optional) |
| [Google Sheets Metadata](google/sheets.md#google-sheets-metadata) | This block retrieves metadata about a Google Sheets spreadsheet including sheet names and properties |
| [Google Sheets Protect Range](google/sheets.md#google-sheets-protect-range) | Protect a cell range or entire sheet from editing |
| [Google Sheets Read](google/sheets.md#google-sheets-read) | A block that reads data from a Google Sheets spreadsheet using A1 notation range selection |
| [Google Sheets Remove Duplicates](google/sheets.md#google-sheets-remove-duplicates) | Remove duplicate rows based on specified columns |
| [Google Sheets Set Public Access](google/sheets.md#google-sheets-set-public-access) | Make a Google Spreadsheet public or private |
| [Google Sheets Share Spreadsheet](google/sheets.md#google-sheets-share-spreadsheet) | Share a Google Spreadsheet with users or get a shareable link |
| [Google Sheets Sort](google/sheets.md#google-sheets-sort) | Sort a Google Sheet by one or two columns |
| [Google Sheets Update Cell](google/sheets.md#google-sheets-update-cell) | Update a single cell in a Google Sheets spreadsheet |
| [Google Sheets Update Row](google/sheets.md#google-sheets-update-row) | Update a specific row by its index |
| [Google Sheets Write](google/sheets.md#google-sheets-write) | A block that writes data to a Google Sheets spreadsheet at a specified A1 notation range |
| [Keyword Suggestion Extractor](dataforseo/keyword_suggestions.md#keyword-suggestion-extractor) | Extract individual fields from a KeywordSuggestion object |
| [Persist Information](data.md#persist-information) | Persist key-value information for the current user |
| [Read Spreadsheet](data.md#read-spreadsheet) | Reads CSV and Excel files and outputs the data as a list of dictionaries and individual rows |
| [Related Keyword Extractor](dataforseo/related_keywords.md#related-keyword-extractor) | Extract individual fields from a RelatedKeyword object |
| [Retrieve Information](data.md#retrieve-information) | Retrieve key-value information for the current user |
| [Screenshot Web Page](data.md#screenshot-web-page) | Takes a screenshot of a specified website using ScreenshotOne API |
## Text Processing

| Block Name | Description |
|------------|-------------|
| [Code Extraction](text.md#code-extraction) | Extracts code blocks from text and identifies their programming languages |
| [Combine Texts](text.md#combine-texts) | This block combines multiple input texts into a single output text |
| [Countdown Timer](text.md#countdown-timer) | This block triggers after a specified duration |
| [Extract Text Information](text.md#extract-text-information) | This block extracts the text from the given text using the pattern (regex) |
| [Fill Text Template](text.md#fill-text-template) | This block formats the given texts using the format template |
| [Get Current Date](text.md#get-current-date) | This block outputs the current date with an optional offset |
| [Get Current Date And Time](text.md#get-current-date-and-time) | This block outputs the current date and time |
| [Get Current Time](text.md#get-current-time) | This block outputs the current time |
| [Match Text Pattern](text.md#match-text-pattern) | Matches text against a regex pattern and forwards data to positive or negative output based on the match |
| [Text Decoder](text.md#text-decoder) | Decodes a string containing escape sequences into actual text |
| [Text Replace](text.md#text-replace) | This block is used to replace a text with a new text |
| [Text Split](text.md#text-split) | This block is used to split a text into a list of strings |
| [Word Character Count](text.md#word-character-count) | Counts the number of words and characters in a given text |
## AI and Language Models

| Block Name | Description |
|------------|-------------|
| [AI Ad Maker Video Creator](llm.md#ai-ad-maker-video-creator) | Creates an AI‑generated 30‑second advert (text + images) |
| [AI Condition](llm.md#ai-condition) | Uses AI to evaluate natural language conditions and provide conditional outputs |
| [AI Conversation](llm.md#ai-conversation) | A block that facilitates multi-turn conversations with a Large Language Model (LLM), maintaining context across message exchanges |
| [AI Image Customizer](llm.md#ai-image-customizer) | Generate and edit custom images using Google's Nano-Banana model from Gemini 2 |
| [AI Image Editor](llm.md#ai-image-editor) | Edit images using BlackForest Labs' Flux Kontext models |
| [AI Image Generator](llm.md#ai-image-generator) | Generate images using various AI models through a unified interface |
| [AI List Generator](llm.md#ai-list-generator) | A block that creates lists of items based on prompts using a Large Language Model (LLM), with optional source data for context |
| [AI Music Generator](llm.md#ai-music-generator) | This block generates music using Meta's MusicGen model on Replicate |
| [AI Screenshot To Video Ad](llm.md#ai-screenshot-to-video-ad) | Turns a screenshot into an engaging, avatar‑narrated video advert |
| [AI Shortform Video Creator](llm.md#ai-shortform-video-creator) | Creates a shortform video using revid |
| [AI Structured Response Generator](llm.md#ai-structured-response-generator) | A block that generates structured JSON responses using a Large Language Model (LLM), with schema validation and format enforcement |
| [AI Text Generator](llm.md#ai-text-generator) | A block that produces text responses using a Large Language Model (LLM) based on customizable prompts and system instructions |
| [AI Text Summarizer](llm.md#ai-text-summarizer) | A block that summarizes long texts using a Large Language Model (LLM), with configurable focus topics and summary styles |
| [AI Video Generator](fal/ai_video_generator.md#ai-video-generator) | Generate videos using FAL AI models |
| [Bannerbear Text Overlay](bannerbear/text_overlay.md#bannerbear-text-overlay) | Add text overlay to images using Bannerbear templates |
| [Code Generation](llm.md#code-generation) | Generate or refactor code using OpenAI's Codex (Responses API) |
| [Create Talking Avatar Video](llm.md#create-talking-avatar-video) | This block integrates with D-ID to create video clips and retrieve their URLs |
| [Exa Answer](exa/answers.md#exa-answer) | Get an LLM answer to a question informed by Exa search results |
| [Exa Create Enrichment](exa/websets_enrichment.md#exa-create-enrichment) | Create enrichments to extract additional structured data from webset items |
| [Exa Create Research](exa/research.md#exa-create-research) | Create research task with optional waiting - explores web and synthesizes findings with citations |
| [Ideogram Model](llm.md#ideogram-model) | This block runs Ideogram models with both simple and advanced settings |
| [Jina Chunking](jina/chunking.md#jina-chunking) | Chunks texts using Jina AI's segmentation service |
| [Jina Embedding](jina/embeddings.md#jina-embedding) | Generates embeddings using Jina AI |
| [Perplexity](llm.md#perplexity) | Query Perplexity's sonar models with real-time web search capabilities and receive annotated responses with source citations |
| [Replicate Flux Advanced Model](replicate/flux_advanced.md#replicate-flux-advanced-model) | This block runs Flux models on Replicate with advanced settings |
| [Replicate Model](replicate/replicate_block.md#replicate-model) | Run Replicate models synchronously |
| [Smart Decision Maker](llm.md#smart-decision-maker) | Uses AI to intelligently decide what tool to use |
| [Stagehand Act](stagehand/blocks.md#stagehand-act) | Interact with a web page by performing actions on it |
| [Stagehand Extract](stagehand/blocks.md#stagehand-extract) | Extract structured data from a webpage |
| [Stagehand Observe](stagehand/blocks.md#stagehand-observe) | Find suggested actions for your workflows |
| [Unreal Text To Speech](llm.md#unreal-text-to-speech) | Converts text to speech using the Unreal Speech API |
## Search and Information Retrieval

| Block Name | Description |
|------------|-------------|
| [Ask Wolfram](wolfram/llm_api.md#ask-wolfram) | Ask Wolfram Alpha a question |
| [Exa Bulk Webset Items](exa/websets_items.md#exa-bulk-webset-items) | Get all items from a webset in bulk (with configurable limits) |
| [Exa Cancel Enrichment](exa/websets_enrichment.md#exa-cancel-enrichment) | Cancel a running enrichment operation |
| [Exa Cancel Webset](exa/websets.md#exa-cancel-webset) | Cancel all operations being performed on a Webset |
| [Exa Cancel Webset Search](exa/websets_search.md#exa-cancel-webset-search) | Cancel a running webset search |
| [Exa Contents](exa/contents.md#exa-contents) | Retrieves document contents using Exa's contents API |
| [Exa Create Monitor](exa/websets_monitor.md#exa-create-monitor) | Create automated monitors to keep websets updated with fresh data on a schedule |
| [Exa Create Or Find Webset](exa/websets.md#exa-create-or-find-webset) | Create a new webset or return existing one by external_id (idempotent operation) |
| [Exa Create Webset](exa/websets.md#exa-create-webset) | Create a new Exa Webset for persistent web search collections with optional waiting for initial results |
| [Exa Create Webset Search](exa/websets_search.md#exa-create-webset-search) | Add a new search to an existing webset to find more items |
| [Exa Delete Enrichment](exa/websets_enrichment.md#exa-delete-enrichment) | Delete an enrichment from a webset |
| [Exa Delete Monitor](exa/websets_monitor.md#exa-delete-monitor) | Delete a monitor from a webset |
| [Exa Delete Webset](exa/websets.md#exa-delete-webset) | Delete a Webset and all its items |
| [Exa Delete Webset Item](exa/websets_items.md#exa-delete-webset-item) | Delete a specific item from a webset |
| [Exa Find Or Create Search](exa/websets_search.md#exa-find-or-create-search) | Find existing search by query or create new - prevents duplicate searches in workflows |
| [Exa Find Similar](exa/similar.md#exa-find-similar) | Finds similar links using Exa's findSimilar API |
| [Exa Get Enrichment](exa/websets_enrichment.md#exa-get-enrichment) | Get the status and details of a webset enrichment |
| [Exa Get Monitor](exa/websets_monitor.md#exa-get-monitor) | Get the details and status of a webset monitor |
| [Exa Get Research](exa/research.md#exa-get-research) | Get status and results of a research task |
| [Exa Get Webset](exa/websets.md#exa-get-webset) | Retrieve a Webset by ID or external ID |
| [Exa Get Webset Item](exa/websets_items.md#exa-get-webset-item) | Get a specific item from a webset by its ID |
| [Exa Get Webset Search](exa/websets_search.md#exa-get-webset-search) | Get the status and details of a webset search |
| [Exa List Monitors](exa/websets_monitor.md#exa-list-monitors) | List all monitors with optional webset filtering |
| [Exa List Research](exa/research.md#exa-list-research) | List all research tasks with pagination support |
| [Exa List Webset Items](exa/websets_items.md#exa-list-webset-items) | List items in a webset with pagination support |
| [Exa List Websets](exa/websets.md#exa-list-websets) | List all Websets with pagination support |
| [Exa Preview Webset](exa/websets.md#exa-preview-webset) | Preview how a search query will be interpreted before creating a webset |
| [Exa Search](exa/search.md#exa-search) | Searches the web using Exa's advanced search API |
| [Exa Update Enrichment](exa/websets_enrichment.md#exa-update-enrichment) | Update an existing enrichment configuration |
| [Exa Update Monitor](exa/websets_monitor.md#exa-update-monitor) | Update a monitor's status, schedule, or metadata |
| [Exa Update Webset](exa/websets.md#exa-update-webset) | Update metadata for an existing Webset |
| [Exa Wait For Enrichment](exa/websets_polling.md#exa-wait-for-enrichment) | Wait for a webset enrichment to complete with progress tracking |
| [Exa Wait For Research](exa/research.md#exa-wait-for-research) | Wait for a research task to complete with configurable timeout |
| [Exa Wait For Search](exa/websets_polling.md#exa-wait-for-search) | Wait for a specific webset search to complete with progress tracking |
| [Exa Wait For Webset](exa/websets_polling.md#exa-wait-for-webset) | Wait for a webset to reach a specific status with progress tracking |
| [Exa Webset Items Summary](exa/websets_items.md#exa-webset-items-summary) | Get a summary of webset items without retrieving all data |
| [Exa Webset Status](exa/websets.md#exa-webset-status) | Get a quick status overview of a webset |
| [Exa Webset Summary](exa/websets.md#exa-webset-summary) | Get a comprehensive summary of a webset with samples and statistics |
| [Extract Website Content](jina/search.md#extract-website-content) | This block scrapes the content from the given web URL |
| [Fact Checker](jina/fact_checker.md#fact-checker) | This block checks the factuality of a given statement using Jina AI's Grounding API |
| [Firecrawl Crawl](firecrawl/crawl.md#firecrawl-crawl) | Firecrawl crawls websites to extract comprehensive data while bypassing blockers |
| [Firecrawl Extract](firecrawl/extract.md#firecrawl-extract) | Firecrawl crawls websites to extract comprehensive data while bypassing blockers |
| [Firecrawl Map Website](firecrawl/map.md#firecrawl-map-website) | Firecrawl maps a website to extract all the links |
| [Firecrawl Scrape](firecrawl/scrape.md#firecrawl-scrape) | Firecrawl scrapes a website to extract comprehensive data while bypassing blockers |
| [Firecrawl Search](firecrawl/search.md#firecrawl-search) | Firecrawl searches the web for the given query |
| [Get Person Detail](apollo/person.md#get-person-detail) | Get detailed person data with Apollo API, including email reveal |
| [Get Wikipedia Summary](search.md#get-wikipedia-summary) | This block fetches the summary of a given topic from Wikipedia |
| [Google Maps Search](search.md#google-maps-search) | This block searches for local businesses using Google Maps API |
| [Search Organizations](apollo/organization.md#search-organizations) | Search for organizations in Apollo |
| [Search People](apollo/people.md#search-people) | Search for people in Apollo |
| [Search The Web](jina/search.md#search-the-web) | This block searches the internet for the given search query |
| [Validate Emails](zerobounce/validate_emails.md#validate-emails) | Validate emails |
## Social Media and Content

| Block Name | Description |
|------------|-------------|
| [Create Discord Thread](discord/bot_blocks.md#create-discord-thread) | Creates a new thread in a Discord channel |
| [Create Reddit Post](misc.md#create-reddit-post) | Create a new post on a subreddit |
| [Delete Reddit Comment](misc.md#delete-reddit-comment) | Delete a Reddit comment that you own |
| [Delete Reddit Post](misc.md#delete-reddit-post) | Delete a Reddit post that you own |
| [Discord Channel Info](discord/bot_blocks.md#discord-channel-info) | Resolves Discord channel names to IDs and vice versa |
| [Discord Get Current User](discord/oauth_blocks.md#discord-get-current-user) | Gets information about the currently authenticated Discord user using OAuth2 credentials |
| [Discord User Info](discord/bot_blocks.md#discord-user-info) | Gets information about a Discord user by their ID |
| [Edit Reddit Post](misc.md#edit-reddit-post) | Edit the body text of an existing Reddit post that you own |
| [Get Linkedin Profile](enrichlayer/linkedin.md#get-linkedin-profile) | Fetch LinkedIn profile data using Enrichlayer |
| [Get Linkedin Profile Picture](enrichlayer/linkedin.md#get-linkedin-profile-picture) | Get LinkedIn profile pictures using Enrichlayer |
| [Get Reddit Comment](misc.md#get-reddit-comment) | Get details about a specific Reddit comment by its ID |
| [Get Reddit Comment Replies](misc.md#get-reddit-comment-replies) | Get replies to a specific Reddit comment |
| [Get Reddit Inbox](misc.md#get-reddit-inbox) | Get messages, mentions, and comment replies from your Reddit inbox |
| [Get Reddit Post](misc.md#get-reddit-post) | Get detailed information about a specific Reddit post by its ID |
| [Get Reddit Post Comments](misc.md#get-reddit-post-comments) | Get top-level comments on a Reddit post |
| [Get Reddit Posts](misc.md#get-reddit-posts) | This block fetches Reddit posts from a defined subreddit name |
| [Get Reddit User Info](misc.md#get-reddit-user-info) | Get information about a Reddit user including karma, account age, and verification status |
| [Get Subreddit Flairs](misc.md#get-subreddit-flairs) | Get available link flair options for a subreddit |
| [Get Subreddit Info](misc.md#get-subreddit-info) | Get information about a subreddit including subscriber count, description, and rules |
| [Get Subreddit Rules](misc.md#get-subreddit-rules) | Get the rules for a subreddit to ensure compliance before posting |
| [Get User Posts](misc.md#get-user-posts) | Fetch posts by a specific Reddit user |
| [Linkedin Person Lookup](enrichlayer/linkedin.md#linkedin-person-lookup) | Look up LinkedIn profiles by person information using Enrichlayer |
| [Linkedin Role Lookup](enrichlayer/linkedin.md#linkedin-role-lookup) | Look up LinkedIn profiles by role in a company using Enrichlayer |
| [Post Reddit Comment](misc.md#post-reddit-comment) | This block posts a Reddit comment on a specified Reddit post |
| [Post To Bluesky](ayrshare/post_to_bluesky.md#post-to-bluesky) | Post to Bluesky using Ayrshare |
| [Post To Facebook](ayrshare/post_to_facebook.md#post-to-facebook) | Post to Facebook using Ayrshare |
| [Post To GMB](ayrshare/post_to_gmb.md#post-to-gmb) | Post to Google My Business using Ayrshare |
| [Post To Instagram](ayrshare/post_to_instagram.md#post-to-instagram) | Post to Instagram using Ayrshare |
| [Post To Linked In](ayrshare/post_to_linkedin.md#post-to-linked-in) | Post to LinkedIn using Ayrshare |
| [Post To Pinterest](ayrshare/post_to_pinterest.md#post-to-pinterest) | Post to Pinterest using Ayrshare |
| [Post To Reddit](ayrshare/post_to_reddit.md#post-to-reddit) | Post to Reddit using Ayrshare |
| [Post To Snapchat](ayrshare/post_to_snapchat.md#post-to-snapchat) | Post to Snapchat using Ayrshare |
| [Post To Telegram](ayrshare/post_to_telegram.md#post-to-telegram) | Post to Telegram using Ayrshare |
| [Post To Threads](ayrshare/post_to_threads.md#post-to-threads) | Post to Threads using Ayrshare |
| [Post To Tik Tok](ayrshare/post_to_tiktok.md#post-to-tik-tok) | Post to TikTok using Ayrshare |
| [Post To X](ayrshare/post_to_x.md#post-to-x) | Post to X / Twitter using Ayrshare |
| [Post To You Tube](ayrshare/post_to_youtube.md#post-to-you-tube) | Post to YouTube using Ayrshare |
| [Publish To Medium](misc.md#publish-to-medium) | Publishes a post to Medium |
| [Read Discord Messages](discord/bot_blocks.md#read-discord-messages) | Reads messages from a Discord channel using a bot token |
| [Reddit Get My Posts](misc.md#reddit-get-my-posts) | Fetch posts created by the authenticated Reddit user (you) |
| [Reply To Discord Message](discord/bot_blocks.md#reply-to-discord-message) | Replies to a specific Discord message |
| [Reply To Reddit Comment](misc.md#reply-to-reddit-comment) | Reply to a specific Reddit comment |
| [Search Reddit](misc.md#search-reddit) | Search Reddit for posts matching a query |
| [Send Discord DM](discord/bot_blocks.md#send-discord-dm) | Sends a direct message to a Discord user using their user ID |
| [Send Discord Embed](discord/bot_blocks.md#send-discord-embed) | Sends a rich embed message to a Discord channel |
| [Send Discord File](discord/bot_blocks.md#send-discord-file) | Sends a file attachment to a Discord channel |
| [Send Discord Message](discord/bot_blocks.md#send-discord-message) | Sends a message to a Discord channel using a bot token |
| [Send Reddit Message](misc.md#send-reddit-message) | Send a private message (DM) to a Reddit user |
| [Transcribe Youtube Video](misc.md#transcribe-youtube-video) | Transcribes a YouTube video using a proxy |
| [Twitter Add List Member](twitter/list_members.md#twitter-add-list-member) | This block adds a specified user to a Twitter List owned by the authenticated user |
| [Twitter Bookmark Tweet](twitter/bookmark.md#twitter-bookmark-tweet) | This block bookmarks a tweet on Twitter |
| [Twitter Create List](twitter/manage_lists.md#twitter-create-list) | This block creates a new Twitter List for the authenticated user |
| [Twitter Delete List](twitter/manage_lists.md#twitter-delete-list) | This block deletes a specified Twitter List owned by the authenticated user |
| [Twitter Delete Tweet](twitter/manage.md#twitter-delete-tweet) | This block deletes a tweet on Twitter |
| [Twitter Follow List](twitter/list_follows.md#twitter-follow-list) | This block follows a specified Twitter list for the authenticated user |
| [Twitter Follow User](twitter/follows.md#twitter-follow-user) | This block follows a specified Twitter user |
| [Twitter Get Blocked Users](twitter/blocks.md#twitter-get-blocked-users) | This block retrieves a list of users blocked by the authenticating user |
| [Twitter Get Bookmarked Tweets](twitter/bookmark.md#twitter-get-bookmarked-tweets) | This block retrieves bookmarked tweets from Twitter |
| [Twitter Get Followers](twitter/follows.md#twitter-get-followers) | This block retrieves followers of a specified Twitter user |
| [Twitter Get Following](twitter/follows.md#twitter-get-following) | This block retrieves the users that a specified Twitter user is following |
| [Twitter Get Home Timeline](twitter/timeline.md#twitter-get-home-timeline) | This block retrieves the authenticated user's home timeline |
| [Twitter Get Liked Tweets](twitter/like.md#twitter-get-liked-tweets) | This block gets information about tweets liked by a user |
| [Twitter Get Liking Users](twitter/like.md#twitter-get-liking-users) | This block gets information about users who liked a tweet |
| [Twitter Get List](twitter/list_lookup.md#twitter-get-list) | This block retrieves information about a specified Twitter List |
|
|
||||||
| [Twitter Get List Members](twitter/list_members.md#twitter-get-list-members) | This block retrieves the members of a specified Twitter List |
|
|
||||||
| [Twitter Get List Memberships](twitter/list_members.md#twitter-get-list-memberships) | This block retrieves all Lists that a specified user is a member of |
|
|
||||||
| [Twitter Get List Tweets](twitter/list_tweets_lookup.md#twitter-get-list-tweets) | This block retrieves tweets from a specified Twitter list |
|
|
||||||
| [Twitter Get Muted Users](twitter/mutes.md#twitter-get-muted-users) | This block gets a list of users muted by the authenticating user |
|
|
||||||
| [Twitter Get Owned Lists](twitter/list_lookup.md#twitter-get-owned-lists) | This block retrieves all Lists owned by a specified Twitter user |
|
|
||||||
| [Twitter Get Pinned Lists](twitter/pinned_lists.md#twitter-get-pinned-lists) | This block returns the Lists pinned by the authenticated user |
|
|
||||||
| [Twitter Get Quote Tweets](twitter/quote.md#twitter-get-quote-tweets) | This block gets quote tweets for a specific tweet |
|
|
||||||
| [Twitter Get Retweeters](twitter/retweet.md#twitter-get-retweeters) | This block gets information about who has retweeted a tweet |
|
|
||||||
| [Twitter Get Space Buyers](twitter/spaces_lookup.md#twitter-get-space-buyers) | This block retrieves a list of users who purchased tickets to a Twitter Space |
|
|
||||||
| [Twitter Get Space By Id](twitter/spaces_lookup.md#twitter-get-space-by-id) | This block retrieves information about a single Twitter Space |
|
|
||||||
| [Twitter Get Space Tweets](twitter/spaces_lookup.md#twitter-get-space-tweets) | This block retrieves tweets shared in a Twitter Space |
|
|
||||||
| [Twitter Get Spaces](twitter/spaces_lookup.md#twitter-get-spaces) | This block retrieves information about multiple Twitter Spaces |
|
|
||||||
| [Twitter Get Tweet](twitter/tweet_lookup.md#twitter-get-tweet) | This block retrieves information about a specific Tweet |
|
|
||||||
| [Twitter Get Tweets](twitter/tweet_lookup.md#twitter-get-tweets) | This block retrieves information about multiple Tweets |
|
|
||||||
| [Twitter Get User](twitter/user_lookup.md#twitter-get-user) | This block retrieves information about a specified Twitter user |
|
|
||||||
| [Twitter Get User Mentions](twitter/timeline.md#twitter-get-user-mentions) | This block retrieves Tweets mentioning a specific user |
|
|
||||||
| [Twitter Get User Tweets](twitter/timeline.md#twitter-get-user-tweets) | This block retrieves Tweets composed by a single user |
|
|
||||||
| [Twitter Get Users](twitter/user_lookup.md#twitter-get-users) | This block retrieves information about multiple Twitter users |
|
|
||||||
| [Twitter Hide Reply](twitter/hide.md#twitter-hide-reply) | This block hides a reply to a tweet |
|
|
||||||
| [Twitter Like Tweet](twitter/like.md#twitter-like-tweet) | This block likes a tweet |
|
|
||||||
| [Twitter Mute User](twitter/mutes.md#twitter-mute-user) | This block mutes a specified Twitter user |
|
|
||||||
| [Twitter Pin List](twitter/pinned_lists.md#twitter-pin-list) | This block allows the authenticated user to pin a specified List |
|
|
||||||
| [Twitter Post Tweet](twitter/manage.md#twitter-post-tweet) | This block posts a tweet on Twitter |
|
|
||||||
| [Twitter Remove Bookmark Tweet](twitter/bookmark.md#twitter-remove-bookmark-tweet) | This block removes a bookmark from a tweet on Twitter |
|
|
||||||
| [Twitter Remove List Member](twitter/list_members.md#twitter-remove-list-member) | This block removes a specified user from a Twitter List owned by the authenticated user |
|
|
||||||
| [Twitter Remove Retweet](twitter/retweet.md#twitter-remove-retweet) | This block removes a retweet on Twitter |
|
|
||||||
| [Twitter Retweet](twitter/retweet.md#twitter-retweet) | This block retweets a tweet on Twitter |
|
|
||||||
| [Twitter Search Recent Tweets](twitter/manage.md#twitter-search-recent-tweets) | This block searches all public Tweets in Twitter history |
|
|
||||||
| [Twitter Search Spaces](twitter/search_spaces.md#twitter-search-spaces) | This block searches for Twitter Spaces based on specified terms |
|
|
||||||
| [Twitter Unfollow List](twitter/list_follows.md#twitter-unfollow-list) | This block unfollows a specified Twitter list for the authenticated user |
|
|
||||||
| [Twitter Unfollow User](twitter/follows.md#twitter-unfollow-user) | This block unfollows a specified Twitter user |
|
|
||||||
| [Twitter Unhide Reply](twitter/hide.md#twitter-unhide-reply) | This block unhides a reply to a tweet |
|
|
||||||
| [Twitter Unlike Tweet](twitter/like.md#twitter-unlike-tweet) | This block unlikes a tweet |
|
|
||||||
| [Twitter Unmute User](twitter/mutes.md#twitter-unmute-user) | This block unmutes a specified Twitter user |
|
|
||||||
| [Twitter Unpin List](twitter/pinned_lists.md#twitter-unpin-list) | This block allows the authenticated user to unpin a specified List |
|
|
||||||
| [Twitter Update List](twitter/manage_lists.md#twitter-update-list) | This block updates a specified Twitter List owned by the authenticated user |
|
|
||||||
|
|
||||||
## Communication
|
|
||||||
|
|
||||||
| Block Name | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| [Baas Bot Join Meeting](baas/bots.md#baas-bot-join-meeting) | Deploy a bot to join and record a meeting |
|
|
||||||
| [Baas Bot Leave Meeting](baas/bots.md#baas-bot-leave-meeting) | Remove a bot from an ongoing meeting |
|
|
||||||
| [Gmail Add Label](google/gmail.md#gmail-add-label) | A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist |
|
|
||||||
| [Gmail Create Draft](google/gmail.md#gmail-create-draft) | Create draft emails in Gmail with automatic HTML detection and proper text formatting |
|
|
||||||
| [Gmail Draft Reply](google/gmail.md#gmail-draft-reply) | Create draft replies to Gmail threads with automatic HTML detection and proper text formatting |
|
|
||||||
| [Gmail Forward](google/gmail.md#gmail-forward) | Forward Gmail messages to other recipients with automatic HTML detection and proper formatting |
|
|
||||||
| [Gmail Get Profile](google/gmail.md#gmail-get-profile) | Get the authenticated user's Gmail profile details including email address and message statistics |
|
|
||||||
| [Gmail Get Thread](google/gmail.md#gmail-get-thread) | A block that retrieves an entire Gmail thread (email conversation) by ID, returning all messages with decoded bodies for reading complete conversations |
|
|
||||||
| [Gmail List Labels](google/gmail.md#gmail-list-labels) | A block that retrieves all labels (categories) from a Gmail account for organizing and categorizing emails |
|
|
||||||
| [Gmail Read](google/gmail.md#gmail-read) | A block that retrieves and reads emails from a Gmail account based on search criteria, returning detailed message information including subject, sender, body, and attachments |
|
|
||||||
| [Gmail Remove Label](google/gmail.md#gmail-remove-label) | A block that removes a label from a specific email message in a Gmail account |
|
|
||||||
| [Gmail Reply](google/gmail.md#gmail-reply) | Reply to Gmail threads with automatic HTML detection and proper text formatting |
|
|
||||||
| [Gmail Send](google/gmail.md#gmail-send) | Send emails via Gmail with automatic HTML detection and proper text formatting |
|
|
||||||
| [Hub Spot Engagement](hubspot/engagement.md#hub-spot-engagement) | Manages HubSpot engagements - sends emails and tracks engagement metrics |
|
|
||||||
|
|
||||||
## Developer Tools
|
|
||||||
|
|
||||||
| Block Name | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| [Exa Code Context](exa/code_context.md#exa-code-context) | Search billions of GitHub repos, docs, and Stack Overflow for relevant code examples |
|
|
||||||
| [Execute Code](misc.md#execute-code) | Executes code in a sandbox environment with internet access |
|
|
||||||
| [Execute Code Step](misc.md#execute-code-step) | Execute code in a previously instantiated sandbox |
|
|
||||||
| [Github Add Label](github/issues.md#github-add-label) | A block that adds a label to a GitHub issue or pull request for categorization and organization |
|
|
||||||
| [Github Assign Issue](github/issues.md#github-assign-issue) | A block that assigns a GitHub user to an issue for task ownership and tracking |
|
|
||||||
| [Github Assign PR Reviewer](github/pull_requests.md#github-assign-pr-reviewer) | This block assigns a reviewer to a specified GitHub pull request |
|
|
||||||
| [Github Comment](github/issues.md#github-comment) | A block that posts comments on GitHub issues or pull requests using the GitHub API |
|
|
||||||
| [Github Create Check Run](github/checks.md#github-create-check-run) | Creates a new check run for a specific commit in a GitHub repository |
|
|
||||||
| [Github Create Comment Object](github/reviews.md#github-create-comment-object) | Creates a comment object for use with GitHub blocks |
|
|
||||||
| [Github Create File](github/repo.md#github-create-file) | This block creates a new file in a GitHub repository |
|
|
||||||
| [Github Create PR Review](github/reviews.md#github-create-pr-review) | This block creates a review on a GitHub pull request with optional inline comments |
|
|
||||||
| [Github Create Repository](github/repo.md#github-create-repository) | This block creates a new GitHub repository |
|
|
||||||
| [Github Create Status](github/statuses.md#github-create-status) | Creates a new commit status in a GitHub repository |
|
|
||||||
| [Github Delete Branch](github/repo.md#github-delete-branch) | This block deletes a specified branch |
|
|
||||||
| [Github Discussion Trigger](github/triggers.md#github-discussion-trigger) | This block triggers on GitHub Discussions events |
|
|
||||||
| [Github Get CI Results](github/ci.md#github-get-ci-results) | This block gets CI results for a commit or PR, with optional search for specific errors/warnings in logs |
|
|
||||||
| [Github Get PR Review Comments](github/reviews.md#github-get-pr-review-comments) | This block gets all review comments from a GitHub pull request or from a specific review |
|
|
||||||
| [Github Issues Trigger](github/triggers.md#github-issues-trigger) | This block triggers on GitHub issues events |
|
|
||||||
| [Github List Branches](github/repo.md#github-list-branches) | This block lists all branches for a specified GitHub repository |
|
|
||||||
| [Github List Comments](github/issues.md#github-list-comments) | A block that retrieves all comments from a GitHub issue or pull request, including comment metadata and content |
|
|
||||||
| [Github List Discussions](github/repo.md#github-list-discussions) | This block lists recent discussions for a specified GitHub repository |
|
|
||||||
| [Github List Issues](github/issues.md#github-list-issues) | A block that retrieves a list of issues from a GitHub repository with their titles and URLs |
|
|
||||||
| [Github List PR Reviewers](github/pull_requests.md#github-list-pr-reviewers) | This block lists all reviewers for a specified GitHub pull request |
|
|
||||||
| [Github List PR Reviews](github/reviews.md#github-list-pr-reviews) | This block lists all reviews for a specified GitHub pull request |
|
|
||||||
| [Github List Pull Requests](github/pull_requests.md#github-list-pull-requests) | This block lists all pull requests for a specified GitHub repository |
|
|
||||||
| [Github List Releases](github/repo.md#github-list-releases) | This block lists all releases for a specified GitHub repository |
|
|
||||||
| [Github List Stargazers](github/repo.md#github-list-stargazers) | This block lists all users who have starred a specified GitHub repository |
|
|
||||||
| [Github List Tags](github/repo.md#github-list-tags) | This block lists all tags for a specified GitHub repository |
|
|
||||||
| [Github Make Branch](github/repo.md#github-make-branch) | This block creates a new branch from a specified source branch |
|
|
||||||
| [Github Make Issue](github/issues.md#github-make-issue) | A block that creates new issues on GitHub repositories with a title and body content |
|
|
||||||
| [Github Make Pull Request](github/pull_requests.md#github-make-pull-request) | This block creates a new pull request on a specified GitHub repository |
|
|
||||||
| [Github Pull Request Trigger](github/triggers.md#github-pull-request-trigger) | This block triggers on pull request events and outputs the event type and payload |
|
|
||||||
| [Github Read File](github/repo.md#github-read-file) | This block reads the content of a specified file from a GitHub repository |
|
|
||||||
| [Github Read Folder](github/repo.md#github-read-folder) | This block reads the content of a specified folder from a GitHub repository |
|
|
||||||
| [Github Read Issue](github/issues.md#github-read-issue) | A block that retrieves information about a specific GitHub issue, including its title, body content, and creator |
|
|
||||||
| [Github Read Pull Request](github/pull_requests.md#github-read-pull-request) | This block reads the body, title, user, and changes of a specified GitHub pull request |
|
|
||||||
| [Github Release Trigger](github/triggers.md#github-release-trigger) | This block triggers on GitHub release events |
|
|
||||||
| [Github Remove Label](github/issues.md#github-remove-label) | A block that removes a label from a GitHub issue or pull request |
|
|
||||||
| [Github Resolve Review Discussion](github/reviews.md#github-resolve-review-discussion) | This block resolves or unresolves a review discussion thread on a GitHub pull request |
|
|
||||||
| [Github Star Trigger](github/triggers.md#github-star-trigger) | This block triggers on GitHub star events |
|
|
||||||
| [Github Submit Pending Review](github/reviews.md#github-submit-pending-review) | This block submits a pending (draft) review on a GitHub pull request |
|
|
||||||
| [Github Unassign Issue](github/issues.md#github-unassign-issue) | A block that removes a user's assignment from a GitHub issue |
|
|
||||||
| [Github Unassign PR Reviewer](github/pull_requests.md#github-unassign-pr-reviewer) | This block unassigns a reviewer from a specified GitHub pull request |
|
|
||||||
| [Github Update Check Run](github/checks.md#github-update-check-run) | Updates an existing check run in a GitHub repository |
|
|
||||||
| [Github Update Comment](github/issues.md#github-update-comment) | A block that updates an existing comment on a GitHub issue or pull request |
|
|
||||||
| [Github Update File](github/repo.md#github-update-file) | This block updates an existing file in a GitHub repository |
|
|
||||||
| [Instantiate Code Sandbox](misc.md#instantiate-code-sandbox) | Instantiate a sandbox environment with internet access in which you can execute code with the Execute Code Step block |
|
|
||||||
| [Slant3D Order Webhook](slant3d/webhook.md#slant3d-order-webhook) | This block triggers on Slant3D order status updates and outputs the event details, including tracking information when orders are shipped |
|
|
||||||
|
|
||||||
## Media Generation
|
|
||||||
|
|
||||||
| Block Name | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| [Add Audio To Video](multimedia.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
|
|
||||||
| [Loop Video](multimedia.md#loop-video) | Block to loop a video to a given duration or number of repeats |
|
|
||||||
| [Media Duration](multimedia.md#media-duration) | Block to get the duration of a media file |
|
|
||||||
|
|
||||||
## Productivity
|
|
||||||
|
|
||||||
| Block Name | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| [Google Calendar Create Event](google/calendar.md#google-calendar-create-event) | This block creates a new event in Google Calendar with customizable parameters |
|
|
||||||
| [Notion Create Page](notion/create_page.md#notion-create-page) | Create a new page in Notion |
|
|
||||||
| [Notion Read Database](notion/read_database.md#notion-read-database) | Query a Notion database with optional filtering and sorting, returning structured entries |
|
|
||||||
| [Notion Read Page](notion/read_page.md#notion-read-page) | Read a Notion page by its ID and return its raw JSON |
|
|
||||||
| [Notion Read Page Markdown](notion/read_page_markdown.md#notion-read-page-markdown) | Read a Notion page and convert it to Markdown format with proper formatting for headings, lists, links, and rich text |
|
|
||||||
| [Notion Search](notion/search.md#notion-search) | Search your Notion workspace for pages and databases by text query |
|
|
||||||
| [Todoist Close Task](todoist/tasks.md#todoist-close-task) | Closes a task in Todoist |
|
|
||||||
| [Todoist Create Comment](todoist/comments.md#todoist-create-comment) | Creates a new comment on a Todoist task or project |
| [Todoist Create Label](todoist/labels.md#todoist-create-label) | Creates a new label in Todoist; it will fail if a label with the same name already exists |
| [Todoist Create Project](todoist/projects.md#todoist-create-project) | Creates a new project in Todoist |
| [Todoist Create Task](todoist/tasks.md#todoist-create-task) | Creates a new task in a Todoist project |
| [Todoist Delete Comment](todoist/comments.md#todoist-delete-comment) | Deletes a Todoist comment |
| [Todoist Delete Label](todoist/labels.md#todoist-delete-label) | Deletes a personal label in Todoist |
| [Todoist Delete Project](todoist/projects.md#todoist-delete-project) | Deletes a Todoist project and all its contents |
| [Todoist Delete Section](todoist/sections.md#todoist-delete-section) | Deletes a section and all its tasks from Todoist |
| [Todoist Delete Task](todoist/tasks.md#todoist-delete-task) | Deletes a task in Todoist |
| [Todoist Get Comment](todoist/comments.md#todoist-get-comment) | Get a single comment from Todoist |
| [Todoist Get Comments](todoist/comments.md#todoist-get-comments) | Get all comments for a Todoist task or project |
| [Todoist Get Label](todoist/labels.md#todoist-get-label) | Gets a personal label from Todoist by ID |
| [Todoist Get Project](todoist/projects.md#todoist-get-project) | Gets details for a specific Todoist project |
| [Todoist Get Section](todoist/sections.md#todoist-get-section) | Gets a single section by ID from Todoist |
| [Todoist Get Shared Labels](todoist/labels.md#todoist-get-shared-labels) | Gets all shared labels from Todoist |
| [Todoist Get Task](todoist/tasks.md#todoist-get-task) | Get an active task from Todoist |
| [Todoist Get Tasks](todoist/tasks.md#todoist-get-tasks) | Get active tasks from Todoist |
| [Todoist List Collaborators](todoist/projects.md#todoist-list-collaborators) | Gets all collaborators for a specific Todoist project |
| [Todoist List Labels](todoist/labels.md#todoist-list-labels) | Gets all personal labels from Todoist |
| [Todoist List Projects](todoist/projects.md#todoist-list-projects) | Gets all projects and their details from Todoist |
| [Todoist List Sections](todoist/sections.md#todoist-list-sections) | Gets all sections and their details from Todoist |
| [Todoist Remove Shared Labels](todoist/labels.md#todoist-remove-shared-labels) | Removes all instances of a shared label |
| [Todoist Rename Shared Labels](todoist/labels.md#todoist-rename-shared-labels) | Renames all instances of a shared label |
| [Todoist Reopen Task](todoist/tasks.md#todoist-reopen-task) | Reopens a task in Todoist |
| [Todoist Update Comment](todoist/comments.md#todoist-update-comment) | Updates a Todoist comment |
| [Todoist Update Label](todoist/labels.md#todoist-update-label) | Updates a personal label in Todoist |
| [Todoist Update Project](todoist/projects.md#todoist-update-project) | Updates an existing project in Todoist |
| [Todoist Update Task](todoist/tasks.md#todoist-update-task) | Updates an existing task in Todoist |

## Logic and Control Flow

| Block Name | Description |
|------------|-------------|
| [Calculator](logic.md#calculator) | Performs a mathematical operation on two numbers |
| [Condition](logic.md#condition) | Handles conditional logic based on comparison operators |
| [Count Items](logic.md#count-items) | Counts the number of items in a collection |
| [Data Sampling](logic.md#data-sampling) | This block samples data from a given dataset using various sampling methods |
| [Exa Webset Ready Check](exa/websets.md#exa-webset-ready-check) | Check if webset is ready for next operation - enables conditional workflow branching |
| [If Input Matches](logic.md#if-input-matches) | Handles conditional logic based on comparison operators |
| [Pinecone Init](logic.md#pinecone-init) | Initializes a Pinecone index |
| [Pinecone Insert](logic.md#pinecone-insert) | Upload data to a Pinecone index |
| [Pinecone Query](logic.md#pinecone-query) | Queries a Pinecone index |
| [Step Through Items](logic.md#step-through-items) | Iterates over a list or dictionary and outputs each item |

## Input/Output

| Block Name | Description |
|------------|-------------|
| [Exa Webset Webhook](exa/webhook_blocks.md#exa-webset-webhook) | Receive webhook notifications for Exa webset events |
| [Generic Webhook Trigger](generic_webhook/triggers.md#generic-webhook-trigger) | This block will output the contents of the generic input for the webhook |
| [Read RSS Feed](misc.md#read-rss-feed) | Reads RSS feed entries from a given URL |
| [Send Authenticated Web Request](misc.md#send-authenticated-web-request) | Make an authenticated HTTP request with host-scoped credentials (JSON / form / multipart) |
| [Send Email](misc.md#send-email) | This block sends an email using the provided SMTP credentials |
| [Send Web Request](misc.md#send-web-request) | Make an HTTP request (JSON / form / multipart) |

## Agent Integration

| Block Name | Description |
|------------|-------------|
| [Agent Executor](misc.md#agent-executor) | Executes an existing agent inside your agent |

## CRM Services

| Block Name | Description |
|------------|-------------|
| [Add Lead To Campaign](smartlead/campaign.md#add-lead-to-campaign) | Add a lead to a campaign in SmartLead |
| [Create Campaign](smartlead/campaign.md#create-campaign) | Create a campaign in SmartLead |
| [Hub Spot Company](hubspot/company.md#hub-spot-company) | Manages HubSpot companies - create, update, and retrieve company information |
| [Hub Spot Contact](hubspot/contact.md#hub-spot-contact) | Manages HubSpot contacts - create, update, and retrieve contact information |
| [Save Campaign Sequences](smartlead/campaign.md#save-campaign-sequences) | Save sequences within a campaign |

## AI Safety

| Block Name | Description |
|------------|-------------|
| [Nvidia Deepfake Detect](nvidia/deepfake.md#nvidia-deepfake-detect) | Detects potential deepfakes in images using Nvidia's AI API |

## Issue Tracking

| Block Name | Description |
|------------|-------------|
| [Linear Create Comment](linear/comment.md#linear-create-comment) | Creates a new comment on a Linear issue |
| [Linear Create Issue](linear/issues.md#linear-create-issue) | Creates a new issue on Linear |
| [Linear Get Project Issues](linear/issues.md#linear-get-project-issues) | Gets issues from a Linear project filtered by status and assignee |
| [Linear Search Projects](linear/projects.md#linear-search-projects) | Searches for projects on Linear |

## Hardware

| Block Name | Description |
|------------|-------------|
| [Compass AI Trigger](compass/triggers.md#compass-ai-trigger) | This block will output the contents of the compass transcription |

@@ -1,84 +0,0 @@

# Airtable Bases

<!-- MANUAL: file_description -->
Blocks for creating and managing Airtable bases, which are the top-level containers for tables, records, and data in Airtable.
<!-- END MANUAL -->

## Airtable Create Base

### What it is

Create or find a base in Airtable

### How it works

<!-- MANUAL: how_it_works -->
This block creates a new Airtable base in a specified workspace, or finds an existing one with the same name. When creating, you can optionally define initial tables and their fields to set up the schema.

Enable find_existing to search for a base with the same name before creating a new one, preventing duplicates in your workspace.
<!-- END MANUAL -->
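
For illustration, a minimal sketch of the block's input values; the workspace ID, base name, and table definitions below are made-up placeholders, and the `tables` shape follows the inputs documented below:

```python
# Hypothetical input values for the Airtable Create Base block.
# All IDs and names are placeholders, not real data.
create_base_input = {
    "workspace_id": "wspXXXXXXXXXXXXXX",  # target Airtable workspace (placeholder)
    "name": "Project Tracker",
    "find_existing": True,  # reuse a base with this name instead of duplicating it
    "tables": [
        {
            "name": "Tasks",
            "fields": [
                {"name": "Title", "type": "singleLineText"},
                {"name": "Notes", "type": "multilineText"},
            ],
        }
    ],
}
```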

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| workspace_id | The workspace ID where the base will be created | str | Yes |
| name | The name of the new base | str | Yes |
| find_existing | If true, return existing base with same name instead of creating duplicate | bool | No |
| tables | At least one table and field must be specified. Array of table objects to create in the base. Each table should have 'name' and 'fields' properties | List[Dict[str, Any]] | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| base_id | The ID of the created or found base | str |
| tables | Array of table objects | List[Dict[str, Any]] |
| table | A single table object | Dict[str, Any] |
| was_created | True if a new base was created, False if existing was found | bool |

### Possible use case

<!-- MANUAL: use_case -->
**Project Setup**: Automatically create new bases when projects start with predefined table structures.

**Template Deployment**: Deploy standardized base templates across teams or clients.

**Multi-Tenant Apps**: Create separate bases for each customer or project programmatically.
<!-- END MANUAL -->

---

## Airtable List Bases

### What it is

List all bases in Airtable

### How it works

<!-- MANUAL: how_it_works -->
This block retrieves a list of all Airtable bases accessible to your connected account. It returns basic information about each base including ID, name, and permission level.

Results are paginated; use the offset output to retrieve additional pages if there are more bases than returned in a single call.
<!-- END MANUAL -->
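
The offset loop is the same pattern as Airtable's underlying list-bases endpoint; a minimal sketch in Python, shown against the public `GET /v0/meta/bases` endpoint rather than the block itself, and assuming a valid personal access token:

```python
# Sketch of the offset-based pagination this block exposes, shown
# against Airtable's list-bases endpoint. The token is a placeholder.
import requests

API_TOKEN = "patXXXXXXXXXXXXXX"  # placeholder Airtable personal access token

def list_all_bases() -> list[dict]:
    bases: list[dict] = []
    offset = None
    while True:
        params = {"offset": offset} if offset else {}
        resp = requests.get(
            "https://api.airtable.com/v0/meta/bases",
            headers={"Authorization": f"Bearer {API_TOKEN}"},
            params=params,
        )
        resp.raise_for_status()
        data = resp.json()
        bases.extend(data.get("bases", []))
        offset = data.get("offset")  # absent when there are no more pages
        if not offset:
            return bases
```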

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| trigger | Trigger the block to run - value is ignored | str | No |
| offset | Pagination offset from previous request | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| bases | Array of base objects | List[Dict[str, Any]] |
| offset | Offset for next page (null if no more bases) | str |

### Possible use case

<!-- MANUAL: use_case -->
**Base Discovery**: Find available bases for building dynamic dropdowns or navigation.

**Inventory Management**: List all bases in an organization for auditing or documentation.

**Cross-Base Operations**: Enumerate bases to perform operations across multiple databases.
<!-- END MANUAL -->

---

@@ -1,214 +0,0 @@

# Airtable Records

<!-- MANUAL: file_description -->
Blocks for creating, reading, updating, and deleting records in Airtable tables.
<!-- END MANUAL -->

## Airtable Create Records

### What it is

Create records in an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block creates new records in an Airtable table using the Airtable API. Each record is specified with a fields object containing field names and values. You can create up to 10 records in a single call.

Enable typecast to automatically convert string values to appropriate field types (dates, numbers, etc.). The block returns the created records with their assigned IDs.
<!-- END MANUAL -->
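
As a sketch, the `records` input is a list of objects that each wrap their values in a `fields` key; the field names and values here are hypothetical:

```python
# Hypothetical "records" input: at most 10 entries per call, each with
# a "fields" object mapping field names to values.
records_input = [
    {"fields": {"Name": "Alice", "Email": "alice@example.com", "Score": "42"}},
    {"fields": {"Name": "Bob", "Email": "bob@example.com", "Score": "17"}},
]
# With typecast=True, strings like "42" are coerced to the column's
# configured type (e.g. a number field); without it, values must
# already match the field type.
```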

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id_or_name | Table ID or name | str | Yes |
| records | Array of records to create (each with 'fields' object) | List[Dict[str, Any]] | Yes |
| skip_normalization | Skip output normalization to get raw Airtable response (faster but may have missing fields) | bool | No |
| typecast | Automatically convert string values to appropriate types | bool | No |
| return_fields_by_field_id | Return fields by field ID | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| records | Array of created record objects | List[Dict[str, Any]] |
| details | Details of the created records | Dict[str, Any] |

### Possible use case

<!-- MANUAL: use_case -->
**Data Import**: Bulk import data from external sources into Airtable tables.

**Form Submissions**: Create records from form submissions or API integrations.

**Workflow Output**: Save workflow results or processed data to Airtable for tracking.
<!-- END MANUAL -->

---

## Airtable Delete Records

### What it is

Delete records from an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block deletes records from an Airtable table by their record IDs. You can delete up to 10 records in a single call. The operation is permanent and cannot be undone.

Provide an array of record IDs to delete. Using the table ID instead of the name is recommended for reliability.
<!-- END MANUAL -->

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id_or_name | Table ID or name - It's better to use the table ID instead of the name | str | Yes |
| record_ids | Array of up to 10 record IDs to delete | List[str] | Yes |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| records | Array of deletion results | List[Dict[str, Any]] |

### Possible use case

<!-- MANUAL: use_case -->
**Data Cleanup**: Remove outdated or duplicate records from tables.

**Workflow Cleanup**: Delete temporary records after processing is complete.

**Batch Removal**: Remove multiple records that match certain criteria.
<!-- END MANUAL -->

---

## Airtable Get Record

### What it is

Get a single record from Airtable

### How it works

<!-- MANUAL: how_it_works -->
This block retrieves a single record from an Airtable table by its ID. The record includes all field values and metadata like creation time. Enable normalize_output to ensure all fields are included with proper empty values.

Optionally include field metadata for type information and configuration details about each field.
<!-- END MANUAL -->

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id_or_name | Table ID or name | str | Yes |
| record_id | The record ID to retrieve | str | Yes |
| normalize_output | Normalize output to include all fields with proper empty values (disable to skip schema fetch and get raw Airtable response) | bool | No |
| include_field_metadata | Include field type and configuration metadata (requires normalize_output=true) | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| id | The record ID | str |
| fields | The record fields | Dict[str, Any] |
| created_time | The record created time | str |
| field_metadata | Field type and configuration metadata (only when include_field_metadata=true) | Dict[str, Any] |

### Possible use case

<!-- MANUAL: use_case -->
**Detail View**: Fetch complete record data for display or detailed processing.

**Record Lookup**: Retrieve specific records by ID from webhook payloads or references.

**Data Validation**: Check record contents before performing updates or related operations.
<!-- END MANUAL -->

---

## Airtable List Records

### What it is

List records from an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block queries records from an Airtable table with optional filtering, sorting, and pagination. Use Airtable formulas to filter records and specify sort order by field and direction.

Results can be limited, paginated with offsets, and restricted to specific fields. Enable normalize_output for consistent field values across records.
<!-- END MANUAL -->
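
For example, a filtered and sorted query might use input values like the following; the base ID, table, and field names are made up, and the formula uses Airtable's standard formula syntax:

```python
# Hypothetical inputs for the List Records block: filter with an
# Airtable formula, sort by one field, and page through results.
list_records_input = {
    "base_id": "appXXXXXXXXXXXXXX",  # placeholder base ID
    "table_id_or_name": "Tasks",
    "filter_formula": "AND({Status} = 'Open', {Priority} = 'High')",
    "sort": [{"field": "Due Date", "direction": "asc"}],
    "page_size": 100,  # documented per-page maximum
    "return_fields": ["Status", "Priority", "Due Date"],
}
# Feed the "offset" output back into the "offset" input to fetch the
# next page, until the output comes back empty.
```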

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id_or_name | Table ID or name | str | Yes |
| filter_formula | Airtable formula to filter records | str | No |
| view | View ID or name to use | str | No |
| sort | Sort configuration (array of {field, direction}) | List[Dict[str, Any]] | No |
| max_records | Maximum number of records to return | int | No |
| page_size | Number of records per page (max 100) | int | No |
| offset | Pagination offset from previous request | str | No |
| return_fields | Specific fields to return (comma-separated) | List[str] | No |
| normalize_output | Normalize output to include all fields with proper empty values (disable to skip schema fetch and get raw Airtable response) | bool | No |
| include_field_metadata | Include field type and configuration metadata (requires normalize_output=true) | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| records | Array of record objects | List[Dict[str, Any]] |
| offset | Offset for next page (null if no more records) | str |
| field_metadata | Field type and configuration metadata (only when include_field_metadata=true) | Dict[str, Any] |

### Possible use case

<!-- MANUAL: use_case -->
**Report Generation**: Query records with filters to build reports or dashboards.

**Data Export**: Fetch records matching criteria for export to other systems.

**Batch Processing**: List records to process in subsequent workflow steps.
<!-- END MANUAL -->

---

## Airtable Update Records

### What it is

Update records in an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block updates existing records in an Airtable table. Each record update requires the record ID and a fields object with the values to update. Only specified fields are modified; other fields remain unchanged.

Enable typecast to automatically convert string values to appropriate types. You can update up to 10 records per call.
<!-- END MANUAL -->
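
A sketch of the `records` input shape for a partial update; the record IDs and field names are placeholders:

```python
# Hypothetical "records" input: each entry names the record by ID and
# lists only the fields to overwrite. Unlisted fields are untouched.
updates_input = [
    {"id": "recXXXXXXXXXXXXXX", "fields": {"Status": "Done"}},
    {"id": "recYYYYYYYYYYYYYY", "fields": {"Status": "In Review", "Owner": "alice@example.com"}},
]
```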

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id_or_name | Table ID or name - It's better to use the table ID instead of the name | str | Yes |
| records | Array of records to update (each with 'id' and 'fields') | List[Dict[str, Any]] | Yes |
| typecast | Automatically convert string values to appropriate types | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| records | Array of updated record objects | List[Dict[str, Any]] |

### Possible use case

<!-- MANUAL: use_case -->
**Status Updates**: Update record status fields as workflows progress.

**Data Enrichment**: Add computed or fetched data to existing records.

**Batch Modifications**: Update multiple records based on processed results.
<!-- END MANUAL -->

---

@@ -1,202 +0,0 @@

# Airtable Schema

<!-- MANUAL: file_description -->
Blocks for managing Airtable schema including tables, fields, and their configurations.
<!-- END MANUAL -->

## Airtable Create Field

### What it is

Add a new field to an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block adds a new field to an existing Airtable table using the Airtable API. Specify the field type (text, email, URL, etc.), name, and optional description and configuration options.

The field is created immediately and becomes available for use in all records. Returns the created field object with its assigned ID.
<!-- END MANUAL -->
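
For illustration, a minimal set of input values for a field type that needs no extra configuration; the IDs and names are placeholders, and select-style types would additionally require type-specific `options`:

```python
# Hypothetical inputs for the Create Field block. An "email" field
# needs no "options" value; types like singleSelect also require
# type-specific options.
create_field_input = {
    "base_id": "appXXXXXXXXXXXXXX",   # placeholder base ID
    "table_id": "tblXXXXXXXXXXXXXX",  # placeholder table ID
    "field_type": "email",
    "name": "Contact Email",
    "description": "Primary contact address for this record",
}
```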

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id | The table ID to add field to | str | Yes |
| field_type | The type of the field to create | "singleLineText" \| "email" \| "url" \| "multilineText" \| "number" \| "percent" \| "currency" \| "singleSelect" \| "multipleSelects" \| "singleCollaborator" \| "multipleCollaborators" \| "multipleRecordLinks" \| "date" \| "dateTime" \| "phoneNumber" \| "multipleAttachments" \| "checkbox" \| "formula" \| "createdTime" \| "rollup" \| "count" \| "lookup" \| "multipleLookupValues" \| "autoNumber" \| "barcode" \| "rating" \| "richText" \| "duration" \| "lastModifiedTime" \| "button" \| "createdBy" \| "lastModifiedBy" \| "externalSyncSource" \| "aiText" | No |
| name | The name of the field to create | str | Yes |
| description | The description of the field to create | str | No |
| options | The options of the field to create | Dict[str, str] | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| field | Created field object | Dict[str, Any] |
| field_id | ID of the created field | str |

### Possible use case

<!-- MANUAL: use_case -->
**Schema Evolution**: Add new fields to tables as application requirements grow.

**Dynamic Forms**: Create fields based on user configuration or form builder settings.

**Data Integration**: Add fields to capture data from newly integrated external systems.
<!-- END MANUAL -->

---

## Airtable Create Table

### What it is

Create a new table in an Airtable base

### How it works

<!-- MANUAL: how_it_works -->
This block creates a new table in an Airtable base with the specified name and optional field definitions. Each field definition includes name, type, and type-specific options.

The table is created with the defined schema and is immediately ready for use. Returns the created table object with its ID.
<!-- END MANUAL -->
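
A sketch of the `table_fields` input; the table and field names are made up, and in Airtable's table model the first field listed acts as the table's primary field:

```python
# Hypothetical inputs for the Create Table block; the first field in
# the list becomes the table's primary field.
create_table_input = {
    "base_id": "appXXXXXXXXXXXXXX",  # placeholder base ID
    "table_name": "Invoices",
    "table_fields": [
        {"name": "Invoice Number", "type": "singleLineText"},
        {"name": "Notes", "type": "multilineText"},
    ],
}
```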

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_name | The name of the table to create | str | Yes |
| table_fields | Table fields with name, type, and options | List[Dict[str, Any]] | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| table | Created table object | Dict[str, Any] |
| table_id | ID of the created table | str |

### Possible use case

<!-- MANUAL: use_case -->
**Application Scaffolding**: Create tables programmatically when setting up new application modules.

**Multi-Tenant Setup**: Generate customer-specific tables dynamically.

**Feature Expansion**: Add new tables as features are enabled or installed.
<!-- END MANUAL -->

---

## Airtable List Schema

### What it is

Get the complete schema of an Airtable base

### How it works

<!-- MANUAL: how_it_works -->
This block retrieves the complete schema of an Airtable base, including all tables, their fields, field types, and views. This metadata is essential for building dynamic integrations that need to understand table structure.

The schema includes field configurations, validation rules, and relationship definitions between tables.
<!-- END MANUAL -->
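
A short sketch of walking the `base_schema` output to summarize each table's fields; the nested `tables` and `fields` keys follow Airtable's schema shape, and the function name is made up:

```python
# Sketch: summarize a base schema as "Table: field (type), ..." lines.
# `base_schema` stands in for this block's output of the same name.
def summarize_schema(base_schema: dict) -> list[str]:
    lines = []
    for table in base_schema.get("tables", []):
        fields = ", ".join(
            f"{f['name']} ({f['type']})" for f in table.get("fields", [])
        )
        lines.append(f"{table['name']}: {fields}")
    return lines
```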

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| base_schema | Complete base schema with tables, fields, and views | Dict[str, Any] |
| tables | Array of table objects | List[Dict[str, Any]] |

### Possible use case

<!-- MANUAL: use_case -->
**Schema Discovery**: Understand table structure for building dynamic forms or queries.

**Documentation**: Generate documentation of database schema automatically.

**Migration Planning**: Analyze schema before migrating data to other systems.
<!-- END MANUAL -->

---

## Airtable Update Field

### What it is

Update field properties in an Airtable table

### How it works

<!-- MANUAL: how_it_works -->
This block updates properties of an existing field in an Airtable table. You can modify the field name and description. Note that field type cannot be changed after creation.

Changes take effect immediately across all records and views that use the field.
<!-- END MANUAL -->

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id | The table ID containing the field | str | Yes |
| field_id | The field ID to update | str | Yes |
| name | The name of the field to update | str | No |
| description | The description of the field to update | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| field | Updated field object | Dict[str, Any] |

### Possible use case

<!-- MANUAL: use_case -->
**Field Renaming**: Update field names to match evolving terminology or standards.

**Documentation Updates**: Add or update field descriptions for better team understanding.

**Schema Maintenance**: Keep field metadata current as application requirements change.
<!-- END MANUAL -->

---

## Airtable Update Table

### What it is

Update table properties

### How it works

<!-- MANUAL: how_it_works -->
This block updates table properties in an Airtable base. You can change the table name, description, and date dependency settings. Changes apply immediately and affect all users accessing the table.

This is useful for maintaining table metadata and organizing your base structure.
<!-- END MANUAL -->

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | The Airtable base ID | str | Yes |
| table_id | The table ID to update | str | Yes |
| table_name | The name of the table to update | str | No |
| table_description | The description of the table to update | str | No |
| date_dependency | The date dependency of the table to update | Dict[str, Any] | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| table | Updated table object | Dict[str, Any] |

### Possible use case

<!-- MANUAL: use_case -->
**Table Organization**: Rename tables to follow naming conventions or reflect current usage.

**Description Management**: Update table descriptions for documentation purposes.

**Configuration Updates**: Modify table settings like date dependencies as requirements change.
<!-- END MANUAL -->

---

@@ -1,42 +0,0 @@

# Airtable Triggers

<!-- MANUAL: file_description -->
Blocks for triggering workflows based on Airtable events like record creation, updates, and deletions.
<!-- END MANUAL -->

## Airtable Webhook Trigger

### What it is

Starts a flow whenever Airtable emits a webhook event

### How it works

<!-- MANUAL: how_it_works -->
This block subscribes to Airtable webhook events for a specific base and table. When records are created, updated, or deleted, Airtable sends a webhook notification that triggers your workflow.

You specify which events to listen for using the event selector. The webhook payload includes details about the changed records and the type of change that occurred.
<!-- END MANUAL -->

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| base_id | Airtable base ID | str | Yes |
| table_id_or_name | Airtable table ID or name | str | Yes |
| events | Airtable webhook event filter | AirtableEventSelector | Yes |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| payload | Airtable webhook payload | WebhookPayload |

### Possible use case

<!-- MANUAL: use_case -->
**Real-Time Sync**: Automatically sync Airtable changes to other systems like CRMs or databases.

**Notification Workflows**: Send alerts when specific records are created or modified in Airtable.

**Automated Processing**: Trigger document generation or emails when new entries are added to a table.
<!-- END MANUAL -->

---

@@ -1,47 +0,0 @@

# Apollo Organization

<!-- MANUAL: file_description -->
Blocks for searching and retrieving organization data from Apollo's B2B database.
<!-- END MANUAL -->

## Search Organizations

### What it is

Search for organizations in Apollo

### How it works

<!-- MANUAL: how_it_works -->
This block searches the Apollo database for organizations using various filters like employee count, location, and keywords. Apollo maintains a comprehensive database of company information for sales and marketing purposes.

Results can be filtered by headquarters location, excluded locations, industry keywords, and specific Apollo organization IDs.
<!-- END MANUAL -->
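
For example (all values hypothetical), a search for small mining companies outside Ireland might use inputs like these; note the headcount ranges use the comma-separated string format described in the inputs below:

```python
# Hypothetical filter values for the Search Organizations block. The
# employee ranges are "lower,upper" strings, per the field description.
org_search_input = {
    "organization_num_employees_range": ["1,10", "11,50"],
    "organization_locations": ["united states"],
    "organizations_not_locations": ["ireland"],
    "q_organization_keyword_tags": ["mining"],
    "max_results": 50,
}
```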

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| organization_num_employees_range | The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma. | List[int] | No |
| organization_locations | The location of the company headquarters. You can search across cities, US states, and countries. If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters. To exclude companies based on location, use the organizations_not_locations parameter. | List[str] | No |
| organizations_not_locations | Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude. This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results. | List[str] | No |
| q_organization_keyword_tags | Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry. | List[str] | No |
| q_organization_name | Filter search results to include a specific company name. If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible. | str | No |
| organization_ids | The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID. To find IDs, identify the values for organization_id when you call this endpoint. | List[str] | No |
| max_results | The maximum number of results to return. If you don't specify this parameter, the default is 100. | int | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the search failed | str |
| organizations | List of organizations found | List[Dict[str, Any]] |
| organization | Each found organization, one at a time | Dict[str, Any] |
### Possible use case
<!-- MANUAL: use_case -->
**Market Research**: Find companies matching specific criteria for market analysis.

**Lead List Building**: Build targeted lists of companies for outbound sales campaigns.

**Competitive Intelligence**: Research competitors and similar companies in your market.
<!-- END MANUAL -->
---

@@ -1,50 +0,0 @@

# Apollo People

<!-- MANUAL: file_description -->
Blocks for searching people in Apollo's B2B contact database with various filters.
<!-- END MANUAL -->

## Search People

### What it is

Search for people in Apollo

### How it works

<!-- MANUAL: how_it_works -->
This block searches Apollo's database for people based on job titles, seniority, location, company, and other criteria. It's designed for finding prospects and contacts for sales and marketing.

Enable enrich_info to get detailed contact information including verified email addresses (costs more credits).
<!-- END MANUAL -->
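
As a rough illustration, the sketch below shows the same kind of search issued against Apollo's API directly. The endpoint path (`mixed_people/search`), the `X-Api-Key` header, and the exact request fields are assumptions from Apollo's public API documentation, not from this block's source; the block wraps the call using the inputs listed below.

```python
import requests

# Minimal sketch of a people search against Apollo's API, assuming the
# public mixed_people/search endpoint and X-Api-Key header auth.
# Endpoint, header, and field names are assumptions from Apollo's docs,
# not taken from this block's source code.
resp = requests.post(
    "https://api.apollo.io/api/v1/mixed_people/search",
    headers={"X-Api-Key": "YOUR_APOLLO_API_KEY"},
    json={
        "person_titles": ["marketing manager"],
        "person_seniorities": ["director", "vp"],
        "person_locations": ["chicago"],
        "contact_email_statuses": ["verified"],
        "page": 1,
        "per_page": 25,
    },
    timeout=30,
)
resp.raise_for_status()
for person in resp.json().get("people", []):
    print(person.get("name"), person.get("title"))
```

Note that basic search results carry limited contact data; the block's `enrich_info` input (documented below) is what triggers the more expensive enrichment step for verified email addresses.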

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| person_titles | Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results. Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager. Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels. | List[str] | No |
| person_locations | The location where people live. You can search across cities, US states, and countries. To find people based on the headquarters locations of their current employer, use the organization_locations parameter. | List[str] | No |
| person_seniorities | The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level. For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results. Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results. Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels. | List["owner" \| "founder" \| "c_suite" \| "partner" \| "vp" \| "head" \| "director" \| "manager" \| "senior" \| "entry" \| "intern"] | No |
| organization_locations | The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries. If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters. To find people based on their personal location, use the person_locations parameter. | List[str] | No |
| q_organization_domains | The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar. You can add multiple domains to search across companies. Examples: apollo.io and microsoft.com | List[str] | No |
| contact_email_statuses | The email statuses for the people you want to find. You can add multiple statuses to expand your search. | List["verified" \| "unverified" \| "likely_to_engage" \| "unavailable"] | No |
| organization_ids | The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID. To find IDs, call the Organization Search endpoint and identify the values for organization_id. | List[str] | No |
| organization_num_employees_range | The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma. | List[int] | No |
| q_keywords | A string of keywords used to filter the results | str | No |
| max_results | The maximum number of results to return. If you don't specify this parameter, the default is 25. Limited to 500 to prevent overspending. | int | No |
| enrich_info | Whether to enrich contacts with detailed information including real email addresses. This will double the search cost. | bool | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the search failed | str |
| people | List of people found | List[Dict[str, Any]] |
### Possible use case
<!-- MANUAL: use_case -->
**Prospecting**: Find decision-makers at target companies for outbound sales.

**Recruiting**: Search for candidates with specific titles and experience.

**ABM Campaigns**: Build contact lists at specific accounts for account-based marketing.
<!-- END MANUAL -->
---

@@ -1,49 +0,0 @@

# Apollo Person

<!-- MANUAL: file_description -->
Blocks for enriching individual person data including contact details and email discovery.
<!-- END MANUAL -->

## Get Person Detail

### What it is

Get detailed person data with the Apollo API, including email reveal

### How it works

<!-- MANUAL: how_it_works -->
This block enriches person data using Apollo's API. You can look up by Apollo person ID for best accuracy, or match by name plus company information, LinkedIn URL, or email address.

Returns comprehensive contact details including email addresses (if available), job title, company information, and social profiles.
<!-- END MANUAL -->
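
For orientation, here is a minimal sketch of such an enrichment call made against Apollo's API directly. The endpoint path (`people/match`), the `X-Api-Key` header, and the field names are assumptions from Apollo's public API documentation, not from this block's source; the person in the example is hypothetical.

```python
import requests

# Minimal sketch of person enrichment against Apollo's API, assuming the
# public people/match endpoint and X-Api-Key header auth. Endpoint,
# header, and field names are assumptions from Apollo's docs, not taken
# from this block's source code.
resp = requests.post(
    "https://api.apollo.io/api/v1/people/match",
    headers={"X-Api-Key": "YOUR_APOLLO_API_KEY"},
    json={
        "first_name": "Jane",      # hypothetical person, for illustration
        "last_name": "Doe",
        "domain": "example.com",   # company domain improves match accuracy
    },
    timeout=30,
)
resp.raise_for_status()
person = resp.json().get("person") or {}
print(person.get("name"), person.get("email"), person.get("title"))
```

The more identifying inputs you supply (ID, email, LinkedIn URL, company domain), the more accurate the match; the Apollo `person_id`, when known, is the most reliable single key.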

### Inputs

| Input | Description | Type | Required |
|-------|-------------|------|----------|
| person_id | Apollo person ID to enrich (most accurate method) | str | No |
| first_name | First name of the person to enrich | str | No |
| last_name | Last name of the person to enrich | str | No |
| name | Full name of the person to enrich (alternative to first_name + last_name) | str | No |
| email | Known email address of the person (helps with matching) | str | No |
| domain | Company domain of the person (e.g., 'google.com') | str | No |
| company | Company name of the person | str | No |
| linkedin_url | LinkedIn URL of the person | str | No |
| organization_id | Apollo organization ID of the person's company | str | No |
| title | Job title of the person to enrich | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if enrichment failed | str |
| contact | Enriched contact information | Dict[str, Any] |
### Possible use case
<!-- MANUAL: use_case -->
**Contact Enrichment**: Get full contact details from partial information like name and company.

**Email Discovery**: Find verified email addresses for outreach campaigns.

**Profile Completion**: Fill in missing contact details in your CRM or database.
<!-- END MANUAL -->
---