mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 16:48:06 -05:00
Compare commits
12 Commits
ntindle/wa
...
figure-out
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
04cf8d01b4 | ||
|
|
3d78b833f0 | ||
|
|
eea4bca826 | ||
|
|
0bf55b453b | ||
|
|
f25d2a0ae6 | ||
|
|
4baf0f7ee3 | ||
|
|
56f296af36 | ||
|
|
302e6d548d | ||
|
|
8d7defc89a | ||
|
|
7f94404dc2 | ||
|
|
7fda42d48b | ||
|
|
615d20613b |
74
.github/workflows/docs-block-sync.yml
vendored
Normal file
74
.github/workflows/docs-block-sync.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
name: Block Documentation Sync Check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, dev]
|
||||
paths:
|
||||
- "autogpt_platform/backend/backend/blocks/**"
|
||||
- "docs/content/platform/blocks/**"
|
||||
- "autogpt_platform/backend/scripts/generate_block_docs.py"
|
||||
- ".github/workflows/docs-block-sync.yml"
|
||||
pull_request:
|
||||
branches: [master, dev]
|
||||
paths:
|
||||
- "autogpt_platform/backend/backend/blocks/**"
|
||||
- "docs/content/platform/blocks/**"
|
||||
- "autogpt_platform/backend/scripts/generate_block_docs.py"
|
||||
- ".github/workflows/docs-block-sync.yml"
|
||||
|
||||
jobs:
|
||||
check-docs-sync:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
restore-keys: |
|
||||
poetry-${{ runner.os }}-
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
cd autogpt_platform/backend
|
||||
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: autogpt_platform/backend
|
||||
run: |
|
||||
poetry install --only main
|
||||
poetry run prisma generate
|
||||
|
||||
- name: Check block documentation is in sync
|
||||
working-directory: autogpt_platform/backend
|
||||
run: |
|
||||
echo "Checking if block documentation is in sync with code..."
|
||||
poetry run python scripts/generate_block_docs.py --check
|
||||
|
||||
- name: Show diff if out of sync
|
||||
if: failure()
|
||||
run: |
|
||||
echo "::error::Block documentation is out of sync with code!"
|
||||
echo ""
|
||||
echo "To fix this, run the following command locally:"
|
||||
echo " cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
|
||||
echo ""
|
||||
echo "Then commit the updated documentation files."
|
||||
echo ""
|
||||
echo "Changes detected:"
|
||||
git diff docs/content/platform/blocks/ || true
|
||||
94
.github/workflows/docs-claude-review.yml
vendored
Normal file
94
.github/workflows/docs-claude-review.yml
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
name: Claude Block Docs Review
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
paths:
|
||||
- "docs/content/platform/blocks/**"
|
||||
- "autogpt_platform/backend/backend/blocks/**"
|
||||
|
||||
jobs:
|
||||
claude-review:
|
||||
# Only run for PRs from members/collaborators
|
||||
if: |
|
||||
github.event.pull_request.author_association == 'OWNER' ||
|
||||
github.event.pull_request.author_association == 'MEMBER' ||
|
||||
github.event.pull_request.author_association == 'COLLABORATOR'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
restore-keys: |
|
||||
poetry-${{ runner.os }}-
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
cd autogpt_platform/backend
|
||||
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: autogpt_platform/backend
|
||||
run: |
|
||||
poetry install --only main
|
||||
poetry run prisma generate
|
||||
|
||||
- name: Run Claude Code Review
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
claude_args: |
|
||||
--allowedTools "Read,Glob,Grep,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*)"
|
||||
prompt: |
|
||||
You are reviewing a PR that modifies block documentation or block code for AutoGPT.
|
||||
|
||||
## Your Task
|
||||
Review the changes in this PR and provide constructive feedback. Focus on:
|
||||
|
||||
1. **Documentation Accuracy**: For any block code changes, verify that:
|
||||
- Input/output tables in docs match the actual block schemas
|
||||
- Description text accurately reflects what the block does
|
||||
- Any new blocks have corresponding documentation
|
||||
|
||||
2. **Manual Content Quality**: Check manual sections (marked with `<!-- MANUAL: -->` markers):
|
||||
- "How it works" sections should have clear technical explanations
|
||||
- "Possible use case" sections should have practical, real-world examples
|
||||
- Content should be helpful for users trying to understand the blocks
|
||||
|
||||
3. **Template Compliance**: Ensure docs follow the standard template:
|
||||
- What it is (brief intro)
|
||||
- What it does (description)
|
||||
- How it works (technical explanation)
|
||||
- Inputs table
|
||||
- Outputs table
|
||||
- Possible use case
|
||||
|
||||
4. **Cross-references**: Check that links and anchors are correct
|
||||
|
||||
## Review Process
|
||||
1. First, get the PR diff to see what changed: `gh pr diff ${{ github.event.pull_request.number }}`
|
||||
2. Read any modified block files to understand the implementation
|
||||
3. Read corresponding documentation files to verify accuracy
|
||||
4. Provide your feedback as a PR comment
|
||||
|
||||
Be constructive and specific. If everything looks good, say so!
|
||||
If there are issues, explain what's wrong and suggest how to fix it.
|
||||
193
.github/workflows/docs-enhance.yml
vendored
Normal file
193
.github/workflows/docs-enhance.yml
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
name: Enhance Block Documentation
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
block_pattern:
|
||||
description: 'Block file pattern to enhance (e.g., "google/*.md" or "*" for all blocks)'
|
||||
required: true
|
||||
default: '*'
|
||||
type: string
|
||||
dry_run:
|
||||
description: 'Dry run mode - show proposed changes without committing'
|
||||
type: boolean
|
||||
default: true
|
||||
max_blocks:
|
||||
description: 'Maximum number of blocks to process (0 for unlimited)'
|
||||
type: number
|
||||
default: 10
|
||||
|
||||
jobs:
|
||||
enhance-docs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 45
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
restore-keys: |
|
||||
poetry-${{ runner.os }}-
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
cd autogpt_platform/backend
|
||||
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: autogpt_platform/backend
|
||||
run: |
|
||||
poetry install --only main
|
||||
poetry run prisma generate
|
||||
|
||||
- name: Run Claude Enhancement
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
claude_args: |
|
||||
--allowedTools "Read,Edit,Glob,Grep,Write,Bash(git:*),Bash(gh:*),Bash(find:*),Bash(ls:*)"
|
||||
prompt: |
|
||||
You are enhancing block documentation for AutoGPT. Your task is to improve the MANUAL sections
|
||||
of block documentation files by reading the actual block implementations and writing helpful content.
|
||||
|
||||
## Configuration
|
||||
- Block pattern: ${{ inputs.block_pattern }}
|
||||
- Dry run: ${{ inputs.dry_run }}
|
||||
- Max blocks to process: ${{ inputs.max_blocks }}
|
||||
|
||||
## Your Task
|
||||
|
||||
1. **Find Documentation Files**
|
||||
Find block documentation files matching the pattern in `docs/content/platform/blocks/`
|
||||
Pattern: ${{ inputs.block_pattern }}
|
||||
|
||||
Use: `find docs/content/platform/blocks -name "*.md" -type f`
|
||||
|
||||
2. **For Each Documentation File** (up to ${{ inputs.max_blocks }} files):
|
||||
|
||||
a. Read the documentation file
|
||||
|
||||
b. Identify which block(s) it documents (look for the block class name)
|
||||
|
||||
c. Find and read the corresponding block implementation in `autogpt_platform/backend/backend/blocks/`
|
||||
|
||||
d. Improve the MANUAL sections:
|
||||
|
||||
**"How it works" section** (within `<!-- MANUAL: how_it_works -->` markers):
|
||||
- Explain the technical flow of the block
|
||||
- Describe what APIs or services it connects to
|
||||
- Note any important configuration or prerequisites
|
||||
- Keep it concise but informative (2-4 paragraphs)
|
||||
|
||||
**"Possible use case" section** (within `<!-- MANUAL: use_case -->` markers):
|
||||
- Provide 2-3 practical, real-world examples
|
||||
- Make them specific and actionable
|
||||
- Show how this block could be used in an automation workflow
|
||||
|
||||
3. **Important Rules**
|
||||
- ONLY modify content within `<!-- MANUAL: -->` and `<!-- END MANUAL -->` markers
|
||||
- Do NOT modify auto-generated sections (inputs/outputs tables, descriptions)
|
||||
- Keep content accurate based on the actual block implementation
|
||||
- Write for users who may not be technical experts
|
||||
|
||||
4. **Output**
|
||||
${{ inputs.dry_run == true && 'DRY RUN MODE: Show proposed changes for each file but do NOT actually edit the files. Describe what you would change.' || 'LIVE MODE: Actually edit the files to improve the documentation.' }}
|
||||
|
||||
## Example Improvements
|
||||
|
||||
**Before (How it works):**
|
||||
```
|
||||
_Add technical explanation here._
|
||||
```
|
||||
|
||||
**After (How it works):**
|
||||
```
|
||||
This block connects to the GitHub API to retrieve issue information. When executed,
|
||||
it authenticates using your GitHub credentials and fetches issue details including
|
||||
title, body, labels, and assignees.
|
||||
|
||||
The block requires a valid GitHub OAuth connection with repository access permissions.
|
||||
It supports both public and private repositories you have access to.
|
||||
```
|
||||
|
||||
**Before (Possible use case):**
|
||||
```
|
||||
_Add practical use case examples here._
|
||||
```
|
||||
|
||||
**After (Possible use case):**
|
||||
```
|
||||
**Customer Support Automation**: Monitor a GitHub repository for new issues with
|
||||
the "bug" label, then automatically create a ticket in your support system and
|
||||
notify the on-call engineer via Slack.
|
||||
|
||||
**Release Notes Generation**: When a new release is published, gather all closed
|
||||
issues since the last release and generate a summary for your changelog.
|
||||
```
|
||||
|
||||
Begin by finding and listing the documentation files to process.
|
||||
|
||||
- name: Create PR with enhanced documentation
|
||||
if: ${{ inputs.dry_run == false }}
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Check if there are changes
|
||||
if git diff --quiet docs/content/platform/blocks/; then
|
||||
echo "No changes to commit"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Configure git
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
# Create branch and commit
|
||||
BRANCH_NAME="docs/enhance-blocks-$(date +%Y%m%d-%H%M%S)"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
git add docs/content/platform/blocks/
|
||||
git commit -m "docs: enhance block documentation with LLM-generated content
|
||||
|
||||
Pattern: ${{ inputs.block_pattern }}
|
||||
Max blocks: ${{ inputs.max_blocks }}
|
||||
|
||||
🤖 Generated with [Claude Code](https://claude.com/claude-code)
|
||||
|
||||
Co-Authored-By: Claude <noreply@anthropic.com>"
|
||||
|
||||
# Push and create PR
|
||||
git push -u origin "$BRANCH_NAME"
|
||||
gh pr create \
|
||||
--title "docs: LLM-enhanced block documentation" \
|
||||
--body "## Summary
|
||||
This PR contains LLM-enhanced documentation for block files matching pattern: \`${{ inputs.block_pattern }}\`
|
||||
|
||||
The following manual sections were improved:
|
||||
- **How it works**: Technical explanations based on block implementations
|
||||
- **Possible use case**: Practical, real-world examples
|
||||
|
||||
## Review Checklist
|
||||
- [ ] Content is accurate based on block implementations
|
||||
- [ ] Examples are practical and helpful
|
||||
- [ ] No auto-generated sections were modified
|
||||
|
||||
---
|
||||
🤖 Generated with [Claude Code](https://claude.com/claude-code)" \
|
||||
--base dev
|
||||
@@ -1,250 +0,0 @@
|
||||
import logging
|
||||
|
||||
import autogpt_libs.auth
|
||||
import fastapi
|
||||
import fastapi.responses
|
||||
|
||||
import backend.api.features.store.db as store_db
|
||||
import backend.api.features.store.model as store_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = fastapi.APIRouter(
|
||||
prefix="/admin/waitlist",
|
||||
tags=["store", "admin", "waitlist"],
|
||||
dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"",
|
||||
summary="Create Waitlist",
|
||||
response_model=store_model.WaitlistAdminResponse,
|
||||
)
|
||||
async def create_waitlist(
|
||||
request: store_model.WaitlistCreateRequest,
|
||||
user_id: str = fastapi.Security(autogpt_libs.auth.get_user_id),
|
||||
):
|
||||
"""
|
||||
Create a new waitlist (admin only).
|
||||
|
||||
Args:
|
||||
request: Waitlist creation details
|
||||
user_id: Authenticated admin user creating the waitlist
|
||||
|
||||
Returns:
|
||||
WaitlistAdminResponse with the created waitlist details
|
||||
"""
|
||||
try:
|
||||
waitlist = await store_db.create_waitlist_admin(
|
||||
admin_user_id=user_id,
|
||||
data=request,
|
||||
)
|
||||
return waitlist
|
||||
except Exception as e:
|
||||
logger.exception("Error creating waitlist: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while creating the waitlist"},
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"",
|
||||
summary="List All Waitlists",
|
||||
response_model=store_model.WaitlistAdminListResponse,
|
||||
)
|
||||
async def list_waitlists():
|
||||
"""
|
||||
Get all waitlists with admin details (admin only).
|
||||
|
||||
Returns:
|
||||
WaitlistAdminListResponse with all waitlists
|
||||
"""
|
||||
try:
|
||||
return await store_db.get_waitlists_admin()
|
||||
except Exception as e:
|
||||
logger.exception("Error listing waitlists: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while fetching waitlists"},
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{waitlist_id}",
|
||||
summary="Get Waitlist Details",
|
||||
response_model=store_model.WaitlistAdminResponse,
|
||||
)
|
||||
async def get_waitlist(
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
|
||||
):
|
||||
"""
|
||||
Get a single waitlist with admin details (admin only).
|
||||
|
||||
Args:
|
||||
waitlist_id: ID of the waitlist to retrieve
|
||||
|
||||
Returns:
|
||||
WaitlistAdminResponse with waitlist details
|
||||
"""
|
||||
try:
|
||||
return await store_db.get_waitlist_admin(waitlist_id)
|
||||
except ValueError:
|
||||
logger.warning("Waitlist not found: %s", waitlist_id)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=404,
|
||||
content={"detail": "Waitlist not found"},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("Error fetching waitlist: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while fetching the waitlist"},
|
||||
)
|
||||
|
||||
|
||||
@router.put(
|
||||
"/{waitlist_id}",
|
||||
summary="Update Waitlist",
|
||||
response_model=store_model.WaitlistAdminResponse,
|
||||
)
|
||||
async def update_waitlist(
|
||||
request: store_model.WaitlistUpdateRequest,
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
|
||||
):
|
||||
"""
|
||||
Update a waitlist (admin only).
|
||||
|
||||
Args:
|
||||
waitlist_id: ID of the waitlist to update
|
||||
request: Fields to update
|
||||
|
||||
Returns:
|
||||
WaitlistAdminResponse with updated waitlist details
|
||||
"""
|
||||
try:
|
||||
return await store_db.update_waitlist_admin(waitlist_id, request)
|
||||
except ValueError:
|
||||
logger.warning("Waitlist not found for update: %s", waitlist_id)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=404,
|
||||
content={"detail": "Waitlist not found"},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("Error updating waitlist: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while updating the waitlist"},
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/{waitlist_id}",
|
||||
summary="Delete Waitlist",
|
||||
)
|
||||
async def delete_waitlist(
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
|
||||
):
|
||||
"""
|
||||
Soft delete a waitlist (admin only).
|
||||
|
||||
Args:
|
||||
waitlist_id: ID of the waitlist to delete
|
||||
|
||||
Returns:
|
||||
Success message
|
||||
"""
|
||||
try:
|
||||
deleted = await store_db.delete_waitlist_admin(waitlist_id)
|
||||
if deleted:
|
||||
return {"message": "Waitlist deleted successfully"}
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=404,
|
||||
content={"detail": "Waitlist not found"},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("Error deleting waitlist: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while deleting the waitlist"},
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{waitlist_id}/signups",
|
||||
summary="Get Waitlist Signups",
|
||||
response_model=store_model.WaitlistSignupListResponse,
|
||||
)
|
||||
async def get_waitlist_signups(
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
|
||||
):
|
||||
"""
|
||||
Get all signups for a waitlist (admin only).
|
||||
|
||||
Args:
|
||||
waitlist_id: ID of the waitlist
|
||||
|
||||
Returns:
|
||||
WaitlistSignupListResponse with all signups
|
||||
"""
|
||||
try:
|
||||
return await store_db.get_waitlist_signups_admin(waitlist_id)
|
||||
except ValueError:
|
||||
logger.warning("Waitlist not found for signups: %s", waitlist_id)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=404,
|
||||
content={"detail": "Waitlist not found"},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("Error fetching waitlist signups: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while fetching waitlist signups"},
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/{waitlist_id}/link",
|
||||
summary="Link Waitlist to Store Listing",
|
||||
response_model=store_model.WaitlistAdminResponse,
|
||||
)
|
||||
async def link_waitlist_to_listing(
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
|
||||
store_listing_id: str = fastapi.Body(
|
||||
..., embed=True, description="The ID of the store listing"
|
||||
),
|
||||
):
|
||||
"""
|
||||
Link a waitlist to a store listing (admin only).
|
||||
|
||||
When the linked store listing is approved/published, waitlist users
|
||||
will be automatically notified.
|
||||
|
||||
Args:
|
||||
waitlist_id: ID of the waitlist
|
||||
store_listing_id: ID of the store listing to link
|
||||
|
||||
Returns:
|
||||
WaitlistAdminResponse with updated waitlist details
|
||||
"""
|
||||
try:
|
||||
return await store_db.link_waitlist_to_listing_admin(
|
||||
waitlist_id, store_listing_id
|
||||
)
|
||||
except ValueError:
|
||||
logger.warning(
|
||||
"Link failed - waitlist or listing not found: %s, %s",
|
||||
waitlist_id,
|
||||
store_listing_id,
|
||||
)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=404,
|
||||
content={"detail": "Waitlist or store listing not found"},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception("Error linking waitlist to listing: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while linking the waitlist"},
|
||||
)
|
||||
@@ -23,7 +23,6 @@ from backend.data.notifications import (
|
||||
AgentApprovalData,
|
||||
AgentRejectionData,
|
||||
NotificationEventModel,
|
||||
WaitlistLaunchData,
|
||||
)
|
||||
from backend.notifications.notifications import queue_notification_async
|
||||
from backend.util.exceptions import DatabaseError
|
||||
@@ -1743,29 +1742,6 @@ async def review_store_submission(
|
||||
# Don't fail the review process if email sending fails
|
||||
pass
|
||||
|
||||
# Notify waitlist users if this is an approval and has a linked waitlist
|
||||
if is_approved and submission.StoreListing:
|
||||
try:
|
||||
frontend_base_url = (
|
||||
settings.config.frontend_base_url
|
||||
or settings.config.platform_base_url
|
||||
)
|
||||
store_agent = (
|
||||
await prisma.models.StoreAgent.prisma().find_first_or_raise(
|
||||
where={"storeListingVersionId": submission.id}
|
||||
)
|
||||
)
|
||||
creator_username = store_agent.creator_username or "unknown"
|
||||
store_url = f"{frontend_base_url}/marketplace/agent/{creator_username}/{store_agent.slug}"
|
||||
await notify_waitlist_users_on_launch(
|
||||
store_listing_id=submission.StoreListing.id,
|
||||
agent_name=submission.name,
|
||||
store_url=store_url,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to notify waitlist users on agent approval: {e}")
|
||||
# Don't fail the approval process
|
||||
|
||||
# Convert to Pydantic model for consistency
|
||||
return store_model.StoreSubmission(
|
||||
listing_id=(submission.StoreListing.id if submission.StoreListing else ""),
|
||||
@@ -2013,507 +1989,3 @@ async def get_agent_as_admin(
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
def _waitlist_to_store_entry(
|
||||
waitlist: prisma.models.WaitlistEntry,
|
||||
) -> store_model.StoreWaitlistEntry:
|
||||
"""Convert a WaitlistEntry to StoreWaitlistEntry for public display."""
|
||||
return store_model.StoreWaitlistEntry(
|
||||
waitlistId=waitlist.id,
|
||||
slug=waitlist.slug,
|
||||
name=waitlist.name,
|
||||
subHeading=waitlist.subHeading,
|
||||
videoUrl=waitlist.videoUrl,
|
||||
agentOutputDemoUrl=waitlist.agentOutputDemoUrl,
|
||||
imageUrls=waitlist.imageUrls or [],
|
||||
description=waitlist.description,
|
||||
categories=waitlist.categories,
|
||||
)
|
||||
|
||||
|
||||
async def get_waitlist() -> list[store_model.StoreWaitlistEntry]:
|
||||
"""Get all active waitlists for public display."""
|
||||
try:
|
||||
waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
|
||||
where=prisma.types.WaitlistEntryWhereInput(isDeleted=False),
|
||||
)
|
||||
|
||||
# Filter out closed/done waitlists and sort by votes (descending)
|
||||
excluded_statuses = {
|
||||
prisma.enums.WaitlistExternalStatus.CANCELED,
|
||||
prisma.enums.WaitlistExternalStatus.DONE,
|
||||
}
|
||||
active_waitlists = [w for w in waitlists if w.status not in excluded_statuses]
|
||||
sorted_list = sorted(active_waitlists, key=lambda x: x.votes, reverse=True)
|
||||
|
||||
return [_waitlist_to_store_entry(w) for w in sorted_list]
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching waitlists: {e}")
|
||||
raise DatabaseError("Failed to fetch waitlists") from e
|
||||
|
||||
|
||||
async def get_user_waitlist_memberships(user_id: str) -> list[str]:
|
||||
"""Get all waitlist IDs that a user has joined."""
|
||||
try:
|
||||
user = await prisma.models.User.prisma().find_unique(
|
||||
where={"id": user_id},
|
||||
include={"joinedWaitlists": True},
|
||||
)
|
||||
if not user or not user.joinedWaitlists:
|
||||
return []
|
||||
return [w.id for w in user.joinedWaitlists]
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching user waitlist memberships: {e}")
|
||||
raise DatabaseError("Failed to fetch waitlist memberships") from e
|
||||
|
||||
|
||||
async def add_user_to_waitlist(
|
||||
waitlist_id: str, user_id: str | None, email: str | None
|
||||
) -> store_model.StoreWaitlistEntry:
|
||||
"""
|
||||
Add a user to a waitlist.
|
||||
|
||||
For logged-in users: connects via joinedUsers relation
|
||||
For anonymous users: adds email to unaffiliatedEmailUsers array
|
||||
"""
|
||||
logger.debug(f"Adding user {user_id or email} to waitlist {waitlist_id}")
|
||||
|
||||
if not user_id and not email:
|
||||
raise ValueError("Either user_id or email must be provided")
|
||||
|
||||
try:
|
||||
# Find the waitlist
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
|
||||
where={"id": waitlist_id},
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlist:
|
||||
raise ValueError(f"Waitlist {waitlist_id} not found")
|
||||
|
||||
if waitlist.isDeleted:
|
||||
raise ValueError(f"Waitlist {waitlist_id} is no longer available")
|
||||
|
||||
if waitlist.status in [
|
||||
prisma.enums.WaitlistExternalStatus.CANCELED,
|
||||
prisma.enums.WaitlistExternalStatus.DONE,
|
||||
]:
|
||||
raise ValueError(f"Waitlist {waitlist_id} is closed")
|
||||
|
||||
if user_id:
|
||||
# Check if user already joined
|
||||
joined_user_ids = [u.id for u in (waitlist.joinedUsers or [])]
|
||||
if user_id in joined_user_ids:
|
||||
# Already joined - return waitlist info
|
||||
logger.debug(f"User {user_id} already joined waitlist {waitlist_id}")
|
||||
else:
|
||||
# Connect user to waitlist
|
||||
await prisma.models.WaitlistEntry.prisma().update(
|
||||
where={"id": waitlist_id},
|
||||
data={"joinedUsers": {"connect": [{"id": user_id}]}},
|
||||
)
|
||||
logger.info(f"User {user_id} joined waitlist {waitlist_id}")
|
||||
|
||||
# If user was previously in email list, remove them
|
||||
# Use transaction to prevent race conditions
|
||||
if email:
|
||||
async with transaction() as tx:
|
||||
current_waitlist = await tx.waitlistentry.find_unique(
|
||||
where={"id": waitlist_id}
|
||||
)
|
||||
if current_waitlist and email in (
|
||||
current_waitlist.unaffiliatedEmailUsers or []
|
||||
):
|
||||
updated_emails: list[str] = [
|
||||
e
|
||||
for e in (current_waitlist.unaffiliatedEmailUsers or [])
|
||||
if e != email
|
||||
]
|
||||
await tx.waitlistentry.update(
|
||||
where={"id": waitlist_id},
|
||||
data={"unaffiliatedEmailUsers": updated_emails},
|
||||
)
|
||||
elif email:
|
||||
# Add email to unaffiliated list if not already present
|
||||
# Use transaction to prevent race conditions with concurrent signups
|
||||
async with transaction() as tx:
|
||||
# Re-fetch within transaction to get latest state
|
||||
current_waitlist = await tx.waitlistentry.find_unique(
|
||||
where={"id": waitlist_id}
|
||||
)
|
||||
if current_waitlist:
|
||||
current_emails: list[str] = list(
|
||||
current_waitlist.unaffiliatedEmailUsers or []
|
||||
)
|
||||
if email not in current_emails:
|
||||
current_emails.append(email)
|
||||
await tx.waitlistentry.update(
|
||||
where={"id": waitlist_id},
|
||||
data={"unaffiliatedEmailUsers": current_emails},
|
||||
)
|
||||
logger.info(f"Email {email} added to waitlist {waitlist_id}")
|
||||
else:
|
||||
logger.debug(f"Email {email} already on waitlist {waitlist_id}")
|
||||
|
||||
# Re-fetch to return updated data
|
||||
updated_waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
|
||||
where={"id": waitlist_id}
|
||||
)
|
||||
return _waitlist_to_store_entry(updated_waitlist or waitlist)
|
||||
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding user to waitlist: {e}")
|
||||
raise DatabaseError("Failed to add user to waitlist") from e
|
||||
|
||||
|
||||
# ============== Admin Waitlist Functions ==============
|
||||
|
||||
|
||||
def _waitlist_to_admin_response(
|
||||
waitlist: prisma.models.WaitlistEntry,
|
||||
) -> store_model.WaitlistAdminResponse:
|
||||
"""Convert a WaitlistEntry to WaitlistAdminResponse."""
|
||||
joined_count = len(waitlist.joinedUsers) if waitlist.joinedUsers else 0
|
||||
email_count = (
|
||||
len(waitlist.unaffiliatedEmailUsers) if waitlist.unaffiliatedEmailUsers else 0
|
||||
)
|
||||
|
||||
return store_model.WaitlistAdminResponse(
|
||||
id=waitlist.id,
|
||||
createdAt=waitlist.createdAt.isoformat() if waitlist.createdAt else "",
|
||||
updatedAt=waitlist.updatedAt.isoformat() if waitlist.updatedAt else "",
|
||||
slug=waitlist.slug,
|
||||
name=waitlist.name,
|
||||
subHeading=waitlist.subHeading,
|
||||
description=waitlist.description,
|
||||
categories=waitlist.categories,
|
||||
imageUrls=waitlist.imageUrls or [],
|
||||
videoUrl=waitlist.videoUrl,
|
||||
agentOutputDemoUrl=waitlist.agentOutputDemoUrl,
|
||||
status=waitlist.status or prisma.enums.WaitlistExternalStatus.NOT_STARTED,
|
||||
votes=waitlist.votes,
|
||||
signupCount=joined_count + email_count,
|
||||
storeListingId=waitlist.storeListingId,
|
||||
owningUserId=waitlist.owningUserId,
|
||||
)
|
||||
|
||||
|
||||
async def create_waitlist_admin(
|
||||
admin_user_id: str,
|
||||
data: store_model.WaitlistCreateRequest,
|
||||
) -> store_model.WaitlistAdminResponse:
|
||||
"""Create a new waitlist (admin only)."""
|
||||
logger.info(f"Admin {admin_user_id} creating waitlist: {data.name}")
|
||||
|
||||
try:
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().create(
|
||||
data=prisma.types.WaitlistEntryCreateInput(
|
||||
name=data.name,
|
||||
slug=data.slug,
|
||||
subHeading=data.subHeading,
|
||||
description=data.description,
|
||||
categories=data.categories,
|
||||
imageUrls=data.imageUrls,
|
||||
videoUrl=data.videoUrl,
|
||||
agentOutputDemoUrl=data.agentOutputDemoUrl,
|
||||
owningUserId=admin_user_id,
|
||||
status=prisma.enums.WaitlistExternalStatus.NOT_STARTED,
|
||||
),
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
return _waitlist_to_admin_response(waitlist)
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating waitlist: {e}")
|
||||
raise DatabaseError("Failed to create waitlist") from e
|
||||
|
||||
|
||||
async def get_waitlists_admin() -> store_model.WaitlistAdminListResponse:
|
||||
"""Get all waitlists with admin details."""
|
||||
try:
|
||||
waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
|
||||
where=prisma.types.WaitlistEntryWhereInput(isDeleted=False),
|
||||
include={"joinedUsers": True},
|
||||
order={"createdAt": "desc"},
|
||||
)
|
||||
|
||||
return store_model.WaitlistAdminListResponse(
|
||||
waitlists=[_waitlist_to_admin_response(w) for w in waitlists],
|
||||
totalCount=len(waitlists),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching waitlists for admin: {e}")
|
||||
raise DatabaseError("Failed to fetch waitlists") from e
|
||||
|
||||
|
||||
async def get_waitlist_admin(
|
||||
waitlist_id: str,
|
||||
) -> store_model.WaitlistAdminResponse:
|
||||
"""Get a single waitlist with admin details."""
|
||||
try:
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
|
||||
where={"id": waitlist_id},
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlist:
|
||||
raise ValueError(f"Waitlist {waitlist_id} not found")
|
||||
|
||||
if waitlist.isDeleted:
|
||||
raise ValueError(f"Waitlist {waitlist_id} has been deleted")
|
||||
|
||||
return _waitlist_to_admin_response(waitlist)
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching waitlist {waitlist_id}: {e}")
|
||||
raise DatabaseError("Failed to fetch waitlist") from e
|
||||
|
||||
|
||||
async def update_waitlist_admin(
|
||||
waitlist_id: str,
|
||||
data: store_model.WaitlistUpdateRequest,
|
||||
) -> store_model.WaitlistAdminResponse:
|
||||
"""Update a waitlist (admin only)."""
|
||||
logger.info(f"Updating waitlist {waitlist_id}")
|
||||
|
||||
try:
|
||||
# Build update data from explicitly provided fields
|
||||
# Use model_fields_set to allow clearing fields by setting them to None
|
||||
field_mappings = {
|
||||
"name": data.name,
|
||||
"slug": data.slug,
|
||||
"subHeading": data.subHeading,
|
||||
"description": data.description,
|
||||
"categories": data.categories,
|
||||
"imageUrls": data.imageUrls,
|
||||
"videoUrl": data.videoUrl,
|
||||
"agentOutputDemoUrl": data.agentOutputDemoUrl,
|
||||
"storeListingId": data.storeListingId,
|
||||
}
|
||||
update_data: dict[str, typing.Any] = {
|
||||
k: v for k, v in field_mappings.items() if k in data.model_fields_set
|
||||
}
|
||||
|
||||
# Handle status separately due to enum conversion
|
||||
if "status" in data.model_fields_set and data.status is not None:
|
||||
update_data["status"] = prisma.enums.WaitlistExternalStatus(data.status)
|
||||
|
||||
if not update_data:
|
||||
# No updates, just return current data
|
||||
return await get_waitlist_admin(waitlist_id)
|
||||
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().update(
|
||||
where={"id": waitlist_id},
|
||||
data=prisma.types.WaitlistEntryUpdateInput(**update_data),
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlist:
|
||||
raise ValueError(f"Waitlist {waitlist_id} not found")
|
||||
|
||||
return _waitlist_to_admin_response(waitlist)
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating waitlist {waitlist_id}: {e}")
|
||||
raise DatabaseError("Failed to update waitlist") from e
|
||||
|
||||
|
||||
async def delete_waitlist_admin(waitlist_id: str) -> bool:
|
||||
"""Soft delete a waitlist (admin only)."""
|
||||
logger.info(f"Soft deleting waitlist {waitlist_id}")
|
||||
|
||||
try:
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().update(
|
||||
where={"id": waitlist_id},
|
||||
data={"isDeleted": True},
|
||||
)
|
||||
|
||||
return waitlist is not None
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting waitlist {waitlist_id}: {e}")
|
||||
raise DatabaseError("Failed to delete waitlist") from e
|
||||
|
||||
|
||||
async def get_waitlist_signups_admin(
|
||||
waitlist_id: str,
|
||||
) -> store_model.WaitlistSignupListResponse:
|
||||
"""Get all signups for a waitlist (admin only)."""
|
||||
try:
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
|
||||
where={"id": waitlist_id},
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlist:
|
||||
raise ValueError(f"Waitlist {waitlist_id} not found")
|
||||
|
||||
signups: list[store_model.WaitlistSignup] = []
|
||||
|
||||
# Add user signups
|
||||
for user in waitlist.joinedUsers or []:
|
||||
signups.append(
|
||||
store_model.WaitlistSignup(
|
||||
type="user",
|
||||
userId=user.id,
|
||||
email=user.email,
|
||||
username=user.name,
|
||||
)
|
||||
)
|
||||
|
||||
# Add email signups
|
||||
for email in waitlist.unaffiliatedEmailUsers or []:
|
||||
signups.append(
|
||||
store_model.WaitlistSignup(
|
||||
type="email",
|
||||
email=email,
|
||||
)
|
||||
)
|
||||
|
||||
return store_model.WaitlistSignupListResponse(
|
||||
waitlistId=waitlist_id,
|
||||
signups=signups,
|
||||
totalCount=len(signups),
|
||||
)
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching signups for waitlist {waitlist_id}: {e}")
|
||||
raise DatabaseError("Failed to fetch waitlist signups") from e
|
||||
|
||||
|
||||
async def link_waitlist_to_listing_admin(
|
||||
waitlist_id: str,
|
||||
store_listing_id: str,
|
||||
) -> store_model.WaitlistAdminResponse:
|
||||
"""Link a waitlist to a store listing (admin only)."""
|
||||
logger.info(f"Linking waitlist {waitlist_id} to listing {store_listing_id}")
|
||||
|
||||
try:
|
||||
# Verify the store listing exists
|
||||
listing = await prisma.models.StoreListing.prisma().find_unique(
|
||||
where={"id": store_listing_id}
|
||||
)
|
||||
|
||||
if not listing:
|
||||
raise ValueError(f"Store listing {store_listing_id} not found")
|
||||
|
||||
waitlist = await prisma.models.WaitlistEntry.prisma().update(
|
||||
where={"id": waitlist_id},
|
||||
data={"StoreListing": {"connect": {"id": store_listing_id}}},
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlist:
|
||||
raise ValueError(f"Waitlist {waitlist_id} not found")
|
||||
|
||||
return _waitlist_to_admin_response(waitlist)
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error linking waitlist to listing: {e}")
|
||||
raise DatabaseError("Failed to link waitlist to listing") from e
|
||||
|
||||
|
||||
async def notify_waitlist_users_on_launch(
|
||||
store_listing_id: str,
|
||||
agent_name: str,
|
||||
store_url: str,
|
||||
) -> int:
|
||||
"""
|
||||
Notify all users on waitlists linked to a store listing when the agent is launched.
|
||||
|
||||
Args:
|
||||
store_listing_id: The ID of the store listing that was approved
|
||||
agent_name: The name of the approved agent
|
||||
store_url: The URL to the agent's store page
|
||||
|
||||
Returns:
|
||||
The number of notifications sent
|
||||
"""
|
||||
logger.info(f"Notifying waitlist users for store listing {store_listing_id}")
|
||||
|
||||
try:
|
||||
# Find all waitlists linked to this store listing
|
||||
waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
|
||||
where={
|
||||
"storeListingId": store_listing_id,
|
||||
"isDeleted": False,
|
||||
},
|
||||
include={"joinedUsers": True},
|
||||
)
|
||||
|
||||
if not waitlists:
|
||||
logger.info(f"No waitlists found for store listing {store_listing_id}")
|
||||
return 0
|
||||
|
||||
notification_count = 0
|
||||
launched_at = datetime.now(tz=timezone.utc)
|
||||
|
||||
for waitlist in waitlists:
|
||||
# Track notification results for this waitlist
|
||||
users_to_notify = waitlist.joinedUsers or []
|
||||
failed_user_ids: list[str] = []
|
||||
|
||||
# Notify registered users
|
||||
for user in users_to_notify:
|
||||
try:
|
||||
notification_data = WaitlistLaunchData(
|
||||
agent_name=agent_name,
|
||||
waitlist_name=waitlist.name,
|
||||
store_url=store_url,
|
||||
launched_at=launched_at,
|
||||
)
|
||||
|
||||
notification_event = NotificationEventModel[WaitlistLaunchData](
|
||||
user_id=user.id,
|
||||
type=prisma.enums.NotificationType.WAITLIST_LAUNCH,
|
||||
data=notification_data,
|
||||
)
|
||||
|
||||
await queue_notification_async(notification_event)
|
||||
notification_count += 1
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to send waitlist launch notification to user {user.id}: {e}"
|
||||
)
|
||||
failed_user_ids.append(user.id)
|
||||
|
||||
# Note: For unaffiliated email users, you would need to send emails directly
|
||||
# since they don't have user IDs for the notification system.
|
||||
# This could be done via a separate email service.
|
||||
# For now, we log these for potential manual follow-up or future implementation.
|
||||
if waitlist.unaffiliatedEmailUsers:
|
||||
logger.info(
|
||||
f"Waitlist {waitlist.id} has {len(waitlist.unaffiliatedEmailUsers)} "
|
||||
f"unaffiliated email users that need email notifications"
|
||||
)
|
||||
|
||||
# Only mark waitlist as DONE if all registered user notifications succeeded
|
||||
if not failed_user_ids:
|
||||
await prisma.models.WaitlistEntry.prisma().update(
|
||||
where={"id": waitlist.id},
|
||||
data={"status": prisma.enums.WaitlistExternalStatus.DONE},
|
||||
)
|
||||
logger.info(f"Updated waitlist {waitlist.id} status to DONE")
|
||||
else:
|
||||
logger.warning(
|
||||
f"Waitlist {waitlist.id} not marked as DONE due to "
|
||||
f"{len(failed_user_ids)} failed notifications"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Sent {notification_count} waitlist launch notifications for store listing {store_listing_id}"
|
||||
)
|
||||
return notification_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error notifying waitlist users for store listing {store_listing_id}: {e}"
|
||||
)
|
||||
# Don't raise - we don't want to fail the approval process
|
||||
return 0
|
||||
|
||||
@@ -221,99 +221,3 @@ class ReviewSubmissionRequest(pydantic.BaseModel):
|
||||
is_approved: bool
|
||||
comments: str # External comments visible to creator
|
||||
internal_comments: str | None = None # Private admin notes
|
||||
|
||||
|
||||
class StoreWaitlistEntry(pydantic.BaseModel):
|
||||
"""Public waitlist entry - no PII fields exposed."""
|
||||
|
||||
waitlistId: str
|
||||
slug: str
|
||||
|
||||
# Content fields
|
||||
name: str
|
||||
subHeading: str
|
||||
videoUrl: str | None = None
|
||||
agentOutputDemoUrl: str | None = None
|
||||
imageUrls: list[str]
|
||||
description: str
|
||||
categories: list[str]
|
||||
|
||||
|
||||
class StoreWaitlistsAllResponse(pydantic.BaseModel):
|
||||
listings: list[StoreWaitlistEntry]
|
||||
|
||||
|
||||
# Admin Waitlist Models
|
||||
|
||||
|
||||
class WaitlistCreateRequest(pydantic.BaseModel):
|
||||
"""Request model for creating a new waitlist."""
|
||||
|
||||
name: str
|
||||
slug: str
|
||||
subHeading: str
|
||||
description: str
|
||||
categories: list[str] = []
|
||||
imageUrls: list[str] = []
|
||||
videoUrl: str | None = None
|
||||
agentOutputDemoUrl: str | None = None
|
||||
|
||||
|
||||
class WaitlistUpdateRequest(pydantic.BaseModel):
|
||||
"""Request model for updating a waitlist."""
|
||||
|
||||
name: str | None = None
|
||||
slug: str | None = None
|
||||
subHeading: str | None = None
|
||||
description: str | None = None
|
||||
categories: list[str] | None = None
|
||||
imageUrls: list[str] | None = None
|
||||
videoUrl: str | None = None
|
||||
agentOutputDemoUrl: str | None = None
|
||||
status: str | None = None # WaitlistExternalStatus enum value
|
||||
storeListingId: str | None = None # Link to a store listing
|
||||
|
||||
|
||||
class WaitlistAdminResponse(pydantic.BaseModel):
|
||||
"""Admin response model with full waitlist details including internal data."""
|
||||
|
||||
id: str
|
||||
createdAt: str
|
||||
updatedAt: str
|
||||
slug: str
|
||||
name: str
|
||||
subHeading: str
|
||||
description: str
|
||||
categories: list[str]
|
||||
imageUrls: list[str]
|
||||
videoUrl: str | None = None
|
||||
agentOutputDemoUrl: str | None = None
|
||||
status: prisma.enums.WaitlistExternalStatus
|
||||
votes: int
|
||||
signupCount: int # Total count of joinedUsers + unaffiliatedEmailUsers
|
||||
storeListingId: str | None = None
|
||||
owningUserId: str
|
||||
|
||||
|
||||
class WaitlistSignup(pydantic.BaseModel):
|
||||
"""Individual signup entry for a waitlist."""
|
||||
|
||||
type: str # "user" or "email"
|
||||
userId: str | None = None
|
||||
email: str | None = None
|
||||
username: str | None = None # For user signups
|
||||
|
||||
|
||||
class WaitlistSignupListResponse(pydantic.BaseModel):
|
||||
"""Response model for listing waitlist signups."""
|
||||
|
||||
waitlistId: str
|
||||
signups: list[WaitlistSignup]
|
||||
totalCount: int
|
||||
|
||||
|
||||
class WaitlistAdminListResponse(pydantic.BaseModel):
|
||||
"""Response model for listing all waitlists (admin view)."""
|
||||
|
||||
waitlists: list[WaitlistAdminResponse]
|
||||
totalCount: int
|
||||
|
||||
@@ -7,7 +7,6 @@ from typing import Literal
|
||||
import autogpt_libs.auth
|
||||
import fastapi
|
||||
import fastapi.responses
|
||||
from autogpt_libs.auth.dependencies import get_optional_user_id
|
||||
|
||||
import backend.data.graph
|
||||
import backend.util.json
|
||||
@@ -79,63 +78,6 @@ async def update_or_create_profile(
|
||||
return updated_profile
|
||||
|
||||
|
||||
##############################################
|
||||
############## Waitlist Endpoints ############
|
||||
##############################################
|
||||
@router.get(
|
||||
"/waitlist",
|
||||
summary="Get the agent waitlist",
|
||||
tags=["store", "public"],
|
||||
response_model=store_model.StoreWaitlistsAllResponse,
|
||||
)
|
||||
async def get_waitlist():
|
||||
"""
|
||||
Get all active waitlists for public display.
|
||||
"""
|
||||
waitlists = await store_db.get_waitlist()
|
||||
return store_model.StoreWaitlistsAllResponse(listings=waitlists)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/waitlist/my-memberships",
|
||||
summary="Get waitlist IDs the current user has joined",
|
||||
tags=["store", "private"],
|
||||
)
|
||||
async def get_my_waitlist_memberships(
|
||||
user_id: str = fastapi.Security(autogpt_libs.auth.get_user_id),
|
||||
) -> list[str]:
|
||||
"""Returns list of waitlist IDs the authenticated user has joined."""
|
||||
return await store_db.get_user_waitlist_memberships(user_id)
|
||||
|
||||
|
||||
@router.post(
|
||||
path="/waitlist/{waitlist_id}/join",
|
||||
summary="Add self to the agent waitlist",
|
||||
tags=["store", "public"],
|
||||
response_model=store_model.StoreWaitlistEntry,
|
||||
)
|
||||
async def add_self_to_waitlist(
|
||||
user_id: str | None = fastapi.Security(get_optional_user_id),
|
||||
waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist to join"),
|
||||
email: str | None = fastapi.Body(
|
||||
default=None, embed=True, description="Email address for unauthenticated users"
|
||||
),
|
||||
):
|
||||
"""
|
||||
Add the current user to the agent waitlist.
|
||||
"""
|
||||
if not user_id and not email:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=400,
|
||||
detail="Either user authentication or email address is required",
|
||||
)
|
||||
|
||||
waitlist_entry = await store_db.add_user_to_waitlist(
|
||||
waitlist_id=waitlist_id, user_id=user_id, email=email
|
||||
)
|
||||
return waitlist_entry
|
||||
|
||||
|
||||
##############################################
|
||||
############### Agent Endpoints ##############
|
||||
##############################################
|
||||
|
||||
@@ -19,7 +19,6 @@ from prisma.errors import PrismaError
|
||||
import backend.api.features.admin.credit_admin_routes
|
||||
import backend.api.features.admin.execution_analytics_routes
|
||||
import backend.api.features.admin.store_admin_routes
|
||||
import backend.api.features.admin.waitlist_admin_routes
|
||||
import backend.api.features.builder
|
||||
import backend.api.features.builder.routes
|
||||
import backend.api.features.chat.routes as chat_routes
|
||||
@@ -284,11 +283,6 @@ app.include_router(
|
||||
tags=["v2", "admin"],
|
||||
prefix="/api/store",
|
||||
)
|
||||
app.include_router(
|
||||
backend.api.features.admin.waitlist_admin_routes.router,
|
||||
tags=["v2", "admin"],
|
||||
prefix="/api/store",
|
||||
)
|
||||
app.include_router(
|
||||
backend.api.features.admin.credit_admin_routes.router,
|
||||
tags=["v2", "admin"],
|
||||
|
||||
@@ -81,7 +81,7 @@ class StoreValueBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
|
||||
description="This block forwards an input value as output, allowing reuse without change.",
|
||||
description="A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=StoreValueBlock.Input,
|
||||
output_schema=StoreValueBlock.Output,
|
||||
@@ -111,7 +111,7 @@ class PrintToConsoleBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
|
||||
description="Print the given text to the console, this is used for a debugging purpose.",
|
||||
description="A debugging block that outputs text to the console for monitoring and troubleshooting workflow execution.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=PrintToConsoleBlock.Input,
|
||||
output_schema=PrintToConsoleBlock.Output,
|
||||
@@ -137,7 +137,7 @@ class NoteBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
|
||||
description="This block is used to display a sticky note with the given text.",
|
||||
description="A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=NoteBlock.Input,
|
||||
output_schema=NoteBlock.Output,
|
||||
|
||||
@@ -159,7 +159,7 @@ class FindInDictionaryBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
|
||||
description="Lookup the given key in the input dictionary/object/list and return the value.",
|
||||
description="A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value.",
|
||||
input_schema=FindInDictionaryBlock.Input,
|
||||
output_schema=FindInDictionaryBlock.Output,
|
||||
test_input=[
|
||||
|
||||
@@ -51,7 +51,7 @@ class GithubCommentBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a8db4d8d-db1c-4a25-a1b0-416a8c33602b",
|
||||
description="This block posts a comment on a specified GitHub issue or pull request.",
|
||||
description="A block that posts comments on GitHub issues or pull requests using the GitHub API.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubCommentBlock.Input,
|
||||
output_schema=GithubCommentBlock.Output,
|
||||
@@ -151,7 +151,7 @@ class GithubUpdateCommentBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
|
||||
description="This block updates a comment on a specified GitHub issue or pull request.",
|
||||
description="A block that updates an existing comment on a GitHub issue or pull request.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubUpdateCommentBlock.Input,
|
||||
output_schema=GithubUpdateCommentBlock.Output,
|
||||
@@ -249,7 +249,7 @@ class GithubListCommentsBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
|
||||
description="This block lists all comments for a specified GitHub issue or pull request.",
|
||||
description="A block that retrieves all comments from a GitHub issue or pull request, including comment metadata and content.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubListCommentsBlock.Input,
|
||||
output_schema=GithubListCommentsBlock.Output,
|
||||
@@ -363,7 +363,7 @@ class GithubMakeIssueBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="691dad47-f494-44c3-a1e8-05b7990f2dab",
|
||||
description="This block creates a new issue on a specified GitHub repository.",
|
||||
description="A block that creates new issues on GitHub repositories with a title and body content.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubMakeIssueBlock.Input,
|
||||
output_schema=GithubMakeIssueBlock.Output,
|
||||
@@ -433,7 +433,7 @@ class GithubReadIssueBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="6443c75d-032a-4772-9c08-230c707c8acc",
|
||||
description="This block reads the body, title, and user of a specified GitHub issue.",
|
||||
description="A block that retrieves information about a specific GitHub issue, including its title, body content, and creator.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubReadIssueBlock.Input,
|
||||
output_schema=GithubReadIssueBlock.Output,
|
||||
@@ -510,7 +510,7 @@ class GithubListIssuesBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c215bfd7-0e57-4573-8f8c-f7d4963dcd74",
|
||||
description="This block lists all issues for a specified GitHub repository.",
|
||||
description="A block that retrieves a list of issues from a GitHub repository with their titles and URLs.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubListIssuesBlock.Input,
|
||||
output_schema=GithubListIssuesBlock.Output,
|
||||
@@ -597,7 +597,7 @@ class GithubAddLabelBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="98bd6b77-9506-43d5-b669-6b9733c4b1f1",
|
||||
description="This block adds a label to a specified GitHub issue or pull request.",
|
||||
description="A block that adds a label to a GitHub issue or pull request for categorization and organization.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubAddLabelBlock.Input,
|
||||
output_schema=GithubAddLabelBlock.Output,
|
||||
@@ -657,7 +657,7 @@ class GithubRemoveLabelBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="78f050c5-3e3a-48c0-9e5b-ef1ceca5589c",
|
||||
description="This block removes a label from a specified GitHub issue or pull request.",
|
||||
description="A block that removes a label from a GitHub issue or pull request.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubRemoveLabelBlock.Input,
|
||||
output_schema=GithubRemoveLabelBlock.Output,
|
||||
@@ -720,7 +720,7 @@ class GithubAssignIssueBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="90507c72-b0ff-413a-886a-23bbbd66f542",
|
||||
description="This block assigns a user to a specified GitHub issue.",
|
||||
description="A block that assigns a GitHub user to an issue for task ownership and tracking.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubAssignIssueBlock.Input,
|
||||
output_schema=GithubAssignIssueBlock.Output,
|
||||
@@ -786,7 +786,7 @@ class GithubUnassignIssueBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="d154002a-38f4-46c2-962d-2488f2b05ece",
|
||||
description="This block unassigns a user from a specified GitHub issue.",
|
||||
description="A block that removes a user's assignment from a GitHub issue.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubUnassignIssueBlock.Input,
|
||||
output_schema=GithubUnassignIssueBlock.Output,
|
||||
|
||||
@@ -353,7 +353,7 @@ class GmailReadBlock(GmailBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
|
||||
description="This block reads emails from Gmail.",
|
||||
description="A block that retrieves and reads emails from a Gmail account based on search criteria, returning detailed message information including subject, sender, body, and attachments.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
|
||||
input_schema=GmailReadBlock.Input,
|
||||
@@ -743,7 +743,7 @@ class GmailListLabelsBlock(GmailBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
|
||||
description="This block lists all labels in Gmail.",
|
||||
description="A block that retrieves all labels (categories) from a Gmail account for organizing and categorizing emails.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=GmailListLabelsBlock.Input,
|
||||
output_schema=GmailListLabelsBlock.Output,
|
||||
@@ -807,7 +807,7 @@ class GmailAddLabelBlock(GmailBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="f884b2fb-04f4-4265-9658-14f433926ac9",
|
||||
description="This block adds a label to a Gmail message.",
|
||||
description="A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=GmailAddLabelBlock.Input,
|
||||
output_schema=GmailAddLabelBlock.Output,
|
||||
@@ -893,7 +893,7 @@ class GmailRemoveLabelBlock(GmailBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
|
||||
description="This block removes a label from a Gmail message.",
|
||||
description="A block that removes a label from a specific email message in a Gmail account.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=GmailRemoveLabelBlock.Input,
|
||||
output_schema=GmailRemoveLabelBlock.Output,
|
||||
@@ -961,7 +961,7 @@ class GmailGetThreadBlock(GmailBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="21a79166-9df7-4b5f-9f36-96f639d86112",
|
||||
description="Get a full Gmail thread by ID",
|
||||
description="A block that retrieves an entire Gmail thread (email conversation) by ID, returning all messages with decoded bodies for reading complete conversations.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=GmailGetThreadBlock.Input,
|
||||
output_schema=GmailGetThreadBlock.Output,
|
||||
|
||||
@@ -282,7 +282,7 @@ class GoogleSheetsReadBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="5724e902-3635-47e9-a108-aaa0263a4988",
|
||||
description="This block reads data from a Google Sheets spreadsheet.",
|
||||
description="A block that reads data from a Google Sheets spreadsheet using A1 notation range selection.",
|
||||
categories={BlockCategory.DATA},
|
||||
input_schema=GoogleSheetsReadBlock.Input,
|
||||
output_schema=GoogleSheetsReadBlock.Output,
|
||||
@@ -409,7 +409,7 @@ class GoogleSheetsWriteBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="d9291e87-301d-47a8-91fe-907fb55460e5",
|
||||
description="This block writes data to a Google Sheets spreadsheet.",
|
||||
description="A block that writes data to a Google Sheets spreadsheet at a specified A1 notation range.",
|
||||
categories={BlockCategory.DATA},
|
||||
input_schema=GoogleSheetsWriteBlock.Input,
|
||||
output_schema=GoogleSheetsWriteBlock.Output,
|
||||
|
||||
@@ -76,7 +76,7 @@ class AgentInputBlock(Block):
|
||||
super().__init__(
|
||||
**{
|
||||
"id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"description": "Base block for user inputs.",
|
||||
"description": "A block that accepts and processes user input values within a workflow, supporting various input types and validation.",
|
||||
"input_schema": AgentInputBlock.Input,
|
||||
"output_schema": AgentInputBlock.Output,
|
||||
"test_input": [
|
||||
@@ -168,7 +168,7 @@ class AgentOutputBlock(Block):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
description="Stores the output of the graph for users to see.",
|
||||
description="A block that records and formats workflow results for display to users, with optional Jinja2 template formatting support.",
|
||||
input_schema=AgentOutputBlock.Input,
|
||||
output_schema=AgentOutputBlock.Output,
|
||||
test_input=[
|
||||
|
||||
@@ -854,7 +854,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
|
||||
description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
|
||||
description="A block that generates structured JSON responses using a Large Language Model (LLM), with schema validation and format enforcement.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIStructuredResponseGeneratorBlock.Input,
|
||||
output_schema=AIStructuredResponseGeneratorBlock.Output,
|
||||
@@ -1265,7 +1265,7 @@ class AITextGeneratorBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
|
||||
description="A block that produces text responses using a Large Language Model (LLM) based on customizable prompts and system instructions.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AITextGeneratorBlock.Input,
|
||||
output_schema=AITextGeneratorBlock.Output,
|
||||
@@ -1361,7 +1361,7 @@ class AITextSummarizerBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a0a69be1-4528-491c-a85a-a4ab6873e3f0",
|
||||
description="Utilize a Large Language Model (LLM) to summarize a long text.",
|
||||
description="A block that summarizes long texts using a Large Language Model (LLM), with configurable focus topics and summary styles.",
|
||||
categories={BlockCategory.AI, BlockCategory.TEXT},
|
||||
input_schema=AITextSummarizerBlock.Input,
|
||||
output_schema=AITextSummarizerBlock.Output,
|
||||
@@ -1562,7 +1562,7 @@ class AIConversationBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="32a87eab-381e-4dd4-bdb8-4c47151be35a",
|
||||
description="Advanced LLM call that takes a list of messages and sends them to the language model.",
|
||||
description="A block that facilitates multi-turn conversations with a Large Language Model (LLM), maintaining context across message exchanges.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIConversationBlock.Input,
|
||||
output_schema=AIConversationBlock.Output,
|
||||
@@ -1682,7 +1682,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="9c0b0450-d199-458b-a731-072189dd6593",
|
||||
description="Generate a list of values based on the given prompt using a Large Language Model (LLM).",
|
||||
description="A block that creates lists of items based on prompts using a Large Language Model (LLM), with optional source data for context.",
|
||||
categories={BlockCategory.AI, BlockCategory.TEXT},
|
||||
input_schema=AIListGeneratorBlock.Input,
|
||||
output_schema=AIListGeneratorBlock.Output,
|
||||
|
||||
@@ -391,12 +391,8 @@ class SmartDecisionMakerBlock(Block):
|
||||
"""
|
||||
block = sink_node.block
|
||||
|
||||
# Use custom name from node metadata if set, otherwise fall back to block.name
|
||||
custom_name = sink_node.metadata.get("customized_name")
|
||||
tool_name = custom_name if custom_name else block.name
|
||||
|
||||
tool_function: dict[str, Any] = {
|
||||
"name": SmartDecisionMakerBlock.cleanup(tool_name),
|
||||
"name": SmartDecisionMakerBlock.cleanup(block.name),
|
||||
"description": block.description,
|
||||
}
|
||||
sink_block_input_schema = block.input_schema
|
||||
@@ -493,24 +489,14 @@ class SmartDecisionMakerBlock(Block):
|
||||
f"Sink graph metadata not found: {graph_id} {graph_version}"
|
||||
)
|
||||
|
||||
# Use custom name from node metadata if set, otherwise fall back to graph name
|
||||
custom_name = sink_node.metadata.get("customized_name")
|
||||
tool_name = custom_name if custom_name else sink_graph_meta.name
|
||||
|
||||
tool_function: dict[str, Any] = {
|
||||
"name": SmartDecisionMakerBlock.cleanup(tool_name),
|
||||
"name": SmartDecisionMakerBlock.cleanup(sink_graph_meta.name),
|
||||
"description": sink_graph_meta.description,
|
||||
}
|
||||
|
||||
properties = {}
|
||||
field_mapping = {}
|
||||
|
||||
for link in links:
|
||||
field_name = link.sink_name
|
||||
|
||||
clean_field_name = SmartDecisionMakerBlock.cleanup(field_name)
|
||||
field_mapping[clean_field_name] = field_name
|
||||
|
||||
sink_block_input_schema = sink_node.input_default["input_schema"]
|
||||
sink_block_properties = sink_block_input_schema.get("properties", {}).get(
|
||||
link.sink_name, {}
|
||||
@@ -520,7 +506,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
if "description" in sink_block_properties
|
||||
else f"The {link.sink_name} of the tool"
|
||||
)
|
||||
properties[clean_field_name] = {
|
||||
properties[link.sink_name] = {
|
||||
"type": "string",
|
||||
"description": description,
|
||||
"default": json.dumps(sink_block_properties.get("default", None)),
|
||||
@@ -533,7 +519,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
"strict": True,
|
||||
}
|
||||
|
||||
tool_function["_field_mapping"] = field_mapping
|
||||
# Store node info for later use in output processing
|
||||
tool_function["_sink_node_id"] = sink_node.id
|
||||
|
||||
return {"type": "function", "function": tool_function}
|
||||
@@ -1161,9 +1147,8 @@ class SmartDecisionMakerBlock(Block):
|
||||
original_field_name = field_mapping.get(clean_arg_name, clean_arg_name)
|
||||
arg_value = tool_args.get(clean_arg_name)
|
||||
|
||||
# Use original_field_name directly (not sanitized) to match link sink_name
|
||||
# The field_mapping already translates from LLM's cleaned names to original names
|
||||
emit_key = f"tools_^_{sink_node_id}_~_{original_field_name}"
|
||||
sanitized_arg_name = self.cleanup(original_field_name)
|
||||
emit_key = f"tools_^_{sink_node_id}_~_{sanitized_arg_name}"
|
||||
|
||||
logger.debug(
|
||||
"[SmartDecisionMakerBlock|geid:%s|neid:%s] emit %s",
|
||||
|
||||
@@ -1057,153 +1057,3 @@ async def test_smart_decision_maker_traditional_mode_default():
|
||||
) # Should yield individual tool parameters
|
||||
assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
|
||||
assert "conversations" in outputs
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_uses_customized_name_for_blocks():
|
||||
"""Test that SmartDecisionMakerBlock uses customized_name from node metadata for tool names."""
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from backend.blocks.basic import StoreValueBlock
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
from backend.data.graph import Link, Node
|
||||
|
||||
# Create a mock node with customized_name in metadata
|
||||
mock_node = MagicMock(spec=Node)
|
||||
mock_node.id = "test-node-id"
|
||||
mock_node.block_id = StoreValueBlock().id
|
||||
mock_node.metadata = {"customized_name": "My Custom Tool Name"}
|
||||
mock_node.block = StoreValueBlock()
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
mock_link.sink_name = "input"
|
||||
|
||||
# Call the function directly
|
||||
result = await SmartDecisionMakerBlock._create_block_function_signature(
|
||||
mock_node, [mock_link]
|
||||
)
|
||||
|
||||
# Verify the tool name uses the customized name (cleaned up)
|
||||
assert result["type"] == "function"
|
||||
assert result["function"]["name"] == "my_custom_tool_name" # Cleaned version
|
||||
assert result["function"]["_sink_node_id"] == "test-node-id"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_falls_back_to_block_name():
|
||||
"""Test that SmartDecisionMakerBlock falls back to block.name when no customized_name."""
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from backend.blocks.basic import StoreValueBlock
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
from backend.data.graph import Link, Node
|
||||
|
||||
# Create a mock node without customized_name
|
||||
mock_node = MagicMock(spec=Node)
|
||||
mock_node.id = "test-node-id"
|
||||
mock_node.block_id = StoreValueBlock().id
|
||||
mock_node.metadata = {} # No customized_name
|
||||
mock_node.block = StoreValueBlock()
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
mock_link.sink_name = "input"
|
||||
|
||||
# Call the function directly
|
||||
result = await SmartDecisionMakerBlock._create_block_function_signature(
|
||||
mock_node, [mock_link]
|
||||
)
|
||||
|
||||
# Verify the tool name uses the block's default name
|
||||
assert result["type"] == "function"
|
||||
assert result["function"]["name"] == "storevalueblock" # Default block name cleaned
|
||||
assert result["function"]["_sink_node_id"] == "test-node-id"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_uses_customized_name_for_agents():
|
||||
"""Test that SmartDecisionMakerBlock uses customized_name from metadata for agent nodes."""
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
from backend.data.graph import Link, Node
|
||||
|
||||
# Create a mock node with customized_name in metadata
|
||||
mock_node = MagicMock(spec=Node)
|
||||
mock_node.id = "test-agent-node-id"
|
||||
mock_node.metadata = {"customized_name": "My Custom Agent"}
|
||||
mock_node.input_default = {
|
||||
"graph_id": "test-graph-id",
|
||||
"graph_version": 1,
|
||||
"input_schema": {"properties": {"test_input": {"description": "Test input"}}},
|
||||
}
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
mock_link.sink_name = "test_input"
|
||||
|
||||
# Mock the database client
|
||||
mock_graph_meta = MagicMock()
|
||||
mock_graph_meta.name = "Original Agent Name"
|
||||
mock_graph_meta.description = "Agent description"
|
||||
|
||||
mock_db_client = AsyncMock()
|
||||
mock_db_client.get_graph_metadata.return_value = mock_graph_meta
|
||||
|
||||
with patch(
|
||||
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
|
||||
return_value=mock_db_client,
|
||||
):
|
||||
result = await SmartDecisionMakerBlock._create_agent_function_signature(
|
||||
mock_node, [mock_link]
|
||||
)
|
||||
|
||||
# Verify the tool name uses the customized name (cleaned up)
|
||||
assert result["type"] == "function"
|
||||
assert result["function"]["name"] == "my_custom_agent" # Cleaned version
|
||||
assert result["function"]["_sink_node_id"] == "test-agent-node-id"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_agent_falls_back_to_graph_name():
|
||||
"""Test that agent node falls back to graph name when no customized_name."""
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
from backend.data.graph import Link, Node
|
||||
|
||||
# Create a mock node without customized_name
|
||||
mock_node = MagicMock(spec=Node)
|
||||
mock_node.id = "test-agent-node-id"
|
||||
mock_node.metadata = {} # No customized_name
|
||||
mock_node.input_default = {
|
||||
"graph_id": "test-graph-id",
|
||||
"graph_version": 1,
|
||||
"input_schema": {"properties": {"test_input": {"description": "Test input"}}},
|
||||
}
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
mock_link.sink_name = "test_input"
|
||||
|
||||
# Mock the database client
|
||||
mock_graph_meta = MagicMock()
|
||||
mock_graph_meta.name = "Original Agent Name"
|
||||
mock_graph_meta.description = "Agent description"
|
||||
|
||||
mock_db_client = AsyncMock()
|
||||
mock_db_client.get_graph_metadata.return_value = mock_graph_meta
|
||||
|
||||
with patch(
|
||||
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
|
||||
return_value=mock_db_client,
|
||||
):
|
||||
result = await SmartDecisionMakerBlock._create_agent_function_signature(
|
||||
mock_node, [mock_link]
|
||||
)
|
||||
|
||||
# Verify the tool name uses the graph's default name
|
||||
assert result["type"] == "function"
|
||||
assert result["function"]["name"] == "original_agent_name" # Graph name cleaned
|
||||
assert result["function"]["_sink_node_id"] == "test-agent-node-id"
|
||||
|
||||
@@ -15,7 +15,6 @@ async def test_smart_decision_maker_handles_dynamic_dict_fields():
|
||||
mock_node.block = CreateDictionaryBlock()
|
||||
mock_node.block_id = CreateDictionaryBlock().id
|
||||
mock_node.input_default = {}
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Create mock links with dynamic dictionary fields
|
||||
mock_links = [
|
||||
@@ -78,7 +77,6 @@ async def test_smart_decision_maker_handles_dynamic_list_fields():
|
||||
mock_node.block = AddToListBlock()
|
||||
mock_node.block_id = AddToListBlock().id
|
||||
mock_node.input_default = {}
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Create mock links with dynamic list fields
|
||||
mock_links = [
|
||||
|
||||
@@ -44,7 +44,6 @@ async def test_create_block_function_signature_with_dict_fields():
|
||||
mock_node.block = CreateDictionaryBlock()
|
||||
mock_node.block_id = CreateDictionaryBlock().id
|
||||
mock_node.input_default = {}
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Create mock links with dynamic dictionary fields (source sanitized, sink original)
|
||||
mock_links = [
|
||||
@@ -107,7 +106,6 @@ async def test_create_block_function_signature_with_list_fields():
|
||||
mock_node.block = AddToListBlock()
|
||||
mock_node.block_id = AddToListBlock().id
|
||||
mock_node.input_default = {}
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Create mock links with dynamic list fields
|
||||
mock_links = [
|
||||
@@ -161,7 +159,6 @@ async def test_create_block_function_signature_with_object_fields():
|
||||
mock_node.block = MatchTextPatternBlock()
|
||||
mock_node.block_id = MatchTextPatternBlock().id
|
||||
mock_node.input_default = {}
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Create mock links with dynamic object fields
|
||||
mock_links = [
|
||||
@@ -211,13 +208,11 @@ async def test_create_tool_node_signatures():
|
||||
mock_dict_node.block = CreateDictionaryBlock()
|
||||
mock_dict_node.block_id = CreateDictionaryBlock().id
|
||||
mock_dict_node.input_default = {}
|
||||
mock_dict_node.metadata = {}
|
||||
|
||||
mock_list_node = Mock()
|
||||
mock_list_node.block = AddToListBlock()
|
||||
mock_list_node.block_id = AddToListBlock().id
|
||||
mock_list_node.input_default = {}
|
||||
mock_list_node.metadata = {}
|
||||
|
||||
# Mock links with dynamic fields
|
||||
dict_link1 = Mock(
|
||||
@@ -428,7 +423,6 @@ async def test_mixed_regular_and_dynamic_fields():
|
||||
mock_node.block.name = "TestBlock"
|
||||
mock_node.block.description = "A test block"
|
||||
mock_node.block.input_schema = Mock()
|
||||
mock_node.metadata = {}
|
||||
|
||||
# Mock the get_field_schema to return a proper schema for regular fields
|
||||
def get_field_schema(field_name):
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
from .blog import WordPressCreatePostBlock, WordPressGetAllPostsBlock
|
||||
from .blog import WordPressCreatePostBlock
|
||||
|
||||
__all__ = ["WordPressCreatePostBlock", "WordPressGetAllPostsBlock"]
|
||||
__all__ = ["WordPressCreatePostBlock"]
|
||||
|
||||
@@ -161,7 +161,7 @@ async def oauth_exchange_code_for_tokens(
|
||||
grant_type="authorization_code",
|
||||
).model_dump(exclude_none=True)
|
||||
|
||||
response = await Requests(raise_for_status=False).post(
|
||||
response = await Requests().post(
|
||||
f"{WORDPRESS_BASE_URL}oauth2/token",
|
||||
headers=headers,
|
||||
data=data,
|
||||
@@ -205,7 +205,7 @@ async def oauth_refresh_tokens(
|
||||
grant_type="refresh_token",
|
||||
).model_dump(exclude_none=True)
|
||||
|
||||
response = await Requests(raise_for_status=False).post(
|
||||
response = await Requests().post(
|
||||
f"{WORDPRESS_BASE_URL}oauth2/token",
|
||||
headers=headers,
|
||||
data=data,
|
||||
@@ -252,7 +252,7 @@ async def validate_token(
|
||||
"token": token,
|
||||
}
|
||||
|
||||
response = await Requests(raise_for_status=False).get(
|
||||
response = await Requests().get(
|
||||
f"{WORDPRESS_BASE_URL}oauth2/token-info",
|
||||
params=params,
|
||||
)
|
||||
@@ -296,7 +296,7 @@ async def make_api_request(
|
||||
|
||||
url = f"{WORDPRESS_BASE_URL.rstrip('/')}{endpoint}"
|
||||
|
||||
request_method = getattr(Requests(raise_for_status=False), method.lower())
|
||||
request_method = getattr(Requests(), method.lower())
|
||||
response = await request_method(
|
||||
url,
|
||||
headers=headers,
|
||||
@@ -476,7 +476,6 @@ async def create_post(
|
||||
data["tags"] = ",".join(str(t) for t in data["tags"])
|
||||
|
||||
# Make the API request
|
||||
site = normalize_site(site)
|
||||
endpoint = f"/rest/v1.1/sites/{site}/posts/new"
|
||||
|
||||
headers = {
|
||||
@@ -484,7 +483,7 @@ async def create_post(
|
||||
"Content-Type": "application/x-www-form-urlencoded",
|
||||
}
|
||||
|
||||
response = await Requests(raise_for_status=False).post(
|
||||
response = await Requests().post(
|
||||
f"{WORDPRESS_BASE_URL.rstrip('/')}{endpoint}",
|
||||
headers=headers,
|
||||
data=data,
|
||||
@@ -500,132 +499,3 @@ async def create_post(
|
||||
)
|
||||
error_message = error_data.get("message", response.text)
|
||||
raise ValueError(f"Failed to create post: {response.status} - {error_message}")
|
||||
|
||||
|
||||
class Post(BaseModel):
|
||||
"""Response model for individual posts in a posts list response.
|
||||
|
||||
This is a simplified version compared to PostResponse, as the list endpoint
|
||||
returns less detailed information than the create/get single post endpoints.
|
||||
"""
|
||||
|
||||
ID: int
|
||||
site_ID: int
|
||||
author: PostAuthor
|
||||
date: datetime
|
||||
modified: datetime
|
||||
title: str
|
||||
URL: str
|
||||
short_URL: str
|
||||
content: str | None = None
|
||||
excerpt: str | None = None
|
||||
slug: str
|
||||
guid: str
|
||||
status: str
|
||||
sticky: bool
|
||||
password: str | None = ""
|
||||
parent: Union[Dict[str, Any], bool, None] = None
|
||||
type: str
|
||||
discussion: Dict[str, Union[str, bool, int]] | None = None
|
||||
likes_enabled: bool | None = None
|
||||
sharing_enabled: bool | None = None
|
||||
like_count: int | None = None
|
||||
i_like: bool | None = None
|
||||
is_reblogged: bool | None = None
|
||||
is_following: bool | None = None
|
||||
global_ID: str | None = None
|
||||
featured_image: str | None = None
|
||||
post_thumbnail: Dict[str, Any] | None = None
|
||||
format: str | None = None
|
||||
geo: Union[Dict[str, Any], bool, None] = None
|
||||
menu_order: int | None = None
|
||||
page_template: str | None = None
|
||||
publicize_URLs: List[str] | None = None
|
||||
terms: Dict[str, Dict[str, Any]] | None = None
|
||||
tags: Dict[str, Dict[str, Any]] | None = None
|
||||
categories: Dict[str, Dict[str, Any]] | None = None
|
||||
attachments: Dict[str, Dict[str, Any]] | None = None
|
||||
attachment_count: int | None = None
|
||||
metadata: List[Dict[str, Any]] | None = None
|
||||
meta: Dict[str, Any] | None = None
|
||||
capabilities: Dict[str, bool] | None = None
|
||||
revisions: List[int] | None = None
|
||||
other_URLs: Dict[str, Any] | None = None
|
||||
|
||||
|
||||
class PostsResponse(BaseModel):
|
||||
"""Response model for WordPress posts list."""
|
||||
|
||||
found: int
|
||||
posts: List[Post]
|
||||
meta: Dict[str, Any]
|
||||
|
||||
|
||||
def normalize_site(site: str) -> str:
|
||||
"""
|
||||
Normalize a site identifier by stripping protocol and trailing slashes.
|
||||
|
||||
Args:
|
||||
site: Site URL, domain, or ID (e.g., "https://myblog.wordpress.com/", "myblog.wordpress.com", "123456789")
|
||||
|
||||
Returns:
|
||||
Normalized site identifier (domain or ID only)
|
||||
"""
|
||||
site = site.strip()
|
||||
if site.startswith("https://"):
|
||||
site = site[8:]
|
||||
elif site.startswith("http://"):
|
||||
site = site[7:]
|
||||
return site.rstrip("/")
|
||||
|
||||
|
||||
async def get_posts(
|
||||
credentials: Credentials,
|
||||
site: str,
|
||||
status: PostStatus | None = None,
|
||||
number: int = 100,
|
||||
offset: int = 0,
|
||||
) -> PostsResponse:
|
||||
"""
|
||||
Get posts from a WordPress site.
|
||||
|
||||
Args:
|
||||
credentials: OAuth credentials
|
||||
site: Site ID or domain (e.g., "myblog.wordpress.com" or "123456789")
|
||||
status: Filter by post status using PostStatus enum, or None for all
|
||||
number: Number of posts to retrieve (max 100)
|
||||
offset: Number of posts to skip (for pagination)
|
||||
|
||||
Returns:
|
||||
PostsResponse with the list of posts
|
||||
"""
|
||||
site = normalize_site(site)
|
||||
endpoint = f"/rest/v1.1/sites/{site}/posts"
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.auth_header(),
|
||||
}
|
||||
|
||||
params: Dict[str, Any] = {
|
||||
"number": max(1, min(number, 100)), # 1–100 posts per request
|
||||
"offset": offset,
|
||||
}
|
||||
|
||||
if status:
|
||||
params["status"] = status.value
|
||||
response = await Requests(raise_for_status=False).get(
|
||||
f"{WORDPRESS_BASE_URL.rstrip('/')}{endpoint}",
|
||||
headers=headers,
|
||||
params=params,
|
||||
)
|
||||
|
||||
if response.ok:
|
||||
return PostsResponse.model_validate(response.json())
|
||||
|
||||
error_data = (
|
||||
response.json()
|
||||
if response.headers.get("content-type", "").startswith("application/json")
|
||||
else {}
|
||||
)
|
||||
error_message = error_data.get("message", response.text)
|
||||
raise ValueError(f"Failed to get posts: {response.status} - {error_message}")
|
||||
|
||||
@@ -9,15 +9,7 @@ from backend.sdk import (
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._api import (
|
||||
CreatePostRequest,
|
||||
Post,
|
||||
PostResponse,
|
||||
PostsResponse,
|
||||
PostStatus,
|
||||
create_post,
|
||||
get_posts,
|
||||
)
|
||||
from ._api import CreatePostRequest, PostResponse, PostStatus, create_post
|
||||
from ._config import wordpress
|
||||
|
||||
|
||||
@@ -57,15 +49,8 @@ class WordPressCreatePostBlock(Block):
|
||||
media_urls: list[str] = SchemaField(
|
||||
description="URLs of images to sideload and attach to the post", default=[]
|
||||
)
|
||||
publish_as_draft: bool = SchemaField(
|
||||
description="If True, publishes the post as a draft. If False, publishes it publicly.",
|
||||
default=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
site: str = SchemaField(
|
||||
description="The site ID or domain (pass-through for chaining with other blocks)"
|
||||
)
|
||||
post_id: int = SchemaField(description="The ID of the created post")
|
||||
post_url: str = SchemaField(description="The full URL of the created post")
|
||||
short_url: str = SchemaField(description="The shortened wp.me URL")
|
||||
@@ -93,9 +78,7 @@ class WordPressCreatePostBlock(Block):
|
||||
tags=input_data.tags,
|
||||
featured_image=input_data.featured_image,
|
||||
media_urls=input_data.media_urls,
|
||||
status=(
|
||||
PostStatus.DRAFT if input_data.publish_as_draft else PostStatus.PUBLISH
|
||||
),
|
||||
status=PostStatus.PUBLISH,
|
||||
)
|
||||
|
||||
post_response: PostResponse = await create_post(
|
||||
@@ -104,69 +87,7 @@ class WordPressCreatePostBlock(Block):
|
||||
post_data=post_request,
|
||||
)
|
||||
|
||||
yield "site", input_data.site
|
||||
yield "post_id", post_response.ID
|
||||
yield "post_url", post_response.URL
|
||||
yield "short_url", post_response.short_URL
|
||||
yield "post_data", post_response.model_dump()
|
||||
|
||||
|
||||
class WordPressGetAllPostsBlock(Block):
|
||||
"""
|
||||
Fetches all posts from a WordPress.com site or Jetpack-enabled site.
|
||||
Supports filtering by status and pagination.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = wordpress.credentials_field()
|
||||
site: str = SchemaField(
|
||||
description="Site ID or domain (e.g., 'myblog.wordpress.com' or '123456789')"
|
||||
)
|
||||
status: PostStatus | None = SchemaField(
|
||||
description="Filter by post status, or None for all",
|
||||
default=None,
|
||||
)
|
||||
number: int = SchemaField(
|
||||
description="Number of posts to retrieve (max 100 per request)", default=20
|
||||
)
|
||||
offset: int = SchemaField(
|
||||
description="Number of posts to skip (for pagination)", default=0
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
site: str = SchemaField(
|
||||
description="The site ID or domain (pass-through for chaining with other blocks)"
|
||||
)
|
||||
found: int = SchemaField(description="Total number of posts found")
|
||||
posts: list[Post] = SchemaField(
|
||||
description="List of post objects with their details"
|
||||
)
|
||||
post: Post = SchemaField(
|
||||
description="Individual post object (yielded for each post)"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="97728fa7-7f6f-4789-ba0c-f2c114119536",
|
||||
description="Fetch all posts from WordPress.com or Jetpack sites",
|
||||
categories={BlockCategory.SOCIAL},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: Credentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
posts_response: PostsResponse = await get_posts(
|
||||
credentials=credentials,
|
||||
site=input_data.site,
|
||||
status=input_data.status,
|
||||
number=input_data.number,
|
||||
offset=input_data.offset,
|
||||
)
|
||||
|
||||
yield "site", input_data.site
|
||||
yield "found", posts_response.found
|
||||
yield "posts", posts_response.posts
|
||||
for post in posts_response.posts:
|
||||
yield "post", post
|
||||
|
||||
@@ -211,22 +211,6 @@ class AgentRejectionData(BaseNotificationData):
|
||||
return value
|
||||
|
||||
|
||||
class WaitlistLaunchData(BaseNotificationData):
|
||||
"""Notification data for when an agent from a waitlist is launched."""
|
||||
|
||||
agent_name: str
|
||||
waitlist_name: str
|
||||
store_url: str
|
||||
launched_at: datetime
|
||||
|
||||
@field_validator("launched_at")
|
||||
@classmethod
|
||||
def validate_timezone(cls, value: datetime):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
NotificationData = Annotated[
|
||||
Union[
|
||||
AgentRunData,
|
||||
@@ -239,7 +223,6 @@ NotificationData = Annotated[
|
||||
DailySummaryData,
|
||||
RefundRequestData,
|
||||
BaseSummaryData,
|
||||
WaitlistLaunchData,
|
||||
],
|
||||
Field(discriminator="type"),
|
||||
]
|
||||
@@ -290,7 +273,6 @@ def get_notif_data_type(
|
||||
NotificationType.REFUND_PROCESSED: RefundRequestData,
|
||||
NotificationType.AGENT_APPROVED: AgentApprovalData,
|
||||
NotificationType.AGENT_REJECTED: AgentRejectionData,
|
||||
NotificationType.WAITLIST_LAUNCH: WaitlistLaunchData,
|
||||
}[notification_type]
|
||||
|
||||
|
||||
@@ -336,7 +318,6 @@ class NotificationTypeOverride:
|
||||
NotificationType.REFUND_PROCESSED: QueueType.ADMIN,
|
||||
NotificationType.AGENT_APPROVED: QueueType.IMMEDIATE,
|
||||
NotificationType.AGENT_REJECTED: QueueType.IMMEDIATE,
|
||||
NotificationType.WAITLIST_LAUNCH: QueueType.IMMEDIATE,
|
||||
}
|
||||
return BATCHING_RULES.get(self.notification_type, QueueType.IMMEDIATE)
|
||||
|
||||
@@ -356,7 +337,6 @@ class NotificationTypeOverride:
|
||||
NotificationType.REFUND_PROCESSED: "refund_processed.html",
|
||||
NotificationType.AGENT_APPROVED: "agent_approved.html",
|
||||
NotificationType.AGENT_REJECTED: "agent_rejected.html",
|
||||
NotificationType.WAITLIST_LAUNCH: "waitlist_launch.html",
|
||||
}[self.notification_type]
|
||||
|
||||
@property
|
||||
@@ -374,7 +354,6 @@ class NotificationTypeOverride:
|
||||
NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed",
|
||||
NotificationType.AGENT_APPROVED: "🎉 Your agent '{{data.agent_name}}' has been approved!",
|
||||
NotificationType.AGENT_REJECTED: "Your agent '{{data.agent_name}}' needs some updates",
|
||||
NotificationType.WAITLIST_LAUNCH: "🚀 {{data.agent_name}} is now available!",
|
||||
}[self.notification_type]
|
||||
|
||||
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
-- CreateEnum
|
||||
CREATE TYPE "WaitlistExternalStatus" AS ENUM ('DONE', 'NOT_STARTED', 'CANCELED', 'WORK_IN_PROGRESS');
|
||||
|
||||
-- AlterEnum
|
||||
ALTER TYPE "NotificationType" ADD VALUE 'WAITLIST_LAUNCH';
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "WaitlistEntry" (
|
||||
"id" TEXT NOT NULL,
|
||||
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"updatedAt" TIMESTAMP(3) NOT NULL,
|
||||
"storeListingId" TEXT,
|
||||
"owningUserId" TEXT NOT NULL,
|
||||
"slug" TEXT NOT NULL,
|
||||
"search" tsvector DEFAULT ''::tsvector,
|
||||
"name" TEXT NOT NULL,
|
||||
"subHeading" TEXT NOT NULL,
|
||||
"videoUrl" TEXT,
|
||||
"agentOutputDemoUrl" TEXT,
|
||||
"imageUrls" TEXT[],
|
||||
"description" TEXT NOT NULL,
|
||||
"categories" TEXT[],
|
||||
"status" "WaitlistExternalStatus" NOT NULL DEFAULT 'NOT_STARTED',
|
||||
"votes" INTEGER NOT NULL DEFAULT 0,
|
||||
"unaffiliatedEmailUsers" TEXT[] DEFAULT ARRAY[]::TEXT[],
|
||||
"isDeleted" BOOLEAN NOT NULL DEFAULT false,
|
||||
|
||||
CONSTRAINT "WaitlistEntry_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "_joinedWaitlists" (
|
||||
"A" TEXT NOT NULL,
|
||||
"B" TEXT NOT NULL
|
||||
);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "_joinedWaitlists_AB_unique" ON "_joinedWaitlists"("A", "B");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "_joinedWaitlists_B_index" ON "_joinedWaitlists"("B");
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "WaitlistEntry" ADD CONSTRAINT "WaitlistEntry_storeListingId_fkey" FOREIGN KEY ("storeListingId") REFERENCES "StoreListing"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "WaitlistEntry" ADD CONSTRAINT "WaitlistEntry_owningUserId_fkey" FOREIGN KEY ("owningUserId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "_joinedWaitlists" ADD CONSTRAINT "_joinedWaitlists_A_fkey" FOREIGN KEY ("A") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "_joinedWaitlists" ADD CONSTRAINT "_joinedWaitlists_B_fkey" FOREIGN KEY ("B") REFERENCES "WaitlistEntry"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
@@ -67,10 +67,6 @@ model User {
|
||||
OAuthAuthorizationCodes OAuthAuthorizationCode[]
|
||||
OAuthAccessTokens OAuthAccessToken[]
|
||||
OAuthRefreshTokens OAuthRefreshToken[]
|
||||
|
||||
// Waitlist relations
|
||||
waitlistEntries WaitlistEntry[]
|
||||
joinedWaitlists WaitlistEntry[] @relation("joinedWaitlists")
|
||||
}
|
||||
|
||||
enum OnboardingStep {
|
||||
@@ -232,7 +228,6 @@ enum NotificationType {
|
||||
REFUND_PROCESSED
|
||||
AGENT_APPROVED
|
||||
AGENT_REJECTED
|
||||
WAITLIST_LAUNCH
|
||||
}
|
||||
|
||||
model NotificationEvent {
|
||||
@@ -839,8 +834,7 @@ model StoreListing {
|
||||
OwningUser User @relation(fields: [owningUserId], references: [id])
|
||||
|
||||
// Relations
|
||||
Versions StoreListingVersion[] @relation("ListingVersions")
|
||||
waitlistEntries WaitlistEntry[]
|
||||
Versions StoreListingVersion[] @relation("ListingVersions")
|
||||
|
||||
// Unique index on agentId to ensure only one listing per agent, regardless of number of versions the agent has.
|
||||
@@unique([agentGraphId])
|
||||
@@ -930,47 +924,6 @@ model StoreListingReview {
|
||||
@@index([reviewByUserId])
|
||||
}
|
||||
|
||||
enum WaitlistExternalStatus {
|
||||
DONE
|
||||
NOT_STARTED
|
||||
CANCELED
|
||||
WORK_IN_PROGRESS
|
||||
}
|
||||
|
||||
model WaitlistEntry {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
storeListingId String?
|
||||
StoreListing StoreListing? @relation(fields: [storeListingId], references: [id], onDelete: SetNull)
|
||||
|
||||
owningUserId String
|
||||
OwningUser User @relation(fields: [owningUserId], references: [id])
|
||||
|
||||
slug String
|
||||
search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
|
||||
|
||||
// Content fields
|
||||
name String
|
||||
subHeading String
|
||||
videoUrl String?
|
||||
agentOutputDemoUrl String?
|
||||
imageUrls String[]
|
||||
description String
|
||||
categories String[]
|
||||
|
||||
//Waitlist specific fields
|
||||
status WaitlistExternalStatus @default(NOT_STARTED)
|
||||
votes Int @default(0) // Hide from frontend api
|
||||
joinedUsers User[] @relation("joinedWaitlists")
|
||||
// NOTE: DO NOT DOUBLE SEND TO THESE USERS, IF THEY HAVE SIGNED UP SINCE THEY MAY HAVE ALREADY RECEIVED AN EMAIL
|
||||
// DOUBLE CHECK WHEN SENDING THAT THEY ARE NOT IN THE JOINED USERS LIST ALSO
|
||||
unaffiliatedEmailUsers String[] @default([])
|
||||
|
||||
isDeleted Boolean @default(false)
|
||||
}
|
||||
|
||||
enum SubmissionStatus {
|
||||
DRAFT // Being prepared, not yet submitted
|
||||
PENDING // Submitted, awaiting review
|
||||
|
||||
746
autogpt_platform/backend/scripts/generate_block_docs.py
Normal file
746
autogpt_platform/backend/scripts/generate_block_docs.py
Normal file
@@ -0,0 +1,746 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Block Documentation Generator
|
||||
|
||||
Generates markdown documentation for all blocks from code introspection.
|
||||
Preserves manually-written content between marker comments.
|
||||
|
||||
Usage:
|
||||
# Generate all docs
|
||||
poetry run python scripts/generate_block_docs.py
|
||||
|
||||
# Check mode for CI (exits 1 if stale)
|
||||
poetry run python scripts/generate_block_docs.py --check
|
||||
|
||||
# Migrate existing docs (add markers, preserve content)
|
||||
poetry run python scripts/generate_block_docs.py --migrate
|
||||
|
||||
# Verbose output
|
||||
poetry run python scripts/generate_block_docs.py -v
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import inspect
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
# Add backend to path for imports
|
||||
backend_dir = Path(__file__).parent.parent
|
||||
sys.path.insert(0, str(backend_dir))
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default output directory relative to repo root
|
||||
DEFAULT_OUTPUT_DIR = (
|
||||
Path(__file__).parent.parent.parent.parent / "docs" / "platform" / "blocks"
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FieldDoc:
|
||||
"""Documentation for a single input/output field."""
|
||||
|
||||
name: str
|
||||
description: str
|
||||
type_str: str
|
||||
required: bool
|
||||
default: Any = None
|
||||
advanced: bool = False
|
||||
hidden: bool = False
|
||||
placeholder: str | None = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class BlockDoc:
|
||||
"""Documentation data extracted from a block."""
|
||||
|
||||
id: str
|
||||
name: str
|
||||
class_name: str
|
||||
description: str
|
||||
categories: list[str]
|
||||
category_descriptions: dict[str, str]
|
||||
inputs: list[FieldDoc]
|
||||
outputs: list[FieldDoc]
|
||||
block_type: str
|
||||
source_file: str
|
||||
contributors: list[str] = field(default_factory=list)
|
||||
|
||||
|
||||
# Category to human-readable name mapping
|
||||
CATEGORY_DISPLAY_NAMES = {
|
||||
"AI": "AI and Language Models",
|
||||
"BASIC": "Basic Operations",
|
||||
"TEXT": "Text Processing",
|
||||
"SEARCH": "Search and Information Retrieval",
|
||||
"SOCIAL": "Social Media and Content",
|
||||
"DEVELOPER_TOOLS": "Developer Tools",
|
||||
"DATA": "Data Processing",
|
||||
"LOGIC": "Logic and Control Flow",
|
||||
"COMMUNICATION": "Communication",
|
||||
"INPUT": "Input/Output",
|
||||
"OUTPUT": "Input/Output",
|
||||
"MULTIMEDIA": "Media Generation",
|
||||
"PRODUCTIVITY": "Productivity",
|
||||
"HARDWARE": "Hardware",
|
||||
"AGENT": "Agent Integration",
|
||||
"CRM": "CRM Services",
|
||||
"SAFETY": "AI Safety",
|
||||
"ISSUE_TRACKING": "Issue Tracking",
|
||||
"MARKETING": "Marketing",
|
||||
}
|
||||
|
||||
# Category to doc file mapping (for grouping related blocks)
|
||||
CATEGORY_FILE_MAP = {
|
||||
"BASIC": "basic",
|
||||
"TEXT": "text",
|
||||
"AI": "llm",
|
||||
"SEARCH": "search",
|
||||
"DATA": "data",
|
||||
"LOGIC": "logic",
|
||||
"COMMUNICATION": "communication",
|
||||
"MULTIMEDIA": "multimedia",
|
||||
"PRODUCTIVITY": "productivity",
|
||||
}
|
||||
|
||||
|
||||
def class_name_to_display_name(class_name: str) -> str:
|
||||
"""Convert BlockClassName to 'Block Class Name'."""
|
||||
# Remove 'Block' suffix
|
||||
name = class_name.replace("Block", "")
|
||||
# Insert space before capitals
|
||||
name = re.sub(r"([a-z])([A-Z])", r"\1 \2", name)
|
||||
# Handle consecutive capitals (e.g., 'HTTPRequest' -> 'HTTP Request')
|
||||
name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1 \2", name)
|
||||
return name.strip()
|
||||
|
||||
|
||||
def type_to_readable(type_schema: dict[str, Any]) -> str:
|
||||
"""Convert JSON schema type to human-readable string."""
|
||||
if not isinstance(type_schema, dict):
|
||||
return str(type_schema) if type_schema else "Any"
|
||||
|
||||
if "anyOf" in type_schema:
|
||||
# Union type - show options
|
||||
any_of = type_schema["anyOf"]
|
||||
if not isinstance(any_of, list):
|
||||
return "Any"
|
||||
options = []
|
||||
for opt in any_of:
|
||||
if isinstance(opt, dict) and opt.get("type") == "null":
|
||||
continue
|
||||
options.append(type_to_readable(opt))
|
||||
if len(options) == 1:
|
||||
return options[0]
|
||||
return " | ".join(options)
|
||||
|
||||
if "allOf" in type_schema:
|
||||
all_of = type_schema["allOf"]
|
||||
if not isinstance(all_of, list) or not all_of:
|
||||
return "Any"
|
||||
return type_to_readable(all_of[0])
|
||||
|
||||
schema_type = type_schema.get("type")
|
||||
|
||||
if schema_type == "array":
|
||||
items = type_schema.get("items", {})
|
||||
item_type = type_to_readable(items)
|
||||
return f"List[{item_type}]"
|
||||
|
||||
if schema_type == "object":
|
||||
if "additionalProperties" in type_schema:
|
||||
value_type = type_to_readable(type_schema["additionalProperties"])
|
||||
return f"Dict[str, {value_type}]"
|
||||
# Check if it's a specific model
|
||||
title = type_schema.get("title", "Object")
|
||||
return title
|
||||
|
||||
if schema_type == "string":
|
||||
if "enum" in type_schema:
|
||||
return " | ".join(f'"{v}"' for v in type_schema["enum"][:3])
|
||||
if "format" in type_schema:
|
||||
return f"str ({type_schema['format']})"
|
||||
return "str"
|
||||
|
||||
if schema_type == "integer":
|
||||
return "int"
|
||||
|
||||
if schema_type == "number":
|
||||
return "float"
|
||||
|
||||
if schema_type == "boolean":
|
||||
return "bool"
|
||||
|
||||
if schema_type == "null":
|
||||
return "None"
|
||||
|
||||
# Fallback
|
||||
return type_schema.get("title", schema_type or "Any")
|
||||
|
||||
|
||||
def safe_get(d: Any, key: str, default: Any = None) -> Any:
|
||||
"""Safely get a value from a dict-like object."""
|
||||
if isinstance(d, dict):
|
||||
return d.get(key, default)
|
||||
return default
|
||||
|
||||
|
||||
def extract_block_doc(block_cls: type) -> BlockDoc:
|
||||
"""Extract documentation data from a block class."""
|
||||
block = block_cls.create()
|
||||
|
||||
# Get source file
|
||||
try:
|
||||
source_file = inspect.getfile(block_cls)
|
||||
# Make relative to blocks directory
|
||||
blocks_dir = Path(source_file).parent
|
||||
while blocks_dir.name != "blocks" and blocks_dir.parent != blocks_dir:
|
||||
blocks_dir = blocks_dir.parent
|
||||
source_file = str(Path(source_file).relative_to(blocks_dir.parent))
|
||||
except (TypeError, ValueError):
|
||||
source_file = "unknown"
|
||||
|
||||
# Extract input fields
|
||||
input_schema = block.input_schema.jsonschema()
|
||||
input_properties = safe_get(input_schema, "properties", {})
|
||||
if not isinstance(input_properties, dict):
|
||||
input_properties = {}
|
||||
required_raw = safe_get(input_schema, "required", [])
|
||||
# Handle edge cases where required might not be a list
|
||||
if isinstance(required_raw, (list, set, tuple)):
|
||||
required_inputs = set(required_raw)
|
||||
else:
|
||||
required_inputs = set()
|
||||
|
||||
inputs = []
|
||||
for field_name, field_schema in input_properties.items():
|
||||
if not isinstance(field_schema, dict):
|
||||
continue
|
||||
# Skip credentials fields in docs (they're auto-handled)
|
||||
if "credentials" in field_name.lower():
|
||||
continue
|
||||
|
||||
inputs.append(
|
||||
FieldDoc(
|
||||
name=field_name,
|
||||
description=safe_get(field_schema, "description", ""),
|
||||
type_str=type_to_readable(field_schema),
|
||||
required=field_name in required_inputs,
|
||||
default=safe_get(field_schema, "default"),
|
||||
advanced=safe_get(field_schema, "advanced", False) or False,
|
||||
hidden=safe_get(field_schema, "hidden", False) or False,
|
||||
placeholder=safe_get(field_schema, "placeholder"),
|
||||
)
|
||||
)
|
||||
|
||||
# Extract output fields
|
||||
output_schema = block.output_schema.jsonschema()
|
||||
output_properties = safe_get(output_schema, "properties", {})
|
||||
if not isinstance(output_properties, dict):
|
||||
output_properties = {}
|
||||
|
||||
outputs = []
|
||||
for field_name, field_schema in output_properties.items():
|
||||
if not isinstance(field_schema, dict):
|
||||
continue
|
||||
outputs.append(
|
||||
FieldDoc(
|
||||
name=field_name,
|
||||
description=safe_get(field_schema, "description", ""),
|
||||
type_str=type_to_readable(field_schema),
|
||||
required=True, # Outputs are always produced
|
||||
hidden=safe_get(field_schema, "hidden", False) or False,
|
||||
)
|
||||
)
|
||||
|
||||
# Get category info (sort for deterministic ordering since it's a set)
|
||||
categories = []
|
||||
category_descriptions = {}
|
||||
for cat in sorted(block.categories, key=lambda c: c.name):
|
||||
categories.append(cat.name)
|
||||
category_descriptions[cat.name] = cat.value
|
||||
|
||||
# Get contributors
|
||||
contributors = []
|
||||
for contrib in block.contributors:
|
||||
contributors.append(contrib.name if hasattr(contrib, "name") else str(contrib))
|
||||
|
||||
return BlockDoc(
|
||||
id=block.id,
|
||||
name=class_name_to_display_name(block.name),
|
||||
class_name=block.name,
|
||||
description=block.description,
|
||||
categories=categories,
|
||||
category_descriptions=category_descriptions,
|
||||
inputs=inputs,
|
||||
outputs=outputs,
|
||||
block_type=block.block_type.value,
|
||||
source_file=source_file,
|
||||
contributors=contributors,
|
||||
)
|
||||
|
||||
|
||||
def generate_anchor(name: str) -> str:
|
||||
"""Generate markdown anchor from block name."""
|
||||
return name.lower().replace(" ", "-").replace("(", "").replace(")", "")
|
||||
|
||||
|
||||
def extract_manual_content(existing_content: str) -> dict[str, str]:
|
||||
"""Extract content between MANUAL markers from existing file."""
|
||||
manual_sections = {}
|
||||
|
||||
# Pattern: <!-- MANUAL: section_name -->content<!-- END MANUAL -->
|
||||
pattern = r"<!-- MANUAL: (\w+) -->\s*(.*?)\s*<!-- END MANUAL -->"
|
||||
matches = re.findall(pattern, existing_content, re.DOTALL)
|
||||
|
||||
for section_name, content in matches:
|
||||
manual_sections[section_name] = content.strip()
|
||||
|
||||
return manual_sections
|
||||
|
||||
|
||||
def strip_markers(content: str) -> str:
|
||||
"""Remove MANUAL markers from content."""
|
||||
# Remove opening markers
|
||||
content = re.sub(r"<!-- MANUAL: \w+ -->\s*", "", content)
|
||||
# Remove closing markers
|
||||
content = re.sub(r"\s*<!-- END MANUAL -->", "", content)
|
||||
return content.strip()
|
||||
|
||||
|
||||
def extract_legacy_content(existing_content: str) -> dict[str, str]:
|
||||
"""Extract content from legacy docs without markers (for migration)."""
|
||||
manual_sections = {}
|
||||
|
||||
# Try to extract "How it works" section
|
||||
how_it_works_match = re.search(
|
||||
r"### How it works\s*\n(.*?)(?=\n### |\n## |\Z)", existing_content, re.DOTALL
|
||||
)
|
||||
if how_it_works_match:
|
||||
content = strip_markers(how_it_works_match.group(1).strip())
|
||||
if content and not content.startswith("|"): # Not a table
|
||||
manual_sections["how_it_works"] = content
|
||||
|
||||
# Try to extract "Possible use case" section
|
||||
use_case_match = re.search(
|
||||
r"### Possible use case\s*\n(.*?)(?=\n### |\n## |\n---|\Z)",
|
||||
existing_content,
|
||||
re.DOTALL,
|
||||
)
|
||||
if use_case_match:
|
||||
content = strip_markers(use_case_match.group(1).strip())
|
||||
if content:
|
||||
manual_sections["use_case"] = content
|
||||
|
||||
return manual_sections
|
||||
|
||||
|
||||
def generate_block_markdown(
|
||||
block: BlockDoc,
|
||||
manual_content: dict[str, str] | None = None,
|
||||
is_first_in_file: bool = True,
|
||||
) -> str:
|
||||
"""Generate markdown documentation for a single block."""
|
||||
manual_content = manual_content or {}
|
||||
lines = []
|
||||
|
||||
# Block heading
|
||||
heading_level = "#" if is_first_in_file else "##"
|
||||
lines.append(f"{heading_level} {block.name}")
|
||||
lines.append("")
|
||||
|
||||
# What it is (full description)
|
||||
lines.append("### What it is")
|
||||
lines.append(block.description or "No description available.")
|
||||
lines.append("")
|
||||
|
||||
# How it works (manual section)
|
||||
lines.append("### How it works")
|
||||
how_it_works = manual_content.get(
|
||||
"how_it_works", "_Add technical explanation here._"
|
||||
)
|
||||
lines.append("<!-- MANUAL: how_it_works -->")
|
||||
lines.append(how_it_works)
|
||||
lines.append("<!-- END MANUAL -->")
|
||||
lines.append("")
|
||||
|
||||
# Inputs table (auto-generated)
|
||||
visible_inputs = [f for f in block.inputs if not f.hidden]
|
||||
if visible_inputs:
|
||||
lines.append("### Inputs")
|
||||
lines.append("| Input | Description | Type | Required |")
|
||||
lines.append("|-------|-------------|------|----------|")
|
||||
for inp in visible_inputs:
|
||||
required = "Yes" if inp.required else "No"
|
||||
desc = inp.description or "-"
|
||||
# Escape pipes in description
|
||||
desc = desc.replace("|", "\\|")
|
||||
lines.append(f"| {inp.name} | {desc} | {inp.type_str} | {required} |")
|
||||
lines.append("")
|
||||
|
||||
# Outputs table (auto-generated)
|
||||
visible_outputs = [f for f in block.outputs if not f.hidden]
|
||||
if visible_outputs:
|
||||
lines.append("### Outputs")
|
||||
lines.append("| Output | Description | Type |")
|
||||
lines.append("|--------|-------------|------|")
|
||||
for out in visible_outputs:
|
||||
desc = out.description or "-"
|
||||
desc = desc.replace("|", "\\|")
|
||||
lines.append(f"| {out.name} | {desc} | {out.type_str} |")
|
||||
lines.append("")
|
||||
|
||||
# Possible use case (manual section)
|
||||
lines.append("### Possible use case")
|
||||
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
|
||||
lines.append("<!-- MANUAL: use_case -->")
|
||||
lines.append(use_case)
|
||||
lines.append("<!-- END MANUAL -->")
|
||||
lines.append("")
|
||||
|
||||
lines.append("---")
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def get_block_file_mapping(blocks: list[BlockDoc]) -> dict[str, list[BlockDoc]]:
|
||||
"""
|
||||
Map blocks to their documentation files.
|
||||
|
||||
Returns dict of {relative_file_path: [blocks]}
|
||||
"""
|
||||
file_mapping = defaultdict(list)
|
||||
|
||||
for block in blocks:
|
||||
# Determine file path based on source file or category
|
||||
source_path = Path(block.source_file)
|
||||
|
||||
# If source is in a subdirectory (e.g., google/gmail.py), use that structure
|
||||
if len(source_path.parts) > 2: # blocks/subdir/file.py
|
||||
subdir = source_path.parts[1] # e.g., "google"
|
||||
# Use the Python filename as the md filename
|
||||
md_file = source_path.stem + ".md" # e.g., "gmail.md"
|
||||
file_path = f"{subdir}/{md_file}"
|
||||
else:
|
||||
# Use category-based grouping for top-level blocks
|
||||
primary_category = block.categories[0] if block.categories else "BASIC"
|
||||
file_name = CATEGORY_FILE_MAP.get(primary_category, "misc")
|
||||
file_path = f"{file_name}.md"
|
||||
|
||||
file_mapping[file_path].append(block)
|
||||
|
||||
return dict(file_mapping)
|
||||
|
||||
|
||||
def generate_overview_table(blocks: list[BlockDoc]) -> str:
|
||||
"""Generate the overview table markdown (blocks.md)."""
|
||||
lines = []
|
||||
|
||||
lines.append("# AutoGPT Blocks Overview")
|
||||
lines.append("")
|
||||
lines.append(
|
||||
'AutoGPT uses a modular approach with various "blocks" to handle different tasks. These blocks are the building blocks of AutoGPT workflows, allowing users to create complex automations by combining simple, specialized components.'
|
||||
)
|
||||
lines.append("")
|
||||
lines.append('!!! info "Creating Your Own Blocks"')
|
||||
lines.append(" Want to create your own custom blocks? Check out our guides:")
|
||||
lines.append(" ")
|
||||
lines.append(
|
||||
" - [Build your own Blocks](../new_blocks.md) - Step-by-step tutorial with examples"
|
||||
)
|
||||
lines.append(
|
||||
" - [Block SDK Guide](../block-sdk-guide.md) - Advanced SDK patterns with OAuth, webhooks, and provider configuration"
|
||||
)
|
||||
lines.append("")
|
||||
lines.append(
|
||||
"Below is a comprehensive list of all available blocks, categorized by their primary function. Click on any block name to view its detailed documentation."
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
# Group blocks by category
|
||||
by_category = defaultdict(list)
|
||||
for block in blocks:
|
||||
primary_cat = block.categories[0] if block.categories else "BASIC"
|
||||
by_category[primary_cat].append(block)
|
||||
|
||||
# Sort categories
|
||||
category_order = [
|
||||
"BASIC",
|
||||
"DATA",
|
||||
"TEXT",
|
||||
"AI",
|
||||
"SEARCH",
|
||||
"SOCIAL",
|
||||
"COMMUNICATION",
|
||||
"DEVELOPER_TOOLS",
|
||||
"MULTIMEDIA",
|
||||
"PRODUCTIVITY",
|
||||
"LOGIC",
|
||||
"INPUT",
|
||||
"OUTPUT",
|
||||
"AGENT",
|
||||
"CRM",
|
||||
"SAFETY",
|
||||
"ISSUE_TRACKING",
|
||||
"HARDWARE",
|
||||
"MARKETING",
|
||||
]
|
||||
|
||||
for category in category_order:
|
||||
if category not in by_category:
|
||||
continue
|
||||
|
||||
cat_blocks = sorted(by_category[category], key=lambda b: b.name)
|
||||
display_name = CATEGORY_DISPLAY_NAMES.get(category, category)
|
||||
|
||||
lines.append(f"## {display_name}")
|
||||
lines.append("| Block Name | Description |")
|
||||
lines.append("|------------|-------------|")
|
||||
|
||||
for block in cat_blocks:
|
||||
# Determine link path
|
||||
file_mapping = get_block_file_mapping([block])
|
||||
file_path = list(file_mapping.keys())[0]
|
||||
anchor = generate_anchor(block.name)
|
||||
|
||||
# Short description (first sentence)
|
||||
short_desc = (
|
||||
block.description.split(".")[0]
|
||||
if block.description
|
||||
else "No description"
|
||||
)
|
||||
short_desc = short_desc.replace("|", "\\|")
|
||||
|
||||
lines.append(f"| [{block.name}]({file_path}#{anchor}) | {short_desc} |")
|
||||
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def load_all_blocks_for_docs() -> list[BlockDoc]:
|
||||
"""Load all blocks and extract documentation."""
|
||||
from backend.blocks import load_all_blocks
|
||||
|
||||
block_classes = load_all_blocks()
|
||||
blocks = []
|
||||
|
||||
for _block_id, block_cls in block_classes.items():
|
||||
try:
|
||||
block_doc = extract_block_doc(block_cls)
|
||||
blocks.append(block_doc)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to extract docs for {block_cls.__name__}: {e}")
|
||||
|
||||
return blocks
|
||||
|
||||
|
||||
def write_block_docs(
|
||||
output_dir: Path,
|
||||
blocks: list[BlockDoc],
|
||||
migrate: bool = False,
|
||||
verbose: bool = False,
|
||||
) -> dict[str, str]:
|
||||
"""
|
||||
Write block documentation files.
|
||||
|
||||
Returns dict of {file_path: content} for all generated files.
|
||||
"""
|
||||
output_dir = Path(output_dir)
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
file_mapping = get_block_file_mapping(blocks)
|
||||
generated_files = {}
|
||||
|
||||
for file_path, file_blocks in file_mapping.items():
|
||||
full_path = output_dir / file_path
|
||||
|
||||
# Create subdirectories if needed
|
||||
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load existing content for manual section preservation
|
||||
existing_content = ""
|
||||
if full_path.exists():
|
||||
existing_content = full_path.read_text()
|
||||
|
||||
# Generate content for each block
|
||||
content_parts = []
|
||||
for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
|
||||
# Try to extract manual content
|
||||
if migrate:
|
||||
manual_content = extract_legacy_content(existing_content)
|
||||
else:
|
||||
# Extract manual content specific to this block
|
||||
# Look for content after the block heading
|
||||
block_pattern = (
|
||||
rf"(?:^|\n)##? {re.escape(block.name)}\s*\n(.*?)(?=\n##? |\Z)"
|
||||
)
|
||||
block_match = re.search(block_pattern, existing_content, re.DOTALL)
|
||||
if block_match:
|
||||
manual_content = extract_manual_content(block_match.group(1))
|
||||
else:
|
||||
manual_content = {}
|
||||
|
||||
content_parts.append(
|
||||
generate_block_markdown(
|
||||
block,
|
||||
manual_content,
|
||||
is_first_in_file=(i == 0),
|
||||
)
|
||||
)
|
||||
|
||||
full_content = "\n".join(content_parts)
|
||||
generated_files[str(file_path)] = full_content
|
||||
|
||||
if verbose:
|
||||
print(f" Writing {file_path} ({len(file_blocks)} blocks)")
|
||||
|
||||
full_path.write_text(full_content)
|
||||
|
||||
# Generate overview file
|
||||
overview_content = generate_overview_table(blocks)
|
||||
overview_path = output_dir / "blocks.md"
|
||||
generated_files["blocks.md"] = overview_content
|
||||
overview_path.write_text(overview_content)
|
||||
|
||||
if verbose:
|
||||
print(" Writing blocks.md (overview)")
|
||||
|
||||
return generated_files
|
||||
|
||||
|
||||
def check_docs_in_sync(output_dir: Path, blocks: list[BlockDoc]) -> bool:
|
||||
"""
|
||||
Check if generated docs match existing docs.
|
||||
|
||||
Returns True if in sync, False otherwise.
|
||||
"""
|
||||
output_dir = Path(output_dir)
|
||||
file_mapping = get_block_file_mapping(blocks)
|
||||
|
||||
all_match = True
|
||||
|
||||
for file_path, file_blocks in file_mapping.items():
|
||||
full_path = output_dir / file_path
|
||||
|
||||
if not full_path.exists():
|
||||
print(f"MISSING: {file_path}")
|
||||
all_match = False
|
||||
continue
|
||||
|
||||
existing_content = full_path.read_text()
|
||||
|
||||
# Extract manual content from existing file
|
||||
manual_sections_by_block = {}
|
||||
for block in file_blocks:
|
||||
block_pattern = (
|
||||
rf"(?:^|\n)##? {re.escape(block.name)}\s*\n(.*?)(?=\n##? |\Z)"
|
||||
)
|
||||
block_match = re.search(block_pattern, existing_content, re.DOTALL)
|
||||
if block_match:
|
||||
manual_sections_by_block[block.name] = extract_manual_content(
|
||||
block_match.group(1)
|
||||
)
|
||||
|
||||
# Generate expected content
|
||||
content_parts = []
|
||||
for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
|
||||
manual_content = manual_sections_by_block.get(block.name, {})
|
||||
content_parts.append(
|
||||
generate_block_markdown(
|
||||
block,
|
||||
manual_content,
|
||||
is_first_in_file=(i == 0),
|
||||
)
|
||||
)
|
||||
|
||||
expected_content = "\n".join(content_parts)
|
||||
|
||||
if existing_content.strip() != expected_content.strip():
|
||||
print(f"OUT OF SYNC: {file_path}")
|
||||
all_match = False
|
||||
|
||||
# Check overview
|
||||
overview_path = output_dir / "blocks.md"
|
||||
if overview_path.exists():
|
||||
existing_overview = overview_path.read_text()
|
||||
expected_overview = generate_overview_table(blocks)
|
||||
if existing_overview.strip() != expected_overview.strip():
|
||||
print("OUT OF SYNC: blocks.md (overview)")
|
||||
all_match = False
|
||||
else:
|
||||
print("MISSING: blocks.md (overview)")
|
||||
all_match = False
|
||||
|
||||
return all_match
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate block documentation from code introspection"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output-dir",
|
||||
type=Path,
|
||||
default=DEFAULT_OUTPUT_DIR,
|
||||
help="Output directory for generated docs",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--check",
|
||||
action="store_true",
|
||||
help="Check if docs are in sync (for CI), exit 1 if not",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--migrate",
|
||||
action="store_true",
|
||||
help="Migrate existing docs (extract legacy manual content)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
help="Verbose output",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.DEBUG if args.verbose else logging.INFO,
|
||||
format="%(levelname)s: %(message)s",
|
||||
)
|
||||
|
||||
print("Loading blocks...")
|
||||
blocks = load_all_blocks_for_docs()
|
||||
print(f"Found {len(blocks)} blocks")
|
||||
|
||||
if args.check:
|
||||
print(f"Checking docs in {args.output_dir}...")
|
||||
in_sync = check_docs_in_sync(args.output_dir, blocks)
|
||||
if in_sync:
|
||||
print("All documentation is in sync!")
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("\nDocumentation is out of sync!")
|
||||
print(
|
||||
"Run: cd autogpt_platform/backend && poetry run python scripts/generate_block_docs.py"
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print(f"Generating docs to {args.output_dir}...")
|
||||
write_block_docs(
|
||||
args.output_dir,
|
||||
blocks,
|
||||
migrate=args.migrate,
|
||||
verbose=args.verbose,
|
||||
)
|
||||
print("Done!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
211
autogpt_platform/backend/scripts/migrate_block_docs.py
Normal file
211
autogpt_platform/backend/scripts/migrate_block_docs.py
Normal file
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Migration script to preserve manual content from existing docs.
|
||||
|
||||
This script:
|
||||
1. Reads all existing block documentation (from git HEAD)
|
||||
2. Extracts manual content (How it works, Possible use case) by block name
|
||||
3. Creates a JSON mapping of block_name -> manual_content
|
||||
4. Generates new docs using current block structure while preserving manual content
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from scripts.generate_block_docs import (
|
||||
generate_block_markdown,
|
||||
generate_overview_table,
|
||||
get_block_file_mapping,
|
||||
load_all_blocks_for_docs,
|
||||
strip_markers,
|
||||
)
|
||||
|
||||
|
||||
def get_git_file_content(file_path: str) -> str | None:
|
||||
"""Get file content from git HEAD."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "show", f"HEAD:{file_path}"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=Path(__file__).parent.parent.parent.parent, # repo root
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def extract_blocks_from_doc(content: str) -> dict[str, dict[str, str]]:
|
||||
"""Extract all block sections and their manual content from a doc file."""
|
||||
blocks = {}
|
||||
|
||||
# Find all block headings (# or ##)
|
||||
block_pattern = r"(?:^|\n)(##?) ([^\n]+)\n"
|
||||
matches = list(re.finditer(block_pattern, content))
|
||||
|
||||
for i, match in enumerate(matches):
|
||||
block_name = match.group(2).strip()
|
||||
start = match.end()
|
||||
|
||||
# Find end (next heading or end of file)
|
||||
if i + 1 < len(matches):
|
||||
end = matches[i + 1].start()
|
||||
else:
|
||||
end = len(content)
|
||||
|
||||
block_content = content[start:end]
|
||||
|
||||
# Extract manual sections
|
||||
manual_content = {}
|
||||
|
||||
# How it works
|
||||
how_match = re.search(
|
||||
r"### How it works\s*\n(.*?)(?=\n### |\Z)", block_content, re.DOTALL
|
||||
)
|
||||
if how_match:
|
||||
text = strip_markers(how_match.group(1).strip())
|
||||
# Skip if it's just placeholder or a table
|
||||
if text and not text.startswith("|") and not text.startswith("_Add"):
|
||||
manual_content["how_it_works"] = text
|
||||
|
||||
# Possible use case
|
||||
use_case_match = re.search(
|
||||
r"### Possible use case\s*\n(.*?)(?=\n### |\n## |\n---|\Z)",
|
||||
block_content,
|
||||
re.DOTALL,
|
||||
)
|
||||
if use_case_match:
|
||||
text = strip_markers(use_case_match.group(1).strip())
|
||||
if text and not text.startswith("_Add"):
|
||||
manual_content["use_case"] = text
|
||||
|
||||
if manual_content:
|
||||
blocks[block_name] = manual_content
|
||||
|
||||
return blocks
|
||||
|
||||
|
||||
def collect_existing_manual_content() -> dict[str, dict[str, str]]:
|
||||
"""Collect all manual content from existing git HEAD docs."""
|
||||
all_manual_content = {}
|
||||
|
||||
# Find all existing md files via git
|
||||
result = subprocess.run(
|
||||
["git", "ls-files", "docs/content/platform/blocks/"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=Path(__file__).parent.parent.parent.parent,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
print("Failed to list git files")
|
||||
return {}
|
||||
|
||||
for file_path in result.stdout.strip().split("\n"):
|
||||
if not file_path.endswith(".md"):
|
||||
continue
|
||||
if file_path.endswith("blocks.md"): # Skip overview
|
||||
continue
|
||||
|
||||
print(f"Processing: {file_path}")
|
||||
content = get_git_file_content(file_path)
|
||||
if content:
|
||||
blocks = extract_blocks_from_doc(content)
|
||||
for block_name, manual_content in blocks.items():
|
||||
if block_name in all_manual_content:
|
||||
# Merge if already exists
|
||||
all_manual_content[block_name].update(manual_content)
|
||||
else:
|
||||
all_manual_content[block_name] = manual_content
|
||||
|
||||
return all_manual_content
|
||||
|
||||
|
||||
def run_migration():
|
||||
"""Run the migration."""
|
||||
print("Step 1: Collecting existing manual content from git HEAD...")
|
||||
manual_content_cache = collect_existing_manual_content()
|
||||
|
||||
print(f"\nFound manual content for {len(manual_content_cache)} blocks")
|
||||
|
||||
# Show some examples
|
||||
for name, content in list(manual_content_cache.items())[:3]:
|
||||
print(f" - {name}: {list(content.keys())}")
|
||||
|
||||
# Save cache for reference
|
||||
cache_path = Path(__file__).parent / "manual_content_cache.json"
|
||||
with open(cache_path, "w") as f:
|
||||
json.dump(manual_content_cache, f, indent=2)
|
||||
print(f"\nSaved cache to {cache_path}")
|
||||
|
||||
print("\nStep 2: Loading blocks from code...")
|
||||
blocks = load_all_blocks_for_docs()
|
||||
print(f"Found {len(blocks)} blocks")
|
||||
|
||||
print("\nStep 3: Generating new documentation...")
|
||||
output_dir = (
|
||||
Path(__file__).parent.parent.parent.parent
|
||||
/ "docs"
|
||||
/ "content"
|
||||
/ "platform"
|
||||
/ "blocks"
|
||||
)
|
||||
|
||||
file_mapping = get_block_file_mapping(blocks)
|
||||
|
||||
# Track statistics
|
||||
preserved_count = 0
|
||||
missing_count = 0
|
||||
|
||||
for file_path, file_blocks in file_mapping.items():
|
||||
full_path = output_dir / file_path
|
||||
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
content_parts = []
|
||||
for i, block in enumerate(sorted(file_blocks, key=lambda b: b.name)):
|
||||
# Look up manual content by block name
|
||||
manual_content = manual_content_cache.get(block.name, {})
|
||||
|
||||
if manual_content:
|
||||
preserved_count += 1
|
||||
else:
|
||||
# Try with class name
|
||||
manual_content = manual_content_cache.get(block.class_name, {})
|
||||
if not manual_content:
|
||||
missing_count += 1
|
||||
|
||||
content_parts.append(
|
||||
generate_block_markdown(
|
||||
block,
|
||||
manual_content,
|
||||
is_first_in_file=(i == 0),
|
||||
)
|
||||
)
|
||||
|
||||
full_content = "\n".join(content_parts)
|
||||
full_path.write_text(full_content)
|
||||
print(f" Wrote {file_path} ({len(file_blocks)} blocks)")
|
||||
|
||||
# Generate overview
|
||||
overview_content = generate_overview_table(blocks)
|
||||
overview_path = output_dir / "blocks.md"
|
||||
overview_path.write_text(overview_content)
|
||||
print(" Wrote blocks.md (overview)")
|
||||
|
||||
print("\nMigration complete!")
|
||||
print(f" - Blocks with preserved manual content: {preserved_count}")
|
||||
print(f" - Blocks without manual content: {missing_count}")
|
||||
print(
|
||||
"\nYou can now run `poetry run python scripts/generate_block_docs.py --check` to verify"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_migration()
|
||||
233
autogpt_platform/backend/scripts/test_generate_block_docs.py
Normal file
233
autogpt_platform/backend/scripts/test_generate_block_docs.py
Normal file
@@ -0,0 +1,233 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for the block documentation generator."""
|
||||
import pytest
|
||||
|
||||
from scripts.generate_block_docs import (
|
||||
class_name_to_display_name,
|
||||
extract_manual_content,
|
||||
generate_anchor,
|
||||
strip_markers,
|
||||
type_to_readable,
|
||||
)
|
||||
|
||||
|
||||
class TestClassNameToDisplayName:
|
||||
"""Tests for class_name_to_display_name function."""
|
||||
|
||||
def test_simple_block_name(self):
|
||||
assert class_name_to_display_name("PrintBlock") == "Print"
|
||||
|
||||
def test_multi_word_block_name(self):
|
||||
assert class_name_to_display_name("GetWeatherBlock") == "Get Weather"
|
||||
|
||||
def test_consecutive_capitals(self):
|
||||
assert class_name_to_display_name("HTTPRequestBlock") == "HTTP Request"
|
||||
|
||||
def test_ai_prefix(self):
|
||||
assert class_name_to_display_name("AIConditionBlock") == "AI Condition"
|
||||
|
||||
def test_no_block_suffix(self):
|
||||
assert class_name_to_display_name("SomeClass") == "Some Class"
|
||||
|
||||
|
||||
class TestTypeToReadable:
|
||||
"""Tests for type_to_readable function."""
|
||||
|
||||
def test_string_type(self):
|
||||
assert type_to_readable({"type": "string"}) == "str"
|
||||
|
||||
def test_integer_type(self):
|
||||
assert type_to_readable({"type": "integer"}) == "int"
|
||||
|
||||
def test_number_type(self):
|
||||
assert type_to_readable({"type": "number"}) == "float"
|
||||
|
||||
def test_boolean_type(self):
|
||||
assert type_to_readable({"type": "boolean"}) == "bool"
|
||||
|
||||
def test_array_type(self):
|
||||
result = type_to_readable({"type": "array", "items": {"type": "string"}})
|
||||
assert result == "List[str]"
|
||||
|
||||
def test_object_type(self):
|
||||
result = type_to_readable({"type": "object", "title": "MyModel"})
|
||||
assert result == "MyModel"
|
||||
|
||||
def test_anyof_with_null(self):
|
||||
result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "null"}]})
|
||||
assert result == "str"
|
||||
|
||||
def test_anyof_multiple_types(self):
|
||||
result = type_to_readable({"anyOf": [{"type": "string"}, {"type": "integer"}]})
|
||||
assert result == "str | int"
|
||||
|
||||
def test_enum_type(self):
|
||||
result = type_to_readable(
|
||||
{"type": "string", "enum": ["option1", "option2", "option3"]}
|
||||
)
|
||||
assert result == '"option1" | "option2" | "option3"'
|
||||
|
||||
def test_none_input(self):
|
||||
assert type_to_readable(None) == "Any"
|
||||
|
||||
def test_non_dict_input(self):
|
||||
assert type_to_readable("string") == "string"
|
||||
|
||||
|
||||
class TestExtractManualContent:
|
||||
"""Tests for extract_manual_content function."""
|
||||
|
||||
def test_extract_how_it_works(self):
|
||||
content = """
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This is how it works.
|
||||
<!-- END MANUAL -->
|
||||
"""
|
||||
result = extract_manual_content(content)
|
||||
assert result == {"how_it_works": "This is how it works."}
|
||||
|
||||
def test_extract_use_case(self):
|
||||
content = """
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
Example use case here.
|
||||
<!-- END MANUAL -->
|
||||
"""
|
||||
result = extract_manual_content(content)
|
||||
assert result == {"use_case": "Example use case here."}
|
||||
|
||||
def test_extract_multiple_sections(self):
|
||||
content = """
|
||||
<!-- MANUAL: how_it_works -->
|
||||
How it works content.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
<!-- MANUAL: use_case -->
|
||||
Use case content.
|
||||
<!-- END MANUAL -->
|
||||
"""
|
||||
result = extract_manual_content(content)
|
||||
assert result == {
|
||||
"how_it_works": "How it works content.",
|
||||
"use_case": "Use case content.",
|
||||
}
|
||||
|
||||
def test_empty_content(self):
|
||||
result = extract_manual_content("")
|
||||
assert result == {}
|
||||
|
||||
def test_no_markers(self):
|
||||
result = extract_manual_content("Some content without markers")
|
||||
assert result == {}
|
||||
|
||||
|
||||
class TestStripMarkers:
|
||||
"""Tests for strip_markers function."""
|
||||
|
||||
def test_strip_opening_marker(self):
|
||||
content = "<!-- MANUAL: how_it_works -->\nContent here"
|
||||
result = strip_markers(content)
|
||||
assert result == "Content here"
|
||||
|
||||
def test_strip_closing_marker(self):
|
||||
content = "Content here\n<!-- END MANUAL -->"
|
||||
result = strip_markers(content)
|
||||
assert result == "Content here"
|
||||
|
||||
def test_strip_both_markers(self):
|
||||
content = "<!-- MANUAL: section -->\nContent here\n<!-- END MANUAL -->"
|
||||
result = strip_markers(content)
|
||||
assert result == "Content here"
|
||||
|
||||
def test_no_markers(self):
|
||||
content = "Content without markers"
|
||||
result = strip_markers(content)
|
||||
assert result == "Content without markers"
|
||||
|
||||
|
||||
class TestGenerateAnchor:
|
||||
"""Tests for generate_anchor function."""
|
||||
|
||||
def test_simple_name(self):
|
||||
assert generate_anchor("Print") == "print"
|
||||
|
||||
def test_multi_word_name(self):
|
||||
assert generate_anchor("Get Weather") == "get-weather"
|
||||
|
||||
def test_name_with_parentheses(self):
|
||||
assert generate_anchor("Something (Optional)") == "something-optional"
|
||||
|
||||
def test_already_lowercase(self):
|
||||
assert generate_anchor("already lowercase") == "already-lowercase"
|
||||
|
||||
|
||||
class TestIntegration:
|
||||
"""Integration tests that require block loading."""
|
||||
|
||||
def test_load_blocks(self):
|
||||
"""Test that blocks can be loaded successfully."""
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
logging.disable(logging.CRITICAL)
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from scripts.generate_block_docs import load_all_blocks_for_docs
|
||||
|
||||
blocks = load_all_blocks_for_docs()
|
||||
assert len(blocks) > 0, "Should load at least one block"
|
||||
|
||||
def test_block_doc_has_required_fields(self):
|
||||
"""Test that extracted block docs have required fields."""
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
logging.disable(logging.CRITICAL)
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from scripts.generate_block_docs import load_all_blocks_for_docs
|
||||
|
||||
blocks = load_all_blocks_for_docs()
|
||||
block = blocks[0]
|
||||
|
||||
assert hasattr(block, "id")
|
||||
assert hasattr(block, "name")
|
||||
assert hasattr(block, "description")
|
||||
assert hasattr(block, "categories")
|
||||
assert hasattr(block, "inputs")
|
||||
assert hasattr(block, "outputs")
|
||||
|
||||
def test_file_mapping_is_deterministic(self):
|
||||
"""Test that file mapping produces consistent results."""
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
logging.disable(logging.CRITICAL)
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from scripts.generate_block_docs import (
|
||||
get_block_file_mapping,
|
||||
load_all_blocks_for_docs,
|
||||
)
|
||||
|
||||
# Load blocks twice and compare mappings
|
||||
blocks1 = load_all_blocks_for_docs()
|
||||
blocks2 = load_all_blocks_for_docs()
|
||||
|
||||
mapping1 = get_block_file_mapping(blocks1)
|
||||
mapping2 = get_block_file_mapping(blocks2)
|
||||
|
||||
# Check same files are generated
|
||||
assert set(mapping1.keys()) == set(mapping2.keys())
|
||||
|
||||
# Check same block counts per file
|
||||
for file_path in mapping1:
|
||||
assert len(mapping1[file_path]) == len(mapping2[file_path])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Sidebar } from "@/components/__legacy__/Sidebar";
|
||||
import { Users, DollarSign, UserSearch, FileText, Clock } from "lucide-react";
|
||||
import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
|
||||
|
||||
import { IconSliders } from "@/components/__legacy__/ui/icons";
|
||||
|
||||
@@ -11,11 +11,6 @@ const sidebarLinkGroups = [
|
||||
href: "/admin/marketplace",
|
||||
icon: <Users className="h-6 w-6" />,
|
||||
},
|
||||
{
|
||||
text: "Waitlist Management",
|
||||
href: "/admin/waitlist",
|
||||
icon: <Clock className="h-6 w-6" />,
|
||||
},
|
||||
{
|
||||
text: "User Spending",
|
||||
href: "/admin/spending",
|
||||
|
||||
@@ -1,217 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import {
|
||||
usePostV2CreateWaitlist,
|
||||
getGetV2ListAllWaitlistsQueryKey,
|
||||
} from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { Plus } from "@phosphor-icons/react";
|
||||
|
||||
export function CreateWaitlistButton() {
|
||||
const [open, setOpen] = useState(false);
|
||||
const { toast } = useToast();
|
||||
const queryClient = useQueryClient();
|
||||
|
||||
const createWaitlistMutation = usePostV2CreateWaitlist({
|
||||
mutation: {
|
||||
onSuccess: (response) => {
|
||||
if (response.status === 200) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: "Waitlist created successfully",
|
||||
});
|
||||
setOpen(false);
|
||||
setFormData({
|
||||
name: "",
|
||||
slug: "",
|
||||
subHeading: "",
|
||||
description: "",
|
||||
categories: "",
|
||||
imageUrls: "",
|
||||
videoUrl: "",
|
||||
agentOutputDemoUrl: "",
|
||||
});
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListAllWaitlistsQueryKey(),
|
||||
});
|
||||
} else {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to create waitlist",
|
||||
});
|
||||
}
|
||||
},
|
||||
onError: (error) => {
|
||||
console.error("Error creating waitlist:", error);
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to create waitlist",
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const [formData, setFormData] = useState({
|
||||
name: "",
|
||||
slug: "",
|
||||
subHeading: "",
|
||||
description: "",
|
||||
categories: "",
|
||||
imageUrls: "",
|
||||
videoUrl: "",
|
||||
agentOutputDemoUrl: "",
|
||||
});
|
||||
|
||||
function handleInputChange(id: string, value: string) {
|
||||
setFormData((prev) => ({
|
||||
...prev,
|
||||
[id]: value,
|
||||
}));
|
||||
}
|
||||
|
||||
function generateSlug(name: string) {
|
||||
return name
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9]+/g, "-")
|
||||
.replace(/^-|-$/g, "");
|
||||
}
|
||||
|
||||
function handleSubmit(e: React.FormEvent) {
|
||||
e.preventDefault();
|
||||
|
||||
createWaitlistMutation.mutate({
|
||||
data: {
|
||||
name: formData.name,
|
||||
slug: formData.slug || generateSlug(formData.name),
|
||||
subHeading: formData.subHeading,
|
||||
description: formData.description,
|
||||
categories: formData.categories
|
||||
? formData.categories.split(",").map((c) => c.trim())
|
||||
: [],
|
||||
imageUrls: formData.imageUrls
|
||||
? formData.imageUrls.split(",").map((u) => u.trim())
|
||||
: [],
|
||||
videoUrl: formData.videoUrl || null,
|
||||
agentOutputDemoUrl: formData.agentOutputDemoUrl || null,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<Button onClick={() => setOpen(true)}>
|
||||
<Plus size={16} className="mr-2" />
|
||||
Create Waitlist
|
||||
</Button>
|
||||
|
||||
<Dialog
|
||||
title="Create New Waitlist"
|
||||
controlled={{
|
||||
isOpen: open,
|
||||
set: async (isOpen) => setOpen(isOpen),
|
||||
}}
|
||||
onClose={() => setOpen(false)}
|
||||
styling={{ maxWidth: "600px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<p className="mb-4 text-sm text-zinc-500">
|
||||
Create a new waitlist for an upcoming agent. Users can sign up to be
|
||||
notified when it launches.
|
||||
</p>
|
||||
<form onSubmit={handleSubmit} className="flex flex-col gap-2">
|
||||
<Input
|
||||
id="name"
|
||||
label="Name"
|
||||
value={formData.name}
|
||||
onChange={(e) => handleInputChange("name", e.target.value)}
|
||||
placeholder="SEO Analysis Agent"
|
||||
required
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="slug"
|
||||
label="Slug"
|
||||
value={formData.slug}
|
||||
onChange={(e) => handleInputChange("slug", e.target.value)}
|
||||
placeholder="seo-analysis-agent (auto-generated if empty)"
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="subHeading"
|
||||
label="Subheading"
|
||||
value={formData.subHeading}
|
||||
onChange={(e) => handleInputChange("subHeading", e.target.value)}
|
||||
placeholder="Analyze your website's SEO in minutes"
|
||||
required
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="description"
|
||||
label="Description"
|
||||
type="textarea"
|
||||
value={formData.description}
|
||||
onChange={(e) => handleInputChange("description", e.target.value)}
|
||||
placeholder="Detailed description of what this agent does..."
|
||||
rows={4}
|
||||
required
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="categories"
|
||||
label="Categories (comma-separated)"
|
||||
value={formData.categories}
|
||||
onChange={(e) => handleInputChange("categories", e.target.value)}
|
||||
placeholder="SEO, Marketing, Analysis"
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="imageUrls"
|
||||
label="Image URLs (comma-separated)"
|
||||
value={formData.imageUrls}
|
||||
onChange={(e) => handleInputChange("imageUrls", e.target.value)}
|
||||
placeholder="https://example.com/image1.jpg, https://example.com/image2.jpg"
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="videoUrl"
|
||||
label="Video URL (optional)"
|
||||
value={formData.videoUrl}
|
||||
onChange={(e) => handleInputChange("videoUrl", e.target.value)}
|
||||
placeholder="https://youtube.com/watch?v=..."
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="agentOutputDemoUrl"
|
||||
label="Output Demo URL (optional)"
|
||||
value={formData.agentOutputDemoUrl}
|
||||
onChange={(e) =>
|
||||
handleInputChange("agentOutputDemoUrl", e.target.value)
|
||||
}
|
||||
placeholder="https://example.com/demo-output.mp4"
|
||||
/>
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
type="button"
|
||||
variant="secondary"
|
||||
onClick={() => setOpen(false)}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button type="submit" loading={createWaitlistMutation.isPending}>
|
||||
Create Waitlist
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Select } from "@/components/atoms/Select/Select";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { usePutV2UpdateWaitlist } from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
import type { WaitlistAdminResponse } from "@/app/api/__generated__/models/waitlistAdminResponse";
|
||||
import type { WaitlistUpdateRequest } from "@/app/api/__generated__/models/waitlistUpdateRequest";
|
||||
import { WaitlistExternalStatus } from "@/app/api/__generated__/models/waitlistExternalStatus";
|
||||
|
||||
type EditWaitlistDialogProps = {
|
||||
waitlist: WaitlistAdminResponse;
|
||||
onClose: () => void;
|
||||
onSave: () => void;
|
||||
};
|
||||
|
||||
const STATUS_OPTIONS = [
|
||||
{ value: WaitlistExternalStatus.NOT_STARTED, label: "Not Started" },
|
||||
{ value: WaitlistExternalStatus.WORK_IN_PROGRESS, label: "Work In Progress" },
|
||||
{ value: WaitlistExternalStatus.DONE, label: "Done" },
|
||||
{ value: WaitlistExternalStatus.CANCELED, label: "Canceled" },
|
||||
];
|
||||
|
||||
export function EditWaitlistDialog({
|
||||
waitlist,
|
||||
onClose,
|
||||
onSave,
|
||||
}: EditWaitlistDialogProps) {
|
||||
const { toast } = useToast();
|
||||
const updateWaitlistMutation = usePutV2UpdateWaitlist();
|
||||
|
||||
const [formData, setFormData] = useState({
|
||||
name: waitlist.name,
|
||||
slug: waitlist.slug,
|
||||
subHeading: waitlist.subHeading,
|
||||
description: waitlist.description,
|
||||
categories: waitlist.categories.join(", "),
|
||||
imageUrls: waitlist.imageUrls.join(", "),
|
||||
videoUrl: waitlist.videoUrl || "",
|
||||
agentOutputDemoUrl: waitlist.agentOutputDemoUrl || "",
|
||||
status: waitlist.status,
|
||||
storeListingId: waitlist.storeListingId || "",
|
||||
});
|
||||
|
||||
function handleInputChange(id: string, value: string) {
|
||||
setFormData((prev) => ({
|
||||
...prev,
|
||||
[id]: value,
|
||||
}));
|
||||
}
|
||||
|
||||
function handleStatusChange(value: string) {
|
||||
setFormData((prev) => ({
|
||||
...prev,
|
||||
status: value as WaitlistExternalStatus,
|
||||
}));
|
||||
}
|
||||
|
||||
async function handleSubmit(e: React.FormEvent) {
|
||||
e.preventDefault();
|
||||
|
||||
const updateData: WaitlistUpdateRequest = {
|
||||
name: formData.name,
|
||||
slug: formData.slug,
|
||||
subHeading: formData.subHeading,
|
||||
description: formData.description,
|
||||
categories: formData.categories
|
||||
? formData.categories.split(",").map((c) => c.trim())
|
||||
: [],
|
||||
imageUrls: formData.imageUrls
|
||||
? formData.imageUrls.split(",").map((u) => u.trim())
|
||||
: [],
|
||||
videoUrl: formData.videoUrl || null,
|
||||
agentOutputDemoUrl: formData.agentOutputDemoUrl || null,
|
||||
status: formData.status,
|
||||
storeListingId: formData.storeListingId || null,
|
||||
};
|
||||
|
||||
updateWaitlistMutation.mutate(
|
||||
{ waitlistId: waitlist.id, data: updateData },
|
||||
{
|
||||
onSuccess: (response) => {
|
||||
if (response.status === 200) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: "Waitlist updated successfully",
|
||||
});
|
||||
onSave();
|
||||
} else {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to update waitlist",
|
||||
});
|
||||
}
|
||||
},
|
||||
onError: () => {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to update waitlist",
|
||||
});
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Edit Waitlist"
|
||||
controlled={{
|
||||
isOpen: true,
|
||||
set: async (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "600px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<p className="mb-4 text-sm text-zinc-500">
|
||||
Update the waitlist details. Changes will be reflected immediately.
|
||||
</p>
|
||||
<form onSubmit={handleSubmit} className="flex flex-col gap-2">
|
||||
<Input
|
||||
id="name"
|
||||
label="Name"
|
||||
value={formData.name}
|
||||
onChange={(e) => handleInputChange("name", e.target.value)}
|
||||
required
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="slug"
|
||||
label="Slug"
|
||||
value={formData.slug}
|
||||
onChange={(e) => handleInputChange("slug", e.target.value)}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="subHeading"
|
||||
label="Subheading"
|
||||
value={formData.subHeading}
|
||||
onChange={(e) => handleInputChange("subHeading", e.target.value)}
|
||||
required
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="description"
|
||||
label="Description"
|
||||
type="textarea"
|
||||
value={formData.description}
|
||||
onChange={(e) => handleInputChange("description", e.target.value)}
|
||||
rows={4}
|
||||
required
|
||||
/>
|
||||
|
||||
<Select
|
||||
id="status"
|
||||
label="Status"
|
||||
value={formData.status}
|
||||
onValueChange={handleStatusChange}
|
||||
options={STATUS_OPTIONS}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="categories"
|
||||
label="Categories (comma-separated)"
|
||||
value={formData.categories}
|
||||
onChange={(e) => handleInputChange("categories", e.target.value)}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="imageUrls"
|
||||
label="Image URLs (comma-separated)"
|
||||
value={formData.imageUrls}
|
||||
onChange={(e) => handleInputChange("imageUrls", e.target.value)}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="videoUrl"
|
||||
label="Video URL"
|
||||
value={formData.videoUrl}
|
||||
onChange={(e) => handleInputChange("videoUrl", e.target.value)}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="agentOutputDemoUrl"
|
||||
label="Output Demo URL"
|
||||
value={formData.agentOutputDemoUrl}
|
||||
onChange={(e) =>
|
||||
handleInputChange("agentOutputDemoUrl", e.target.value)
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
id="storeListingId"
|
||||
label="Store Listing ID (for linking)"
|
||||
value={formData.storeListingId}
|
||||
onChange={(e) =>
|
||||
handleInputChange("storeListingId", e.target.value)
|
||||
}
|
||||
placeholder="Leave empty if not linked"
|
||||
/>
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button type="button" variant="secondary" onClick={onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button type="submit" loading={updateWaitlistMutation.isPending}>
|
||||
Save Changes
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { User, Envelope, DownloadSimple } from "@phosphor-icons/react";
|
||||
import { useGetV2GetWaitlistSignups } from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
|
||||
type WaitlistSignupsDialogProps = {
|
||||
waitlistId: string;
|
||||
onClose: () => void;
|
||||
};
|
||||
|
||||
export function WaitlistSignupsDialog({
|
||||
waitlistId,
|
||||
onClose,
|
||||
}: WaitlistSignupsDialogProps) {
|
||||
const {
|
||||
data: signupsResponse,
|
||||
isLoading,
|
||||
isError,
|
||||
} = useGetV2GetWaitlistSignups(waitlistId);
|
||||
|
||||
const signups = signupsResponse?.status === 200 ? signupsResponse.data : null;
|
||||
|
||||
function exportToCSV() {
|
||||
if (!signups) return;
|
||||
|
||||
const headers = ["Type", "Email", "User ID", "Username"];
|
||||
const rows = signups.signups.map((signup) => [
|
||||
signup.type,
|
||||
signup.email || "",
|
||||
signup.userId || "",
|
||||
signup.username || "",
|
||||
]);
|
||||
|
||||
const escapeCell = (cell: string) => `"${cell.replace(/"/g, '""')}"`;
|
||||
|
||||
const csvContent = [
|
||||
headers.join(","),
|
||||
...rows.map((row) => row.map(escapeCell).join(",")),
|
||||
].join("\n");
|
||||
|
||||
const blob = new Blob([csvContent], { type: "text/csv" });
|
||||
const url = window.URL.createObjectURL(blob);
|
||||
const a = document.createElement("a");
|
||||
a.href = url;
|
||||
a.download = `waitlist-${waitlistId}-signups.csv`;
|
||||
a.click();
|
||||
window.URL.revokeObjectURL(url);
|
||||
}
|
||||
|
||||
function renderContent() {
|
||||
if (isLoading) {
|
||||
return <div className="py-10 text-center">Loading signups...</div>;
|
||||
}
|
||||
|
||||
if (isError) {
|
||||
return (
|
||||
<div className="py-10 text-center text-red-500">
|
||||
Failed to load signups. Please try again.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
if (!signups || signups.signups.length === 0) {
|
||||
return (
|
||||
<div className="py-10 text-center text-gray-500">
|
||||
No signups yet for this waitlist.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="flex justify-end">
|
||||
<Button variant="secondary" size="small" onClick={exportToCSV}>
|
||||
<DownloadSimple className="mr-2 h-4 w-4" size={16} />
|
||||
Export CSV
|
||||
</Button>
|
||||
</div>
|
||||
<div className="max-h-[400px] overflow-y-auto rounded-md border">
|
||||
<table className="w-full">
|
||||
<thead className="bg-gray-50 dark:bg-gray-800">
|
||||
<tr>
|
||||
<th className="px-4 py-3 text-left text-sm font-medium">
|
||||
Type
|
||||
</th>
|
||||
<th className="px-4 py-3 text-left text-sm font-medium">
|
||||
Email / Username
|
||||
</th>
|
||||
<th className="px-4 py-3 text-left text-sm font-medium">
|
||||
User ID
|
||||
</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody className="divide-y">
|
||||
{signups.signups.map((signup, index) => (
|
||||
<tr key={index}>
|
||||
<td className="px-4 py-3">
|
||||
{signup.type === "user" ? (
|
||||
<span className="flex items-center gap-1 text-blue-600">
|
||||
<User className="h-4 w-4" size={16} /> User
|
||||
</span>
|
||||
) : (
|
||||
<span className="flex items-center gap-1 text-gray-600">
|
||||
<Envelope className="h-4 w-4" size={16} /> Email
|
||||
</span>
|
||||
)}
|
||||
</td>
|
||||
<td className="px-4 py-3">
|
||||
{signup.type === "user"
|
||||
? signup.username || signup.email
|
||||
: signup.email}
|
||||
</td>
|
||||
<td className="px-4 py-3 font-mono text-sm">
|
||||
{signup.userId || "-"}
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Waitlist Signups"
|
||||
controlled={{
|
||||
isOpen: true,
|
||||
set: async (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "700px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<p className="mb-4 text-sm text-zinc-500">
|
||||
{signups
|
||||
? `${signups.totalCount} total signups`
|
||||
: "Loading signups..."}
|
||||
</p>
|
||||
|
||||
{renderContent()}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button variant="secondary" onClick={onClose}>
|
||||
Close
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,206 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/__legacy__/ui/table";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
useGetV2ListAllWaitlists,
|
||||
useDeleteV2DeleteWaitlist,
|
||||
getGetV2ListAllWaitlistsQueryKey,
|
||||
} from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
import type { WaitlistAdminResponse } from "@/app/api/__generated__/models/waitlistAdminResponse";
|
||||
import { EditWaitlistDialog } from "./EditWaitlistDialog";
|
||||
import { WaitlistSignupsDialog } from "./WaitlistSignupsDialog";
|
||||
import { Trash, PencilSimple, Users, Link } from "@phosphor-icons/react";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
|
||||
export function WaitlistTable() {
|
||||
const [editingWaitlist, setEditingWaitlist] =
|
||||
useState<WaitlistAdminResponse | null>(null);
|
||||
const [viewingSignups, setViewingSignups] = useState<string | null>(null);
|
||||
const { toast } = useToast();
|
||||
const queryClient = useQueryClient();
|
||||
|
||||
const { data: response, isLoading, error } = useGetV2ListAllWaitlists();
|
||||
|
||||
const deleteWaitlistMutation = useDeleteV2DeleteWaitlist({
|
||||
mutation: {
|
||||
onSuccess: () => {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: "Waitlist deleted successfully",
|
||||
});
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListAllWaitlistsQueryKey(),
|
||||
});
|
||||
},
|
||||
onError: (error) => {
|
||||
console.error("Error deleting waitlist:", error);
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to delete waitlist",
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
function handleDelete(waitlistId: string) {
|
||||
if (!confirm("Are you sure you want to delete this waitlist?")) return;
|
||||
deleteWaitlistMutation.mutate({ waitlistId });
|
||||
}
|
||||
|
||||
function handleWaitlistSaved() {
|
||||
setEditingWaitlist(null);
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListAllWaitlistsQueryKey(),
|
||||
});
|
||||
}
|
||||
|
||||
function formatStatus(status: string) {
|
||||
const statusColors: Record<string, string> = {
|
||||
NOT_STARTED: "bg-gray-100 text-gray-800",
|
||||
WORK_IN_PROGRESS: "bg-blue-100 text-blue-800",
|
||||
DONE: "bg-green-100 text-green-800",
|
||||
CANCELED: "bg-red-100 text-red-800",
|
||||
};
|
||||
|
||||
return (
|
||||
<span
|
||||
className={`rounded-full px-2 py-1 text-xs font-medium ${statusColors[status] || "bg-gray-100 text-gray-700"}`}
|
||||
>
|
||||
{status.replace(/_/g, " ")}
|
||||
</span>
|
||||
);
|
||||
}
|
||||
|
||||
function formatDate(dateStr: string) {
|
||||
if (!dateStr) return "-";
|
||||
return new Intl.DateTimeFormat("en-US", {
|
||||
month: "short",
|
||||
day: "numeric",
|
||||
year: "numeric",
|
||||
}).format(new Date(dateStr));
|
||||
}
|
||||
|
||||
if (isLoading) {
|
||||
return <div className="py-10 text-center">Loading waitlists...</div>;
|
||||
}
|
||||
|
||||
if (error) {
|
||||
return (
|
||||
<div className="py-10 text-center text-red-500">
|
||||
Error loading waitlists. Please try again.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
const waitlists = response?.status === 200 ? response.data.waitlists : [];
|
||||
|
||||
if (waitlists.length === 0) {
|
||||
return (
|
||||
<div className="py-10 text-center text-gray-500">
|
||||
No waitlists found. Create one to get started!
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="rounded-md border bg-white">
|
||||
<Table>
|
||||
<TableHeader className="bg-gray-50">
|
||||
<TableRow>
|
||||
<TableHead className="font-medium">Name</TableHead>
|
||||
<TableHead className="font-medium">Status</TableHead>
|
||||
<TableHead className="font-medium">Signups</TableHead>
|
||||
<TableHead className="font-medium">Votes</TableHead>
|
||||
<TableHead className="font-medium">Created</TableHead>
|
||||
<TableHead className="font-medium">Linked Agent</TableHead>
|
||||
<TableHead className="font-medium">Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{waitlists.map((waitlist) => (
|
||||
<TableRow key={waitlist.id}>
|
||||
<TableCell>
|
||||
<div>
|
||||
<div className="font-medium">{waitlist.name}</div>
|
||||
<div className="text-sm text-gray-500">
|
||||
{waitlist.subHeading}
|
||||
</div>
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>{formatStatus(waitlist.status)}</TableCell>
|
||||
<TableCell>{waitlist.signupCount}</TableCell>
|
||||
<TableCell>{waitlist.votes}</TableCell>
|
||||
<TableCell>{formatDate(waitlist.createdAt)}</TableCell>
|
||||
<TableCell>
|
||||
{waitlist.storeListingId ? (
|
||||
<span className="text-green-600">
|
||||
<Link size={16} className="inline" /> Linked
|
||||
</span>
|
||||
) : (
|
||||
<span className="text-gray-400">Not linked</span>
|
||||
)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex gap-2">
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => setViewingSignups(waitlist.id)}
|
||||
title="View signups"
|
||||
>
|
||||
<Users size={16} />
|
||||
</Button>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => setEditingWaitlist(waitlist)}
|
||||
title="Edit"
|
||||
>
|
||||
<PencilSimple size={16} />
|
||||
</Button>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => handleDelete(waitlist.id)}
|
||||
title="Delete"
|
||||
disabled={deleteWaitlistMutation.isPending}
|
||||
>
|
||||
<Trash size={16} className="text-red-500" />
|
||||
</Button>
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
|
||||
{editingWaitlist && (
|
||||
<EditWaitlistDialog
|
||||
waitlist={editingWaitlist}
|
||||
onClose={() => setEditingWaitlist(null)}
|
||||
onSave={handleWaitlistSaved}
|
||||
/>
|
||||
)}
|
||||
|
||||
{viewingSignups && (
|
||||
<WaitlistSignupsDialog
|
||||
waitlistId={viewingSignups}
|
||||
onClose={() => setViewingSignups(null)}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
import { withRoleAccess } from "@/lib/withRoleAccess";
|
||||
import { Suspense } from "react";
|
||||
import { WaitlistTable } from "./components/WaitlistTable";
|
||||
import { CreateWaitlistButton } from "./components/CreateWaitlistButton";
|
||||
|
||||
function WaitlistDashboard() {
|
||||
return (
|
||||
<div className="mx-auto p-6">
|
||||
<div className="flex flex-col gap-4">
|
||||
<div className="flex items-center justify-between">
|
||||
<div>
|
||||
<h1 className="text-3xl font-bold">Waitlist Management</h1>
|
||||
<p className="text-gray-500">
|
||||
Manage upcoming agent waitlists and track signups
|
||||
</p>
|
||||
</div>
|
||||
<CreateWaitlistButton />
|
||||
</div>
|
||||
|
||||
<Suspense
|
||||
fallback={
|
||||
<div className="py-10 text-center">Loading waitlists...</div>
|
||||
}
|
||||
>
|
||||
<WaitlistTable />
|
||||
</Suspense>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default async function WaitlistDashboardPage() {
|
||||
const withAdminAccess = await withRoleAccess(["admin"]);
|
||||
const ProtectedWaitlistDashboard = await withAdminAccess(WaitlistDashboard);
|
||||
return <ProtectedWaitlistDashboard />;
|
||||
}
|
||||
@@ -68,10 +68,7 @@ export const NodeHeader = ({ data, nodeId }: Props) => {
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<div>
|
||||
<Text
|
||||
variant="large-semibold"
|
||||
className="line-clamp-1 hover:cursor-text"
|
||||
>
|
||||
<Text variant="large-semibold" className="line-clamp-1">
|
||||
{beautifyString(title).replace("Block", "").trim()}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
@@ -89,18 +89,6 @@ export function extractOptions(
|
||||
|
||||
// get display type and color for schema types [need for type display next to field name]
|
||||
export const getTypeDisplayInfo = (schema: any) => {
|
||||
if (
|
||||
schema?.type === "array" &&
|
||||
"format" in schema &&
|
||||
schema.format === "table"
|
||||
) {
|
||||
return {
|
||||
displayType: "table",
|
||||
colorClass: "!text-indigo-500",
|
||||
hexColor: "#6366f1",
|
||||
};
|
||||
}
|
||||
|
||||
if (schema?.type === "string" && schema?.format) {
|
||||
const formatMap: Record<
|
||||
string,
|
||||
|
||||
@@ -36,7 +36,6 @@ type Props = {
|
||||
readOnly?: boolean;
|
||||
isOptional?: boolean;
|
||||
showTitle?: boolean;
|
||||
variant?: "default" | "node";
|
||||
};
|
||||
|
||||
export function CredentialsInput({
|
||||
@@ -49,7 +48,6 @@ export function CredentialsInput({
|
||||
readOnly = false,
|
||||
isOptional = false,
|
||||
showTitle = true,
|
||||
variant = "default",
|
||||
}: Props) {
|
||||
const hookData = useCredentialsInput({
|
||||
schema,
|
||||
@@ -125,7 +123,6 @@ export function CredentialsInput({
|
||||
onClearCredential={() => onSelectCredential(undefined)}
|
||||
readOnly={readOnly}
|
||||
allowNone={isOptional}
|
||||
variant={variant}
|
||||
/>
|
||||
) : (
|
||||
<div className="mb-4 space-y-2">
|
||||
|
||||
@@ -30,8 +30,6 @@ type CredentialRowProps = {
|
||||
readOnly?: boolean;
|
||||
showCaret?: boolean;
|
||||
asSelectTrigger?: boolean;
|
||||
/** When "node", applies compact styling for node context */
|
||||
variant?: "default" | "node";
|
||||
};
|
||||
|
||||
export function CredentialRow({
|
||||
@@ -43,22 +41,14 @@ export function CredentialRow({
|
||||
readOnly = false,
|
||||
showCaret = false,
|
||||
asSelectTrigger = false,
|
||||
variant = "default",
|
||||
}: CredentialRowProps) {
|
||||
const ProviderIcon = providerIcons[provider] || fallbackIcon;
|
||||
const isNodeVariant = variant === "node";
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex items-center gap-3 rounded-medium border border-zinc-200 bg-white p-3 transition-colors",
|
||||
asSelectTrigger && isNodeVariant
|
||||
? "min-w-0 flex-1 overflow-hidden border-0 bg-transparent"
|
||||
: asSelectTrigger
|
||||
? "border-0 bg-transparent"
|
||||
: readOnly
|
||||
? "w-fit"
|
||||
: "",
|
||||
asSelectTrigger ? "border-0 bg-transparent" : readOnly ? "w-fit" : "",
|
||||
)}
|
||||
onClick={readOnly || showCaret || asSelectTrigger ? undefined : onSelect}
|
||||
style={
|
||||
@@ -71,31 +61,19 @@ export function CredentialRow({
|
||||
<ProviderIcon className="h-3 w-3 text-white" />
|
||||
</div>
|
||||
<IconKey className="h-5 w-5 shrink-0 text-zinc-800" />
|
||||
<div
|
||||
className={cn(
|
||||
"flex min-w-0 flex-1 flex-nowrap items-center gap-4",
|
||||
isNodeVariant && "overflow-hidden",
|
||||
)}
|
||||
>
|
||||
<div className="flex min-w-0 flex-1 flex-nowrap items-center gap-4">
|
||||
<Text
|
||||
variant="body"
|
||||
className={cn(
|
||||
"tracking-tight",
|
||||
isNodeVariant
|
||||
? "truncate"
|
||||
: "line-clamp-1 flex-[0_0_50%] text-ellipsis",
|
||||
)}
|
||||
className="line-clamp-1 flex-[0_0_50%] text-ellipsis tracking-tight"
|
||||
>
|
||||
{getCredentialDisplayName(credential, displayName)}
|
||||
</Text>
|
||||
{!(asSelectTrigger && isNodeVariant) && (
|
||||
<Text
|
||||
variant="large"
|
||||
className="relative top-1 hidden overflow-hidden whitespace-nowrap font-mono tracking-tight md:block"
|
||||
>
|
||||
{"*".repeat(MASKED_KEY_LENGTH)}
|
||||
</Text>
|
||||
)}
|
||||
<Text
|
||||
variant="large"
|
||||
className="lex-[0_0_40%] relative top-1 hidden overflow-hidden whitespace-nowrap font-mono tracking-tight md:block"
|
||||
>
|
||||
{"*".repeat(MASKED_KEY_LENGTH)}
|
||||
</Text>
|
||||
</div>
|
||||
{showCaret && !asSelectTrigger && (
|
||||
<CaretDown className="h-4 w-4 shrink-0 text-gray-400" />
|
||||
|
||||
@@ -7,7 +7,6 @@ import {
|
||||
} from "@/components/__legacy__/ui/select";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useEffect } from "react";
|
||||
import { getCredentialDisplayName } from "../../helpers";
|
||||
import { CredentialRow } from "../CredentialRow/CredentialRow";
|
||||
@@ -27,8 +26,6 @@ interface Props {
|
||||
onClearCredential?: () => void;
|
||||
readOnly?: boolean;
|
||||
allowNone?: boolean;
|
||||
/** When "node", applies compact styling for node context */
|
||||
variant?: "default" | "node";
|
||||
}
|
||||
|
||||
export function CredentialsSelect({
|
||||
@@ -40,7 +37,6 @@ export function CredentialsSelect({
|
||||
onClearCredential,
|
||||
readOnly = false,
|
||||
allowNone = true,
|
||||
variant = "default",
|
||||
}: Props) {
|
||||
// Auto-select first credential if none is selected (only if allowNone is false)
|
||||
useEffect(() => {
|
||||
@@ -63,12 +59,7 @@ export function CredentialsSelect({
|
||||
value={selectedCredentials?.id || (allowNone ? "__none__" : "")}
|
||||
onValueChange={handleValueChange}
|
||||
>
|
||||
<SelectTrigger
|
||||
className={cn(
|
||||
"h-auto min-h-12 w-full rounded-medium border-zinc-200 p-0 pr-4 shadow-none",
|
||||
variant === "node" && "overflow-hidden",
|
||||
)}
|
||||
>
|
||||
<SelectTrigger className="h-auto min-h-12 w-full rounded-medium border-zinc-200 p-0 pr-4 shadow-none">
|
||||
{selectedCredentials ? (
|
||||
<SelectValue key={selectedCredentials.id} asChild>
|
||||
<CredentialRow
|
||||
@@ -84,7 +75,6 @@ export function CredentialsSelect({
|
||||
onDelete={() => {}}
|
||||
readOnly={readOnly}
|
||||
asSelectTrigger={true}
|
||||
variant={variant}
|
||||
/>
|
||||
</SelectValue>
|
||||
) : (
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
|
||||
import { useSupabaseStore } from "@/lib/supabase/hooks/useSupabaseStore";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { usePostV2AddSelfToTheAgentWaitlist } from "@/app/api/__generated__/endpoints/store/store";
|
||||
import { Check } from "@phosphor-icons/react";
|
||||
|
||||
interface JoinWaitlistModalProps {
|
||||
waitlist: StoreWaitlistEntry;
|
||||
onClose: () => void;
|
||||
onSuccess?: () => void;
|
||||
}
|
||||
|
||||
export function JoinWaitlistModal({
|
||||
waitlist,
|
||||
onClose,
|
||||
onSuccess,
|
||||
}: JoinWaitlistModalProps) {
|
||||
const { user } = useSupabaseStore();
|
||||
const [email, setEmail] = useState("");
|
||||
const [success, setSuccess] = useState(false);
|
||||
const { toast } = useToast();
|
||||
const joinWaitlistMutation = usePostV2AddSelfToTheAgentWaitlist();
|
||||
|
||||
function handleJoin() {
|
||||
joinWaitlistMutation.mutate(
|
||||
{
|
||||
waitlistId: waitlist.waitlistId,
|
||||
data: { email: user ? undefined : email },
|
||||
},
|
||||
{
|
||||
onSuccess: (response) => {
|
||||
if (response.status === 200) {
|
||||
setSuccess(true);
|
||||
toast({
|
||||
title: "You're on the list!",
|
||||
description: `We'll notify you when ${waitlist.name} is ready.`,
|
||||
});
|
||||
|
||||
// Close after a short delay to show success state
|
||||
setTimeout(() => {
|
||||
onSuccess?.();
|
||||
onClose();
|
||||
}, 1500);
|
||||
} else {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to join waitlist. Please try again.",
|
||||
});
|
||||
}
|
||||
},
|
||||
onError: () => {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: "Failed to join waitlist. Please try again.",
|
||||
});
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
if (success) {
|
||||
return (
|
||||
<Dialog
|
||||
title=""
|
||||
controlled={{
|
||||
isOpen: true,
|
||||
set: async (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "400px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div className="flex flex-col items-center justify-center py-8">
|
||||
<div className="mb-4 flex h-16 w-16 items-center justify-center rounded-full bg-green-100 dark:bg-green-900">
|
||||
<Check
|
||||
className="h-8 w-8 text-green-600 dark:text-green-400"
|
||||
size={32}
|
||||
weight="bold"
|
||||
/>
|
||||
</div>
|
||||
<h2 className="mb-2 text-center text-xl font-semibold">
|
||||
You're on the list!
|
||||
</h2>
|
||||
<p className="text-center text-zinc-500">
|
||||
We'll notify you when {waitlist.name} is ready.
|
||||
</p>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Join waitlist"
|
||||
controlled={{
|
||||
isOpen: true,
|
||||
set: async (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "400px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<p className="mb-4 text-sm text-zinc-500">
|
||||
{user
|
||||
? `Get notified when ${waitlist.name} is ready to use.`
|
||||
: `Enter your email to get notified when ${waitlist.name} is ready.`}
|
||||
</p>
|
||||
|
||||
<div className="py-4">
|
||||
{user ? (
|
||||
<div className="rounded-lg bg-neutral-50 p-4 dark:bg-neutral-800">
|
||||
<p className="text-sm text-neutral-600 dark:text-neutral-400">
|
||||
You'll be notified at:
|
||||
</p>
|
||||
<p className="mt-1 font-medium text-neutral-900 dark:text-neutral-100">
|
||||
{user.email}
|
||||
</p>
|
||||
</div>
|
||||
) : (
|
||||
<Input
|
||||
id="email"
|
||||
label="Email address"
|
||||
type="email"
|
||||
placeholder="you@example.com"
|
||||
value={email}
|
||||
onChange={(e) => setEmail(e.target.value)}
|
||||
required
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button type="button" variant="secondary" onClick={onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
onClick={handleJoin}
|
||||
loading={joinWaitlistMutation.isPending}
|
||||
disabled={!user && !email}
|
||||
className="bg-neutral-800 text-white hover:bg-neutral-700 dark:bg-neutral-700 dark:hover:bg-neutral-600"
|
||||
>
|
||||
{user ? "Join waitlist" : "Join with email"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -8,7 +8,6 @@ import { useMainMarketplacePage } from "./useMainMarketplacePage";
|
||||
import { FeaturedCreators } from "../FeaturedCreators/FeaturedCreators";
|
||||
import { MainMarketplacePageLoading } from "../MainMarketplacePageLoading";
|
||||
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
|
||||
import { WaitlistSection } from "../WaitlistSection/WaitlistSection";
|
||||
|
||||
export const MainMarkeplacePage = () => {
|
||||
const { featuredAgents, topAgents, featuredCreators, isLoading, hasError } =
|
||||
@@ -47,10 +46,6 @@ export const MainMarkeplacePage = () => {
|
||||
{/* 100px margin because our featured sections button are placed 40px below the container */}
|
||||
<Separator className="mb-6 mt-24" />
|
||||
|
||||
{/* Waitlist Section - "Help Shape What's Next" */}
|
||||
<WaitlistSection />
|
||||
<Separator className="mb-6 mt-12" />
|
||||
|
||||
{topAgents && (
|
||||
<AgentsSection sectionTitle="Top Agents" agents={topAgents.agents} />
|
||||
)}
|
||||
|
||||
@@ -1,105 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import Image from "next/image";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Check } from "@phosphor-icons/react";
|
||||
|
||||
interface WaitlistCardProps {
|
||||
name: string;
|
||||
subHeading: string;
|
||||
description: string;
|
||||
imageUrl: string | null;
|
||||
isMember?: boolean;
|
||||
onCardClick: () => void;
|
||||
onJoinClick: (e: React.MouseEvent) => void;
|
||||
}
|
||||
|
||||
export function WaitlistCard({
|
||||
name,
|
||||
subHeading,
|
||||
description,
|
||||
imageUrl,
|
||||
isMember = false,
|
||||
onCardClick,
|
||||
onJoinClick,
|
||||
}: WaitlistCardProps) {
|
||||
function handleJoinClick(e: React.MouseEvent) {
|
||||
e.stopPropagation();
|
||||
onJoinClick(e);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
className="flex h-[24rem] w-full max-w-md cursor-pointer flex-col items-start rounded-3xl bg-background transition-all duration-300 hover:shadow-lg dark:hover:shadow-gray-700"
|
||||
onClick={onCardClick}
|
||||
data-testid="waitlist-card"
|
||||
role="button"
|
||||
tabIndex={0}
|
||||
aria-label={`${name} waitlist card`}
|
||||
onKeyDown={(e) => {
|
||||
if (e.key === "Enter" || e.key === " ") {
|
||||
onCardClick();
|
||||
}
|
||||
}}
|
||||
>
|
||||
{/* Image Section */}
|
||||
<div className="relative aspect-[2/1.2] w-full overflow-hidden rounded-3xl md:aspect-[2.17/1]">
|
||||
{imageUrl ? (
|
||||
<Image
|
||||
src={imageUrl}
|
||||
alt={`${name} preview image`}
|
||||
fill
|
||||
className="object-cover"
|
||||
/>
|
||||
) : (
|
||||
<div className="flex h-full w-full items-center justify-center bg-gradient-to-br from-neutral-200 to-neutral-300 dark:from-neutral-700 dark:to-neutral-800">
|
||||
<span className="text-4xl font-bold text-neutral-400 dark:text-neutral-500">
|
||||
{name.charAt(0)}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="mt-3 flex w-full flex-1 flex-col px-4">
|
||||
{/* Name and Subheading */}
|
||||
<div className="flex w-full flex-col">
|
||||
<h3 className="line-clamp-1 font-poppins text-xl font-semibold text-[#272727] dark:text-neutral-100">
|
||||
{name}
|
||||
</h3>
|
||||
<p className="mt-1 line-clamp-1 text-sm text-neutral-500 dark:text-neutral-400">
|
||||
{subHeading}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Description */}
|
||||
<div className="mt-2 flex w-full flex-col">
|
||||
<p className="line-clamp-3 text-sm font-normal leading-relaxed text-neutral-600 dark:text-neutral-400">
|
||||
{description}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="flex-grow" />
|
||||
|
||||
{/* Join Waitlist Button */}
|
||||
<div className="mt-4 w-full pb-4">
|
||||
{isMember ? (
|
||||
<Button
|
||||
disabled
|
||||
className="w-full rounded-full bg-green-600 text-white hover:bg-green-600 dark:bg-green-700 dark:hover:bg-green-700"
|
||||
>
|
||||
<Check className="mr-2" size={16} weight="bold" />
|
||||
On the waitlist
|
||||
</Button>
|
||||
) : (
|
||||
<Button
|
||||
onClick={handleJoinClick}
|
||||
className="w-full rounded-full bg-neutral-800 text-white hover:bg-neutral-700 dark:bg-neutral-700 dark:hover:bg-neutral-600"
|
||||
>
|
||||
Join waitlist
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import Image from "next/image";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
|
||||
import { Check } from "@phosphor-icons/react";
|
||||
|
||||
interface WaitlistDetailModalProps {
|
||||
waitlist: StoreWaitlistEntry;
|
||||
isMember?: boolean;
|
||||
onClose: () => void;
|
||||
onJoin: () => void;
|
||||
}
|
||||
|
||||
export function WaitlistDetailModal({
|
||||
waitlist,
|
||||
isMember = false,
|
||||
onClose,
|
||||
onJoin,
|
||||
}: WaitlistDetailModalProps) {
|
||||
return (
|
||||
<Dialog
|
||||
title={waitlist.name}
|
||||
controlled={{
|
||||
isOpen: true,
|
||||
set: async (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "700px" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div className="space-y-6">
|
||||
{/* Main Image */}
|
||||
{waitlist.imageUrls.length > 0 && (
|
||||
<div className="relative aspect-video w-full overflow-hidden rounded-xl">
|
||||
<Image
|
||||
src={waitlist.imageUrls[0]}
|
||||
alt={`${waitlist.name} preview`}
|
||||
fill
|
||||
className="object-cover"
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Subheading */}
|
||||
<p className="text-lg font-medium text-neutral-700 dark:text-neutral-300">
|
||||
{waitlist.subHeading}
|
||||
</p>
|
||||
|
||||
{/* Description */}
|
||||
<div className="prose prose-neutral dark:prose-invert max-w-none">
|
||||
<p className="whitespace-pre-wrap text-neutral-600 dark:text-neutral-400">
|
||||
{waitlist.description}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Video */}
|
||||
{waitlist.videoUrl && (
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-neutral-800 dark:text-neutral-200">
|
||||
Video
|
||||
</h4>
|
||||
<div className="relative aspect-video w-full overflow-hidden rounded-xl bg-neutral-100 dark:bg-neutral-800">
|
||||
<iframe
|
||||
src={waitlist.videoUrl}
|
||||
title={`${waitlist.name} video`}
|
||||
className="h-full w-full"
|
||||
allowFullScreen
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Output Demo */}
|
||||
{waitlist.agentOutputDemoUrl && (
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-neutral-800 dark:text-neutral-200">
|
||||
Output Demo
|
||||
</h4>
|
||||
<div className="relative aspect-video w-full overflow-hidden rounded-xl bg-neutral-100 dark:bg-neutral-800">
|
||||
<video
|
||||
src={waitlist.agentOutputDemoUrl}
|
||||
controls
|
||||
className="h-full w-full"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Categories */}
|
||||
{waitlist.categories.length > 0 && (
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-neutral-800 dark:text-neutral-200">
|
||||
Categories
|
||||
</h4>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{waitlist.categories.map((category, index) => (
|
||||
<span
|
||||
key={index}
|
||||
className="rounded-full bg-neutral-100 px-3 py-1 text-sm text-neutral-700 dark:bg-neutral-800 dark:text-neutral-300"
|
||||
>
|
||||
{category}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Join Button */}
|
||||
<Dialog.Footer>
|
||||
{isMember ? (
|
||||
<Button
|
||||
disabled
|
||||
className="w-full rounded-full bg-green-600 text-white hover:bg-green-600 dark:bg-green-700 dark:hover:bg-green-700"
|
||||
>
|
||||
<Check size={16} className="mr-2" />
|
||||
You're on the waitlist
|
||||
</Button>
|
||||
) : (
|
||||
<Button
|
||||
onClick={onJoin}
|
||||
className="w-full rounded-full bg-neutral-800 text-white hover:bg-neutral-700 dark:bg-neutral-700 dark:hover:bg-neutral-600"
|
||||
>
|
||||
Join waitlist
|
||||
</Button>
|
||||
)}
|
||||
</Dialog.Footer>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,126 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import {
|
||||
Carousel,
|
||||
CarouselContent,
|
||||
CarouselItem,
|
||||
} from "@/components/__legacy__/ui/carousel";
|
||||
import { WaitlistCard } from "../WaitlistCard/WaitlistCard";
|
||||
import { WaitlistDetailModal } from "../WaitlistDetailModal/WaitlistDetailModal";
|
||||
import { JoinWaitlistModal } from "../JoinWaitlistModal/JoinWaitlistModal";
|
||||
import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
|
||||
import { useWaitlistSection } from "./useWaitlistSection";
|
||||
|
||||
export function WaitlistSection() {
|
||||
const { waitlists, joinedWaitlistIds, isLoading, hasError, markAsJoined } =
|
||||
useWaitlistSection();
|
||||
const [selectedWaitlist, setSelectedWaitlist] =
|
||||
useState<StoreWaitlistEntry | null>(null);
|
||||
const [joiningWaitlist, setJoiningWaitlist] =
|
||||
useState<StoreWaitlistEntry | null>(null);
|
||||
|
||||
function handleCardClick(waitlist: StoreWaitlistEntry) {
|
||||
setSelectedWaitlist(waitlist);
|
||||
}
|
||||
|
||||
function handleJoinClick(waitlist: StoreWaitlistEntry) {
|
||||
setJoiningWaitlist(waitlist);
|
||||
}
|
||||
|
||||
function handleJoinFromDetail() {
|
||||
if (selectedWaitlist) {
|
||||
setJoiningWaitlist(selectedWaitlist);
|
||||
setSelectedWaitlist(null);
|
||||
}
|
||||
}
|
||||
|
||||
function handleJoinSuccess(waitlistId: string) {
|
||||
markAsJoined(waitlistId);
|
||||
setJoiningWaitlist(null);
|
||||
}
|
||||
|
||||
// Don't render if loading, error, or no waitlists
|
||||
if (isLoading || hasError || !waitlists || waitlists.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center">
|
||||
<div className="w-full max-w-[1360px]">
|
||||
{/* Section Header */}
|
||||
<div className="mb-6">
|
||||
<h2 className="font-poppins text-2xl font-semibold text-[#282828] dark:text-neutral-200">
|
||||
Help Shape What's Next
|
||||
</h2>
|
||||
<p className="mt-2 text-base text-neutral-600 dark:text-neutral-400">
|
||||
These agents are in development. Your interest helps us prioritize
|
||||
what gets built — and we'll notify you when they're ready.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Mobile Carousel View */}
|
||||
<Carousel
|
||||
className="md:hidden"
|
||||
opts={{
|
||||
loop: true,
|
||||
}}
|
||||
>
|
||||
<CarouselContent>
|
||||
{waitlists.map((waitlist) => (
|
||||
<CarouselItem
|
||||
key={waitlist.waitlistId}
|
||||
className="min-w-64 max-w-71"
|
||||
>
|
||||
<WaitlistCard
|
||||
name={waitlist.name}
|
||||
subHeading={waitlist.subHeading}
|
||||
description={waitlist.description}
|
||||
imageUrl={waitlist.imageUrls[0] || null}
|
||||
isMember={joinedWaitlistIds.has(waitlist.waitlistId)}
|
||||
onCardClick={() => handleCardClick(waitlist)}
|
||||
onJoinClick={() => handleJoinClick(waitlist)}
|
||||
/>
|
||||
</CarouselItem>
|
||||
))}
|
||||
</CarouselContent>
|
||||
</Carousel>
|
||||
|
||||
{/* Desktop Grid View */}
|
||||
<div className="hidden grid-cols-1 place-items-center gap-6 md:grid md:grid-cols-2 lg:grid-cols-3">
|
||||
{waitlists.map((waitlist) => (
|
||||
<WaitlistCard
|
||||
key={waitlist.waitlistId}
|
||||
name={waitlist.name}
|
||||
subHeading={waitlist.subHeading}
|
||||
description={waitlist.description}
|
||||
imageUrl={waitlist.imageUrls[0] || null}
|
||||
isMember={joinedWaitlistIds.has(waitlist.waitlistId)}
|
||||
onCardClick={() => handleCardClick(waitlist)}
|
||||
onJoinClick={() => handleJoinClick(waitlist)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Detail Modal */}
|
||||
{selectedWaitlist && (
|
||||
<WaitlistDetailModal
|
||||
waitlist={selectedWaitlist}
|
||||
isMember={joinedWaitlistIds.has(selectedWaitlist.waitlistId)}
|
||||
onClose={() => setSelectedWaitlist(null)}
|
||||
onJoin={handleJoinFromDetail}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Join Modal */}
|
||||
{joiningWaitlist && (
|
||||
<JoinWaitlistModal
|
||||
waitlist={joiningWaitlist}
|
||||
onClose={() => setJoiningWaitlist(null)}
|
||||
onSuccess={() => handleJoinSuccess(joiningWaitlist.waitlistId)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useMemo } from "react";
|
||||
import { useSupabaseStore } from "@/lib/supabase/hooks/useSupabaseStore";
|
||||
import {
|
||||
useGetV2GetTheAgentWaitlist,
|
||||
useGetV2GetWaitlistIdsTheCurrentUserHasJoined,
|
||||
} from "@/app/api/__generated__/endpoints/store/store";
|
||||
import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
|
||||
export function useWaitlistSection() {
|
||||
const { user } = useSupabaseStore();
|
||||
const queryClient = useQueryClient();
|
||||
|
||||
// Fetch waitlists
|
||||
const {
|
||||
data: waitlistsResponse,
|
||||
isLoading: waitlistsLoading,
|
||||
isError: waitlistsError,
|
||||
} = useGetV2GetTheAgentWaitlist();
|
||||
|
||||
// Fetch memberships if logged in
|
||||
const { data: membershipsResponse, isLoading: membershipsLoading } =
|
||||
useGetV2GetWaitlistIdsTheCurrentUserHasJoined({
|
||||
query: {
|
||||
enabled: !!user,
|
||||
},
|
||||
});
|
||||
|
||||
const waitlists: StoreWaitlistEntry[] = useMemo(() => {
|
||||
if (waitlistsResponse?.status === 200) {
|
||||
return waitlistsResponse.data.listings;
|
||||
}
|
||||
return [];
|
||||
}, [waitlistsResponse]);
|
||||
|
||||
const joinedWaitlistIds: Set<string> = useMemo(() => {
|
||||
if (membershipsResponse?.status === 200) {
|
||||
return new Set(membershipsResponse.data);
|
||||
}
|
||||
return new Set();
|
||||
}, [membershipsResponse]);
|
||||
|
||||
const isLoading = waitlistsLoading || (!!user && membershipsLoading);
|
||||
const hasError = waitlistsError;
|
||||
|
||||
// Function to add a waitlist ID to joined set (called after successful join)
|
||||
function markAsJoined(_waitlistId: string) {
|
||||
// Invalidate the memberships query to refetch
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: ["getV2GetWaitlistIdsTheCurrentUserHasJoined"],
|
||||
});
|
||||
}
|
||||
|
||||
return { waitlists, joinedWaitlistIds, isLoading, hasError, markAsJoined };
|
||||
}
|
||||
@@ -4965,301 +4965,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/admin/waitlist": {
|
||||
"get": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "List All Waitlists",
|
||||
"description": "Get all waitlists with admin details (admin only).\n\nReturns:\n WaitlistAdminListResponse with all waitlists",
|
||||
"operationId": "getV2List all waitlists",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistAdminListResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
}
|
||||
},
|
||||
"security": [{ "HTTPBearerJWT": [] }]
|
||||
},
|
||||
"post": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Create Waitlist",
|
||||
"description": "Create a new waitlist (admin only).\n\nArgs:\n request: Waitlist creation details\n user_id: Authenticated admin user creating the waitlist\n\nReturns:\n WaitlistAdminResponse with the created waitlist details",
|
||||
"operationId": "postV2Create waitlist",
|
||||
"requestBody": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/WaitlistCreateRequest" }
|
||||
}
|
||||
},
|
||||
"required": true
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistAdminResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"security": [{ "HTTPBearerJWT": [] }]
|
||||
}
|
||||
},
|
||||
"/api/store/admin/waitlist/{waitlist_id}": {
|
||||
"delete": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Delete Waitlist",
|
||||
"description": "Soft delete a waitlist (admin only).\n\nArgs:\n waitlist_id: ID of the waitlist to delete\n\nReturns:\n Success message",
|
||||
"operationId": "deleteV2Delete waitlist",
|
||||
"security": [{ "HTTPBearerJWT": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": { "application/json": { "schema": {} } }
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"get": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Get Waitlist Details",
|
||||
"description": "Get a single waitlist with admin details (admin only).\n\nArgs:\n waitlist_id: ID of the waitlist to retrieve\n\nReturns:\n WaitlistAdminResponse with waitlist details",
|
||||
"operationId": "getV2Get waitlist details",
|
||||
"security": [{ "HTTPBearerJWT": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistAdminResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Update Waitlist",
|
||||
"description": "Update a waitlist (admin only).\n\nArgs:\n waitlist_id: ID of the waitlist to update\n request: Fields to update\n\nReturns:\n WaitlistAdminResponse with updated waitlist details",
|
||||
"operationId": "putV2Update waitlist",
|
||||
"security": [{ "HTTPBearerJWT": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist"
|
||||
}
|
||||
],
|
||||
"requestBody": {
|
||||
"required": true,
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/WaitlistUpdateRequest" }
|
||||
}
|
||||
}
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistAdminResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/admin/waitlist/{waitlist_id}/link": {
|
||||
"post": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Link Waitlist to Store Listing",
|
||||
"description": "Link a waitlist to a store listing (admin only).\n\nWhen the linked store listing is approved/published, waitlist users\nwill be automatically notified.\n\nArgs:\n waitlist_id: ID of the waitlist\n store_listing_id: ID of the store listing to link\n\nReturns:\n WaitlistAdminResponse with updated waitlist details",
|
||||
"operationId": "postV2Link waitlist to store listing",
|
||||
"security": [{ "HTTPBearerJWT": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist"
|
||||
}
|
||||
],
|
||||
"requestBody": {
|
||||
"required": true,
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Body_postV2Link_waitlist_to_store_listing"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistAdminResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/admin/waitlist/{waitlist_id}/signups": {
|
||||
"get": {
|
||||
"tags": ["v2", "admin", "store", "admin", "waitlist"],
|
||||
"summary": "Get Waitlist Signups",
|
||||
"description": "Get all signups for a waitlist (admin only).\n\nArgs:\n waitlist_id: ID of the waitlist\n\nReturns:\n WaitlistSignupListResponse with all signups",
|
||||
"operationId": "getV2Get waitlist signups",
|
||||
"security": [{ "HTTPBearerJWT": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/WaitlistSignupListResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/agents": {
|
||||
"get": {
|
||||
"tags": ["v2", "store", "public"],
|
||||
@@ -6042,101 +5747,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/waitlist": {
|
||||
"get": {
|
||||
"tags": ["v2", "store", "public"],
|
||||
"summary": "Get the agent waitlist",
|
||||
"description": "Get all active waitlists for public display.",
|
||||
"operationId": "getV2Get the agent waitlist",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/StoreWaitlistsAllResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/store/waitlist/my-memberships": {
|
||||
"get": {
|
||||
"tags": ["v2", "store", "private"],
|
||||
"summary": "Get waitlist IDs the current user has joined",
|
||||
"description": "Returns list of waitlist IDs the authenticated user has joined.",
|
||||
"operationId": "getV2Get waitlist ids the current user has joined",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Response Getv2Get Waitlist Ids The Current User Has Joined"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
|
||||
}
|
||||
},
|
||||
"security": [{ "HTTPBearerJWT": [] }]
|
||||
}
|
||||
},
|
||||
"/api/store/waitlist/{waitlist_id}/join": {
|
||||
"post": {
|
||||
"tags": ["v2", "store", "public"],
|
||||
"summary": "Add self to the agent waitlist",
|
||||
"description": "Add the current user to the agent waitlist.",
|
||||
"operationId": "postV2Add self to the agent waitlist",
|
||||
"security": [{ "HTTPBearer": [] }],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "waitlist_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "The ID of the waitlist to join",
|
||||
"title": "Waitlist Id"
|
||||
},
|
||||
"description": "The ID of the waitlist to join"
|
||||
}
|
||||
],
|
||||
"requestBody": {
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Body_postV2Add_self_to_the_agent_waitlist"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Successful Response",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/StoreWaitlistEntry" }
|
||||
}
|
||||
}
|
||||
},
|
||||
"422": {
|
||||
"description": "Validation Error",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/health": {
|
||||
"get": {
|
||||
"tags": ["health"],
|
||||
@@ -6884,17 +6494,6 @@
|
||||
"required": ["store_listing_version_id"],
|
||||
"title": "Body_postV2Add marketplace agent"
|
||||
},
|
||||
"Body_postV2Add_self_to_the_agent_waitlist": {
|
||||
"properties": {
|
||||
"email": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Email",
|
||||
"description": "Email address for unauthenticated users"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"title": "Body_postV2Add self to the agent waitlist"
|
||||
},
|
||||
"Body_postV2Execute_a_preset": {
|
||||
"properties": {
|
||||
"inputs": {
|
||||
@@ -6913,18 +6512,6 @@
|
||||
"type": "object",
|
||||
"title": "Body_postV2Execute a preset"
|
||||
},
|
||||
"Body_postV2Link_waitlist_to_store_listing": {
|
||||
"properties": {
|
||||
"store_listing_id": {
|
||||
"type": "string",
|
||||
"title": "Store Listing Id",
|
||||
"description": "The ID of the store listing"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["store_listing_id"],
|
||||
"title": "Body_postV2Link waitlist to store listing"
|
||||
},
|
||||
"Body_postV2Upload_submission_media": {
|
||||
"properties": {
|
||||
"file": { "type": "string", "format": "binary", "title": "File" }
|
||||
@@ -8764,8 +8351,7 @@
|
||||
"REFUND_REQUEST",
|
||||
"REFUND_PROCESSED",
|
||||
"AGENT_APPROVED",
|
||||
"AGENT_REJECTED",
|
||||
"WAITLIST_LAUNCH"
|
||||
"AGENT_REJECTED"
|
||||
],
|
||||
"title": "NotificationType"
|
||||
},
|
||||
@@ -10313,57 +9899,6 @@
|
||||
"required": ["submissions", "pagination"],
|
||||
"title": "StoreSubmissionsResponse"
|
||||
},
|
||||
"StoreWaitlistEntry": {
|
||||
"properties": {
|
||||
"waitlistId": { "type": "string", "title": "Waitlistid" },
|
||||
"slug": { "type": "string", "title": "Slug" },
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"subHeading": { "type": "string", "title": "Subheading" },
|
||||
"videoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Videourl"
|
||||
},
|
||||
"agentOutputDemoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Agentoutputdemourl"
|
||||
},
|
||||
"imageUrls": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Imageurls"
|
||||
},
|
||||
"description": { "type": "string", "title": "Description" },
|
||||
"categories": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Categories"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": [
|
||||
"waitlistId",
|
||||
"slug",
|
||||
"name",
|
||||
"subHeading",
|
||||
"imageUrls",
|
||||
"description",
|
||||
"categories"
|
||||
],
|
||||
"title": "StoreWaitlistEntry",
|
||||
"description": "Public waitlist entry - no PII fields exposed."
|
||||
},
|
||||
"StoreWaitlistsAllResponse": {
|
||||
"properties": {
|
||||
"listings": {
|
||||
"items": { "$ref": "#/components/schemas/StoreWaitlistEntry" },
|
||||
"type": "array",
|
||||
"title": "Listings"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["listings"],
|
||||
"title": "StoreWaitlistsAllResponse"
|
||||
},
|
||||
"SubmissionStatus": {
|
||||
"type": "string",
|
||||
"enum": ["DRAFT", "PENDING", "APPROVED", "REJECTED"],
|
||||
@@ -12108,201 +11643,6 @@
|
||||
"required": ["loc", "msg", "type"],
|
||||
"title": "ValidationError"
|
||||
},
|
||||
"WaitlistAdminListResponse": {
|
||||
"properties": {
|
||||
"waitlists": {
|
||||
"items": { "$ref": "#/components/schemas/WaitlistAdminResponse" },
|
||||
"type": "array",
|
||||
"title": "Waitlists"
|
||||
},
|
||||
"totalCount": { "type": "integer", "title": "Totalcount" }
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["waitlists", "totalCount"],
|
||||
"title": "WaitlistAdminListResponse",
|
||||
"description": "Response model for listing all waitlists (admin view)."
|
||||
},
|
||||
"WaitlistAdminResponse": {
|
||||
"properties": {
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
"createdAt": { "type": "string", "title": "Createdat" },
|
||||
"updatedAt": { "type": "string", "title": "Updatedat" },
|
||||
"slug": { "type": "string", "title": "Slug" },
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"subHeading": { "type": "string", "title": "Subheading" },
|
||||
"description": { "type": "string", "title": "Description" },
|
||||
"categories": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Categories"
|
||||
},
|
||||
"imageUrls": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Imageurls"
|
||||
},
|
||||
"videoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Videourl"
|
||||
},
|
||||
"agentOutputDemoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Agentoutputdemourl"
|
||||
},
|
||||
"status": { "$ref": "#/components/schemas/WaitlistExternalStatus" },
|
||||
"votes": { "type": "integer", "title": "Votes" },
|
||||
"signupCount": { "type": "integer", "title": "Signupcount" },
|
||||
"storeListingId": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Storelistingid"
|
||||
},
|
||||
"owningUserId": { "type": "string", "title": "Owninguserid" }
|
||||
},
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"createdAt",
|
||||
"updatedAt",
|
||||
"slug",
|
||||
"name",
|
||||
"subHeading",
|
||||
"description",
|
||||
"categories",
|
||||
"imageUrls",
|
||||
"status",
|
||||
"votes",
|
||||
"signupCount",
|
||||
"owningUserId"
|
||||
],
|
||||
"title": "WaitlistAdminResponse",
|
||||
"description": "Admin response model with full waitlist details including internal data."
|
||||
},
|
||||
"WaitlistCreateRequest": {
|
||||
"properties": {
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"slug": { "type": "string", "title": "Slug" },
|
||||
"subHeading": { "type": "string", "title": "Subheading" },
|
||||
"description": { "type": "string", "title": "Description" },
|
||||
"categories": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Categories",
|
||||
"default": []
|
||||
},
|
||||
"imageUrls": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Imageurls",
|
||||
"default": []
|
||||
},
|
||||
"videoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Videourl"
|
||||
},
|
||||
"agentOutputDemoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Agentoutputdemourl"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["name", "slug", "subHeading", "description"],
|
||||
"title": "WaitlistCreateRequest",
|
||||
"description": "Request model for creating a new waitlist."
|
||||
},
|
||||
"WaitlistExternalStatus": {
|
||||
"type": "string",
|
||||
"enum": ["DONE", "NOT_STARTED", "CANCELED", "WORK_IN_PROGRESS"],
|
||||
"title": "WaitlistExternalStatus"
|
||||
},
|
||||
"WaitlistSignup": {
|
||||
"properties": {
|
||||
"type": { "type": "string", "title": "Type" },
|
||||
"userId": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Userid"
|
||||
},
|
||||
"email": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Email"
|
||||
},
|
||||
"username": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Username"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["type"],
|
||||
"title": "WaitlistSignup",
|
||||
"description": "Individual signup entry for a waitlist."
|
||||
},
|
||||
"WaitlistSignupListResponse": {
|
||||
"properties": {
|
||||
"waitlistId": { "type": "string", "title": "Waitlistid" },
|
||||
"signups": {
|
||||
"items": { "$ref": "#/components/schemas/WaitlistSignup" },
|
||||
"type": "array",
|
||||
"title": "Signups"
|
||||
},
|
||||
"totalCount": { "type": "integer", "title": "Totalcount" }
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["waitlistId", "signups", "totalCount"],
|
||||
"title": "WaitlistSignupListResponse",
|
||||
"description": "Response model for listing waitlist signups."
|
||||
},
|
||||
"WaitlistUpdateRequest": {
|
||||
"properties": {
|
||||
"name": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Name"
|
||||
},
|
||||
"slug": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Slug"
|
||||
},
|
||||
"subHeading": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Subheading"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Description"
|
||||
},
|
||||
"categories": {
|
||||
"anyOf": [
|
||||
{ "items": { "type": "string" }, "type": "array" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Categories"
|
||||
},
|
||||
"imageUrls": {
|
||||
"anyOf": [
|
||||
{ "items": { "type": "string" }, "type": "array" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Imageurls"
|
||||
},
|
||||
"videoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Videourl"
|
||||
},
|
||||
"agentOutputDemoUrl": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Agentoutputdemourl"
|
||||
},
|
||||
"status": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Status"
|
||||
},
|
||||
"storeListingId": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Storelistingid"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"title": "WaitlistUpdateRequest",
|
||||
"description": "Request model for updating a waitlist."
|
||||
},
|
||||
"Webhook": {
|
||||
"properties": {
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
@@ -12353,7 +11693,6 @@
|
||||
"in": "header",
|
||||
"name": "X-Postmark-Webhook-Token"
|
||||
},
|
||||
"HTTPBearer": { "type": "http", "scheme": "bearer" },
|
||||
"HTTPBearerJWT": {
|
||||
"type": "http",
|
||||
"scheme": "bearer",
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
import type { Meta, StoryObj } from "@storybook/nextjs";
|
||||
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { Table } from "./Table";
|
||||
|
||||
const meta = {
|
||||
title: "Molecules/Table",
|
||||
component: Table,
|
||||
decorators: [
|
||||
(Story) => (
|
||||
<TooltipProvider>
|
||||
<Story />
|
||||
</TooltipProvider>
|
||||
),
|
||||
],
|
||||
parameters: {
|
||||
layout: "centered",
|
||||
},
|
||||
tags: ["autodocs"],
|
||||
argTypes: {
|
||||
allowAddRow: {
|
||||
control: "boolean",
|
||||
description: "Whether to show the Add row button",
|
||||
},
|
||||
allowDeleteRow: {
|
||||
control: "boolean",
|
||||
description: "Whether to show delete buttons for each row",
|
||||
},
|
||||
readOnly: {
|
||||
control: "boolean",
|
||||
description:
|
||||
"Whether the table is read-only (renders text instead of inputs)",
|
||||
},
|
||||
addRowLabel: {
|
||||
control: "text",
|
||||
description: "Label for the Add row button",
|
||||
},
|
||||
},
|
||||
} satisfies Meta<typeof Table>;
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj<typeof meta>;
|
||||
|
||||
export const Default: Story = {
|
||||
args: {
|
||||
columns: ["name", "email", "role"],
|
||||
allowAddRow: true,
|
||||
allowDeleteRow: true,
|
||||
},
|
||||
};
|
||||
|
||||
export const WithDefaultValues: Story = {
|
||||
args: {
|
||||
columns: ["name", "email", "role"],
|
||||
defaultValues: [
|
||||
{ name: "John Doe", email: "john@example.com", role: "Admin" },
|
||||
{ name: "Jane Smith", email: "jane@example.com", role: "User" },
|
||||
{ name: "Bob Wilson", email: "bob@example.com", role: "Editor" },
|
||||
],
|
||||
allowAddRow: true,
|
||||
allowDeleteRow: true,
|
||||
},
|
||||
};
|
||||
|
||||
export const ReadOnly: Story = {
|
||||
args: {
|
||||
columns: ["name", "email"],
|
||||
defaultValues: [
|
||||
{ name: "John Doe", email: "john@example.com" },
|
||||
{ name: "Jane Smith", email: "jane@example.com" },
|
||||
],
|
||||
readOnly: true,
|
||||
},
|
||||
};
|
||||
|
||||
export const NoAddOrDelete: Story = {
|
||||
args: {
|
||||
columns: ["name", "email"],
|
||||
defaultValues: [
|
||||
{ name: "John Doe", email: "john@example.com" },
|
||||
{ name: "Jane Smith", email: "jane@example.com" },
|
||||
],
|
||||
allowAddRow: false,
|
||||
allowDeleteRow: false,
|
||||
},
|
||||
};
|
||||
|
||||
export const SingleColumn: Story = {
|
||||
args: {
|
||||
columns: ["item"],
|
||||
allowAddRow: true,
|
||||
allowDeleteRow: true,
|
||||
addRowLabel: "Add item",
|
||||
},
|
||||
};
|
||||
|
||||
export const CustomAddLabel: Story = {
|
||||
args: {
|
||||
columns: ["key", "value"],
|
||||
allowAddRow: true,
|
||||
allowDeleteRow: true,
|
||||
addRowLabel: "Add new entry",
|
||||
},
|
||||
};
|
||||
|
||||
export const KeyValuePairs: Story = {
|
||||
args: {
|
||||
columns: ["key", "value"],
|
||||
defaultValues: [
|
||||
{ key: "API_KEY", value: "sk-..." },
|
||||
{ key: "DATABASE_URL", value: "postgres://..." },
|
||||
],
|
||||
allowAddRow: true,
|
||||
allowDeleteRow: true,
|
||||
addRowLabel: "Add variable",
|
||||
},
|
||||
};
|
||||
@@ -1,133 +0,0 @@
|
||||
import * as React from "react";
|
||||
import {
|
||||
Table as BaseTable,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/__legacy__/ui/table";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Plus, Trash2 } from "lucide-react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useTable, RowData } from "./useTable";
|
||||
import { formatColumnTitle, formatPlaceholder } from "./helpers";
|
||||
|
||||
export interface TableProps {
|
||||
columns: string[];
|
||||
defaultValues?: RowData[];
|
||||
onChange?: (rows: RowData[]) => void;
|
||||
allowAddRow?: boolean;
|
||||
allowDeleteRow?: boolean;
|
||||
addRowLabel?: string;
|
||||
className?: string;
|
||||
readOnly?: boolean;
|
||||
}
|
||||
|
||||
export function Table({
|
||||
columns,
|
||||
defaultValues,
|
||||
onChange,
|
||||
allowAddRow = true,
|
||||
allowDeleteRow = true,
|
||||
addRowLabel = "Add row",
|
||||
className,
|
||||
readOnly = false,
|
||||
}: TableProps) {
|
||||
const { rows, handleAddRow, handleDeleteRow, handleCellChange } = useTable({
|
||||
columns,
|
||||
defaultValues,
|
||||
onChange,
|
||||
});
|
||||
|
||||
const showDeleteColumn = allowDeleteRow && !readOnly;
|
||||
const showAddButton = allowAddRow && !readOnly;
|
||||
|
||||
return (
|
||||
<div className={cn("flex flex-col gap-3", className)}>
|
||||
<div className="overflow-hidden rounded-xl border border-zinc-200 bg-white">
|
||||
<BaseTable>
|
||||
<TableHeader>
|
||||
<TableRow className="border-b border-zinc-100 bg-zinc-50/50">
|
||||
{columns.map((column) => (
|
||||
<TableHead
|
||||
key={column}
|
||||
className="h-10 px-3 text-sm font-medium text-zinc-600"
|
||||
>
|
||||
{formatColumnTitle(column)}
|
||||
</TableHead>
|
||||
))}
|
||||
{showDeleteColumn && <TableHead className="w-[50px]" />}
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{rows.map((row, rowIndex) => (
|
||||
<TableRow key={rowIndex} className="border-none">
|
||||
{columns.map((column) => (
|
||||
<TableCell key={`${rowIndex}-${column}`} className="p-2">
|
||||
{readOnly ? (
|
||||
<Text
|
||||
variant="body"
|
||||
className="px-3 py-2 text-sm text-zinc-800"
|
||||
>
|
||||
{row[column] || "-"}
|
||||
</Text>
|
||||
) : (
|
||||
<Input
|
||||
id={`table-${rowIndex}-${column}`}
|
||||
label={formatColumnTitle(column)}
|
||||
hideLabel
|
||||
value={row[column] ?? ""}
|
||||
onChange={(e) =>
|
||||
handleCellChange(rowIndex, column, e.target.value)
|
||||
}
|
||||
placeholder={formatPlaceholder(column)}
|
||||
size="small"
|
||||
wrapperClassName="mb-0"
|
||||
/>
|
||||
)}
|
||||
</TableCell>
|
||||
))}
|
||||
{showDeleteColumn && (
|
||||
<TableCell className="p-2">
|
||||
<Button
|
||||
variant="icon"
|
||||
size="icon"
|
||||
onClick={() => handleDeleteRow(rowIndex)}
|
||||
aria-label="Delete row"
|
||||
className="text-zinc-400 transition-colors hover:text-red-500"
|
||||
>
|
||||
<Trash2 className="h-4 w-4" />
|
||||
</Button>
|
||||
</TableCell>
|
||||
)}
|
||||
</TableRow>
|
||||
))}
|
||||
{showAddButton && (
|
||||
<TableRow>
|
||||
<TableCell
|
||||
colSpan={columns.length + (showDeleteColumn ? 1 : 0)}
|
||||
className="p-2"
|
||||
>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="small"
|
||||
onClick={handleAddRow}
|
||||
leftIcon={<Plus className="h-4 w-4" />}
|
||||
className="w-fit"
|
||||
>
|
||||
{addRowLabel}
|
||||
</Button>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
)}
|
||||
</TableBody>
|
||||
</BaseTable>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export { type RowData } from "./useTable";
|
||||
@@ -1,7 +0,0 @@
|
||||
export const formatColumnTitle = (key: string): string => {
|
||||
return key.charAt(0).toUpperCase() + key.slice(1);
|
||||
};
|
||||
|
||||
export const formatPlaceholder = (key: string): string => {
|
||||
return `Enter ${key.toLowerCase()}`;
|
||||
};
|
||||
@@ -1,81 +0,0 @@
|
||||
import { useState, useEffect } from "react";
|
||||
|
||||
export type RowData = Record<string, string>;
|
||||
|
||||
interface UseTableOptions {
|
||||
columns: string[];
|
||||
defaultValues?: RowData[];
|
||||
onChange?: (rows: RowData[]) => void;
|
||||
}
|
||||
|
||||
export function useTable({
|
||||
columns,
|
||||
defaultValues,
|
||||
onChange,
|
||||
}: UseTableOptions) {
|
||||
const createEmptyRow = (): RowData => {
|
||||
const emptyRow: RowData = {};
|
||||
columns.forEach((column) => {
|
||||
emptyRow[column] = "";
|
||||
});
|
||||
return emptyRow;
|
||||
};
|
||||
|
||||
const [rows, setRows] = useState<RowData[]>(() => {
|
||||
if (defaultValues && defaultValues.length > 0) {
|
||||
return defaultValues;
|
||||
}
|
||||
return [];
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
if (defaultValues !== undefined) {
|
||||
setRows(defaultValues);
|
||||
}
|
||||
}, [defaultValues]);
|
||||
|
||||
const updateRows = (newRows: RowData[]) => {
|
||||
setRows(newRows);
|
||||
onChange?.(newRows);
|
||||
};
|
||||
|
||||
const handleAddRow = () => {
|
||||
const newRows = [...rows, createEmptyRow()];
|
||||
updateRows(newRows);
|
||||
};
|
||||
|
||||
const handleDeleteRow = (rowIndex: number) => {
|
||||
const newRows = rows.filter((_, index) => index !== rowIndex);
|
||||
updateRows(newRows);
|
||||
};
|
||||
|
||||
const handleCellChange = (
|
||||
rowIndex: number,
|
||||
columnKey: string,
|
||||
value: string,
|
||||
) => {
|
||||
const newRows = rows.map((row, index) => {
|
||||
if (index === rowIndex) {
|
||||
return {
|
||||
...row,
|
||||
[columnKey]: value,
|
||||
};
|
||||
}
|
||||
return row;
|
||||
});
|
||||
updateRows(newRows);
|
||||
};
|
||||
|
||||
const clearAll = () => {
|
||||
updateRows([]);
|
||||
};
|
||||
|
||||
return {
|
||||
rows,
|
||||
handleAddRow,
|
||||
handleDeleteRow,
|
||||
handleCellChange,
|
||||
clearAll,
|
||||
createEmptyRow,
|
||||
};
|
||||
}
|
||||
@@ -30,8 +30,6 @@ export const FormRenderer = ({
|
||||
return generateUiSchemaForCustomFields(preprocessedSchema, uiSchema);
|
||||
}, [preprocessedSchema, uiSchema]);
|
||||
|
||||
console.log("preprocessedSchema", preprocessedSchema);
|
||||
|
||||
return (
|
||||
<div className={"mb-6 mt-4"}>
|
||||
<Form
|
||||
|
||||
@@ -5,14 +5,19 @@ import { useAnyOfField } from "./useAnyOfField";
|
||||
import { getHandleId, updateUiOption } from "../../helpers";
|
||||
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
|
||||
import { ANY_OF_FLAG } from "../../constants";
|
||||
import { findCustomFieldId } from "../../registry";
|
||||
|
||||
export const AnyOfField = (props: FieldProps) => {
|
||||
const { registry, schema } = props;
|
||||
const { fields } = registry;
|
||||
const { SchemaField: _SchemaField } = fields;
|
||||
const { nodeId } = registry.formContext;
|
||||
|
||||
const { isInputConnected } = useEdgeStore();
|
||||
|
||||
const uiOptions = getUiOptions(props.uiSchema, props.globalUiOptions);
|
||||
|
||||
const Widget = getWidget({ type: "string" }, "select", registry.widgets);
|
||||
|
||||
const {
|
||||
handleOptionChange,
|
||||
enumOptions,
|
||||
@@ -21,15 +26,6 @@ export const AnyOfField = (props: FieldProps) => {
|
||||
field_id,
|
||||
} = useAnyOfField(props);
|
||||
|
||||
const parentCustomFieldId = findCustomFieldId(schema);
|
||||
if (parentCustomFieldId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const uiOptions = getUiOptions(props.uiSchema, props.globalUiOptions);
|
||||
|
||||
const Widget = getWidget({ type: "string" }, "select", registry.widgets);
|
||||
|
||||
const handleId = getHandleId({
|
||||
uiOptions,
|
||||
id: field_id + ANY_OF_FLAG,
|
||||
@@ -44,21 +40,12 @@ export const AnyOfField = (props: FieldProps) => {
|
||||
|
||||
const isHandleConnected = isInputConnected(nodeId, handleId);
|
||||
|
||||
// Now anyOf can render - custom fields if the option schema matches a custom field
|
||||
const optionCustomFieldId = optionSchema
|
||||
? findCustomFieldId(optionSchema)
|
||||
: null;
|
||||
|
||||
const optionUiSchema = optionCustomFieldId
|
||||
? { ...updatedUiSchema, "ui:field": optionCustomFieldId }
|
||||
: updatedUiSchema;
|
||||
|
||||
const optionsSchemaField =
|
||||
(optionSchema && optionSchema.type !== "null" && (
|
||||
<_SchemaField
|
||||
{...props}
|
||||
schema={optionSchema}
|
||||
uiSchema={optionUiSchema}
|
||||
uiSchema={updatedUiSchema}
|
||||
/>
|
||||
)) ||
|
||||
null;
|
||||
|
||||
@@ -17,7 +17,6 @@ interface InputExpanderModalProps {
|
||||
defaultValue: string;
|
||||
description?: string;
|
||||
placeholder?: string;
|
||||
inputType?: "text" | "json";
|
||||
}
|
||||
|
||||
export const InputExpanderModal: FC<InputExpanderModalProps> = ({
|
||||
@@ -28,7 +27,6 @@ export const InputExpanderModal: FC<InputExpanderModalProps> = ({
|
||||
defaultValue,
|
||||
description,
|
||||
placeholder,
|
||||
inputType = "text",
|
||||
}) => {
|
||||
const [tempValue, setTempValue] = useState(defaultValue);
|
||||
const [isCopied, setIsCopied] = useState(false);
|
||||
@@ -80,10 +78,7 @@ export const InputExpanderModal: FC<InputExpanderModalProps> = ({
|
||||
hideLabel
|
||||
id="input-expander-modal"
|
||||
value={tempValue}
|
||||
className={cn(
|
||||
"!min-h-[300px] rounded-2xlarge",
|
||||
inputType === "json" && "font-mono text-sm",
|
||||
)}
|
||||
className="!min-h-[300px] rounded-2xlarge"
|
||||
onChange={(e) => setTempValue(e.target.value)}
|
||||
placeholder={placeholder || "Enter text..."}
|
||||
autoFocus
|
||||
|
||||
@@ -88,8 +88,6 @@ export const CredentialsField = (props: FieldProps) => {
|
||||
showTitle={false}
|
||||
readOnly={formContext?.readOnly}
|
||||
isOptional={!isRequired}
|
||||
className="w-full"
|
||||
variant="node"
|
||||
/>
|
||||
|
||||
{/* Optional credentials toggle - only show in builder canvas, not run dialogs */}
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { FieldProps, getTemplate, getUiOptions } from "@rjsf/utils";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { ArrowsOutIcon } from "@phosphor-icons/react";
|
||||
import { InputExpanderModal } from "../../base/standard/widgets/TextInput/TextInputExpanderModal";
|
||||
import { getHandleId, updateUiOption } from "../../helpers";
|
||||
import { useJsonTextField } from "./useJsonTextField";
|
||||
import { getPlaceholder } from "./helpers";
|
||||
|
||||
export const JsonTextField = (props: FieldProps) => {
|
||||
const {
|
||||
formData,
|
||||
onChange,
|
||||
schema,
|
||||
registry,
|
||||
uiSchema,
|
||||
required,
|
||||
name,
|
||||
fieldPathId,
|
||||
} = props;
|
||||
|
||||
const uiOptions = getUiOptions(uiSchema);
|
||||
|
||||
const TitleFieldTemplate = getTemplate(
|
||||
"TitleFieldTemplate",
|
||||
registry,
|
||||
uiOptions,
|
||||
);
|
||||
|
||||
const fieldId = fieldPathId?.$id ?? props.id ?? "json-field";
|
||||
|
||||
const handleId = getHandleId({
|
||||
uiOptions,
|
||||
id: fieldId,
|
||||
schema: schema,
|
||||
});
|
||||
|
||||
const updatedUiSchema = updateUiOption(uiSchema, {
|
||||
handleId: handleId,
|
||||
});
|
||||
|
||||
const {
|
||||
textValue,
|
||||
isModalOpen,
|
||||
handleChange,
|
||||
handleModalOpen,
|
||||
handleModalClose,
|
||||
handleModalSave,
|
||||
} = useJsonTextField({
|
||||
formData,
|
||||
onChange,
|
||||
path: fieldPathId?.path,
|
||||
});
|
||||
|
||||
const placeholder = getPlaceholder(schema);
|
||||
const title = schema.title || name || "JSON Value";
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-2">
|
||||
<TitleFieldTemplate
|
||||
id={fieldId}
|
||||
title={title}
|
||||
required={required}
|
||||
schema={schema}
|
||||
uiSchema={updatedUiSchema}
|
||||
registry={registry}
|
||||
/>
|
||||
<div className="nodrag relative flex items-center gap-2">
|
||||
<Input
|
||||
id={fieldId}
|
||||
hideLabel={true}
|
||||
type="textarea"
|
||||
label=""
|
||||
size="small"
|
||||
wrapperClassName="mb-0 flex-1 "
|
||||
value={textValue}
|
||||
onChange={handleChange}
|
||||
placeholder={placeholder}
|
||||
required={required}
|
||||
disabled={props.disabled}
|
||||
className="min-h-[60px] pr-8 font-mono text-xs"
|
||||
/>
|
||||
|
||||
<Tooltip delayDuration={0}>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
onClick={handleModalOpen}
|
||||
type="button"
|
||||
className="p-1"
|
||||
>
|
||||
<ArrowsOutIcon className="size-4" />
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>Expand input</TooltipContent>
|
||||
</Tooltip>
|
||||
</div>
|
||||
{schema.description && (
|
||||
<span className="text-xs text-gray-500">{schema.description}</span>
|
||||
)}
|
||||
|
||||
<InputExpanderModal
|
||||
isOpen={isModalOpen}
|
||||
onClose={handleModalClose}
|
||||
onSave={handleModalSave}
|
||||
title={`Edit ${title}`}
|
||||
description={schema.description || "Enter valid JSON"}
|
||||
defaultValue={textValue}
|
||||
placeholder={placeholder}
|
||||
inputType="json"
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default JsonTextField;
|
||||
@@ -1,67 +0,0 @@
|
||||
import { RJSFSchema } from "@rjsf/utils";
|
||||
|
||||
/**
|
||||
* Converts form data to a JSON string for display
|
||||
* @param formData - The data to stringify
|
||||
* @returns JSON string or empty string if data is null/undefined
|
||||
*/
|
||||
export function stringifyFormData(formData: unknown): string {
|
||||
if (formData === undefined || formData === null) {
|
||||
return "";
|
||||
}
|
||||
try {
|
||||
return JSON.stringify(formData, null, 2);
|
||||
} catch {
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a JSON string into an object/array
|
||||
* @param value - The JSON string to parse
|
||||
* @returns Parsed value or undefined if parsing fails or empty
|
||||
*/
|
||||
export function parseJsonValue(value: string): unknown | undefined {
|
||||
const trimmed = value.trim();
|
||||
if (trimmed === "") {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(trimmed);
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the appropriate placeholder text based on schema type
|
||||
* @param schema - The JSON schema
|
||||
* @returns Placeholder string
|
||||
*/
|
||||
export function getPlaceholder(schema: RJSFSchema): string {
|
||||
if (schema.type === "array") {
|
||||
return '["item1", "item2"] or [{"key": "value"}]';
|
||||
}
|
||||
if (schema.type === "object") {
|
||||
return '{"key": "value"}';
|
||||
}
|
||||
return "Enter JSON value...";
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a JSON string is valid
|
||||
* @param value - The JSON string to validate
|
||||
* @returns true if valid JSON, false otherwise
|
||||
*/
|
||||
export function isValidJson(value: string): boolean {
|
||||
if (value.trim() === "") {
|
||||
return true; // Empty is considered valid (will be undefined)
|
||||
}
|
||||
try {
|
||||
JSON.parse(value);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,107 +0,0 @@
|
||||
import { useState, useEffect, useCallback } from "react";
|
||||
import { FieldProps } from "@rjsf/utils";
|
||||
import { stringifyFormData, parseJsonValue, isValidJson } from "./helpers";
|
||||
|
||||
type FieldOnChange = FieldProps["onChange"];
|
||||
type FieldPathId = FieldProps["fieldPathId"];
|
||||
|
||||
interface UseJsonTextFieldOptions {
|
||||
formData: unknown;
|
||||
onChange: FieldOnChange;
|
||||
path?: FieldPathId["path"];
|
||||
}
|
||||
|
||||
interface UseJsonTextFieldReturn {
|
||||
textValue: string;
|
||||
isModalOpen: boolean;
|
||||
hasError: boolean;
|
||||
handleChange: (
|
||||
e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>,
|
||||
) => void;
|
||||
handleModalOpen: () => void;
|
||||
handleModalClose: () => void;
|
||||
handleModalSave: (value: string) => void;
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom hook for managing JSON text field state and handlers
|
||||
*/
|
||||
export function useJsonTextField({
|
||||
formData,
|
||||
onChange,
|
||||
path,
|
||||
}: UseJsonTextFieldOptions): UseJsonTextFieldReturn {
|
||||
const [textValue, setTextValue] = useState(() => stringifyFormData(formData));
|
||||
const [isModalOpen, setIsModalOpen] = useState(false);
|
||||
const [hasError, setHasError] = useState(false);
|
||||
|
||||
// Update text value when formData changes externally
|
||||
useEffect(() => {
|
||||
const newValue = stringifyFormData(formData);
|
||||
setTextValue(newValue);
|
||||
setHasError(false);
|
||||
}, [formData]);
|
||||
|
||||
const handleChange = useCallback(
|
||||
(e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
const value = e.target.value;
|
||||
setTextValue(value);
|
||||
|
||||
// Validate JSON and update error state
|
||||
const valid = isValidJson(value);
|
||||
setHasError(!valid);
|
||||
|
||||
// Try to parse and update formData
|
||||
if (value.trim() === "") {
|
||||
onChange(undefined, path ?? []);
|
||||
return;
|
||||
}
|
||||
|
||||
const parsed = parseJsonValue(value);
|
||||
if (parsed !== undefined) {
|
||||
onChange(parsed, path ?? []);
|
||||
}
|
||||
},
|
||||
[onChange, path],
|
||||
);
|
||||
|
||||
const handleModalOpen = useCallback(() => {
|
||||
setIsModalOpen(true);
|
||||
}, []);
|
||||
|
||||
const handleModalClose = useCallback(() => {
|
||||
setIsModalOpen(false);
|
||||
}, []);
|
||||
|
||||
const handleModalSave = useCallback(
|
||||
(value: string) => {
|
||||
setTextValue(value);
|
||||
setIsModalOpen(false);
|
||||
|
||||
// Validate and update
|
||||
const valid = isValidJson(value);
|
||||
setHasError(!valid);
|
||||
|
||||
if (value.trim() === "") {
|
||||
onChange(undefined, path ?? []);
|
||||
return;
|
||||
}
|
||||
|
||||
const parsed = parseJsonValue(value);
|
||||
if (parsed !== undefined) {
|
||||
onChange(parsed, path ?? []);
|
||||
}
|
||||
},
|
||||
[onChange, path],
|
||||
);
|
||||
|
||||
return {
|
||||
textValue,
|
||||
isModalOpen,
|
||||
hasError,
|
||||
handleChange,
|
||||
handleModalOpen,
|
||||
handleModalClose,
|
||||
handleModalSave,
|
||||
};
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
import React from "react";
|
||||
import { FieldProps, getUiOptions } from "@rjsf/utils";
|
||||
import { BlockIOObjectSubSchema } from "@/lib/autogpt-server-api/types";
|
||||
import {
|
||||
MultiSelector,
|
||||
MultiSelectorContent,
|
||||
MultiSelectorInput,
|
||||
MultiSelectorItem,
|
||||
MultiSelectorList,
|
||||
MultiSelectorTrigger,
|
||||
} from "@/components/__legacy__/ui/multiselect";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useMultiSelectField } from "./useMultiSelectField";
|
||||
|
||||
export const MultiSelectField = (props: FieldProps) => {
|
||||
const { schema, formData, onChange, fieldPathId } = props;
|
||||
const uiOptions = getUiOptions(props.uiSchema);
|
||||
|
||||
const { optionSchema, options, selection, createChangeHandler } =
|
||||
useMultiSelectField({
|
||||
schema: schema as BlockIOObjectSubSchema,
|
||||
formData,
|
||||
});
|
||||
|
||||
const handleValuesChange = createChangeHandler(onChange, fieldPathId);
|
||||
|
||||
const displayName = schema.title || "options";
|
||||
|
||||
return (
|
||||
<div className={cn("flex flex-col", uiOptions.className)}>
|
||||
<MultiSelector
|
||||
className="nodrag"
|
||||
values={selection}
|
||||
onValuesChange={handleValuesChange}
|
||||
>
|
||||
<MultiSelectorTrigger className="rounded-3xl border border-zinc-200 bg-white px-2 shadow-none">
|
||||
<MultiSelectorInput
|
||||
placeholder={
|
||||
(schema as any).placeholder ?? `Select ${displayName}...`
|
||||
}
|
||||
/>
|
||||
</MultiSelectorTrigger>
|
||||
<MultiSelectorContent className="nowheel">
|
||||
<MultiSelectorList>
|
||||
{options
|
||||
.map((key) => ({ ...optionSchema[key], key }))
|
||||
.map(({ key, title, description }) => (
|
||||
<MultiSelectorItem key={key} value={key} title={description}>
|
||||
{title ?? key}
|
||||
</MultiSelectorItem>
|
||||
))}
|
||||
</MultiSelectorList>
|
||||
</MultiSelectorContent>
|
||||
</MultiSelector>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -1 +0,0 @@
|
||||
export { MultiSelectField } from "./MultiSelectField";
|
||||
@@ -1,65 +0,0 @@
|
||||
import { FieldProps } from "@rjsf/utils";
|
||||
import { BlockIOObjectSubSchema } from "@/lib/autogpt-server-api/types";
|
||||
|
||||
type FormData = Record<string, boolean> | null | undefined;
|
||||
|
||||
interface UseMultiSelectFieldOptions {
|
||||
schema: BlockIOObjectSubSchema;
|
||||
formData: FormData;
|
||||
}
|
||||
|
||||
export function useMultiSelectField({
|
||||
schema,
|
||||
formData,
|
||||
}: UseMultiSelectFieldOptions) {
|
||||
const getOptionSchema = (): Record<string, BlockIOObjectSubSchema> => {
|
||||
if (schema.properties) {
|
||||
return schema.properties as Record<string, BlockIOObjectSubSchema>;
|
||||
}
|
||||
if (
|
||||
"anyOf" in schema &&
|
||||
Array.isArray(schema.anyOf) &&
|
||||
schema.anyOf.length > 0 &&
|
||||
"properties" in schema.anyOf[0]
|
||||
) {
|
||||
return (schema.anyOf[0] as BlockIOObjectSubSchema).properties as Record<
|
||||
string,
|
||||
BlockIOObjectSubSchema
|
||||
>;
|
||||
}
|
||||
return {};
|
||||
};
|
||||
|
||||
const optionSchema = getOptionSchema();
|
||||
const options = Object.keys(optionSchema);
|
||||
|
||||
const getSelection = (): string[] => {
|
||||
if (!formData || typeof formData !== "object") {
|
||||
return [];
|
||||
}
|
||||
return Object.entries(formData)
|
||||
.filter(([_, value]) => value === true)
|
||||
.map(([key]) => key);
|
||||
};
|
||||
|
||||
const selection = getSelection();
|
||||
|
||||
const createChangeHandler =
|
||||
(
|
||||
onChange: FieldProps["onChange"],
|
||||
fieldPathId: FieldProps["fieldPathId"],
|
||||
) =>
|
||||
(values: string[]) => {
|
||||
const newValue = Object.fromEntries(
|
||||
options.map((opt) => [opt, values.includes(opt)]),
|
||||
);
|
||||
onChange(newValue, fieldPathId?.path);
|
||||
};
|
||||
|
||||
return {
|
||||
optionSchema,
|
||||
options,
|
||||
selection,
|
||||
createChangeHandler,
|
||||
};
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
import { descriptionId, FieldProps, getTemplate, titleId } from "@rjsf/utils";
|
||||
import { Table, RowData } from "@/components/molecules/Table/Table";
|
||||
import { useMemo } from "react";
|
||||
|
||||
export const TableField = (props: FieldProps) => {
|
||||
const { schema, formData, onChange, fieldPathId, registry, uiSchema } = props;
|
||||
|
||||
const itemSchema = schema.items as any;
|
||||
const properties = itemSchema?.properties || {};
|
||||
|
||||
const columns: string[] = useMemo(() => {
|
||||
return Object.keys(properties);
|
||||
}, [properties]);
|
||||
|
||||
const handleChange = (rows: RowData[]) => {
|
||||
onChange(rows, fieldPathId?.path.slice(0, -1));
|
||||
};
|
||||
|
||||
const TitleFieldTemplate = getTemplate("TitleFieldTemplate", registry);
|
||||
const DescriptionFieldTemplate = getTemplate(
|
||||
"DescriptionFieldTemplate",
|
||||
registry,
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-2">
|
||||
<TitleFieldTemplate
|
||||
id={titleId(fieldPathId)}
|
||||
title={schema.title || ""}
|
||||
required={true}
|
||||
schema={schema}
|
||||
uiSchema={uiSchema}
|
||||
registry={registry}
|
||||
/>
|
||||
<DescriptionFieldTemplate
|
||||
id={descriptionId(fieldPathId)}
|
||||
description={schema.description || ""}
|
||||
schema={schema}
|
||||
registry={registry}
|
||||
/>
|
||||
|
||||
<Table
|
||||
columns={columns}
|
||||
defaultValues={formData}
|
||||
onChange={handleChange}
|
||||
allowAddRow={true}
|
||||
allowDeleteRow={true}
|
||||
addRowLabel="Add row"
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -1,10 +1,6 @@
|
||||
import { FieldProps, RJSFSchema, RegistryFieldsType } from "@rjsf/utils";
|
||||
import { CredentialsField } from "./CredentialField/CredentialField";
|
||||
import { GoogleDrivePickerField } from "./GoogleDrivePickerField/GoogleDrivePickerField";
|
||||
import { JsonTextField } from "./JsonTextField/JsonTextField";
|
||||
import { MultiSelectField } from "./MultiSelectField/MultiSelectField";
|
||||
import { isMultiSelectSchema } from "../utils/schema-utils";
|
||||
import { TableField } from "./TableField/TableField";
|
||||
|
||||
export interface CustomFieldDefinition {
|
||||
id: string;
|
||||
@@ -12,9 +8,6 @@ export interface CustomFieldDefinition {
|
||||
component: (props: FieldProps<any, RJSFSchema, any>) => JSX.Element | null;
|
||||
}
|
||||
|
||||
/** Field ID for JsonTextField - used to render nested complex types as text input */
|
||||
export const JSON_TEXT_FIELD_ID = "custom/json_text_field";
|
||||
|
||||
export const CUSTOM_FIELDS: CustomFieldDefinition[] = [
|
||||
{
|
||||
id: "custom/credential_field",
|
||||
@@ -37,28 +30,6 @@ export const CUSTOM_FIELDS: CustomFieldDefinition[] = [
|
||||
},
|
||||
component: GoogleDrivePickerField,
|
||||
},
|
||||
{
|
||||
id: "custom/json_text_field",
|
||||
// Not matched by schema - assigned via uiSchema for nested complex types
|
||||
matcher: () => false,
|
||||
component: JsonTextField,
|
||||
},
|
||||
{
|
||||
id: "custom/multi_select_field",
|
||||
matcher: isMultiSelectSchema,
|
||||
component: MultiSelectField,
|
||||
},
|
||||
{
|
||||
id: "custom/table_field",
|
||||
matcher: (schema: any) => {
|
||||
return (
|
||||
schema.type === "array" &&
|
||||
"format" in schema &&
|
||||
schema.format === "table"
|
||||
);
|
||||
},
|
||||
component: TableField,
|
||||
},
|
||||
];
|
||||
|
||||
export function findCustomFieldId(schema: any): string | null {
|
||||
|
||||
@@ -1,46 +1,19 @@
|
||||
import { RJSFSchema, UiSchema } from "@rjsf/utils";
|
||||
import {
|
||||
findCustomFieldId,
|
||||
JSON_TEXT_FIELD_ID,
|
||||
} from "../custom/custom-registry";
|
||||
|
||||
function isComplexType(schema: RJSFSchema): boolean {
|
||||
return schema.type === "object" || schema.type === "array";
|
||||
}
|
||||
|
||||
function hasComplexAnyOfOptions(schema: RJSFSchema): boolean {
|
||||
const options = schema.anyOf || schema.oneOf;
|
||||
if (!Array.isArray(options)) return false;
|
||||
return options.some(
|
||||
(opt: any) =>
|
||||
opt &&
|
||||
typeof opt === "object" &&
|
||||
(opt.type === "object" || opt.type === "array"),
|
||||
);
|
||||
}
|
||||
import { findCustomFieldId } from "../custom/custom-registry";
|
||||
|
||||
/**
|
||||
* Generates uiSchema with ui:field settings for custom fields based on schema matchers.
|
||||
* This is the standard RJSF way to route fields to custom components.
|
||||
*
|
||||
* Nested complex types (arrays/objects inside arrays/objects) are rendered as JsonTextField
|
||||
* to avoid deeply nested form UIs. Users can enter raw JSON for these fields.
|
||||
*
|
||||
* @param schema - The JSON schema
|
||||
* @param existingUiSchema - Existing uiSchema to merge with
|
||||
* @param insideComplexType - Whether we're already inside a complex type (object/array)
|
||||
*/
|
||||
export function generateUiSchemaForCustomFields(
|
||||
schema: RJSFSchema,
|
||||
existingUiSchema: UiSchema = {},
|
||||
insideComplexType: boolean = false,
|
||||
): UiSchema {
|
||||
const uiSchema: UiSchema = { ...existingUiSchema };
|
||||
|
||||
if (schema.properties) {
|
||||
for (const [key, propSchema] of Object.entries(schema.properties)) {
|
||||
if (propSchema && typeof propSchema === "object") {
|
||||
// First check for custom field matchers (credentials, google drive, etc.)
|
||||
const customFieldId = findCustomFieldId(propSchema);
|
||||
|
||||
if (customFieldId) {
|
||||
@@ -48,33 +21,8 @@ export function generateUiSchemaForCustomFields(
|
||||
...(uiSchema[key] as object),
|
||||
"ui:field": customFieldId,
|
||||
};
|
||||
// Skip further processing for custom fields
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle nested complex types - render as JsonTextField
|
||||
if (insideComplexType && isComplexType(propSchema as RJSFSchema)) {
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
};
|
||||
// Don't recurse further - this field is now a text input
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle anyOf/oneOf inside complex types
|
||||
if (
|
||||
insideComplexType &&
|
||||
hasComplexAnyOfOptions(propSchema as RJSFSchema)
|
||||
) {
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
};
|
||||
continue;
|
||||
}
|
||||
|
||||
// Recurse into object properties
|
||||
if (
|
||||
propSchema.type === "object" &&
|
||||
propSchema.properties &&
|
||||
@@ -83,7 +31,6 @@ export function generateUiSchemaForCustomFields(
|
||||
const nestedUiSchema = generateUiSchemaForCustomFields(
|
||||
propSchema as RJSFSchema,
|
||||
(uiSchema[key] as UiSchema) || {},
|
||||
true, // Now inside a complex type
|
||||
);
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
@@ -91,11 +38,9 @@ export function generateUiSchemaForCustomFields(
|
||||
};
|
||||
}
|
||||
|
||||
// Handle arrays
|
||||
if (propSchema.type === "array" && propSchema.items) {
|
||||
const itemsSchema = propSchema.items as RJSFSchema;
|
||||
if (itemsSchema && typeof itemsSchema === "object") {
|
||||
// Check for custom field on array items
|
||||
const itemsCustomFieldId = findCustomFieldId(itemsSchema);
|
||||
if (itemsCustomFieldId) {
|
||||
uiSchema[key] = {
|
||||
@@ -104,28 +49,10 @@ export function generateUiSchemaForCustomFields(
|
||||
"ui:field": itemsCustomFieldId,
|
||||
},
|
||||
};
|
||||
} else if (isComplexType(itemsSchema)) {
|
||||
// Array items that are complex types become JsonTextField
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
items: {
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
},
|
||||
};
|
||||
} else if (hasComplexAnyOfOptions(itemsSchema)) {
|
||||
// Array items with anyOf containing complex types become JsonTextField
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
items: {
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
},
|
||||
};
|
||||
} else if (itemsSchema.properties) {
|
||||
// Recurse into object items (but they're now inside a complex type)
|
||||
const itemsUiSchema = generateUiSchemaForCustomFields(
|
||||
itemsSchema,
|
||||
((uiSchema[key] as UiSchema)?.items as UiSchema) || {},
|
||||
true, // Inside complex type (array)
|
||||
);
|
||||
if (Object.keys(itemsUiSchema).length > 0) {
|
||||
uiSchema[key] = {
|
||||
@@ -136,61 +63,6 @@ export function generateUiSchemaForCustomFields(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle anyOf/oneOf at root level - process complex options
|
||||
if (!insideComplexType) {
|
||||
const anyOfOptions = propSchema.anyOf || propSchema.oneOf;
|
||||
|
||||
if (Array.isArray(anyOfOptions)) {
|
||||
for (let i = 0; i < anyOfOptions.length; i++) {
|
||||
const option = anyOfOptions[i] as RJSFSchema;
|
||||
if (option && typeof option === "object") {
|
||||
// Handle anyOf array options with complex items
|
||||
if (option.type === "array" && option.items) {
|
||||
const itemsSchema = option.items as RJSFSchema;
|
||||
if (itemsSchema && typeof itemsSchema === "object") {
|
||||
// Array items that are complex types become JsonTextField
|
||||
if (isComplexType(itemsSchema)) {
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
items: {
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
},
|
||||
};
|
||||
} else if (hasComplexAnyOfOptions(itemsSchema)) {
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
items: {
|
||||
"ui:field": JSON_TEXT_FIELD_ID,
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Recurse into anyOf object options with properties
|
||||
if (
|
||||
option.type === "object" &&
|
||||
option.properties &&
|
||||
typeof option.properties === "object"
|
||||
) {
|
||||
const optionUiSchema = generateUiSchemaForCustomFields(
|
||||
option,
|
||||
{},
|
||||
true, // Inside complex type (anyOf object option)
|
||||
);
|
||||
if (Object.keys(optionUiSchema).length > 0) {
|
||||
// Store under the property key - RJSF will apply it
|
||||
uiSchema[key] = {
|
||||
...(uiSchema[key] as object),
|
||||
...optionUiSchema,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,7 @@
|
||||
import { getUiOptions, RJSFSchema, UiSchema } from "@rjsf/utils";
|
||||
|
||||
export function isAnyOfSchema(schema: RJSFSchema | undefined): boolean {
|
||||
return (
|
||||
Array.isArray(schema?.anyOf) &&
|
||||
schema!.anyOf.length > 0 &&
|
||||
schema?.enum === undefined
|
||||
);
|
||||
return Array.isArray(schema?.anyOf) && schema!.anyOf.length > 0;
|
||||
}
|
||||
|
||||
export const isAnyOfChild = (
|
||||
@@ -37,21 +33,3 @@ export function isOptionalType(schema: RJSFSchema | undefined): {
|
||||
export function isAnyOfSelector(name: string) {
|
||||
return name.includes("anyof_select");
|
||||
}
|
||||
|
||||
export function isMultiSelectSchema(schema: RJSFSchema | undefined): boolean {
|
||||
if (typeof schema !== "object" || schema === null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if ("anyOf" in schema || "oneOf" in schema) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return !!(
|
||||
schema.type === "object" &&
|
||||
schema.properties &&
|
||||
Object.values(schema.properties).every(
|
||||
(prop: any) => prop.type === "boolean",
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1102,7 +1102,6 @@ export type AddUserCreditsResponse = {
|
||||
new_balance: number;
|
||||
transaction_key: string;
|
||||
};
|
||||
|
||||
const _stringFormatToDataTypeMap: Partial<Record<string, DataType>> = {
|
||||
date: DataType.DATE,
|
||||
time: DataType.TIME,
|
||||
|
||||
75
docs/platform/blocks/airtable/bases.md
Normal file
75
docs/platform/blocks/airtable/bases.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# Airtable Create Base
|
||||
|
||||
### What it is
|
||||
Create or find a base in Airtable
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates a new Airtable base in a specified workspace, or finds an existing one with the same name. When creating, you can optionally define initial tables and their fields to set up the schema.
|
||||
|
||||
Enable find_existing to search for a base with the same name before creating a new one, preventing duplicates in your workspace.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| workspace_id | The workspace ID where the base will be created | str | Yes |
|
||||
| name | The name of the new base | str | Yes |
|
||||
| find_existing | If true, return existing base with same name instead of creating duplicate | bool | No |
|
||||
| tables | At least one table and field must be specified. Array of table objects to create in the base. Each table should have 'name' and 'fields' properties | List[Dict[str, True]] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| base_id | The ID of the created or found base | str |
|
||||
| tables | Array of table objects | List[Dict[str, True]] |
|
||||
| table | A single table object | Dict[str, True] |
|
||||
| was_created | True if a new base was created, False if existing was found | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Project Setup**: Automatically create new bases when projects start with predefined table structures.
|
||||
|
||||
**Template Deployment**: Deploy standardized base templates across teams or clients.
|
||||
|
||||
**Multi-Tenant Apps**: Create separate bases for each customer or project programmatically.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable List Bases
|
||||
|
||||
### What it is
|
||||
List all bases in Airtable
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves a list of all Airtable bases accessible to your connected account. It returns basic information about each base including ID, name, and permission level.
|
||||
|
||||
Results are paginated; use the offset output to retrieve additional pages if there are more bases than returned in a single call.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| trigger | Trigger the block to run - value is ignored | str | No |
|
||||
| offset | Pagination offset from previous request | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| bases | Array of base objects | List[Dict[str, True]] |
|
||||
| offset | Offset for next page (null if no more bases) | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Base Discovery**: Find available bases for building dynamic dropdowns or navigation.
|
||||
|
||||
**Inventory Management**: List all bases in an organization for auditing or documentation.
|
||||
|
||||
**Cross-Base Operations**: Enumerate bases to perform operations across multiple databases.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
199
docs/platform/blocks/airtable/records.md
Normal file
199
docs/platform/blocks/airtable/records.md
Normal file
@@ -0,0 +1,199 @@
|
||||
# Airtable Create Records
|
||||
|
||||
### What it is
|
||||
Create records in an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates new records in an Airtable table using the Airtable API. Each record is specified with a fields object containing field names and values. You can create up to 10 records in a single call.
|
||||
|
||||
Enable typecast to automatically convert string values to appropriate field types (dates, numbers, etc.). The block returns the created records with their assigned IDs.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Table ID or name | str | Yes |
|
||||
| records | Array of records to create (each with 'fields' object) | List[Dict[str, True]] | Yes |
|
||||
| skip_normalization | Skip output normalization to get raw Airtable response (faster but may have missing fields) | bool | No |
|
||||
| typecast | Automatically convert string values to appropriate types | bool | No |
|
||||
| return_fields_by_field_id | Return fields by field ID | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| records | Array of created record objects | List[Dict[str, True]] |
|
||||
| details | Details of the created records | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Data Import**: Bulk import data from external sources into Airtable tables.
|
||||
|
||||
**Form Submissions**: Create records from form submissions or API integrations.
|
||||
|
||||
**Workflow Output**: Save workflow results or processed data to Airtable for tracking.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Delete Records
|
||||
|
||||
### What it is
|
||||
Delete records from an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block deletes records from an Airtable table by their record IDs. You can delete up to 10 records in a single call. The operation is permanent and cannot be undone.
|
||||
|
||||
Provide an array of record IDs to delete. Using the table ID instead of the name is recommended for reliability.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Table ID or name - It's better to use the table ID instead of the name | str | Yes |
|
||||
| record_ids | Array of upto 10 record IDs to delete | List[str] | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| records | Array of deletion results | List[Dict[str, True]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Data Cleanup**: Remove outdated or duplicate records from tables.
|
||||
|
||||
**Workflow Cleanup**: Delete temporary records after processing is complete.
|
||||
|
||||
**Batch Removal**: Remove multiple records that match certain criteria.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Get Record
|
||||
|
||||
### What it is
|
||||
Get a single record from Airtable
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves a single record from an Airtable table by its ID. The record includes all field values and metadata like creation time. Enable normalize_output to ensure all fields are included with proper empty values.
|
||||
|
||||
Optionally include field metadata for type information and configuration details about each field.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Table ID or name | str | Yes |
|
||||
| record_id | The record ID to retrieve | str | Yes |
|
||||
| normalize_output | Normalize output to include all fields with proper empty values (disable to skip schema fetch and get raw Airtable response) | bool | No |
|
||||
| include_field_metadata | Include field type and configuration metadata (requires normalize_output=true) | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| id | The record ID | str |
|
||||
| fields | The record fields | Dict[str, True] |
|
||||
| created_time | The record created time | str |
|
||||
| field_metadata | Field type and configuration metadata (only when include_field_metadata=true) | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Detail View**: Fetch complete record data for display or detailed processing.
|
||||
|
||||
**Record Lookup**: Retrieve specific records by ID from webhook payloads or references.
|
||||
|
||||
**Data Validation**: Check record contents before performing updates or related operations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable List Records
|
||||
|
||||
### What it is
|
||||
List records from an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block queries records from an Airtable table with optional filtering, sorting, and pagination. Use Airtable formulas to filter records and specify sort order by field and direction.
|
||||
|
||||
Results can be limited, paginated with offsets, and restricted to specific fields. Enable normalize_output for consistent field values across records.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Table ID or name | str | Yes |
|
||||
| filter_formula | Airtable formula to filter records | str | No |
|
||||
| view | View ID or name to use | str | No |
|
||||
| sort | Sort configuration (array of {field, direction}) | List[Dict[str, True]] | No |
|
||||
| max_records | Maximum number of records to return | int | No |
|
||||
| page_size | Number of records per page (max 100) | int | No |
|
||||
| offset | Pagination offset from previous request | str | No |
|
||||
| return_fields | Specific fields to return (comma-separated) | List[str] | No |
|
||||
| normalize_output | Normalize output to include all fields with proper empty values (disable to skip schema fetch and get raw Airtable response) | bool | No |
|
||||
| include_field_metadata | Include field type and configuration metadata (requires normalize_output=true) | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| records | Array of record objects | List[Dict[str, True]] |
|
||||
| offset | Offset for next page (null if no more records) | str |
|
||||
| field_metadata | Field type and configuration metadata (only when include_field_metadata=true) | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Report Generation**: Query records with filters to build reports or dashboards.
|
||||
|
||||
**Data Export**: Fetch records matching criteria for export to other systems.
|
||||
|
||||
**Batch Processing**: List records to process in subsequent workflow steps.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Update Records
|
||||
|
||||
### What it is
|
||||
Update records in an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block updates existing records in an Airtable table. Each record update requires the record ID and a fields object with the values to update. Only specified fields are modified; other fields remain unchanged.
|
||||
|
||||
Enable typecast to automatically convert string values to appropriate types. You can update up to 10 records per call.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Table ID or name - It's better to use the table ID instead of the name | str | Yes |
|
||||
| records | Array of records to update (each with 'id' and 'fields') | List[Dict[str, True]] | Yes |
|
||||
| typecast | Automatically convert string values to appropriate types | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| records | Array of updated record objects | List[Dict[str, True]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Status Updates**: Update record status fields as workflows progress.
|
||||
|
||||
**Data Enrichment**: Add computed or fetched data to existing records.
|
||||
|
||||
**Batch Modifications**: Update multiple records based on processed results.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
187
docs/platform/blocks/airtable/schema.md
Normal file
187
docs/platform/blocks/airtable/schema.md
Normal file
@@ -0,0 +1,187 @@
|
||||
# Airtable Create Field
|
||||
|
||||
### What it is
|
||||
Add a new field to an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block adds a new field to an existing Airtable table using the Airtable API. Specify the field type (text, email, URL, etc.), name, and optional description and configuration options.
|
||||
|
||||
The field is created immediately and becomes available for use in all records. Returns the created field object with its assigned ID.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id | The table ID to add field to | str | Yes |
|
||||
| field_type | The type of the field to create | "singleLineText" \| "email" \| "url" | No |
|
||||
| name | The name of the field to create | str | Yes |
|
||||
| description | The description of the field to create | str | No |
|
||||
| options | The options of the field to create | Dict[str, str] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| field | Created field object | Dict[str, True] |
|
||||
| field_id | ID of the created field | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Schema Evolution**: Add new fields to tables as application requirements grow.
|
||||
|
||||
**Dynamic Forms**: Create fields based on user configuration or form builder settings.
|
||||
|
||||
**Data Integration**: Add fields to capture data from newly integrated external systems.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Create Table
|
||||
|
||||
### What it is
|
||||
Create a new table in an Airtable base
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates a new table in an Airtable base with the specified name and optional field definitions. Each field definition includes name, type, and type-specific options.
|
||||
|
||||
The table is created with the defined schema and is immediately ready for use. Returns the created table object with its ID.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_name | The name of the table to create | str | Yes |
|
||||
| table_fields | Table fields with name, type, and options | List[Dict[str, True]] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| table | Created table object | Dict[str, True] |
|
||||
| table_id | ID of the created table | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Application Scaffolding**: Create tables programmatically when setting up new application modules.
|
||||
|
||||
**Multi-Tenant Setup**: Generate customer-specific tables dynamically.
|
||||
|
||||
**Feature Expansion**: Add new tables as features are enabled or installed.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable List Schema
|
||||
|
||||
### What it is
|
||||
Get the complete schema of an Airtable base
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves the complete schema of an Airtable base, including all tables, their fields, field types, and views. This metadata is essential for building dynamic integrations that need to understand table structure.
|
||||
|
||||
The schema includes field configurations, validation rules, and relationship definitions between tables.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| base_schema | Complete base schema with tables, fields, and views | Dict[str, True] |
|
||||
| tables | Array of table objects | List[Dict[str, True]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Schema Discovery**: Understand table structure for building dynamic forms or queries.
|
||||
|
||||
**Documentation**: Generate documentation of database schema automatically.
|
||||
|
||||
**Migration Planning**: Analyze schema before migrating data to other systems.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Update Field
|
||||
|
||||
### What it is
|
||||
Update field properties in an Airtable table
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block updates properties of an existing field in an Airtable table. You can modify the field name and description. Note that field type cannot be changed after creation.
|
||||
|
||||
Changes take effect immediately across all records and views that use the field.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id | The table ID containing the field | str | Yes |
|
||||
| field_id | The field ID to update | str | Yes |
|
||||
| name | The name of the field to update | str | No |
|
||||
| description | The description of the field to update | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| field | Updated field object | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Field Renaming**: Update field names to match evolving terminology or standards.
|
||||
|
||||
**Documentation Updates**: Add or update field descriptions for better team understanding.
|
||||
|
||||
**Schema Maintenance**: Keep field metadata current as application requirements change.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Airtable Update Table
|
||||
|
||||
### What it is
|
||||
Update table properties
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block updates table properties in an Airtable base. You can change the table name, description, and date dependency settings. Changes apply immediately and affect all users accessing the table.
|
||||
|
||||
This is useful for maintaining table metadata and organizing your base structure.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | The Airtable base ID | str | Yes |
|
||||
| table_id | The table ID to update | str | Yes |
|
||||
| table_name | The name of the table to update | str | No |
|
||||
| table_description | The description of the table to update | str | No |
|
||||
| date_dependency | The date dependency of the table to update | Dict[str, True] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| table | Updated table object | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Table Organization**: Rename tables to follow naming conventions or reflect current usage.
|
||||
|
||||
**Description Management**: Update table descriptions for documentation purposes.
|
||||
|
||||
**Configuration Updates**: Modify table settings like date dependencies as requirements change.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
35
docs/platform/blocks/airtable/triggers.md
Normal file
35
docs/platform/blocks/airtable/triggers.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# Airtable Webhook Trigger
|
||||
|
||||
### What it is
|
||||
Starts a flow whenever Airtable emits a webhook event
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block subscribes to Airtable webhook events for a specific base and table. When records are created, updated, or deleted, Airtable sends a webhook notification that triggers your workflow.
|
||||
|
||||
You specify which events to listen for using the event selector. The webhook payload includes details about the changed records and the type of change that occurred.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| base_id | Airtable base ID | str | Yes |
|
||||
| table_id_or_name | Airtable table ID or name | str | Yes |
|
||||
| events | Airtable webhook event filter | AirtableEventSelector | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| payload | Airtable webhook payload | WebhookPayload |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Real-Time Sync**: Automatically sync Airtable changes to other systems like CRMs or databases.
|
||||
|
||||
**Notification Workflows**: Send alerts when specific records are created or modified in Airtable.
|
||||
|
||||
**Automated Processing**: Trigger document generation or emails when new entries are added to a table.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
54
docs/platform/blocks/apollo/organization.md
Normal file
54
docs/platform/blocks/apollo/organization.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# Search Organizations
|
||||
|
||||
### What it is
|
||||
Search for organizations in Apollo
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block searches the Apollo database for organizations using various filters like employee count, location, and keywords. Apollo maintains a comprehensive database of company information for sales and marketing purposes.
|
||||
|
||||
Results can be filtered by headquarters location, excluded locations, industry keywords, and specific Apollo organization IDs.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| organization_num_employees_range | The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma. | List[int] | No |
|
||||
| organization_locations | The location of the company headquarters. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
|
||||
|
||||
To exclude companies based on location, use the organization_not_locations parameter.
|
||||
| List[str] | No |
|
||||
| organizations_not_locations | Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
|
||||
|
||||
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
|
||||
| List[str] | No |
|
||||
| q_organization_keyword_tags | Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry. | List[str] | No |
|
||||
| q_organization_name | Filter search results to include a specific company name.
|
||||
|
||||
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible. | str | No |
|
||||
| organization_ids | The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, identify the values for organization_id when you call this endpoint. | List[str] | No |
|
||||
| max_results | The maximum number of results to return. If you don't specify this parameter, the default is 100. | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the search failed | str |
|
||||
| organizations | List of organizations found | List[Dict[str, True]] |
|
||||
| organization | Each found organization, one at a time | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Market Research**: Find companies matching specific criteria for market analysis.
|
||||
|
||||
**Lead List Building**: Build targeted lists of companies for outbound sales campaigns.
|
||||
|
||||
**Competitive Intelligence**: Research competitors and similar companies in your market.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
68
docs/platform/blocks/apollo/people.md
Normal file
68
docs/platform/blocks/apollo/people.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Search People
|
||||
|
||||
### What it is
|
||||
Search for people in Apollo
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block searches Apollo's database for people based on job titles, seniority, location, company, and other criteria. It's designed for finding prospects and contacts for sales and marketing.
|
||||
|
||||
Enable enrich_info to get detailed contact information including verified email addresses (costs more credits).
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| person_titles | Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
|
||||
|
||||
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
|
||||
|
||||
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
|
||||
| List[str] | No |
|
||||
| person_locations | The location where people live. You can search across cities, US states, and countries.
|
||||
|
||||
To find people based on the headquarters locations of their current employer, use the organization_locations parameter. | List[str] | No |
|
||||
| person_seniorities | The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
|
||||
|
||||
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
|
||||
|
||||
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
|
||||
|
||||
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels. | List["owner" \| "founder" \| "c_suite"] | No |
|
||||
| organization_locations | The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
|
||||
|
||||
To find people based on their personal location, use the person_locations parameter. | List[str] | No |
|
||||
| q_organization_domains | The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
|
||||
|
||||
You can add multiple domains to search across companies.
|
||||
|
||||
Examples: apollo.io and microsoft.com | List[str] | No |
|
||||
| contact_email_statuses | The email statuses for the people you want to find. You can add multiple statuses to expand your search. | List["verified" \| "unverified" \| "likely_to_engage"] | No |
|
||||
| organization_ids | The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, call the Organization Search endpoint and identify the values for organization_id. | List[str] | No |
|
||||
| organization_num_employees_range | The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma. | List[int] | No |
|
||||
| q_keywords | A string of words over which we want to filter the results | str | No |
|
||||
| max_results | The maximum number of results to return. If you don't specify this parameter, the default is 25. Limited to 500 to prevent overspending. | int | No |
|
||||
| enrich_info | Whether to enrich contacts with detailed information including real email addresses. This will double the search cost. | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the search failed | str |
|
||||
| people | List of people found | List[Dict[str, True]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Prospecting**: Find decision-makers at target companies for outbound sales.
|
||||
|
||||
**Recruiting**: Search for candidates with specific titles and experience.
|
||||
|
||||
**ABM Campaigns**: Build contact lists at specific accounts for account-based marketing.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
42
docs/platform/blocks/apollo/person.md
Normal file
42
docs/platform/blocks/apollo/person.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Get Person Detail
|
||||
|
||||
### What it is
|
||||
Get detailed person data with Apollo API, including email reveal
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block enriches person data using Apollo's API. You can look up by Apollo person ID for best accuracy, or match by name plus company information, LinkedIn URL, or email address.
|
||||
|
||||
Returns comprehensive contact details including email addresses (if available), job title, company information, and social profiles.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| person_id | Apollo person ID to enrich (most accurate method) | str | No |
|
||||
| first_name | First name of the person to enrich | str | No |
|
||||
| last_name | Last name of the person to enrich | str | No |
|
||||
| name | Full name of the person to enrich (alternative to first_name + last_name) | str | No |
|
||||
| email | Known email address of the person (helps with matching) | str | No |
|
||||
| domain | Company domain of the person (e.g., 'google.com') | str | No |
|
||||
| company | Company name of the person | str | No |
|
||||
| linkedin_url | LinkedIn URL of the person | str | No |
|
||||
| organization_id | Apollo organization ID of the person's company | str | No |
|
||||
| title | Job title of the person to enrich | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if enrichment failed | str |
|
||||
| contact | Enriched contact information | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Contact Enrichment**: Get full contact details from partial information like name and company.
|
||||
|
||||
**Email Discovery**: Find verified email addresses for outreach campaigns.
|
||||
|
||||
**Profile Completion**: Fill in missing contact details in your CRM or database.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
45
docs/platform/blocks/ayrshare/post_to_bluesky.md
Normal file
45
docs/platform/blocks/ayrshare/post_to_bluesky.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Post To Bluesky
|
||||
|
||||
### What it is
|
||||
Post to Bluesky using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's social media API to publish content to Bluesky. It handles text posts (up to 300 characters), images (up to 4), and video content with support for scheduling, accessibility features like alt text, and link shortening.
|
||||
|
||||
The block authenticates through your Ayrshare credentials and sends the post data to Ayrshare's unified API, which then publishes to Bluesky. It returns post identifiers and status information upon completion, or error details if the operation fails.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text to be published (max 300 characters for Bluesky) | str | No |
|
||||
| media_urls | Optional list of media URLs to include. Bluesky supports up to 4 images or 1 video. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| alt_text | Alt text for each media item (accessibility) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Cross-Platform Publishing**: Automatically share content across Bluesky and other social networks from a single workflow.
|
||||
|
||||
**Scheduled Content Calendar**: Queue up posts with specific publishing times to maintain consistent presence on Bluesky.
|
||||
|
||||
**Visual Content Sharing**: Share image galleries with accessibility-friendly alt text for photo-focused content strategies.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
61
docs/platform/blocks/ayrshare/post_to_facebook.md
Normal file
61
docs/platform/blocks/ayrshare/post_to_facebook.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Post To Facebook
|
||||
|
||||
### What it is
|
||||
Post to Facebook using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's social media API to publish content to Facebook Pages. It supports text posts, images, videos, carousels (2-10 items), Reels, and Stories, with features like audience targeting by age and country, location tagging, and scheduling.
|
||||
|
||||
The block authenticates through Ayrshare and leverages the Meta Graph API to handle various Facebook-specific formats. Advanced options include draft mode for Meta Business Suite, custom link previews, and video thumbnails. Results include post IDs for tracking engagement.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text to be published | str | No |
|
||||
| media_urls | Optional list of media URLs to include. Set is_video in advanced settings to true if you want to upload videos. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| is_carousel | Whether to post a carousel | bool | No |
|
||||
| carousel_link | The URL for the 'See More At' button in the carousel | str | No |
|
||||
| carousel_items | List of carousel items with name, link and picture URLs. Min 2, max 10 items. | List[CarouselItem] | No |
|
||||
| is_reels | Whether to post to Facebook Reels | bool | No |
|
||||
| reels_title | Title for the Reels video (max 255 chars) | str | No |
|
||||
| reels_thumbnail | Thumbnail URL for Reels video (JPEG/PNG, <10MB) | str | No |
|
||||
| is_story | Whether to post as a Facebook Story | bool | No |
|
||||
| media_captions | Captions for each media item | List[str] | No |
|
||||
| location_id | Facebook Page ID or name for location tagging | str | No |
|
||||
| age_min | Minimum age for audience targeting (13,15,18,21,25) | int | No |
|
||||
| target_countries | List of country codes to target (max 25) | List[str] | No |
|
||||
| alt_text | Alt text for each media item | List[str] | No |
|
||||
| video_title | Title for video post | str | No |
|
||||
| video_thumbnail | Thumbnail URL for video post | str | No |
|
||||
| is_draft | Save as draft in Meta Business Suite | bool | No |
|
||||
| scheduled_publish_date | Schedule publish time in Meta Business Suite (UTC) | str | No |
|
||||
| preview_link | URL for custom link preview | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Product Launches**: Create carousel posts showcasing multiple product images with links to purchase pages.
|
||||
|
||||
**Event Promotion**: Share event details with age-targeted reach and location tagging for local business events.
|
||||
|
||||
**Short-Form Video**: Automatically publish Reels with custom thumbnails to maximize video content reach.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
57
docs/platform/blocks/ayrshare/post_to_gmb.md
Normal file
57
docs/platform/blocks/ayrshare/post_to_gmb.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Post To GMB
|
||||
|
||||
### What it is
|
||||
Post to Google My Business using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to Google My Business profiles. It supports standard posts, photo/video posts (categorized by type like exterior, interior, product), and special post types including events and promotional offers with coupon codes.
|
||||
|
||||
The block integrates with Google's Business Profile API through Ayrshare, enabling call-to-action buttons (book, order, shop, learn more, sign up, call), event scheduling with start/end dates, and promotional offers with terms and redemption URLs.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text to be published | str | No |
|
||||
| media_urls | Optional list of media URLs. GMB supports only one image or video per post. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| is_photo_video | Whether this is a photo/video post (appears in Photos section) | bool | No |
|
||||
| photo_category | Category for photo/video: cover, profile, logo, exterior, interior, product, at_work, food_and_drink, menu, common_area, rooms, teams | str | No |
|
||||
| call_to_action_type | Type of action button: 'book', 'order', 'shop', 'learn_more', 'sign_up', or 'call' | str | No |
|
||||
| call_to_action_url | URL for the action button (not required for 'call' action) | str | No |
|
||||
| event_title | Event title for event posts | str | No |
|
||||
| event_start_date | Event start date in ISO format (e.g., '2024-03-15T09:00:00Z') | str | No |
|
||||
| event_end_date | Event end date in ISO format (e.g., '2024-03-15T17:00:00Z') | str | No |
|
||||
| offer_title | Offer title for promotional posts | str | No |
|
||||
| offer_start_date | Offer start date in ISO format (e.g., '2024-03-15T00:00:00Z') | str | No |
|
||||
| offer_end_date | Offer end date in ISO format (e.g., '2024-04-15T23:59:59Z') | str | No |
|
||||
| offer_coupon_code | Coupon code for the offer (max 58 characters) | str | No |
|
||||
| offer_redeem_online_url | URL where customers can redeem the offer online | str | No |
|
||||
| offer_terms_conditions | Terms and conditions for the offer | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Local Business Updates**: Post daily specials, new arrivals, or service announcements directly to your Google Business Profile.
|
||||
|
||||
**Promotional Campaigns**: Create time-limited offers with coupon codes and online redemption links to drive sales.
|
||||
|
||||
**Event Marketing**: Announce upcoming events with dates, descriptions, and call-to-action buttons for reservations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
54
docs/platform/blocks/ayrshare/post_to_instagram.md
Normal file
54
docs/platform/blocks/ayrshare/post_to_instagram.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# Post To Instagram
|
||||
|
||||
### What it is
|
||||
Post to Instagram using Ayrshare. Requires a Business or Creator Instagram Account connected with a Facebook Page
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to Instagram Business or Creator accounts. It supports feed posts, Stories (24-hour expiration), Reels, and carousels (up to 10 images/videos), with features like collaborator invitations, location tagging, and user tags with coordinates.
|
||||
|
||||
The block requires an Instagram account connected to a Facebook Page and authenticates through Meta's Graph API via Ayrshare. Instagram-specific features include auto-resize for optimal dimensions, audio naming for Reels, and thumbnail customization with frame offset control.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (max 2,200 chars, up to 30 hashtags, 3 @mentions) | str | No |
|
||||
| media_urls | Optional list of media URLs. Instagram supports up to 10 images/videos in a carousel. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| is_story | Whether to post as Instagram Story (24-hour expiration) | bool | No |
|
||||
| share_reels_feed | Whether Reel should appear in both Feed and Reels tabs | bool | No |
|
||||
| audio_name | Audio name for Reels (e.g., 'The Weeknd - Blinding Lights') | str | No |
|
||||
| thumbnail | Thumbnail URL for Reel video | str | No |
|
||||
| thumbnail_offset | Thumbnail frame offset in milliseconds (default: 0) | int | No |
|
||||
| alt_text | Alt text for each media item (up to 1,000 chars each, accessibility feature), each item in the list corresponds to a media item in the media_urls list | List[str] | No |
|
||||
| location_id | Facebook Page ID or name for location tagging (e.g., '7640348500' or '@guggenheimmuseum') | str | No |
|
||||
| user_tags | List of users to tag with coordinates for images | List[Dict[str, Any]] | No |
|
||||
| collaborators | Instagram usernames to invite as collaborators (max 3, public accounts only) | List[str] | No |
|
||||
| auto_resize | Auto-resize images to 1080x1080px for Instagram | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Influencer Collaborations**: Create posts with collaborator tags to feature brand partnerships across multiple accounts.
|
||||
|
||||
**E-commerce Product Showcases**: Share carousel posts of product images with location tags for local discovery.
|
||||
|
||||
**Reels Automation**: Automatically publish short-form video content with custom thumbnails and trending audio.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
56
docs/platform/blocks/ayrshare/post_to_linkedin.md
Normal file
56
docs/platform/blocks/ayrshare/post_to_linkedin.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# Post To LinkedIn
|
||||
|
||||
### What it is
|
||||
Post to LinkedIn using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's social media API to post content to LinkedIn. It handles text posts, images, videos, and documents, with support for scheduling and audience targeting. The block authenticates through Ayrshare's API.
|
||||
|
||||
LinkedIn-specific features include visibility controls, comment management, and targeting by country, seniority, industry, and other demographics (requires 300+ followers in target audience).
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (max 3,000 chars, hashtags supported with #) | str | No |
|
||||
| media_urls | Optional list of media URLs. LinkedIn supports up to 9 images, videos, or documents (PPT, PPTX, DOC, DOCX, PDF <100MB, <300 pages). | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| visibility | Post visibility: 'public' (default), 'connections' (personal only), 'loggedin' | str | No |
|
||||
| alt_text | Alt text for each image (accessibility feature, not supported for videos/documents) | List[str] | No |
|
||||
| titles | Title/caption for each image or video | List[str] | No |
|
||||
| document_title | Title for document posts (max 400 chars, uses filename if not specified) | str | No |
|
||||
| thumbnail | Thumbnail URL for video (PNG/JPG, same dimensions as video, <10MB) | str | No |
|
||||
| targeting_countries | Country codes for targeting (e.g., ['US', 'IN', 'DE', 'GB']). Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_seniorities | Seniority levels for targeting (e.g., ['Senior', 'VP']). Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_degrees | Education degrees for targeting. Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_fields_of_study | Fields of study for targeting. Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_industries | Industry categories for targeting. Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_job_functions | Job function categories for targeting. Requires 300+ followers in target audience. | List[str] | No |
|
||||
| targeting_staff_count_ranges | Company size ranges for targeting. Requires 300+ followers in target audience. | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Thought Leadership**: Automatically share blog posts or industry insights with professional network.
|
||||
|
||||
**Scheduled Content**: Queue up a week's worth of LinkedIn posts with scheduled publishing times.
|
||||
|
||||
**Targeted Announcements**: Share company updates targeted to specific industries or seniority levels.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
51
docs/platform/blocks/ayrshare/post_to_pinterest.md
Normal file
51
docs/platform/blocks/ayrshare/post_to_pinterest.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Post To Pinterest
|
||||
|
||||
### What it is
|
||||
Post to Pinterest using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish pins to Pinterest boards. It supports image pins, video pins (with required thumbnails), and carousel pins (up to 5 images), with customizable titles, descriptions, destination links, and private notes.
|
||||
|
||||
The block connects to Pinterest's API through Ayrshare, allowing you to specify target boards, add alt text for accessibility, and configure per-image carousel options including individual titles, links, and descriptions. Pins can be scheduled for future publishing.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | Pin description (max 500 chars, links not clickable - use link field instead) | str | No |
|
||||
| media_urls | Required image/video URLs. Pinterest requires at least one image. Videos need thumbnail. Up to 5 images for carousel. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| pin_title | Pin title displayed in 'Add your title' section (max 100 chars) | str | No |
|
||||
| link | Clickable destination URL when users click the pin (max 2048 chars) | str | No |
|
||||
| board_id | Pinterest Board ID to post to (from /user/details endpoint, uses default board if not specified) | str | No |
|
||||
| note | Private note for the pin (only visible to you and board collaborators) | str | No |
|
||||
| thumbnail | Required thumbnail URL for video pins (must have valid image Content-Type) | str | No |
|
||||
| carousel_options | Options for each image in carousel (title, link, description per image) | List[PinterestCarouselOption] | No |
|
||||
| alt_text | Alt text for each image/video (max 500 chars each, accessibility feature) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Product Catalog Distribution**: Automatically pin product images with direct links to purchase pages organized by board category.
|
||||
|
||||
**Content Repurposing**: Convert blog posts and articles into visual pins with clickable destination URLs.
|
||||
|
||||
**Visual Inspiration Boards**: Create carousel pins showcasing design ideas, recipes, or tutorials with step-by-step images.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
44
docs/platform/blocks/ayrshare/post_to_reddit.md
Normal file
44
docs/platform/blocks/ayrshare/post_to_reddit.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Post To Reddit
|
||||
|
||||
### What it is
|
||||
Post to Reddit using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to Reddit. It supports text posts, image posts, and video submissions with optional scheduling and link shortening features.
|
||||
|
||||
The block authenticates through Ayrshare and submits content to your connected Reddit account. Common options include approval workflows for content review before publishing, random content generation, and Unsplash integration for sourcing images.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text to be published | str | No |
|
||||
| media_urls | Optional list of media URLs to include. Set is_video in advanced settings to true if you want to upload videos. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Community Engagement**: Share relevant content to niche subreddits as part of community marketing strategies.
|
||||
|
||||
**Content Distribution**: Cross-post blog articles or announcements to relevant Reddit communities for broader reach.
|
||||
|
||||
**Brand Monitoring Response**: Automatically share updates or responses in communities where your brand is discussed.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
46
docs/platform/blocks/ayrshare/post_to_snapchat.md
Normal file
46
docs/platform/blocks/ayrshare/post_to_snapchat.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# Post To Snapchat
|
||||
|
||||
### What it is
|
||||
Post to Snapchat using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish video content to Snapchat. Snapchat only supports video content, with three destination options: Stories (24-hour ephemeral content), Saved Stories (persistent Stories), and Spotlight (public discovery feed).
|
||||
|
||||
The block authenticates through Ayrshare and uploads video content with optional custom thumbnails. Videos can be scheduled for future publishing and support approval workflows for content review before going live.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (optional for video-only content) | str | No |
|
||||
| media_urls | Required video URL for Snapchat posts. Snapchat only supports video content. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| story_type | Type of Snapchat content: 'story' (24-hour Stories), 'saved_story' (Saved Stories), or 'spotlight' (Spotlight posts) | str | No |
|
||||
| video_thumbnail | Thumbnail URL for video content (optional, auto-generated if not provided) | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Ephemeral Marketing**: Share time-sensitive promotions or behind-the-scenes content that creates urgency through 24-hour Stories.
|
||||
|
||||
**Public Discovery**: Post engaging video content to Spotlight to reach new audiences beyond your followers.
|
||||
|
||||
**Scheduled Story Series**: Plan and schedule a sequence of video Stories for product launches or events.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
44
docs/platform/blocks/ayrshare/post_to_telegram.md
Normal file
44
docs/platform/blocks/ayrshare/post_to_telegram.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Post To Telegram
|
||||
|
||||
### What it is
|
||||
Post to Telegram using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish messages to Telegram channels. It supports text messages, images, videos, and animated GIFs, with automatic link preview generation unless media is included.
|
||||
|
||||
The block authenticates through Ayrshare and sends content to your connected Telegram channel or bot. User mentions are supported via @handle syntax, and content can be scheduled for future delivery.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (empty string allowed). Use @handle to mention other Telegram users. | str | No |
|
||||
| media_urls | Optional list of media URLs. For animated GIFs, only one URL is allowed. Telegram will auto-preview links unless image/video is included. | List[str] | No |
|
||||
| is_video | Whether the media is a video. Set to true for animated GIFs that don't end in .gif/.GIF extension. | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Channel Broadcasting**: Automatically distribute announcements, updates, or news to Telegram channel subscribers.
|
||||
|
||||
**Alert Systems**: Send automated notifications with media attachments to monitoring or alert channels.
|
||||
|
||||
**Content Syndication**: Cross-post content from other platforms to Telegram communities for broader reach.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
44
docs/platform/blocks/ayrshare/post_to_threads.md
Normal file
44
docs/platform/blocks/ayrshare/post_to_threads.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Post To Threads
|
||||
|
||||
### What it is
|
||||
Post to Threads using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to Threads (Meta's text-based social platform). It supports text posts (up to 500 characters with one hashtag), images, videos, and carousels (up to 20 items), with automatic link previews when no media is attached.
|
||||
|
||||
The block authenticates through Meta's API via Ayrshare. Content can mention users via @handle syntax, be scheduled for future publishing, and include approval workflows for content review.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (max 500 chars, empty string allowed). Only 1 hashtag allowed. Use @handle to mention users. | str | No |
|
||||
| media_urls | Optional list of media URLs. Supports up to 20 images/videos in a carousel. Auto-preview links unless media is included. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Thought Leadership**: Share quick insights, opinions, or industry commentary in a conversational format.
|
||||
|
||||
**Cross-Platform Text Content**: Automatically syndicate text-based content from other platforms to Threads.
|
||||
|
||||
**Community Engagement**: Post discussion prompts or responses to engage with your Threads audience.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
55
docs/platform/blocks/ayrshare/post_to_tiktok.md
Normal file
55
docs/platform/blocks/ayrshare/post_to_tiktok.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# Post To TikTok
|
||||
|
||||
### What it is
|
||||
Post to TikTok using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to TikTok. It supports video posts and image slideshows (up to 35 images), with extensive options for content labeling including AI-generated disclosure, branded content, and brand organic content tags.
|
||||
|
||||
The block connects to TikTok's API through Ayrshare with controls for visibility, duet/stitch permissions, comment settings, auto-music, and thumbnail selection. Videos can be posted as drafts for final review, and scheduled for future publishing.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (max 2,200 chars, empty string allowed). Use @handle to mention users. Line breaks will be ignored. | str | Yes |
|
||||
| media_urls | Required media URLs. Either 1 video OR up to 35 images (JPG/JPEG/WEBP only). Cannot mix video and images. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Disable comments on the published post | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| auto_add_music | Whether to automatically add recommended music to the post. If you set this field to true, you can change the music later in the TikTok app. | bool | No |
|
||||
| disable_duet | Disable duets on published video (video only) | bool | No |
|
||||
| disable_stitch | Disable stitch on published video (video only) | bool | No |
|
||||
| is_ai_generated | If you enable the toggle, your video will be labeled as “Creator labeled as AI-generated” once posted and can’t be changed. The “Creator labeled as AI-generated” label indicates that the content was completely AI-generated or significantly edited with AI. | bool | No |
|
||||
| is_branded_content | Whether to enable the Branded Content toggle. If this field is set to true, the video will be labeled as Branded Content, indicating you are in a paid partnership with a brand. A “Paid partnership” label will be attached to the video. | bool | No |
|
||||
| is_brand_organic | Whether to enable the Brand Organic Content toggle. If this field is set to true, the video will be labeled as Brand Organic Content, indicating you are promoting yourself or your own business. A “Promotional content” label will be attached to the video. | bool | No |
|
||||
| image_cover_index | Index of image to use as cover (0-based, image posts only) | int | No |
|
||||
| title | Title for image posts | str | No |
|
||||
| thumbnail_offset | Video thumbnail frame offset in milliseconds (video only) | int | No |
|
||||
| visibility | Post visibility: 'public', 'private', 'followers', or 'friends' | "public" \| "private" \| "followers" | No |
|
||||
| draft | Create as draft post (video only) | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Creator Content Pipeline**: Automate video uploads with proper AI disclosure labels and visibility settings for content creators.
|
||||
|
||||
**Brand Campaigns**: Publish branded content with proper disclosure labels to maintain FTC compliance and platform guidelines.
|
||||
|
||||
**Image Slideshow Posts**: Create TikTok slideshows from product images or photo series with automatic cover selection.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
57
docs/platform/blocks/ayrshare/post_to_x.md
Normal file
57
docs/platform/blocks/ayrshare/post_to_x.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Post To X
|
||||
|
||||
### What it is
|
||||
Post to X / Twitter using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to publish content to X (formerly Twitter). It supports standard tweets (280 characters, or 25,000 for Premium users), threads, polls, quote tweets, and replies, with up to 4 media attachments including video with subtitles.
|
||||
|
||||
The block authenticates through Ayrshare and handles X-specific features like automatic thread breaking using double newlines, thread numbering, per-post media attachments, and long-form video uploads (with approval). Poll options and duration can be configured for engagement posts.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | The post text (max 280 chars, up to 25,000 for Premium users). Use @handle to mention users. Use \n\n for thread breaks. | str | Yes |
|
||||
| media_urls | Optional list of media URLs. X supports up to 4 images or videos per tweet. Auto-preview links unless media is included. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| reply_to_id | ID of the tweet to reply to | str | No |
|
||||
| quote_tweet_id | ID of the tweet to quote (low-level Tweet ID) | str | No |
|
||||
| poll_options | Poll options (2-4 choices) | List[str] | No |
|
||||
| poll_duration | Poll duration in minutes (1-10080) | int | No |
|
||||
| alt_text | Alt text for each image (max 1,000 chars each, not supported for videos) | List[str] | No |
|
||||
| is_thread | Whether to automatically break post into thread based on line breaks | bool | No |
|
||||
| thread_number | Add thread numbers (1/n format) to each thread post | bool | No |
|
||||
| thread_media_urls | Media URLs for thread posts (one per thread, use 'null' to skip) | List[str] | No |
|
||||
| long_post | Force long form post (requires Premium X account) | bool | No |
|
||||
| long_video | Enable long video upload (requires approval and Business/Enterprise plan) | bool | No |
|
||||
| subtitle_url | URL to SRT subtitle file for videos (must be HTTPS and end in .srt) | str | No |
|
||||
| subtitle_language | Language code for subtitles (default: 'en') | str | No |
|
||||
| subtitle_name | Name of caption track (max 150 chars, default: 'English') | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Thread Publishing**: Automatically format and publish long-form content as numbered thread sequences.
|
||||
|
||||
**Engagement Polls**: Create polls to gather audience feedback or drive interaction with scheduled posting.
|
||||
|
||||
**Reply Automation**: Build workflows that automatically respond to mentions or engage in conversations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
60
docs/platform/blocks/ayrshare/post_to_youtube.md
Normal file
60
docs/platform/blocks/ayrshare/post_to_youtube.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Post To YouTube
|
||||
|
||||
### What it is
|
||||
Post to YouTube using Ayrshare
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Ayrshare's API to upload videos to YouTube. It handles video uploads with extensive metadata including titles, descriptions, tags, custom thumbnails, playlist assignment, category selection, and visibility controls (public, private, unlisted).
|
||||
|
||||
The block supports YouTube Shorts (up to 3 minutes), geographic targeting to allow or block specific countries, subtitle files (SRT/SBV format), synthetic/AI content disclosure, kids content labeling, and subscriber notification controls. Videos can be scheduled for specific publish times.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| post | Video description (max 5,000 chars, empty string allowed). Cannot contain < or > characters. | str | Yes |
|
||||
| media_urls | Required video URL. YouTube only supports 1 video per post. | List[str] | No |
|
||||
| is_video | Whether the media is a video | bool | No |
|
||||
| schedule_date | UTC datetime for scheduling (YYYY-MM-DDThh:mm:ssZ) | str (date-time) | No |
|
||||
| disable_comments | Whether to disable comments | bool | No |
|
||||
| shorten_links | Whether to shorten links | bool | No |
|
||||
| unsplash | Unsplash image configuration | str | No |
|
||||
| requires_approval | Whether to enable approval workflow | bool | No |
|
||||
| random_post | Whether to generate random post text | bool | No |
|
||||
| random_media_url | Whether to generate random media | bool | No |
|
||||
| notes | Additional notes for the post | str | No |
|
||||
| title | Video title (max 100 chars, required). Cannot contain < or > characters. | str | Yes |
|
||||
| visibility | Video visibility: 'private' (default), 'public', or 'unlisted' | "private" | "public" | "unlisted" | No |
|
||||
| thumbnail | Thumbnail URL (JPEG/PNG under 2MB, must end in .png/.jpg/.jpeg). Requires phone verification. | str | No |
|
||||
| playlist_id | Playlist ID to add video (user must own playlist) | str | No |
|
||||
| tags | Video tags (min 2 chars each, max 500 chars total) | List[str] | No |
|
||||
| made_for_kids | Self-declared kids content | bool | No |
|
||||
| is_shorts | Post as YouTube Short (max 3 minutes, adds #shorts) | bool | No |
|
||||
| notify_subscribers | Send notification to subscribers | bool | No |
|
||||
| category_id | Video category ID (e.g., 24 = Entertainment) | int | No |
|
||||
| contains_synthetic_media | Disclose realistic AI/synthetic content | bool | No |
|
||||
| publish_at | UTC publish time (YouTube controlled, format: 2022-10-08T21:18:36Z) | str | No |
|
||||
| targeting_block_countries | Country codes to block from viewing (e.g., ['US', 'CA']) | List[str] | No |
|
||||
| targeting_allow_countries | Country codes to allow viewing (e.g., ['GB', 'AU']) | List[str] | No |
|
||||
| subtitle_url | URL to SRT or SBV subtitle file (must be HTTPS and end in .srt/.sbv, under 100MB) | str | No |
|
||||
| subtitle_language | Language code for subtitles (default: 'en') | str | No |
|
||||
| subtitle_name | Name of caption track (max 150 chars, default: 'English') | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| post_result | The result of the post | PostResponse |
|
||||
| post | The result of the post | PostIds |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Video Publishing Pipeline**: Automate video uploads with thumbnails, descriptions, and playlist organization for content creators.
|
||||
|
||||
**YouTube Shorts Automation**: Publish short-form vertical videos to YouTube Shorts with proper metadata and hashtags.
|
||||
|
||||
**Multi-Region Content**: Upload videos with geographic restrictions for region-specific content licensing or compliance.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
147
docs/platform/blocks/baas/bots.md
Normal file
147
docs/platform/blocks/baas/bots.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Baas Bot Delete Recording
|
||||
|
||||
### What it is
|
||||
Permanently delete a meeting's recorded data
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block permanently deletes the recorded data for a meeting bot using the BaaS (Bot as a Service) API. The deletion is irreversible and removes all associated recording files and transcripts.
|
||||
|
||||
Provide the bot_id from a previous recording session to delete that specific meeting's data.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| bot_id | UUID of the bot whose data to delete | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| deleted | Whether the data was successfully deleted | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Privacy Compliance**: Delete recordings to comply with data retention policies or user requests.
|
||||
|
||||
**Storage Management**: Clean up old recordings to manage storage costs.
|
||||
|
||||
**Post-Processing Cleanup**: Delete recordings after extracting needed information.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Baas Bot Fetch Meeting Data
|
||||
|
||||
### What it is
|
||||
Retrieve recorded meeting data
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves recorded meeting data including video URL, transcript, and metadata from a completed bot session. The video URL is time-limited and should be downloaded promptly.
|
||||
|
||||
Enable include_transcripts to receive the full meeting transcript with speaker identification and timestamps.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| bot_id | UUID of the bot whose data to fetch | str | Yes |
|
||||
| include_transcripts | Include transcript data in response | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| mp4_url | URL to download the meeting recording (time-limited) | str |
|
||||
| transcript | Meeting transcript data | List[Any] |
|
||||
| metadata | Meeting metadata and bot information | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Meeting Summarization**: Retrieve transcripts for AI summarization and action item extraction.
|
||||
|
||||
**Recording Archive**: Download and store meeting recordings for compliance or reference.
|
||||
|
||||
**Analytics**: Extract meeting metadata for participation and duration analytics.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Baas Bot Join Meeting
|
||||
|
||||
### What it is
|
||||
Deploy a bot to join and record a meeting
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block deploys a recording bot to join a video meeting (Zoom, Google Meet, Teams). Configure the bot's display name, avatar, and entry message. The bot joins, records, and transcribes the meeting.
|
||||
|
||||
Use webhooks to receive notifications when the meeting ends and recordings are ready.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| meeting_url | The URL of the meeting the bot should join | str | Yes |
|
||||
| bot_name | Display name for the bot in the meeting | str | Yes |
|
||||
| bot_image | URL to an image for the bot's avatar (16:9 ratio recommended) | str | No |
|
||||
| entry_message | Chat message the bot will post upon entry | str | No |
|
||||
| reserved | Use a reserved bot slot (joins 4 min before meeting) | bool | No |
|
||||
| start_time | Unix timestamp (ms) when bot should join | int | No |
|
||||
| webhook_url | URL to receive webhook events for this bot | str | No |
|
||||
| timeouts | Automatic leave timeouts configuration | Dict[str, Any] | No |
|
||||
| extra | Custom metadata to attach to the bot | Dict[str, Any] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| bot_id | UUID of the deployed bot | str |
|
||||
| join_response | Full response from join operation | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Automated Recording**: Record meetings automatically without requiring host intervention.
|
||||
|
||||
**Meeting Assistant**: Deploy bots to take notes and transcribe customer or team meetings.
|
||||
|
||||
**Compliance Recording**: Ensure all meetings are recorded for compliance or quality assurance.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Baas Bot Leave Meeting
|
||||
|
||||
### What it is
|
||||
Remove a bot from an ongoing meeting
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block removes a recording bot from an ongoing meeting. Use this when you need to stop recording before the meeting naturally ends.
|
||||
|
||||
The bot leaves gracefully and recording data becomes available for retrieval.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| bot_id | UUID of the bot to remove from meeting | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| left | Whether the bot successfully left | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Early Termination**: Stop recording when a meeting transitions to an off-record discussion.
|
||||
|
||||
**Time-Based Recording**: Leave after capturing a specific portion of a meeting.
|
||||
|
||||
**Error Recovery**: Remove and redeploy bots when issues occur during recording.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
42
docs/platform/blocks/bannerbear/text_overlay.md
Normal file
42
docs/platform/blocks/bannerbear/text_overlay.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Bannerbear Text Overlay
|
||||
|
||||
### What it is
|
||||
Add text overlay to images using Bannerbear templates. Perfect for creating social media graphics, marketing materials, and dynamic image content.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Bannerbear's API to generate images by populating templates with dynamic text and images. Create templates in Bannerbear with text layers, then modify layer content programmatically.
|
||||
|
||||
Webhooks can notify you when asynchronous generation completes. Include custom metadata for tracking generated images.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| template_id | The unique ID of your Bannerbear template | str | Yes |
|
||||
| project_id | Optional: Project ID (required when using Master API Key) | str | No |
|
||||
| text_modifications | List of text layers to modify in the template | List[TextModification] | Yes |
|
||||
| image_url | Optional: URL of an image to use in the template | str | No |
|
||||
| image_layer_name | Optional: Name of the image layer in the template | str | No |
|
||||
| webhook_url | Optional: URL to receive webhook notification when image is ready | str | No |
|
||||
| metadata | Optional: Custom metadata to attach to the image | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| success | Whether the image generation was successfully initiated | bool |
|
||||
| image_url | URL of the generated image (if synchronous) or placeholder | str |
|
||||
| uid | Unique identifier for the generated image | str |
|
||||
| status | Status of the image generation | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Social Media Graphics**: Generate personalized social posts with dynamic quotes, stats, or headlines.
|
||||
|
||||
**Marketing Banners**: Create ad banners with different product names, prices, or offers.
|
||||
|
||||
**Certificates & Cards**: Generate personalized certificates, invitations, or greeting cards.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
File diff suppressed because it is too large
Load Diff
@@ -13,194 +13,515 @@ Below is a comprehensive list of all available blocks, categorized by their prim
|
||||
## Basic Operations
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Store Value](basic.md#store-value) | Stores and forwards a value |
|
||||
| [Print to Console](basic.md#print-to-console) | Outputs text to the console for debugging |
|
||||
| [Find in Dictionary](basic.md#find-in-dictionary) | Looks up a value in a dictionary or list |
|
||||
| [Agent Input](basic.md#agent-input) | Accepts user input in a workflow |
|
||||
| [Agent Output](basic.md#agent-output) | Records and formats workflow results |
|
||||
| [Add to Dictionary](basic.md#add-to-dictionary) | Adds a new key-value pair to a dictionary |
|
||||
| [Add to List](basic.md#add-to-list) | Adds a new entry to a list |
|
||||
| [Note](basic.md#note) | Displays a sticky note in the workflow |
|
||||
| [Add Memory](basic.md#add-memory) | Add new memories to Mem0 with user segmentation |
|
||||
| [Add To Dictionary](basic.md#add-to-dictionary) | Adds a new key-value pair to a dictionary |
|
||||
| [Add To Library From Store](system/library_operations.md#add-to-library-from-store) | Add an agent from the store to your personal library |
|
||||
| [Add To List](basic.md#add-to-list) | Adds a new entry to a list |
|
||||
| [Agent Date Input](basic.md#agent-date-input) | Block for date input |
|
||||
| [Agent Dropdown Input](basic.md#agent-dropdown-input) | Block for dropdown text selection |
|
||||
| [Agent File Input](basic.md#agent-file-input) | Block for file upload input (string path for example) |
|
||||
| [Agent Google Drive File Input](basic.md#agent-google-drive-file-input) | Block for selecting a file from Google Drive |
|
||||
| [Agent Input](basic.md#agent-input) | A block that accepts and processes user input values within a workflow, supporting various input types and validation |
|
||||
| [Agent Long Text Input](basic.md#agent-long-text-input) | Block for long text input (multi-line) |
|
||||
| [Agent Number Input](basic.md#agent-number-input) | Block for number input |
|
||||
| [Agent Output](basic.md#agent-output) | A block that records and formats workflow results for display to users, with optional Jinja2 template formatting support |
|
||||
| [Agent Short Text Input](basic.md#agent-short-text-input) | Block for short text input (single-line) |
|
||||
| [Agent Table Input](basic.md#agent-table-input) | Block for table data input with customizable headers |
|
||||
| [Agent Time Input](basic.md#agent-time-input) | Block for time input |
|
||||
| [Agent Toggle Input](basic.md#agent-toggle-input) | Block for boolean toggle input |
|
||||
| [Dictionary Is Empty](basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
|
||||
| [File Store](basic.md#file-store) | Stores the input file in the temporary directory |
|
||||
| [Find In Dictionary](basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
|
||||
| [Find In List](basic.md#find-in-list) | Finds the index of the value in the list |
|
||||
| [Get All Memories](basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
|
||||
| [Get Latest Memory](basic.md#get-latest-memory) | Retrieve the latest memory from Mem0 with optional key filtering |
|
||||
| [Get List Item](basic.md#get-list-item) | Returns the element at the given index |
|
||||
| [Get Store Agent Details](system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
|
||||
| [Get Weather Information](basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
|
||||
| [Human In The Loop](basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
|
||||
| [Installation](basic.md#installation) | Given a code string, this block allows the verification and installation of a block code into the system |
|
||||
| [Linear Search Issues](linear/issues.md#linear-search-issues) | Searches for issues on Linear |
|
||||
| [List Is Empty](basic.md#list-is-empty) | Checks if a list is empty |
|
||||
| [List Library Agents](system/library_operations.md#list-library-agents) | List all agents in your personal library |
|
||||
| [Note](basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
|
||||
| [Print To Console](basic.md#print-to-console) | A debugging block that outputs text to the console for monitoring and troubleshooting workflow execution |
|
||||
| [Remove From Dictionary](basic.md#remove-from-dictionary) | Removes a key-value pair from a dictionary |
|
||||
| [Remove From List](basic.md#remove-from-list) | Removes an item from a list by value or index |
|
||||
| [Replace Dictionary Value](basic.md#replace-dictionary-value) | Replaces the value for a specified key in a dictionary |
|
||||
| [Replace List Item](basic.md#replace-list-item) | Replaces an item at the specified index |
|
||||
| [Reverse List Order](basic.md#reverse-list-order) | Reverses the order of elements in a list |
|
||||
| [Search Memory](basic.md#search-memory) | Search memories in Mem0 by user |
|
||||
| [Search Store Agents](system/store_operations.md#search-store-agents) | Search for agents in the store |
|
||||
| [Slant3D Cancel Order](slant3d/order.md#slant3d-cancel-order) | Cancel an existing order |
|
||||
| [Slant3D Create Order](slant3d/order.md#slant3d-create-order) | Create a new print order |
|
||||
| [Slant3D Estimate Order](slant3d/order.md#slant3d-estimate-order) | Get order cost estimate |
|
||||
| [Slant3D Estimate Shipping](slant3d/order.md#slant3d-estimate-shipping) | Get shipping cost estimate |
|
||||
| [Slant3D Filament](slant3d/filament.md#slant3d-filament) | Get list of available filaments |
|
||||
| [Slant3D Get Orders](slant3d/order.md#slant3d-get-orders) | Get all orders for the account |
|
||||
| [Slant3D Slicer](slant3d/slicing.md#slant3d-slicer) | Slice a 3D model file and get pricing information |
|
||||
| [Slant3D Tracking](slant3d/order.md#slant3d-tracking) | Track order status and shipping |
|
||||
| [Store Value](basic.md#store-value) | A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks |
|
||||
| [Universal Type Converter](basic.md#universal-type-converter) | This block is used to convert a value to a universal type |
|
||||
| [XML Parser](basic.md#xml-parser) | Parses XML using gravitasml to tokenize and converts it to a dict |
|
||||
|
||||
## Data Processing
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Read CSV](csv.md#read-csv) | Processes and extracts data from CSV files |
|
||||
| [Data Sampling](sampling.md#data-sampling) | Selects a subset of data using various sampling methods |
|
||||
| [Airtable Create Base](airtable/bases.md#airtable-create-base) | Create or find a base in Airtable |
|
||||
| [Airtable Create Field](airtable/schema.md#airtable-create-field) | Add a new field to an Airtable table |
|
||||
| [Airtable Create Records](airtable/records.md#airtable-create-records) | Create records in an Airtable table |
|
||||
| [Airtable Create Table](airtable/schema.md#airtable-create-table) | Create a new table in an Airtable base |
|
||||
| [Airtable Delete Records](airtable/records.md#airtable-delete-records) | Delete records from an Airtable table |
|
||||
| [Airtable Get Record](airtable/records.md#airtable-get-record) | Get a single record from Airtable |
|
||||
| [Airtable List Bases](airtable/bases.md#airtable-list-bases) | List all bases in Airtable |
|
||||
| [Airtable List Records](airtable/records.md#airtable-list-records) | List records from an Airtable table |
|
||||
| [Airtable List Schema](airtable/schema.md#airtable-list-schema) | Get the complete schema of an Airtable base |
|
||||
| [Airtable Update Field](airtable/schema.md#airtable-update-field) | Update field properties in an Airtable table |
|
||||
| [Airtable Update Records](airtable/records.md#airtable-update-records) | Update records in an Airtable table |
|
||||
| [Airtable Update Table](airtable/schema.md#airtable-update-table) | Update table properties |
|
||||
| [Airtable Webhook Trigger](airtable/triggers.md#airtable-webhook-trigger) | Starts a flow whenever Airtable emits a webhook event |
|
||||
| [Baas Bot Delete Recording](baas/bots.md#baas-bot-delete-recording) | Permanently delete a meeting's recorded data |
|
||||
| [Baas Bot Fetch Meeting Data](baas/bots.md#baas-bot-fetch-meeting-data) | Retrieve recorded meeting data |
|
||||
| [Create Dictionary](data.md#create-dictionary) | Creates a dictionary with the specified key-value pairs |
|
||||
| [Create List](data.md#create-list) | Creates a list with the specified values |
|
||||
| [Data For Seo Keyword Suggestions](dataforseo/keyword_suggestions.md#data-for-seo-keyword-suggestions) | Get keyword suggestions from DataForSEO Labs Google API |
|
||||
| [Data For Seo Related Keywords](dataforseo/related_keywords.md#data-for-seo-related-keywords) | Get related keywords from DataForSEO Labs Google API |
|
||||
| [Exa Create Import](exa/websets_import_export.md#exa-create-import) | Import CSV data to use with websets for targeted searches |
|
||||
| [Exa Delete Import](exa/websets_import_export.md#exa-delete-import) | Delete an import |
|
||||
| [Exa Export Webset](exa/websets_import_export.md#exa-export-webset) | Export webset data in JSON, CSV, or JSON Lines format |
|
||||
| [Exa Get Import](exa/websets_import_export.md#exa-get-import) | Get the status and details of an import |
|
||||
| [Exa Get New Items](exa/websets_items.md#exa-get-new-items) | Get items added since a cursor - enables incremental processing without reprocessing |
|
||||
| [Exa List Imports](exa/websets_import_export.md#exa-list-imports) | List all imports with pagination support |
|
||||
| [File Read](data.md#file-read) | Reads a file and returns its content as a string, with optional chunking by delimiter and size limits |
|
||||
| [Google Calendar Read Events](google/calendar.md#google-calendar-read-events) | Retrieves upcoming events from a Google Calendar with filtering options |
|
||||
| [Google Docs Append Markdown](google/docs.md#google-docs-append-markdown) | Append Markdown content to the end of a Google Doc with full formatting - ideal for LLM/AI output |
|
||||
| [Google Docs Append Plain Text](google/docs.md#google-docs-append-plain-text) | Append plain text to the end of a Google Doc (no formatting applied) |
|
||||
| [Google Docs Create](google/docs.md#google-docs-create) | Create a new Google Doc |
|
||||
| [Google Docs Delete Content](google/docs.md#google-docs-delete-content) | Delete a range of content from a Google Doc |
|
||||
| [Google Docs Export](google/docs.md#google-docs-export) | Export a Google Doc to PDF, Word, text, or other formats |
|
||||
| [Google Docs Find Replace Plain Text](google/docs.md#google-docs-find-replace-plain-text) | Find and replace plain text in a Google Doc (no formatting applied to replacement) |
|
||||
| [Google Docs Format Text](google/docs.md#google-docs-format-text) | Apply formatting (bold, italic, color, etc.) to text in a Google Doc |
|
||||
| [Google Docs Get Metadata](google/docs.md#google-docs-get-metadata) | Get metadata about a Google Doc |
|
||||
| [Google Docs Get Structure](google/docs.md#google-docs-get-structure) | Get document structure with index positions for precise editing operations |
|
||||
| [Google Docs Insert Markdown At](google/docs.md#google-docs-insert-markdown-at) | Insert formatted Markdown at a specific position in a Google Doc - ideal for LLM/AI output |
|
||||
| [Google Docs Insert Page Break](google/docs.md#google-docs-insert-page-break) | Insert a page break into a Google Doc |
|
||||
| [Google Docs Insert Plain Text](google/docs.md#google-docs-insert-plain-text) | Insert plain text at a specific position in a Google Doc (no formatting applied) |
|
||||
| [Google Docs Insert Table](google/docs.md#google-docs-insert-table) | Insert a table into a Google Doc, optionally with content and Markdown formatting |
|
||||
| [Google Docs Read](google/docs.md#google-docs-read) | Read text content from a Google Doc |
|
||||
| [Google Docs Replace All With Markdown](google/docs.md#google-docs-replace-all-with-markdown) | Replace entire Google Doc content with formatted Markdown - ideal for LLM/AI output |
|
||||
| [Google Docs Replace Content With Markdown](google/docs.md#google-docs-replace-content-with-markdown) | Find text and replace it with formatted Markdown - ideal for LLM/AI output and templates |
|
||||
| [Google Docs Replace Range With Markdown](google/docs.md#google-docs-replace-range-with-markdown) | Replace a specific index range in a Google Doc with formatted Markdown - ideal for LLM/AI output |
|
||||
| [Google Docs Set Public Access](google/docs.md#google-docs-set-public-access) | Make a Google Doc public or private |
|
||||
| [Google Docs Share](google/docs.md#google-docs-share) | Share a Google Doc with specific users |
|
||||
| [Google Sheets Add Column](google/sheets.md#google-sheets-add-column) | Add a new column with a header |
|
||||
| [Google Sheets Add Dropdown](google/sheets.md#google-sheets-add-dropdown) | Add a dropdown list (data validation) to cells |
|
||||
| [Google Sheets Add Note](google/sheets.md#google-sheets-add-note) | Add a note to a cell in a Google Sheet |
|
||||
| [Google Sheets Append Row](google/sheets.md#google-sheets-append-row) | Append or Add a single row to the end of a Google Sheet |
|
||||
| [Google Sheets Batch Operations](google/sheets.md#google-sheets-batch-operations) | This block performs multiple operations on a Google Sheets spreadsheet in a single batch request |
|
||||
| [Google Sheets Clear](google/sheets.md#google-sheets-clear) | This block clears data from a specified range in a Google Sheets spreadsheet |
|
||||
| [Google Sheets Copy To Spreadsheet](google/sheets.md#google-sheets-copy-to-spreadsheet) | Copy a sheet from one spreadsheet to another |
|
||||
| [Google Sheets Create Named Range](google/sheets.md#google-sheets-create-named-range) | Create a named range to reference cells by name instead of A1 notation |
|
||||
| [Google Sheets Create Spreadsheet](google/sheets.md#google-sheets-create-spreadsheet) | This block creates a new Google Sheets spreadsheet with specified sheets |
|
||||
| [Google Sheets Delete Column](google/sheets.md#google-sheets-delete-column) | Delete a column by header name or column letter |
|
||||
| [Google Sheets Delete Rows](google/sheets.md#google-sheets-delete-rows) | Delete specific rows from a Google Sheet by their row indices |
|
||||
| [Google Sheets Export Csv](google/sheets.md#google-sheets-export-csv) | Export a Google Sheet as CSV data |
|
||||
| [Google Sheets Filter Rows](google/sheets.md#google-sheets-filter-rows) | Filter rows in a Google Sheet based on a column condition |
|
||||
| [Google Sheets Find](google/sheets.md#google-sheets-find) | Find text in a Google Sheets spreadsheet |
|
||||
| [Google Sheets Find Replace](google/sheets.md#google-sheets-find-replace) | This block finds and replaces text in a Google Sheets spreadsheet |
|
||||
| [Google Sheets Format](google/sheets.md#google-sheets-format) | Format a range in a Google Sheet (sheet optional) |
|
||||
| [Google Sheets Get Column](google/sheets.md#google-sheets-get-column) | Extract all values from a specific column |
|
||||
| [Google Sheets Get Notes](google/sheets.md#google-sheets-get-notes) | Get notes from cells in a Google Sheet |
|
||||
| [Google Sheets Get Row](google/sheets.md#google-sheets-get-row) | Get a specific row by its index |
|
||||
| [Google Sheets Get Row Count](google/sheets.md#google-sheets-get-row-count) | Get row count and dimensions of a Google Sheet |
|
||||
| [Google Sheets Get Unique Values](google/sheets.md#google-sheets-get-unique-values) | Get unique values from a column |
|
||||
| [Google Sheets Import Csv](google/sheets.md#google-sheets-import-csv) | Import CSV data into a Google Sheet |
|
||||
| [Google Sheets Insert Row](google/sheets.md#google-sheets-insert-row) | Insert a single row at a specific position |
|
||||
| [Google Sheets List Named Ranges](google/sheets.md#google-sheets-list-named-ranges) | List all named ranges in a spreadsheet |
|
||||
| [Google Sheets Lookup Row](google/sheets.md#google-sheets-lookup-row) | Look up a row by finding a value in a specific column |
|
||||
| [Google Sheets Manage Sheet](google/sheets.md#google-sheets-manage-sheet) | Create, delete, or copy sheets (sheet optional) |
|
||||
| [Google Sheets Metadata](google/sheets.md#google-sheets-metadata) | This block retrieves metadata about a Google Sheets spreadsheet including sheet names and properties |
|
||||
| [Google Sheets Protect Range](google/sheets.md#google-sheets-protect-range) | Protect a cell range or entire sheet from editing |
|
||||
| [Google Sheets Read](google/sheets.md#google-sheets-read) | A block that reads data from a Google Sheets spreadsheet using A1 notation range selection |
|
||||
| [Google Sheets Remove Duplicates](google/sheets.md#google-sheets-remove-duplicates) | Remove duplicate rows based on specified columns |
|
||||
| [Google Sheets Set Public Access](google/sheets.md#google-sheets-set-public-access) | Make a Google Spreadsheet public or private |
|
||||
| [Google Sheets Share Spreadsheet](google/sheets.md#google-sheets-share-spreadsheet) | Share a Google Spreadsheet with users or get shareable link |
|
||||
| [Google Sheets Sort](google/sheets.md#google-sheets-sort) | Sort a Google Sheet by one or two columns |
|
||||
| [Google Sheets Update Cell](google/sheets.md#google-sheets-update-cell) | Update a single cell in a Google Sheets spreadsheet |
|
||||
| [Google Sheets Update Row](google/sheets.md#google-sheets-update-row) | Update a specific row by its index |
|
||||
| [Google Sheets Write](google/sheets.md#google-sheets-write) | A block that writes data to a Google Sheets spreadsheet at a specified A1 notation range |
|
||||
| [Keyword Suggestion Extractor](dataforseo/keyword_suggestions.md#keyword-suggestion-extractor) | Extract individual fields from a KeywordSuggestion object |
|
||||
| [Persist Information](data.md#persist-information) | Persist key-value information for the current user |
|
||||
| [Read Spreadsheet](data.md#read-spreadsheet) | Reads CSV and Excel files and outputs the data as a list of dictionaries and individual rows |
|
||||
| [Related Keyword Extractor](dataforseo/related_keywords.md#related-keyword-extractor) | Extract individual fields from a RelatedKeyword object |
|
||||
| [Retrieve Information](data.md#retrieve-information) | Retrieve key-value information for the current user |
|
||||
| [Screenshot Web Page](data.md#screenshot-web-page) | Takes a screenshot of a specified website using ScreenshotOne API |
|
||||
|
||||
## Text Processing
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Match Text Pattern](text.md#match-text-pattern) | Checks if text matches a specified pattern |
|
||||
| [Extract Text Information](text.md#extract-text-information) | Extracts specific information from text using patterns |
|
||||
| [Fill Text Template](text.md#fill-text-template) | Populates a template with provided values |
|
||||
| [Combine Texts](text.md#combine-texts) | Merges multiple text inputs into one |
|
||||
| [Text Decoder](decoder_block.md#text-decoder) | Converts encoded text into readable format |
|
||||
| [Code Extraction](text.md#code-extraction) | Extracts code blocks from text and identifies their programming languages |
|
||||
| [Combine Texts](text.md#combine-texts) | This block combines multiple input texts into a single output text |
|
||||
| [Countdown Timer](text.md#countdown-timer) | This block triggers after a specified duration |
|
||||
| [Extract Text Information](text.md#extract-text-information) | This block extracts the text from the given text using the pattern (regex) |
|
||||
| [Fill Text Template](text.md#fill-text-template) | This block formats the given texts using the format template |
|
||||
| [Get Current Date](text.md#get-current-date) | This block outputs the current date with an optional offset |
|
||||
| [Get Current Date And Time](text.md#get-current-date-and-time) | This block outputs the current date and time |
|
||||
| [Get Current Time](text.md#get-current-time) | This block outputs the current time |
|
||||
| [Match Text Pattern](text.md#match-text-pattern) | Matches text against a regex pattern and forwards data to positive or negative output based on the match |
|
||||
| [Text Decoder](text.md#text-decoder) | Decodes a string containing escape sequences into actual text |
|
||||
| [Text Replace](text.md#text-replace) | This block is used to replace a text with a new text |
|
||||
| [Text Split](text.md#text-split) | This block is used to split a text into a list of strings |
|
||||
| [Word Character Count](text.md#word-character-count) | Counts the number of words and characters in a given text |
|
||||
|
||||
## AI and Language Models
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [AI Structured Response Generator](llm.md#ai-structured-response-generator) | Generates structured responses using LLMs |
|
||||
| [AI Text Generator](llm.md#ai-text-generator) | Produces text responses using LLMs |
|
||||
| [AI Text Summarizer](llm.md#ai-text-summarizer) | Summarizes long texts using LLMs |
|
||||
| [AI Conversation](llm.md#ai-conversation) | Facilitates multi-turn conversations with LLMs |
|
||||
| [AI List Generator](llm.md#ai-list-generator) | Creates lists based on prompts using LLMs |
|
||||
|
||||
## Web and API Interactions
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Send Web Request](http.md#send-web-request) | Makes HTTP requests to specified web addresses |
|
||||
| [Read RSS Feed](rss.md#read-rss-feed) | Retrieves and processes entries from RSS feeds |
|
||||
| [Get Weather Information](search.md#get-weather-information) | Fetches current weather data for a location |
|
||||
| [Google Maps Search](google_maps.md#google-maps-search) | Searches for local businesses using Google Maps API |
|
||||
|
||||
## Social Media and Content
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Get Reddit Posts](reddit.md#get-reddit-posts) | Retrieves posts from specified subreddits |
|
||||
| [Post Reddit Comment](reddit.md#post-reddit-comment) | Posts comments on Reddit |
|
||||
| [Publish to Medium](medium.md#publish-to-medium) | Publishes content directly to Medium |
|
||||
| [Read Discord Messages](discord.md#read-discord-messages) | Retrieves messages from Discord channels |
|
||||
| [Send Discord Message](discord.md#send-discord-message) | Sends messages to Discord channels |
|
||||
| [AI Ad Maker Video Creator](llm.md#ai-ad-maker-video-creator) | Creates an AI-generated 30-second advert (text + images) |
|
||||
| [AI Condition](llm.md#ai-condition) | Uses AI to evaluate natural language conditions and provide conditional outputs |
|
||||
| [AI Conversation](llm.md#ai-conversation) | A block that facilitates multi-turn conversations with a Large Language Model (LLM), maintaining context across message exchanges |
|
||||
| [AI Image Customizer](llm.md#ai-image-customizer) | Generate and edit custom images using Google's Nano-Banana model from Gemini 2 |
|
||||
| [AI Image Editor](llm.md#ai-image-editor) | Edit images using BlackForest Labs' Flux Kontext models |
|
||||
| [AI Image Generator](llm.md#ai-image-generator) | Generate images using various AI models through a unified interface |
|
||||
| [AI List Generator](llm.md#ai-list-generator) | A block that creates lists of items based on prompts using a Large Language Model (LLM), with optional source data for context |
|
||||
| [AI Music Generator](llm.md#ai-music-generator) | This block generates music using Meta's MusicGen model on Replicate |
|
||||
| [AI Screenshot To Video Ad](llm.md#ai-screenshot-to-video-ad) | Turns a screenshot into an engaging, avatar-narrated video advert |
|
||||
| [AI Shortform Video Creator](llm.md#ai-shortform-video-creator) | Creates a shortform video using revid |
|
||||
| [AI Structured Response Generator](llm.md#ai-structured-response-generator) | A block that generates structured JSON responses using a Large Language Model (LLM), with schema validation and format enforcement |
|
||||
| [AI Text Generator](llm.md#ai-text-generator) | A block that produces text responses using a Large Language Model (LLM) based on customizable prompts and system instructions |
|
||||
| [AI Text Summarizer](llm.md#ai-text-summarizer) | A block that summarizes long texts using a Large Language Model (LLM), with configurable focus topics and summary styles |
|
||||
| [AI Video Generator](fal/ai_video_generator.md#ai-video-generator) | Generate videos using FAL AI models |
|
||||
| [Bannerbear Text Overlay](bannerbear/text_overlay.md#bannerbear-text-overlay) | Add text overlay to images using Bannerbear templates |
|
||||
| [Code Generation](llm.md#code-generation) | Generate or refactor code using OpenAI's Codex (Responses API) |
|
||||
| [Create Talking Avatar Video](llm.md#create-talking-avatar-video) | This block integrates with D-ID to create video clips and retrieve their URLs |
|
||||
| [Exa Answer](exa/answers.md#exa-answer) | Get an LLM answer to a question informed by Exa search results |
|
||||
| [Exa Create Enrichment](exa/websets_enrichment.md#exa-create-enrichment) | Create enrichments to extract additional structured data from webset items |
|
||||
| [Exa Create Research](exa/research.md#exa-create-research) | Create research task with optional waiting - explores web and synthesizes findings with citations |
|
||||
| [Ideogram Model](llm.md#ideogram-model) | This block runs Ideogram models with both simple and advanced settings |
|
||||
| [Jina Chunking](jina/chunking.md#jina-chunking) | Chunks texts using Jina AI's segmentation service |
|
||||
| [Jina Embedding](jina/embeddings.md#jina-embedding) | Generates embeddings using Jina AI |
|
||||
| [Perplexity](llm.md#perplexity) | Query Perplexity's sonar models with real-time web search capabilities and receive annotated responses with source citations |
|
||||
| [Replicate Flux Advanced Model](replicate/flux_advanced.md#replicate-flux-advanced-model) | This block runs Flux models on Replicate with advanced settings |
|
||||
| [Replicate Model](replicate/replicate_block.md#replicate-model) | Run Replicate models synchronously |
|
||||
| [Smart Decision Maker](llm.md#smart-decision-maker) | Uses AI to intelligently decide what tool to use |
|
||||
| [Stagehand Act](stagehand/blocks.md#stagehand-act) | Interact with a web page by performing actions on a web page |
|
||||
| [Stagehand Extract](stagehand/blocks.md#stagehand-extract) | Extract structured data from a webpage |
|
||||
| [Stagehand Observe](stagehand/blocks.md#stagehand-observe) | Find suggested actions for your workflows |
|
||||
| [Unreal Text To Speech](llm.md#unreal-text-to-speech) | Converts text to speech using the Unreal Speech API |
|
||||
|
||||
## Search and Information Retrieval
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Get Wikipedia Summary](search.md#get-wikipedia-summary) | Fetches summaries of topics from Wikipedia |
|
||||
| [Search The Web](search.md#search-the-web) | Performs web searches and returns results |
|
||||
| [Extract Website Content](search.md#extract-website-content) | Retrieves and extracts content from websites |
|
||||
| [Ask Wolfram](wolfram/llm_api.md#ask-wolfram) | Ask Wolfram Alpha a question |
|
||||
| [Exa Bulk Webset Items](exa/websets_items.md#exa-bulk-webset-items) | Get all items from a webset in bulk (with configurable limits) |
|
||||
| [Exa Cancel Enrichment](exa/websets_enrichment.md#exa-cancel-enrichment) | Cancel a running enrichment operation |
|
||||
| [Exa Cancel Webset](exa/websets.md#exa-cancel-webset) | Cancel all operations being performed on a Webset |
|
||||
| [Exa Cancel Webset Search](exa/websets_search.md#exa-cancel-webset-search) | Cancel a running webset search |
|
||||
| [Exa Contents](exa/contents.md#exa-contents) | Retrieves document contents using Exa's contents API |
|
||||
| [Exa Create Monitor](exa/websets_monitor.md#exa-create-monitor) | Create automated monitors to keep websets updated with fresh data on a schedule |
|
||||
| [Exa Create Or Find Webset](exa/websets.md#exa-create-or-find-webset) | Create a new webset or return existing one by external_id (idempotent operation) |
|
||||
| [Exa Create Webset](exa/websets.md#exa-create-webset) | Create a new Exa Webset for persistent web search collections with optional waiting for initial results |
|
||||
| [Exa Create Webset Search](exa/websets_search.md#exa-create-webset-search) | Add a new search to an existing webset to find more items |
|
||||
| [Exa Delete Enrichment](exa/websets_enrichment.md#exa-delete-enrichment) | Delete an enrichment from a webset |
|
||||
| [Exa Delete Monitor](exa/websets_monitor.md#exa-delete-monitor) | Delete a monitor from a webset |
|
||||
| [Exa Delete Webset](exa/websets.md#exa-delete-webset) | Delete a Webset and all its items |
|
||||
| [Exa Delete Webset Item](exa/websets_items.md#exa-delete-webset-item) | Delete a specific item from a webset |
|
||||
| [Exa Find Or Create Search](exa/websets_search.md#exa-find-or-create-search) | Find existing search by query or create new - prevents duplicate searches in workflows |
|
||||
| [Exa Find Similar](exa/similar.md#exa-find-similar) | Finds similar links using Exa's findSimilar API |
|
||||
| [Exa Get Enrichment](exa/websets_enrichment.md#exa-get-enrichment) | Get the status and details of a webset enrichment |
|
||||
| [Exa Get Monitor](exa/websets_monitor.md#exa-get-monitor) | Get the details and status of a webset monitor |
|
||||
| [Exa Get Research](exa/research.md#exa-get-research) | Get status and results of a research task |
|
||||
| [Exa Get Webset](exa/websets.md#exa-get-webset) | Retrieve a Webset by ID or external ID |
|
||||
| [Exa Get Webset Item](exa/websets_items.md#exa-get-webset-item) | Get a specific item from a webset by its ID |
|
||||
| [Exa Get Webset Search](exa/websets_search.md#exa-get-webset-search) | Get the status and details of a webset search |
|
||||
| [Exa List Monitors](exa/websets_monitor.md#exa-list-monitors) | List all monitors with optional webset filtering |
|
||||
| [Exa List Research](exa/research.md#exa-list-research) | List all research tasks with pagination support |
|
||||
| [Exa List Webset Items](exa/websets_items.md#exa-list-webset-items) | List items in a webset with pagination support |
|
||||
| [Exa List Websets](exa/websets.md#exa-list-websets) | List all Websets with pagination support |
|
||||
| [Exa Preview Webset](exa/websets.md#exa-preview-webset) | Preview how a search query will be interpreted before creating a webset |
|
||||
| [Exa Search](exa/search.md#exa-search) | Searches the web using Exa's advanced search API |
|
||||
| [Exa Update Enrichment](exa/websets_enrichment.md#exa-update-enrichment) | Update an existing enrichment configuration |
|
||||
| [Exa Update Monitor](exa/websets_monitor.md#exa-update-monitor) | Update a monitor's status, schedule, or metadata |
|
||||
| [Exa Update Webset](exa/websets.md#exa-update-webset) | Update metadata for an existing Webset |
|
||||
| [Exa Wait For Enrichment](exa/websets_polling.md#exa-wait-for-enrichment) | Wait for a webset enrichment to complete with progress tracking |
|
||||
| [Exa Wait For Research](exa/research.md#exa-wait-for-research) | Wait for a research task to complete with configurable timeout |
|
||||
| [Exa Wait For Search](exa/websets_polling.md#exa-wait-for-search) | Wait for a specific webset search to complete with progress tracking |
|
||||
| [Exa Wait For Webset](exa/websets_polling.md#exa-wait-for-webset) | Wait for a webset to reach a specific status with progress tracking |
|
||||
| [Exa Webset Items Summary](exa/websets_items.md#exa-webset-items-summary) | Get a summary of webset items without retrieving all data |
|
||||
| [Exa Webset Status](exa/websets.md#exa-webset-status) | Get a quick status overview of a webset |
|
||||
| [Exa Webset Summary](exa/websets.md#exa-webset-summary) | Get a comprehensive summary of a webset with samples and statistics |
|
||||
| [Extract Website Content](jina/search.md#extract-website-content) | This block scrapes the content from the given web URL |
|
||||
| [Fact Checker](jina/fact_checker.md#fact-checker) | This block checks the factuality of a given statement using Jina AI's Grounding API |
|
||||
| [Firecrawl Crawl](firecrawl/crawl.md#firecrawl-crawl) | Firecrawl crawls websites to extract comprehensive data while bypassing blockers |
|
||||
| [Firecrawl Extract](firecrawl/extract.md#firecrawl-extract) | Firecrawl crawls websites to extract comprehensive data while bypassing blockers |
|
||||
| [Firecrawl Map Website](firecrawl/map.md#firecrawl-map-website) | Firecrawl maps a website to extract all the links |
|
||||
| [Firecrawl Scrape](firecrawl/scrape.md#firecrawl-scrape) | Firecrawl scrapes a website to extract comprehensive data while bypassing blockers |
|
||||
| [Firecrawl Search](firecrawl/search.md#firecrawl-search) | Firecrawl searches the web for the given query |
|
||||
| [Get Person Detail](apollo/person.md#get-person-detail) | Get detailed person data with Apollo API, including email reveal |
|
||||
| [Get Wikipedia Summary](search.md#get-wikipedia-summary) | This block fetches the summary of a given topic from Wikipedia |
|
||||
| [Google Maps Search](search.md#google-maps-search) | This block searches for local businesses using Google Maps API |
|
||||
| [Search Organizations](apollo/organization.md#search-organizations) | Search for organizations in Apollo |
|
||||
| [Search People](apollo/people.md#search-people) | Search for people in Apollo |
|
||||
| [Search The Web](jina/search.md#search-the-web) | This block searches the internet for the given search query |
|
||||
| [Validate Emails](zerobounce/validate_emails.md#validate-emails) | Validate emails |
|
||||
|
||||
## Time and Date

| Block Name | Description |
|------------|-------------|
| [Get Current Time](time_blocks.md#get-current-time) | Provides the current time |
| [Get Current Date](time_blocks.md#get-current-date) | Provides the current date |
| [Get Current Date and Time](time_blocks.md#get-current-date-and-time) | Provides both current date and time |
| [Countdown Timer](time_blocks.md#countdown-timer) | Acts as a countdown timer |

## Social Media and Content

| Block Name | Description |
|------------|-------------|
| [Create Discord Thread](discord/bot_blocks.md#create-discord-thread) | Creates a new thread in a Discord channel |
|
||||
| [Discord Channel Info](discord/bot_blocks.md#discord-channel-info) | Resolves Discord channel names to IDs and vice versa |
|
||||
| [Discord Get Current User](discord/oauth_blocks.md#discord-get-current-user) | Gets information about the currently authenticated Discord user using OAuth2 credentials |
|
||||
| [Discord User Info](discord/bot_blocks.md#discord-user-info) | Gets information about a Discord user by their ID |
|
||||
| [Get Linkedin Profile](enrichlayer/linkedin.md#get-linkedin-profile) | Fetch LinkedIn profile data using Enrichlayer |
|
||||
| [Get Linkedin Profile Picture](enrichlayer/linkedin.md#get-linkedin-profile-picture) | Get LinkedIn profile pictures using Enrichlayer |
|
||||
| [Get Reddit Posts](misc.md#get-reddit-posts) | This block fetches Reddit posts from a defined subreddit name |
|
||||
| [Linkedin Person Lookup](enrichlayer/linkedin.md#linkedin-person-lookup) | Look up LinkedIn profiles by person information using Enrichlayer |
|
||||
| [Linkedin Role Lookup](enrichlayer/linkedin.md#linkedin-role-lookup) | Look up LinkedIn profiles by role in a company using Enrichlayer |
|
||||
| [Post Reddit Comment](misc.md#post-reddit-comment) | This block posts a Reddit comment on a specified Reddit post |
|
||||
| [Post To Bluesky](ayrshare/post_to_bluesky.md#post-to-bluesky) | Post to Bluesky using Ayrshare |
|
||||
| [Post To Facebook](ayrshare/post_to_facebook.md#post-to-facebook) | Post to Facebook using Ayrshare |
|
||||
| [Post To GMB](ayrshare/post_to_gmb.md#post-to-gmb) | Post to Google My Business using Ayrshare |
|
||||
| [Post To Instagram](ayrshare/post_to_instagram.md#post-to-instagram) | Post to Instagram using Ayrshare |
|
||||
| [Post To Linked In](ayrshare/post_to_linkedin.md#post-to-linked-in) | Post to LinkedIn using Ayrshare |
|
||||
| [Post To Pinterest](ayrshare/post_to_pinterest.md#post-to-pinterest) | Post to Pinterest using Ayrshare |
|
||||
| [Post To Reddit](ayrshare/post_to_reddit.md#post-to-reddit) | Post to Reddit using Ayrshare |
|
||||
| [Post To Snapchat](ayrshare/post_to_snapchat.md#post-to-snapchat) | Post to Snapchat using Ayrshare |
|
||||
| [Post To Telegram](ayrshare/post_to_telegram.md#post-to-telegram) | Post to Telegram using Ayrshare |
|
||||
| [Post To Threads](ayrshare/post_to_threads.md#post-to-threads) | Post to Threads using Ayrshare |
|
||||
| [Post To Tik Tok](ayrshare/post_to_tiktok.md#post-to-tik-tok) | Post to TikTok using Ayrshare |
|
||||
| [Post To X](ayrshare/post_to_x.md#post-to-x) | Post to X / Twitter using Ayrshare |
|
||||
| [Post To You Tube](ayrshare/post_to_youtube.md#post-to-you-tube) | Post to YouTube using Ayrshare |
|
||||
| [Publish To Medium](misc.md#publish-to-medium) | Publishes a post to Medium |
|
||||
| [Read Discord Messages](discord/bot_blocks.md#read-discord-messages) | Reads messages from a Discord channel using a bot token |
|
||||
| [Reply To Discord Message](discord/bot_blocks.md#reply-to-discord-message) | Replies to a specific Discord message |
|
||||
| [Send Discord DM](discord/bot_blocks.md#send-discord-dm) | Sends a direct message to a Discord user using their user ID |
|
||||
| [Send Discord Embed](discord/bot_blocks.md#send-discord-embed) | Sends a rich embed message to a Discord channel |
|
||||
| [Send Discord File](discord/bot_blocks.md#send-discord-file) | Sends a file attachment to a Discord channel |
|
||||
| [Send Discord Message](discord/bot_blocks.md#send-discord-message) | Sends a message to a Discord channel using a bot token |
|
||||
| [Transcribe Youtube Video](misc.md#transcribe-youtube-video) | Transcribes a YouTube video using a proxy |
|
||||
| [Twitter Add List Member](twitter/list_members.md#twitter-add-list-member) | This block adds a specified user to a Twitter List owned by the authenticated user |
|
||||
| [Twitter Bookmark Tweet](twitter/bookmark.md#twitter-bookmark-tweet) | This block bookmarks a tweet on Twitter |
|
||||
| [Twitter Create List](twitter/manage_lists.md#twitter-create-list) | This block creates a new Twitter List for the authenticated user |
|
||||
| [Twitter Delete List](twitter/manage_lists.md#twitter-delete-list) | This block deletes a specified Twitter List owned by the authenticated user |
|
||||
| [Twitter Delete Tweet](twitter/manage.md#twitter-delete-tweet) | This block deletes a tweet on Twitter |
|
||||
| [Twitter Follow List](twitter/list_follows.md#twitter-follow-list) | This block follows a specified Twitter list for the authenticated user |
|
||||
| [Twitter Follow User](twitter/follows.md#twitter-follow-user) | This block follows a specified Twitter user |
|
||||
| [Twitter Get Bookmarked Tweets](twitter/bookmark.md#twitter-get-bookmarked-tweets) | This block retrieves bookmarked tweets from Twitter |
|
||||
| [Twitter Get Followers](twitter/follows.md#twitter-get-followers) | This block retrieves followers of a specified Twitter user |
|
||||
| [Twitter Get Following](twitter/follows.md#twitter-get-following) | This block retrieves the users that a specified Twitter user is following |
|
||||
| [Twitter Get Home Timeline](twitter/timeline.md#twitter-get-home-timeline) | This block retrieves the authenticated user's home timeline |
|
||||
| [Twitter Get Liked Tweets](twitter/like.md#twitter-get-liked-tweets) | This block gets information about tweets liked by a user |
|
||||
| [Twitter Get Liking Users](twitter/like.md#twitter-get-liking-users) | This block gets information about users who liked a tweet |
|
||||
| [Twitter Get List](twitter/list_lookup.md#twitter-get-list) | This block retrieves information about a specified Twitter List |
|
||||
| [Twitter Get List Members](twitter/list_members.md#twitter-get-list-members) | This block retrieves the members of a specified Twitter List |
|
||||
| [Twitter Get List Memberships](twitter/list_members.md#twitter-get-list-memberships) | This block retrieves all Lists that a specified user is a member of |
|
||||
| [Twitter Get List Tweets](twitter/list_tweets_lookup.md#twitter-get-list-tweets) | This block retrieves tweets from a specified Twitter list |
|
||||
| [Twitter Get Muted Users](twitter/mutes.md#twitter-get-muted-users) | This block gets a list of users muted by the authenticating user |
|
||||
| [Twitter Get Owned Lists](twitter/list_lookup.md#twitter-get-owned-lists) | This block retrieves all Lists owned by a specified Twitter user |
|
||||
| [Twitter Get Pinned Lists](twitter/pinned_lists.md#twitter-get-pinned-lists) | This block returns the Lists pinned by the authenticated user |
|
||||
| [Twitter Get Quote Tweets](twitter/quote.md#twitter-get-quote-tweets) | This block gets quote tweets for a specific tweet |
|
||||
| [Twitter Get Retweeters](twitter/retweet.md#twitter-get-retweeters) | This block gets information about who has retweeted a tweet |
|
||||
| [Twitter Get Space Buyers](twitter/spaces_lookup.md#twitter-get-space-buyers) | This block retrieves a list of users who purchased tickets to a Twitter Space |
|
||||
| [Twitter Get Space By Id](twitter/spaces_lookup.md#twitter-get-space-by-id) | This block retrieves information about a single Twitter Space |
|
||||
| [Twitter Get Space Tweets](twitter/spaces_lookup.md#twitter-get-space-tweets) | This block retrieves tweets shared in a Twitter Space |
|
||||
| [Twitter Get Spaces](twitter/spaces_lookup.md#twitter-get-spaces) | This block retrieves information about multiple Twitter Spaces |
|
||||
| [Twitter Get Tweet](twitter/tweet_lookup.md#twitter-get-tweet) | This block retrieves information about a specific Tweet |
|
||||
| [Twitter Get Tweets](twitter/tweet_lookup.md#twitter-get-tweets) | This block retrieves information about multiple Tweets |
|
||||
| [Twitter Get User](twitter/user_lookup.md#twitter-get-user) | This block retrieves information about a specified Twitter user |
|
||||
| [Twitter Get User Mentions](twitter/timeline.md#twitter-get-user-mentions) | This block retrieves Tweets mentioning a specific user |
|
||||
| [Twitter Get User Tweets](twitter/timeline.md#twitter-get-user-tweets) | This block retrieves Tweets composed by a single user |
|
||||
| [Twitter Get Users](twitter/user_lookup.md#twitter-get-users) | This block retrieves information about multiple Twitter users |
|
||||
| [Twitter Get Blocked Users](twitter/blocks.md#twitter-geted-users) | This block retrieves a list of users blocked by the authenticating user |
|
||||
| [Twitter Hide Reply](twitter/hide.md#twitter-hide-reply) | This block hides a reply to a tweet |
|
||||
| [Twitter Like Tweet](twitter/like.md#twitter-like-tweet) | This block likes a tweet |
|
||||
| [Twitter Mute User](twitter/mutes.md#twitter-mute-user) | This block mutes a specified Twitter user |
|
||||
| [Twitter Pin List](twitter/pinned_lists.md#twitter-pin-list) | This block allows the authenticated user to pin a specified List |
|
||||
| [Twitter Post Tweet](twitter/manage.md#twitter-post-tweet) | This block posts a tweet on Twitter |
|
||||
| [Twitter Remove Bookmark Tweet](twitter/bookmark.md#twitter-remove-bookmark-tweet) | This block removes a bookmark from a tweet on Twitter |
|
||||
| [Twitter Remove List Member](twitter/list_members.md#twitter-remove-list-member) | This block removes a specified user from a Twitter List owned by the authenticated user |
|
||||
| [Twitter Remove Retweet](twitter/retweet.md#twitter-remove-retweet) | This block removes a retweet on Twitter |
|
||||
| [Twitter Retweet](twitter/retweet.md#twitter-retweet) | This block retweets a tweet on Twitter |
|
||||
| [Twitter Search Recent Tweets](twitter/manage.md#twitter-search-recent-tweets) | This block searches public Tweets posted within the last seven days |
|
||||
| [Twitter Search Spaces](twitter/search_spaces.md#twitter-search-spaces) | This block searches for Twitter Spaces based on specified terms |
|
||||
| [Twitter Unfollow List](twitter/list_follows.md#twitter-unfollow-list) | This block unfollows a specified Twitter list for the authenticated user |
|
||||
| [Twitter Unfollow User](twitter/follows.md#twitter-unfollow-user) | This block unfollows a specified Twitter user |
|
||||
| [Twitter Unhide Reply](twitter/hide.md#twitter-unhide-reply) | This block unhides a reply to a tweet |
|
||||
| [Twitter Unlike Tweet](twitter/like.md#twitter-unlike-tweet) | This block unlikes a tweet |
|
||||
| [Twitter Unmute User](twitter/mutes.md#twitter-unmute-user) | This block unmutes a specified Twitter user |
|
||||
| [Twitter Unpin List](twitter/pinned_lists.md#twitter-unpin-list) | This block allows the authenticated user to unpin a specified List |
|
||||
| [Twitter Update List](twitter/manage_lists.md#twitter-update-list) | This block updates a specified Twitter List owned by the authenticated user |
|
||||
|
||||
## Math and Calculations

| Block Name | Description |
|------------|-------------|
| [Calculator](maths.md#calculator) | Performs basic mathematical operations |
| [Count Items](maths.md#count-items) | Counts items in a collection |

## Communication

| Block Name | Description |
|------------|-------------|
| [Baas Bot Join Meeting](baas/bots.md#baas-bot-join-meeting) | Deploy a bot to join and record a meeting |
| [Baas Bot Leave Meeting](baas/bots.md#baas-bot-leave-meeting) | Remove a bot from an ongoing meeting |
|
||||
| [Gmail Add Label](google/gmail.md#gmail-add-label) | A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist |
|
||||
| [Gmail Create Draft](google/gmail.md#gmail-create-draft) | Create draft emails in Gmail with automatic HTML detection and proper text formatting |
|
||||
| [Gmail Draft Reply](google/gmail.md#gmail-draft-reply) | Create draft replies to Gmail threads with automatic HTML detection and proper text formatting |
|
||||
| [Gmail Forward](google/gmail.md#gmail-forward) | Forward Gmail messages to other recipients with automatic HTML detection and proper formatting |
|
||||
| [Gmail Get Profile](google/gmail.md#gmail-get-profile) | Get the authenticated user's Gmail profile details including email address and message statistics |
|
||||
| [Gmail Get Thread](google/gmail.md#gmail-get-thread) | A block that retrieves an entire Gmail thread (email conversation) by ID, returning all messages with decoded bodies for reading complete conversations |
|
||||
| [Gmail List Labels](google/gmail.md#gmail-list-labels) | A block that retrieves all labels (categories) from a Gmail account for organizing and categorizing emails |
|
||||
| [Gmail Read](google/gmail.md#gmail-read) | A block that retrieves and reads emails from a Gmail account based on search criteria, returning detailed message information including subject, sender, body, and attachments |
|
||||
| [Gmail Remove Label](google/gmail.md#gmail-remove-label) | A block that removes a label from a specific email message in a Gmail account |
|
||||
| [Gmail Reply](google/gmail.md#gmail-reply) | Reply to Gmail threads with automatic HTML detection and proper text formatting |
|
||||
| [Gmail Send](google/gmail.md#gmail-send) | Send emails via Gmail with automatic HTML detection and proper text formatting |
|
||||
| [Hub Spot Engagement](hubspot/engagement.md#hub-spot-engagement) | Manages HubSpot engagements - sends emails and tracks engagement metrics |
|
||||
|
||||
## Developer Tools
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Exa Code Context](exa/code_context.md#exa-code-context) | Search billions of GitHub repos, docs, and Stack Overflow for relevant code examples |
|
||||
| [Execute Code](misc.md#execute-code) | Executes code in a sandbox environment with internet access |
|
||||
| [Execute Code Step](misc.md#execute-code-step) | Execute code in a previously instantiated sandbox |
|
||||
| [Github Add Label](github/issues.md#github-add-label) | A block that adds a label to a GitHub issue or pull request for categorization and organization |
|
||||
| [Github Assign Issue](github/issues.md#github-assign-issue) | A block that assigns a GitHub user to an issue for task ownership and tracking |
|
||||
| [Github Assign PR Reviewer](github/pull_requests.md#github-assign-pr-reviewer) | This block assigns a reviewer to a specified GitHub pull request |
|
||||
| [Github Comment](github/issues.md#github-comment) | A block that posts comments on GitHub issues or pull requests using the GitHub API |
|
||||
| [Github Create Check Run](github/checks.md#github-create-check-run) | Creates a new check run for a specific commit in a GitHub repository |
|
||||
| [Github Create Comment Object](github/reviews.md#github-create-comment-object) | Creates a comment object for use with GitHub blocks |
|
||||
| [Github Create File](github/repo.md#github-create-file) | This block creates a new file in a GitHub repository |
|
||||
| [Github Create PR Review](github/reviews.md#github-create-pr-review) | This block creates a review on a GitHub pull request with optional inline comments |
|
||||
| [Github Create Repository](github/repo.md#github-create-repository) | This block creates a new GitHub repository |
|
||||
| [Github Create Status](github/statuses.md#github-create-status) | Creates a new commit status in a GitHub repository |
|
||||
| [Github Delete Branch](github/repo.md#github-delete-branch) | This block deletes a specified branch |
|
||||
| [Github Discussion Trigger](github/triggers.md#github-discussion-trigger) | This block triggers on GitHub Discussions events |
|
||||
| [Github Get CI Results](github/ci.md#github-get-ci-results) | This block gets CI results for a commit or PR, with optional search for specific errors/warnings in logs |
|
||||
| [Github Get PR Review Comments](github/reviews.md#github-get-pr-review-comments) | This block gets all review comments from a GitHub pull request or from a specific review |
|
||||
| [Github Issues Trigger](github/triggers.md#github-issues-trigger) | This block triggers on GitHub issues events |
|
||||
| [Github List Branches](github/repo.md#github-list-branches) | This block lists all branches for a specified GitHub repository |
|
||||
| [Github List Comments](github/issues.md#github-list-comments) | A block that retrieves all comments from a GitHub issue or pull request, including comment metadata and content |
|
||||
| [Github List Discussions](github/repo.md#github-list-discussions) | This block lists recent discussions for a specified GitHub repository |
|
||||
| [Github List Issues](github/issues.md#github-list-issues) | A block that retrieves a list of issues from a GitHub repository with their titles and URLs |
|
||||
| [Github List PR Reviewers](github/pull_requests.md#github-list-pr-reviewers) | This block lists all reviewers for a specified GitHub pull request |
|
||||
| [Github List PR Reviews](github/reviews.md#github-list-pr-reviews) | This block lists all reviews for a specified GitHub pull request |
|
||||
| [Github List Pull Requests](github/pull_requests.md#github-list-pull-requests) | This block lists all pull requests for a specified GitHub repository |
|
||||
| [Github List Releases](github/repo.md#github-list-releases) | This block lists all releases for a specified GitHub repository |
|
||||
| [Github List Stargazers](github/repo.md#github-list-stargazers) | This block lists all users who have starred a specified GitHub repository |
|
||||
| [Github List Tags](github/repo.md#github-list-tags) | This block lists all tags for a specified GitHub repository |
|
||||
| [Github Make Branch](github/repo.md#github-make-branch) | This block creates a new branch from a specified source branch |
|
||||
| [Github Make Issue](github/issues.md#github-make-issue) | A block that creates new issues on GitHub repositories with a title and body content |
|
||||
| [Github Make Pull Request](github/pull_requests.md#github-make-pull-request) | This block creates a new pull request on a specified GitHub repository |
|
||||
| [Github Pull Request Trigger](github/triggers.md#github-pull-request-trigger) | This block triggers on pull request events and outputs the event type and payload |
|
||||
| [Github Read File](github/repo.md#github-read-file) | This block reads the content of a specified file from a GitHub repository |
|
||||
| [Github Read Folder](github/repo.md#github-read-folder) | This block reads the content of a specified folder from a GitHub repository |
|
||||
| [Github Read Issue](github/issues.md#github-read-issue) | A block that retrieves information about a specific GitHub issue, including its title, body content, and creator |
|
||||
| [Github Read Pull Request](github/pull_requests.md#github-read-pull-request) | This block reads the body, title, user, and changes of a specified GitHub pull request |
|
||||
| [Github Release Trigger](github/triggers.md#github-release-trigger) | This block triggers on GitHub release events |
|
||||
| [Github Remove Label](github/issues.md#github-remove-label) | A block that removes a label from a GitHub issue or pull request |
|
||||
| [Github Resolve Review Discussion](github/reviews.md#github-resolve-review-discussion) | This block resolves or unresolves a review discussion thread on a GitHub pull request |
|
||||
| [Github Star Trigger](github/triggers.md#github-star-trigger) | This block triggers on GitHub star events |
|
||||
| [Github Submit Pending Review](github/reviews.md#github-submit-pending-review) | This block submits a pending (draft) review on a GitHub pull request |
|
||||
| [Github Unassign Issue](github/issues.md#github-unassign-issue) | A block that removes a user's assignment from a GitHub issue |
|
||||
| [Github Unassign PR Reviewer](github/pull_requests.md#github-unassign-pr-reviewer) | This block unassigns a reviewer from a specified GitHub pull request |
|
||||
| [Github Update Check Run](github/checks.md#github-update-check-run) | Updates an existing check run in a GitHub repository |
|
||||
| [Github Update Comment](github/issues.md#github-update-comment) | A block that updates an existing comment on a GitHub issue or pull request |
|
||||
| [Github Update File](github/repo.md#github-update-file) | This block updates an existing file in a GitHub repository |
|
||||
| [Instantiate Code Sandbox](misc.md#instantiate-code-sandbox) | Instantiate a sandbox environment with internet access in which you can execute code with the Execute Code Step block |
|
||||
| [Slant3D Order Webhook](slant3d/webhook.md#slant3d-order-webhook) | This block triggers on Slant3D order status updates and outputs the event details, including tracking information when orders are shipped |
|
||||
|
||||
## Media Generation
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Ideogram Model](ideogram.md#ideogram-model) | Generates images based on text prompts |
|
||||
| [Create Talking Avatar Video](talking_head.md#create-talking-avatar-video) | Creates videos with talking avatars |
|
||||
| [Unreal Text to Speech](text_to_speech_block.md#unreal-text-to-speech) | Converts text to speech using Unreal Speech API |
|
||||
| [AI Shortform Video Creator](ai_shortform_video_block.md#ai-shortform-video-creator) | Generates short-form videos using AI |
|
||||
| [Replicate Flux Advanced Model](replicate_flux_advanced.md#replicate-flux-advanced-model) | Creates images using Replicate's Flux models |
|
||||
| [Flux Kontext](flux_kontext.md#flux-kontext) | Text-based image editing using Flux Kontext |
|
||||
| [Add Audio To Video](multimedia.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
|
||||
| [Loop Video](multimedia.md#loop-video) | Block to loop a video to a given duration or number of repeats |
|
||||
| [Media Duration](multimedia.md#media-duration) | Block to get the duration of a media file |
|
||||
|
||||
## Miscellaneous
|
||||
## Productivity
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Transcribe YouTube Video](youtube.md#transcribe-youtube-video) | Transcribes audio from YouTube videos |
|
||||
| [Send Email](email_block.md#send-email) | Sends emails using SMTP |
|
||||
| [Condition Block](branching.md#condition-block) | Evaluates conditions for workflow branching |
|
||||
| [Step Through Items](iteration.md#step-through-items) | Iterates through lists or dictionaries |
|
||||
| [Google Calendar Create Event](google/calendar.md#google-calendar-create-event) | This block creates a new event in Google Calendar with customizable parameters |
|
||||
| [Notion Create Page](notion/create_page.md#notion-create-page) | Create a new page in Notion |
|
||||
| [Notion Read Database](notion/read_database.md#notion-read-database) | Query a Notion database with optional filtering and sorting, returning structured entries |
|
||||
| [Notion Read Page](notion/read_page.md#notion-read-page) | Read a Notion page by its ID and return its raw JSON |
|
||||
| [Notion Read Page Markdown](notion/read_page_markdown.md#notion-read-page-markdown) | Read a Notion page and convert it to Markdown format with proper formatting for headings, lists, links, and rich text |
|
||||
| [Notion Search](notion/search.md#notion-search) | Search your Notion workspace for pages and databases by text query |
|
||||
| [Todoist Close Task](todoist/tasks.md#todoist-close-task) | Closes a task in Todoist |
|
||||
| [Todoist Create Comment](todoist/comments.md#todoist-create-comment) | Creates a new comment on a Todoist task or project |
|
||||
| [Todoist Create Label](todoist/labels.md#todoist-create-label) | Creates a new label in Todoist. It will not work if a label with the same name already exists |
|
||||
| [Todoist Create Project](todoist/projects.md#todoist-create-project) | Creates a new project in Todoist |
|
||||
| [Todoist Create Task](todoist/tasks.md#todoist-create-task) | Creates a new task in a Todoist project |
|
||||
| [Todoist Delete Comment](todoist/comments.md#todoist-delete-comment) | Deletes a Todoist comment |
|
||||
| [Todoist Delete Label](todoist/labels.md#todoist-delete-label) | Deletes a personal label in Todoist |
|
||||
| [Todoist Delete Project](todoist/projects.md#todoist-delete-project) | Deletes a Todoist project and all its contents |
|
||||
| [Todoist Delete Section](todoist/sections.md#todoist-delete-section) | Deletes a section and all its tasks from Todoist |
|
||||
| [Todoist Delete Task](todoist/tasks.md#todoist-delete-task) | Deletes a task in Todoist |
|
||||
| [Todoist Get Comment](todoist/comments.md#todoist-get-comment) | Get a single comment from Todoist |
|
||||
| [Todoist Get Comments](todoist/comments.md#todoist-get-comments) | Get all comments for a Todoist task or project |
|
||||
| [Todoist Get Label](todoist/labels.md#todoist-get-label) | Gets a personal label from Todoist by ID |
|
||||
| [Todoist Get Project](todoist/projects.md#todoist-get-project) | Gets details for a specific Todoist project |
|
||||
| [Todoist Get Section](todoist/sections.md#todoist-get-section) | Gets a single section by ID from Todoist |
|
||||
| [Todoist Get Shared Labels](todoist/labels.md#todoist-get-shared-labels) | Gets all shared labels from Todoist |
|
||||
| [Todoist Get Task](todoist/tasks.md#todoist-get-task) | Get an active task from Todoist |
|
||||
| [Todoist Get Tasks](todoist/tasks.md#todoist-get-tasks) | Get active tasks from Todoist |
|
||||
| [Todoist List Collaborators](todoist/projects.md#todoist-list-collaborators) | Gets all collaborators for a specific Todoist project |
|
||||
| [Todoist List Labels](todoist/labels.md#todoist-list-labels) | Gets all personal labels from Todoist |
|
||||
| [Todoist List Projects](todoist/projects.md#todoist-list-projects) | Gets all projects and their details from Todoist |
|
||||
| [Todoist List Sections](todoist/sections.md#todoist-list-sections) | Gets all sections and their details from Todoist |
|
||||
| [Todoist Remove Shared Labels](todoist/labels.md#todoist-remove-shared-labels) | Removes all instances of a shared label |
|
||||
| [Todoist Rename Shared Labels](todoist/labels.md#todoist-rename-shared-labels) | Renames all instances of a shared label |
|
||||
| [Todoist Reopen Task](todoist/tasks.md#todoist-reopen-task) | Reopens a task in Todoist |
|
||||
| [Todoist Update Comment](todoist/comments.md#todoist-update-comment) | Updates a Todoist comment |
|
||||
| [Todoist Update Label](todoist/labels.md#todoist-update-label) | Updates a personal label in Todoist |
|
||||
| [Todoist Update Project](todoist/projects.md#todoist-update-project) | Updates an existing project in Todoist |
|
||||
| [Todoist Update Task](todoist/tasks.md#todoist-update-task) | Updates an existing task in Todoist |
|
||||
|
||||
## Google Services
|
||||
## Logic and Control Flow
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Gmail Read](google/gmail.md#gmail-read) | Retrieves and reads emails from a Gmail account |
|
||||
| [Gmail Get Thread](google/gmail.md#gmail-get-thread) | Returns every message in a Gmail thread |
|
||||
| [Gmail Reply](google/gmail.md#gmail-reply) | Sends a reply that stays in the same thread |
|
||||
| [Gmail Send](google/gmail.md#gmail-send) | Sends emails using a Gmail account |
|
||||
| [Gmail List Labels](google/gmail.md#gmail-list-labels) | Retrieves all labels from a Gmail account |
|
||||
| [Gmail Add Label](google/gmail.md#gmail-add-label) | Adds a label to a specific email in a Gmail account |
|
||||
| [Gmail Remove Label](google/gmail.md#gmail-remove-label) | Removes a label from a specific email in a Gmail account |
|
||||
| [Google Sheets Read](google/sheet.md#google-sheets-read) | Reads data from a Google Sheets spreadsheet |
|
||||
| [Google Sheets Write](google/sheet.md#google-sheets-write) | Writes data to a Google Sheets spreadsheet |
|
||||
| [Google Maps Search](google_maps.md#google-maps-search) | Searches for local businesses using the Google Maps API |
|
||||
| [Calculator](logic.md#calculator) | Performs a mathematical operation on two numbers |
|
||||
| [Condition](logic.md#condition) | Handles conditional logic based on comparison operators |
|
||||
| [Count Items](logic.md#count-items) | Counts the number of items in a collection |
|
||||
| [Data Sampling](logic.md#data-sampling) | This block samples data from a given dataset using various sampling methods |
|
||||
| [Exa Webset Ready Check](exa/websets.md#exa-webset-ready-check) | Check if webset is ready for next operation - enables conditional workflow branching |
|
||||
| [If Input Matches](logic.md#if-input-matches) | Handles conditional logic based on comparison operators |
|
||||
| [Pinecone Init](logic.md#pinecone-init) | Initializes a Pinecone index |
|
||||
| [Pinecone Insert](logic.md#pinecone-insert) | Upload data to a Pinecone index |
|
||||
| [Pinecone Query](logic.md#pinecone-query) | Queries a Pinecone index |
|
||||
| [Step Through Items](logic.md#step-through-items) | Iterates over a list or dictionary and outputs each item |
|
||||
|
||||
## GitHub Integration
|
||||
## Input/Output
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [GitHub Comment](github/issues.md#github-comment) | Posts comments on GitHub issues or pull requests |
|
||||
| [GitHub Make Issue](github/issues.md#github-make-issue) | Creates new issues on GitHub repositories |
|
||||
| [GitHub Read Issue](github/issues.md#github-read-issue) | Retrieves information about a specific GitHub issue |
|
||||
| [GitHub List Issues](github/issues.md#github-list-issues) | Retrieves a list of issues from a GitHub repository |
|
||||
| [GitHub Add Label](github/issues.md#github-add-label) | Adds a label to a GitHub issue or pull request |
|
||||
| [GitHub Remove Label](github/issues.md#github-remove-label) | Removes a label from a GitHub issue or pull request |
|
||||
| [GitHub Assign Issue](github/issues.md#github-assign-issue) | Assigns a user to a GitHub issue |
|
||||
| [GitHub List Tags](github/repo.md#github-list-tags) | Retrieves and lists all tags for a specified GitHub repository |
|
||||
| [GitHub List Branches](github/repo.md#github-list-branches) | Retrieves and lists all branches for a specified GitHub repository |
|
||||
| [GitHub List Discussions](github/repo.md#github-list-discussions) | Retrieves and lists recent discussions for a specified GitHub repository |
|
||||
| [GitHub Make Branch](github/repo.md#github-make-branch) | Creates a new branch in a GitHub repository |
|
||||
| [GitHub Delete Branch](github/repo.md#github-delete-branch) | Deletes a specified branch from a GitHub repository |
|
||||
| [GitHub List Pull Requests](github/pull_requests.md#github-list-pull-requests) | Retrieves a list of pull requests from a specified GitHub repository |
|
||||
| [GitHub Make Pull Request](github/pull_requests.md#github-make-pull-request) | Creates a new pull request in a specified GitHub repository |
|
||||
| [GitHub Read Pull Request](github/pull_requests.md#github-read-pull-request) | Retrieves detailed information about a specific GitHub pull request |
|
||||
| [GitHub Assign PR Reviewer](github/pull_requests.md#github-assign-pr-reviewer) | Assigns a reviewer to a specific GitHub pull request |
|
||||
| [GitHub Unassign PR Reviewer](github/pull_requests.md#github-unassign-pr-reviewer) | Removes an assigned reviewer from a specific GitHub pull request |
|
||||
| [GitHub List PR Reviewers](github/pull_requests.md#github-list-pr-reviewers) | Retrieves a list of all assigned reviewers for a specific GitHub pull request |
|
||||
| [Exa Webset Webhook](exa/webhook_blocks.md#exa-webset-webhook) | Receive webhook notifications for Exa webset events |
|
||||
| [Generic Webhook Trigger](generic_webhook/triggers.md#generic-webhook-trigger) | This block will output the contents of the generic input for the webhook |
|
||||
| [Read RSS Feed](misc.md#read-rss-feed) | Reads RSS feed entries from a given URL |
|
||||
|
||||
## Twitter Integration
|
||||
## Input/Output
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Twitter Post Tweet](twitter/twitter.md#twitter-post-tweet-block) | Creates a tweet on Twitter with text content and optional attachments including media, polls, quotes, or deep links |
|
||||
| [Twitter Delete Tweet](twitter/twitter.md#twitter-delete-tweet-block) | Deletes a specified tweet using its tweet ID |
|
||||
| [Twitter Search Recent Tweets](twitter/twitter.md#twitter-search-recent-tweets-block) | Searches for tweets matching specified criteria with options for filtering and pagination |
|
||||
| [Twitter Get Quote Tweets](twitter/twitter.md#twitter-get-quote-tweets-block) | Gets tweets that quote a specified tweet ID with options for pagination and filtering |
|
||||
| [Twitter Retweet](twitter/twitter.md#twitter-retweet-block) | Creates a retweet of a specified tweet using its tweet ID |
|
||||
| [Twitter Remove Retweet](twitter/twitter.md#twitter-remove-retweet-block) | Removes an existing retweet of a specified tweet |
|
||||
| [Twitter Get Retweeters](twitter/twitter.md#twitter-get-retweeters-block) | Gets list of users who have retweeted a specified tweet with pagination and filtering options |
|
||||
| [Twitter Get User Mentions](twitter/twitter.md#twitter-get-user-mentions-block) | Gets tweets where a specific user is mentioned using their user ID |
|
||||
| [Twitter Get Home Timeline](twitter/twitter.md#twitter-get-home-timeline-block) | Gets recent tweets and retweets from authenticated user and followed accounts |
|
||||
| [Twitter Get User](twitter/twitter.md#twitter-get-user-block) | Gets detailed profile information for a single Twitter user |
|
||||
| [Twitter Get Users](twitter/twitter.md#twitter-get-users-block) | Gets profile information for multiple Twitter users (up to 100) |
|
||||
| [Twitter Search Spaces](twitter/twitter.md#twitter-search-spaces-block) | Searches for Twitter Spaces matching title keywords with state filtering |
|
||||
| [Twitter Get Spaces](twitter/twitter.md#twitter-get-spaces-block) | Gets information about multiple Twitter Spaces by Space IDs or creator IDs |
|
||||
| [Twitter Get Space By Id](twitter/twitter.md#twitter-get-space-by-id-block) | Gets detailed information about a single Twitter Space |
|
||||
| [Twitter Get Space Tweets](twitter/twitter.md#twitter-get-space-tweets-block) | Gets tweets that were shared during a Twitter Space session |
|
||||
| [Twitter Follow List](twitter/twitter.md#twitter-follow-list-block) | Follows a Twitter List using its List ID |
|
||||
| [Twitter Unfollow List](twitter/twitter.md#twitter-unfollow-list-block) | Unfollows a previously followed Twitter List |
|
||||
| [Twitter Get List](twitter/twitter.md#twitter-get-list-block) | Gets detailed information about a specific Twitter List |
|
||||
| [Twitter Get Owned Lists](twitter/twitter.md#twitter-get-owned-lists-block) | Gets all Twitter Lists owned by a specified user |
|
||||
| [Twitter Get List Members](twitter/twitter.md#twitter-get-list-members-block) | Gets information about members of a specified Twitter List |
|
||||
| [Twitter Add List Member](twitter/twitter.md#twitter-add-list-member-block) | Adds a specified user as a member to a Twitter List |
|
||||
| [Twitter Remove List Member](twitter/twitter.md#twitter-remove-list-member-block) | Removes a specified user from a Twitter List |
|
||||
| [Twitter Get List Tweets](twitter/twitter.md#twitter-get-list-tweets-block) | Gets tweets posted within a specified Twitter List |
|
||||
| [Twitter Create List](twitter/twitter.md#twitter-create-list-block) | Creates a new Twitter List with specified name and settings |
|
||||
| [Twitter Update List](twitter/twitter.md#twitter-update-list-block) | Updates name and/or description of an existing Twitter List |
|
||||
| [Twitter Delete List](twitter/twitter.md#twitter-delete-list-block) | Deletes a specified Twitter List |
|
||||
| [Twitter Pin List](twitter/twitter.md#twitter-pin-list-block) | Pins a Twitter List to appear at top of Lists |
|
||||
| [Twitter Unpin List](twitter/twitter.md#twitter-unpin-list-block) | Removes a Twitter List from pinned Lists |
|
||||
| [Twitter Get Pinned Lists](twitter/twitter.md#twitter-get-pinned-lists-block) | Gets all Twitter Lists that are currently pinned |
|
||||
| Twitter List Get Followers | Working... Gets all followers of a specified Twitter List |
|
||||
| Twitter Get Followed Lists | Working... Gets all Lists that a user follows |
|
||||
| Twitter Get DM Events | Working... Retrieves direct message events for a user |
|
||||
| Twitter Send Direct Message | Working... Sends a direct message to a specified user |
|
||||
| Twitter Create DM Conversation | Working... Creates a new direct message conversation |
|
||||
| [Send Authenticated Web Request](misc.md#send-authenticated-web-request) | Make an authenticated HTTP request with host-scoped credentials (JSON / form / multipart) |
|
||||
| [Send Email](misc.md#send-email) | This block sends an email using the provided SMTP credentials |
|
||||
| [Send Web Request](misc.md#send-web-request) | Make an HTTP request (JSON / form / multipart) |
|
||||
|
||||
## Todoist Integration
|
||||
## Agent Integration
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Todoist Create Label](todoist.md#todoist-create-label) | Creates a new label in Todoist |
|
||||
| [Todoist List Labels](todoist.md#todoist-list-labels) | Retrieves all personal labels from Todoist |
|
||||
| [Todoist Get Label](todoist.md#todoist-get-label) | Retrieves a specific label by ID |
|
||||
| [Todoist Create Task](todoist.md#todoist-create-task) | Creates a new task in Todoist |
|
||||
| [Todoist Get Tasks](todoist.md#todoist-get-tasks) | Retrieves active tasks from Todoist |
|
||||
| [Todoist Update Task](todoist.md#todoist-update-task) | Updates an existing task |
|
||||
| [Todoist Close Task](todoist.md#todoist-close-task) | Completes/closes a task |
|
||||
| [Todoist Reopen Task](todoist.md#todoist-reopen-task) | Reopens a completed task |
|
||||
| [Todoist Delete Task](todoist.md#todoist-delete-task) | Permanently deletes a task |
|
||||
| [Todoist List Projects](todoist.md#todoist-list-projects) | Retrieves all projects from Todoist |
|
||||
| [Todoist Create Project](todoist.md#todoist-create-project) | Creates a new project in Todoist |
|
||||
| [Todoist Get Project](todoist.md#todoist-get-project) | Retrieves details for a specific project |
|
||||
| [Todoist Update Project](todoist.md#todoist-update-project) | Updates an existing project |
|
||||
| [Todoist Delete Project](todoist.md#todoist-delete-project) | Deletes a project and its contents |
|
||||
| [Todoist List Collaborators](todoist.md#todoist-list-collaborators) | Retrieves collaborators on a project |
|
||||
| [Todoist List Sections](todoist.md#todoist-list-sections) | Retrieves sections from Todoist |
|
||||
| [Todoist Get Section](todoist.md#todoist-get-section) | Retrieves details for a specific section |
|
||||
| [Todoist Delete Section](todoist.md#todoist-delete-section) | Deletes a section and its tasks |
|
||||
| [Todoist Create Comment](todoist.md#todoist-create-comment) | Creates a new comment on a task or project |
|
||||
| [Todoist Get Comments](todoist.md#todoist-get-comments) | Retrieves all comments for a task or project |
|
||||
| [Todoist Get Comment](todoist.md#todoist-get-comment) | Retrieves a specific comment by ID |
|
||||
| [Todoist Update Comment](todoist.md#todoist-update-comment) | Updates an existing comment |
|
||||
| [Todoist Delete Comment](todoist.md#todoist-delete-comment) | Deletes a comment |
|
||||
| [Agent Executor](misc.md#agent-executor) | Executes an existing agent inside your agent |
|
||||
|
||||
This comprehensive list covers all the blocks available in AutoGPT. Each block is designed to perform a specific task, and they can be combined to create powerful, automated workflows. For more detailed information on each block, click on its name to view the full documentation.
|
||||
## CRM Services
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Add Lead To Campaign](smartlead/campaign.md#add-lead-to-campaign) | Add a lead to a campaign in SmartLead |
|
||||
| [Create Campaign](smartlead/campaign.md#create-campaign) | Create a campaign in SmartLead |
|
||||
| [Hub Spot Company](hubspot/company.md#hub-spot-company) | Manages HubSpot companies - create, update, and retrieve company information |
|
||||
| [Hub Spot Contact](hubspot/contact.md#hub-spot-contact) | Manages HubSpot contacts - create, update, and retrieve contact information |
|
||||
| [Save Campaign Sequences](smartlead/campaign.md#save-campaign-sequences) | Save sequences within a campaign |
|
||||
|
||||
## AI Safety
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Nvidia Deepfake Detect](nvidia/deepfake.md#nvidia-deepfake-detect) | Detects potential deepfakes in images using Nvidia's AI API |
|
||||
|
||||
## Issue Tracking
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Linear Create Comment](linear/comment.md#linear-create-comment) | Creates a new comment on a Linear issue |
|
||||
| [Linear Create Issue](linear/issues.md#linear-create-issue) | Creates a new issue on Linear |
|
||||
| [Linear Get Project Issues](linear/issues.md#linear-get-project-issues) | Gets issues from a Linear project filtered by status and assignee |
|
||||
| [Linear Search Projects](linear/projects.md#linear-search-projects) | Searches for projects on Linear |
|
||||
|
||||
## Hardware
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Compass AI Trigger](compass/triggers.md#compass-ai-trigger) | This block will output the contents of the compass transcription |
|
||||
|
||||
28
docs/platform/blocks/compass/triggers.md
Normal file
28
docs/platform/blocks/compass/triggers.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# Compass AI Trigger
|
||||
|
||||
### What it is
|
||||
This block will output the contents of the compass transcription.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block triggers when a Compass AI transcription is received. It outputs the transcription text content, enabling workflows that process voice input or meeting transcripts from Compass AI.
|
||||
|
||||
The transcription is output as a string for downstream processing, analysis, or storage.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| transcription | The contents of the compass transcription. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Voice Command Processing**: Trigger workflows from voice commands transcribed by Compass AI.
|
||||
|
||||
**Meeting Automation**: Process meeting transcripts to extract action items or summaries.
|
||||
|
||||
**Transcription Analysis**: Analyze transcribed content for sentiment, topics, or key information.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
266
docs/platform/blocks/data.md
Normal file
266
docs/platform/blocks/data.md
Normal file
@@ -0,0 +1,266 @@
|
||||
# Create Dictionary
|
||||
|
||||
### What it is
|
||||
Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates a new dictionary from specified key-value pairs in a single operation. It's designed for cases where you know all the data upfront, rather than building the dictionary incrementally.
|
||||
|
||||
The block takes a dictionary input and outputs it as-is, making it useful as a starting point for workflows that need to pass structured data between blocks.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| values | Key-value pairs to create the dictionary with | Dict[str, True] | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if dictionary creation failed | str |
|
||||
| dictionary | The created dictionary containing the specified key-value pairs | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**API Request Payloads**: Create complete request body objects with all required fields before sending to an API.
|
||||
|
||||
**Configuration Objects**: Build settings dictionaries with predefined values for initializing services or workflows.
|
||||
|
||||
**Data Mapping**: Transform input data into a structured format with specific keys expected by downstream blocks.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Create List
|
||||
|
||||
### What it is
|
||||
Creates a list with the specified values. Use this when you know all the values you want to add upfront. This block can also yield the list in batches based on a maximum size or token limit.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates a list from provided values and can optionally chunk it into smaller batches. When max_size is set, the list is yielded in chunks of that size. When max_tokens is set, chunks are sized to fit within token limits for LLM processing.
|
||||
|
||||
This batching capability is particularly useful when processing large datasets that need to be split for API limits or memory constraints.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| values | A list of values to be combined into a new list. | List[Any] | Yes |
|
||||
| max_size | Maximum size of the list. If provided, the list will be yielded in chunks of this size. | int | No |
|
||||
| max_tokens | Maximum tokens for the list. If provided, the list will be yielded in chunks that fit within this token limit. | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| list | The created list containing the specified values. | List[Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Batch Processing**: Split large datasets into manageable chunks for API calls with rate limits.
|
||||
|
||||
**LLM Token Management**: Divide text content into token-limited batches for processing by language models.
|
||||
|
||||
**Parallel Processing**: Create batches of work items that can be processed concurrently by multiple blocks.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## File Read
|
||||
|
||||
### What it is
|
||||
Reads a file and returns its content as a string, with optional chunking by delimiter and size limits
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block reads file content from various sources (URL, data URI, or local path) and returns it as a string. It supports chunking via delimiter (like newlines) or size limits, yielding content in manageable pieces.
|
||||
|
||||
Use skip_rows and skip_size to skip header content or initial bytes. When delimiter and limits are set, content is yielded chunk by chunk, enabling processing of large files without loading everything into memory.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| file_input | The file to read from (URL, data URI, or local path) | str (file) | Yes |
|
||||
| delimiter | Delimiter to split the content into rows/chunks (e.g., '\n' for lines) | str | No |
|
||||
| size_limit | Maximum size in bytes per chunk to yield (0 for no limit) | int | No |
|
||||
| row_limit | Maximum number of rows to process (0 for no limit, requires delimiter) | int | No |
|
||||
| skip_size | Number of characters to skip from the beginning of the file | int | No |
|
||||
| skip_rows | Number of rows to skip from the beginning (requires delimiter) | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| content | File content, yielded as individual chunks when delimiter or size limits are applied | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Log File Processing**: Read and process log files line by line, filtering or transforming each entry.
|
||||
|
||||
**Large Document Analysis**: Read large text files in chunks for summarization or analysis without memory issues.
|
||||
|
||||
**Data Import**: Read text-based data files and process them row by row for database import.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Persist Information
|
||||
|
||||
### What it is
|
||||
Persist key-value information for the current user
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block stores key-value data that persists across workflow runs. You can scope the persistence to either within_agent (available to all runs of this specific agent) or across_agents (available to all agents for this user).
|
||||
|
||||
The stored data remains available until explicitly overwritten, enabling state management and configuration persistence between workflow executions.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| key | Key to store the information under | str | Yes |
|
||||
| value | Value to store | Value | Yes |
|
||||
| scope | Scope of persistence: within_agent (shared across all runs of this agent) or across_agents (shared across all agents for this user) | "within_agent" | "across_agents" | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| value | Value that was stored | Value |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**User Preferences**: Store user settings like preferred language or notification preferences for future runs.
|
||||
|
||||
**Progress Tracking**: Save the last processed item ID to resume batch processing where you left off.
|
||||
|
||||
**API Token Caching**: Store refreshed API tokens that can be reused across multiple workflow executions.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Read Spreadsheet
|
||||
|
||||
### What it is
|
||||
Reads CSV and Excel files and outputs the data as a list of dictionaries and individual rows. Excel files are automatically converted to CSV format.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block parses CSV and Excel files, converting each row into a dictionary with column headers as keys. Excel files are automatically converted to CSV format before processing.
|
||||
|
||||
Configure delimiter, quote character, and escape character for proper CSV parsing. Use skip_rows to ignore headers or initial rows, and skip_columns to exclude unwanted columns from the output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| contents | The contents of the CSV/spreadsheet data to read | str | No |
|
||||
| file_input | CSV or Excel file to read from (URL, data URI, or local path). Excel files are automatically converted to CSV | str (file) | No |
|
||||
| delimiter | The delimiter used in the CSV/spreadsheet data | str | No |
|
||||
| quotechar | The character used to quote fields | str | No |
|
||||
| escapechar | The character used to escape the delimiter | str | No |
|
||||
| has_header | Whether the CSV file has a header row | bool | No |
|
||||
| skip_rows | The number of rows to skip from the start of the file | int | No |
|
||||
| strip | Whether to strip whitespace from the values | bool | No |
|
||||
| skip_columns | The columns to skip from the start of the row | List[str] | No |
|
||||
| produce_singular_result | If True, yield individual 'row' outputs only (can be slow). If False, yield both 'rows' (all data) | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| row | The data produced from each row in the spreadsheet | Dict[str, str] |
|
||||
| rows | All the data in the spreadsheet as a list of rows | List[Dict[str, str]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Data Import**: Import product catalogs, contact lists, or inventory data from spreadsheet exports.
|
||||
|
||||
**Report Processing**: Parse generated CSV reports from other systems for analysis or transformation.
|
||||
|
||||
**Bulk Operations**: Process spreadsheets of email addresses, user records, or configuration data row by row.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Retrieve Information
|
||||
|
||||
### What it is
|
||||
Retrieve key-value information for the current user
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves previously stored key-value data for the current user. Specify the key and scope to fetch the corresponding value. If the key doesn't exist, the default_value is returned.
|
||||
|
||||
Use within_agent scope for agent-specific data or across_agents for data shared across all user agents.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| key | Key to retrieve the information for | str | Yes |
|
||||
| scope | Scope of persistence: within_agent (shared across all runs of this agent) or across_agents (shared across all agents for this user) | "within_agent" | "across_agents" | No |
|
||||
| default_value | Default value to return if key is not found | Default Value | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| value | Retrieved value or default value | Value |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Resume Processing**: Retrieve the last processed item ID to continue batch operations from where you left off.
|
||||
|
||||
**Load Preferences**: Fetch stored user preferences at workflow start to customize behavior.
|
||||
|
||||
**State Restoration**: Retrieve workflow state saved from a previous run to maintain continuity.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Screenshot Web Page
|
||||
|
||||
### What it is
|
||||
Takes a screenshot of a specified website using ScreenshotOne API
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses the ScreenshotOne API to capture screenshots of web pages. Configure viewport dimensions, output format, and whether to capture the full page or just the visible area.
|
||||
|
||||
Optional features include blocking ads, cookie banners, and chat widgets for cleaner screenshots. Caching can be enabled to improve performance for repeated captures of the same page.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| url | URL of the website to screenshot | str | Yes |
|
||||
| viewport_width | Width of the viewport in pixels | int | No |
|
||||
| viewport_height | Height of the viewport in pixels | int | No |
|
||||
| full_page | Whether to capture the full page length | bool | No |
|
||||
| format | Output format (png, jpeg, webp) | "png" | "jpeg" | "webp" | No |
|
||||
| block_ads | Whether to block ads | bool | No |
|
||||
| block_cookie_banners | Whether to block cookie banners | bool | No |
|
||||
| block_chats | Whether to block chat widgets | bool | No |
|
||||
| cache | Whether to enable caching | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| image | The screenshot image data | str (file) |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Visual Documentation**: Capture screenshots of web pages for documentation, reports, or archives.
|
||||
|
||||
**Competitive Monitoring**: Regularly screenshot competitor websites to track design and content changes.
|
||||
|
||||
**Visual Testing**: Capture page renders for visual regression testing or design verification workflows.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
82
docs/platform/blocks/dataforseo/keyword_suggestions.md
Normal file
82
docs/platform/blocks/dataforseo/keyword_suggestions.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Data For Seo Keyword Suggestions
|
||||
|
||||
### What it is
|
||||
Get keyword suggestions from DataForSEO Labs Google API
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the DataForSEO Labs Google Keyword Suggestions API to generate keyword ideas based on a seed keyword. It provides search volume, competition metrics, CPC data, and keyword difficulty scores for each suggestion.
|
||||
|
||||
Configure location and language targeting to get region-specific results. Optional SERP and clickstream data provide additional insights into search behavior and click patterns.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| keyword | Seed keyword to get suggestions for | str | Yes |
|
||||
| location_code | Location code for targeting (e.g., 2840 for USA) | int | No |
|
||||
| language_code | Language code (e.g., 'en' for English) | str | No |
|
||||
| include_seed_keyword | Include the seed keyword in results | bool | No |
|
||||
| include_serp_info | Include SERP information | bool | No |
|
||||
| include_clickstream_data | Include clickstream metrics | bool | No |
|
||||
| limit | Maximum number of results (up to 3000) | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| suggestions | List of keyword suggestions with metrics | List[KeywordSuggestion] |
|
||||
| suggestion | A single keyword suggestion with metrics | KeywordSuggestion |
|
||||
| total_count | Total number of suggestions returned | int |
|
||||
| seed_keyword | The seed keyword used for the query | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Content Planning**: Generate blog post and article ideas based on keyword suggestions with high search volume.
|
||||
|
||||
**SEO Strategy**: Discover new keyword opportunities to target based on competition and difficulty metrics.
|
||||
|
||||
**PPC Campaigns**: Find keywords for advertising campaigns using CPC and competition data.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Keyword Suggestion Extractor
|
||||
|
||||
### What it is
|
||||
Extract individual fields from a KeywordSuggestion object
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block extracts individual fields from a KeywordSuggestion object returned by the Keyword Suggestions block. It decomposes the suggestion into separate outputs for easier use in workflows.
|
||||
|
||||
Each field including keyword text, search volume, competition level, CPC, difficulty score, and optional SERP/clickstream data becomes available as individual outputs for downstream processing.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| suggestion | The keyword suggestion object to extract fields from | KeywordSuggestion | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| keyword | The keyword suggestion | str |
|
||||
| search_volume | Monthly search volume | int |
|
||||
| competition | Competition level (0-1) | float |
|
||||
| cpc | Cost per click in USD | float |
|
||||
| keyword_difficulty | Keyword difficulty score | int |
|
||||
| serp_info | data from SERP for each keyword | Dict[str, True] |
|
||||
| clickstream_data | Clickstream data metrics | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Keyword Filtering**: Extract search volume and difficulty to filter keywords meeting specific thresholds.
|
||||
|
||||
**Data Analysis**: Access individual metrics for comparison, sorting, or custom scoring algorithms.
|
||||
|
||||
**Report Generation**: Pull specific fields like CPC and competition for SEO or PPC reports.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
83
docs/platform/blocks/dataforseo/related_keywords.md
Normal file
83
docs/platform/blocks/dataforseo/related_keywords.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# Data For Seo Related Keywords
|
||||
|
||||
### What it is
|
||||
Get related keywords from DataForSEO Labs Google API
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses the DataForSEO Labs Google Related Keywords API to find semantically related keywords based on a seed keyword. It returns keywords that share similar search intent or topic relevance.
|
||||
|
||||
The depth parameter controls the breadth of the search, with higher values returning exponentially more related keywords. Results include search metrics, competition data, and optional SERP/clickstream information.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| keyword | Seed keyword to find related keywords for | str | Yes |
|
||||
| location_code | Location code for targeting (e.g., 2840 for USA) | int | No |
|
||||
| language_code | Language code (e.g., 'en' for English) | str | No |
|
||||
| include_seed_keyword | Include the seed keyword in results | bool | No |
|
||||
| include_serp_info | Include SERP information | bool | No |
|
||||
| include_clickstream_data | Include clickstream metrics | bool | No |
|
||||
| limit | Maximum number of results (up to 3000) | int | No |
|
||||
| depth | Keyword search depth (0-4). Controls the number of returned keywords: 0=1 keyword, 1=~8 keywords, 2=~72 keywords, 3=~584 keywords, 4=~4680 keywords | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| related_keywords | List of related keywords with metrics | List[RelatedKeyword] |
|
||||
| related_keyword | A related keyword with metrics | RelatedKeyword |
|
||||
| total_count | Total number of related keywords returned | int |
|
||||
| seed_keyword | The seed keyword used for the query | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Topic Clustering**: Group related keywords to build comprehensive content clusters around a topic.
|
||||
|
||||
**Semantic SEO**: Discover LSI (latent semantic indexing) keywords to improve content relevance.
|
||||
|
||||
**Keyword Expansion**: Expand targeting beyond exact match to capture related search traffic.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Related Keyword Extractor
|
||||
|
||||
### What it is
|
||||
Extract individual fields from a RelatedKeyword object
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block extracts individual fields from a RelatedKeyword object returned by the Related Keywords block. It separates the compound object into distinct outputs for workflow integration.
|
||||
|
||||
Outputs include the keyword text, search volume, competition score, CPC, keyword difficulty, and any SERP or clickstream data that was requested in the original search.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| related_keyword | The related keyword object to extract fields from | RelatedKeyword | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| keyword | The related keyword | str |
|
||||
| search_volume | Monthly search volume | int |
|
||||
| competition | Competition level (0-1) | float |
|
||||
| cpc | Cost per click in USD | float |
|
||||
| keyword_difficulty | Keyword difficulty score | int |
|
||||
| serp_info | SERP data for the keyword | Dict[str, True] |
|
||||
| clickstream_data | Clickstream data metrics | Dict[str, True] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Keyword Prioritization**: Extract metrics to rank related keywords by opportunity score.
|
||||
|
||||
**Content Optimization**: Access keyword difficulty and search volume for content planning decisions.
|
||||
|
||||
**Competitive Analysis**: Pull competition and CPC data to assess keyword viability.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
336
docs/platform/blocks/discord/bot_blocks.md
Normal file
336
docs/platform/blocks/discord/bot_blocks.md
Normal file
@@ -0,0 +1,336 @@
|
||||
# Create Discord Thread
|
||||
|
||||
### What it is
|
||||
Creates a new thread in a Discord channel.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses the Discord API with a bot token to create a new thread in a specified channel. Threads can be public or private (private requires Boost Level 2+).
|
||||
|
||||
Configure auto-archive duration and optionally send an initial message when the thread is created.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| channel_name | Channel ID or channel name to create the thread in | str | Yes |
|
||||
| server_name | Server name (only needed if using channel name) | str | No |
|
||||
| thread_name | The name of the thread to create | str | Yes |
|
||||
| is_private | Whether to create a private thread (requires Boost Level 2+) or public thread | bool | No |
|
||||
| auto_archive_duration | Duration before the thread is automatically archived | "60" | "1440" | "4320" | No |
|
||||
| message_content | Optional initial message to send in the thread | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | Operation status | str |
|
||||
| thread_id | ID of the created thread | str |
|
||||
| thread_name | Name of the created thread | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Support Tickets**: Create threads for individual support conversations to keep channels organized.
|
||||
|
||||
**Discussion Topics**: Automatically create threads for new topics or announcements.
|
||||
|
||||
**Project Channels**: Spin up discussion threads for specific tasks or features.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Discord Channel Info
|
||||
|
||||
### What it is
|
||||
Resolves Discord channel names to IDs and vice versa.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block resolves Discord channel identifiers, converting between channel names and IDs. It queries the Discord API to find the channel and returns comprehensive information including server details.
|
||||
|
||||
Useful for workflows that receive channel names but need IDs for other Discord operations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| channel_identifier | Channel name or channel ID to look up | str | Yes |
|
||||
| server_name | Server name (optional, helps narrow down search) | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| channel_id | The channel's ID | str |
|
||||
| channel_name | The channel's name | str |
|
||||
| server_id | The server's ID | str |
|
||||
| server_name | The server's name | str |
|
||||
| channel_type | Type of channel (text, voice, etc) | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Dynamic Routing**: Look up channel IDs to route messages to user-specified channels by name.
|
||||
|
||||
**Validation**: Verify channel existence before attempting to send messages.
|
||||
|
||||
**Workflow Setup**: Get channel details during workflow configuration.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Discord User Info
|
||||
|
||||
### What it is
|
||||
Gets information about a Discord user by their ID.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves information about a Discord user by their ID. It queries the Discord API and returns profile details including username, display name, avatar, and account creation date.
|
||||
|
||||
The user must be visible to your bot (share a server with your bot).
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| user_id | The Discord user ID to get information about | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| user_id | The user's ID (passed through for chaining) | str |
|
||||
| username | The user's username | str |
|
||||
| display_name | The user's display name | str |
|
||||
| discriminator | The user's discriminator (if applicable) | str |
|
||||
| avatar_url | URL to the user's avatar | str |
|
||||
| is_bot | Whether the user is a bot | bool |
|
||||
| created_at | When the account was created | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**User Profiling**: Get user details to personalize responses or create user profiles.
|
||||
|
||||
**Mention Resolution**: Look up user information when processing mentions in messages.
|
||||
|
||||
**Activity Logging**: Retrieve user details for logging or analytics purposes.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Read Discord Messages
|
||||
|
||||
### What it is
|
||||
Reads messages from a Discord channel using a bot token.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block uses a Discord bot to log into a server and listen for new messages. When a message is received, it extracts the content, channel name, and username of the sender. If the message contains a text file attachment, the block also retrieves and includes the file's content.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_content | The content of the message received | str |
|
||||
| message_id | The ID of the message | str |
|
||||
| channel_id | The ID of the channel | str |
|
||||
| channel_name | The name of the channel the message was received from | str |
|
||||
| user_id | The ID of the user who sent the message | str |
|
||||
| username | The username of the user who sent the message | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
This block could be used to monitor a Discord channel for support requests. When a user posts a message, the block captures it, allowing another part of the system to process and respond to the request.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Reply To Discord Message
|
||||
|
||||
### What it is
|
||||
Replies to a specific Discord message.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block sends a reply to a specific Discord message, creating a threaded reply that references the original message. Optionally mention the original author to notify them.
|
||||
|
||||
The reply appears linked to the original message in Discord's UI, maintaining conversation context.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| channel_id | The channel ID where the message to reply to is located | str | Yes |
|
||||
| message_id | The ID of the message to reply to | str | Yes |
|
||||
| reply_content | The content of the reply | str | Yes |
|
||||
| mention_author | Whether to mention the original message author | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | Operation status | str |
|
||||
| reply_id | ID of the reply message | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Conversation Bots**: Reply to user questions maintaining conversation context.
|
||||
|
||||
**Support Responses**: Respond to support requests by replying to the original message.
|
||||
|
||||
**Interactive Commands**: Reply to command messages with results or confirmations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Send Discord DM
|
||||
|
||||
### What it is
|
||||
Sends a direct message to a Discord user using their user ID.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block sends a direct message to a Discord user. It opens a DM channel with the user (if not already open) and sends the message. The user must allow DMs from server members or share a server with your bot.
|
||||
|
||||
Returns the message ID of the sent DM for tracking purposes.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| user_id | The Discord user ID to send the DM to (e.g., '123456789012345678') | str | Yes |
|
||||
| message_content | The content of the direct message to send | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | The status of the operation | str |
|
||||
| message_id | The ID of the sent message | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Private Notifications**: Send private alerts or notifications to specific users.
|
||||
|
||||
**Welcome Messages**: DM new server members with welcome information.
|
||||
|
||||
**Verification Systems**: Send verification codes or instructions via DM.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Send Discord Embed
|
||||
|
||||
### What it is
|
||||
Sends a rich embed message to a Discord channel.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block sends a rich embed message to a Discord channel. Embeds support formatted content with titles, descriptions, colors, images, thumbnails, author sections, footers, and structured fields.
|
||||
|
||||
Configure the embed's appearance with colors, images, and multiple fields for organized information display.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| channel_identifier | Channel ID or channel name to send the embed to | str | Yes |
|
||||
| server_name | Server name (only needed if using channel name) | str | No |
|
||||
| title | The title of the embed | str | No |
|
||||
| description | The main content/description of the embed | str | No |
|
||||
| color | Embed color as integer (e.g., 0x00ff00 for green) | int | No |
|
||||
| thumbnail_url | URL for the thumbnail image | str | No |
|
||||
| image_url | URL for the main embed image | str | No |
|
||||
| author_name | Author name to display | str | No |
|
||||
| footer_text | Footer text | str | No |
|
||||
| fields | List of field dictionaries with 'name', 'value', and optional 'inline' keys | List[Dict[str, Any]] | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | Operation status | str |
|
||||
| message_id | ID of the sent embed message | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Status Updates**: Send formatted status updates with colors and structured information.
|
||||
|
||||
**Data Displays**: Present data in organized embed fields for easy reading.
|
||||
|
||||
**Announcements**: Create visually appealing announcements with images and branding.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Send Discord File
|
||||
|
||||
### What it is
|
||||
Sends a file attachment to a Discord channel.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uploads and sends a file attachment to a Discord channel. It supports various file types including images, documents, and other media. Files can be provided as URLs, data URIs, or local paths.
|
||||
|
||||
Optionally include a message along with the file attachment.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| channel_identifier | Channel ID or channel name to send the file to | str | Yes |
|
||||
| server_name | Server name (only needed if using channel name) | str | No |
|
||||
| file | The file to send (URL, data URI, or local path). Supports images, videos, documents, etc. | str (file) | Yes |
|
||||
| filename | Name of the file when sent (e.g., 'report.pdf', 'image.png') | str | No |
|
||||
| message_content | Optional message to send with the file | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | Operation status | str |
|
||||
| message_id | ID of the sent message | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Report Sharing**: Send generated reports or documents to Discord channels.
|
||||
|
||||
**Image Posting**: Share images from workflows or external sources.
|
||||
|
||||
**Backup Distribution**: Share backup files or exports with team channels.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Send Discord Message
|
||||
|
||||
### What it is
|
||||
Sends a message to a Discord channel using a bot token.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block uses a Discord bot to log into a server, locate the specified channel, and send the provided message. If the message is longer than Discord's character limit, it automatically splits the message into smaller chunks and sends them sequentially.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| message_content | The content of the message to send | str | Yes |
|
||||
| channel_name | Channel ID or channel name to send the message to | str | Yes |
|
||||
| server_name | Server name (only needed if using channel name) | str | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| status | The status of the operation (e.g., 'Message sent', 'Error') | str |
|
||||
| message_id | The ID of the sent message | str |
|
||||
| channel_id | The ID of the channel where the message was sent | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
This block could be used as part of an automated notification system. For example, it could send alerts to a Discord channel when certain events occur in another system, such as when a new user signs up or when a critical error is detected.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
32
docs/platform/blocks/discord/oauth_blocks.md
Normal file
32
docs/platform/blocks/discord/oauth_blocks.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Discord Get Current User
|
||||
|
||||
### What it is
|
||||
Gets information about the currently authenticated Discord user using OAuth2 credentials.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Discord's OAuth2 API to retrieve information about the currently authenticated user. It requires valid OAuth2 credentials that have been obtained through Discord's authorization flow with the `identify` scope.
|
||||
|
||||
The block queries the Discord `/users/@me` endpoint and returns the user's profile information including their unique ID, username, avatar, and customization settings like banner and accent color.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| user_id | The authenticated user's Discord ID | str |
|
||||
| username | The user's username | str |
|
||||
| avatar_url | URL to the user's avatar image | str |
|
||||
| banner_url | URL to the user's banner image (if set) | str |
|
||||
| accent_color | The user's accent color as an integer | int |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**User Authentication**: Verify user identity after OAuth login to personalize experiences or grant access.
|
||||
|
||||
**Profile Integration**: Display Discord user information in external applications or dashboards.
|
||||
|
||||
**Account Linking**: Connect Discord accounts with other services using the unique user ID.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
151
docs/platform/blocks/enrichlayer/linkedin.md
Normal file
151
docs/platform/blocks/enrichlayer/linkedin.md
Normal file
@@ -0,0 +1,151 @@
|
||||
# Get Linkedin Profile
|
||||
|
||||
### What it is
|
||||
Fetch LinkedIn profile data using Enrichlayer
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves comprehensive LinkedIn profile data using Enrichlayer's API. Provide a LinkedIn profile URL to fetch details including work history, education, skills, and contact information.
|
||||
|
||||
Configure caching options for performance and optionally include additional data like inferred salary, personal email, or social media links.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| linkedin_url | LinkedIn profile URL to fetch data from | str | Yes |
|
||||
| fallback_to_cache | Cache usage if live fetch fails | "on-error" \| "never" | No |
|
||||
| use_cache | Cache utilization strategy | "if-present" \| "never" | No |
|
||||
| include_skills | Include skills data | bool | No |
|
||||
| include_inferred_salary | Include inferred salary data | bool | No |
|
||||
| include_personal_email | Include personal email | bool | No |
|
||||
| include_personal_contact_number | Include personal contact number | bool | No |
|
||||
| include_social_media | Include social media profiles | bool | No |
|
||||
| include_extra | Include additional data | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| profile | LinkedIn profile data | PersonProfileResponse |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Lead Enrichment**: Enrich sales leads with detailed professional background information.
|
||||
|
||||
**Recruitment Research**: Gather candidate information for hiring and outreach workflows.
|
||||
|
||||
**Contact Discovery**: Find contact details associated with LinkedIn profiles.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Get Linkedin Profile Picture
|
||||
|
||||
### What it is
|
||||
Get LinkedIn profile pictures using Enrichlayer
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves the profile picture URL for a LinkedIn profile using Enrichlayer's API. Provide the LinkedIn profile URL to get a direct link to the user's profile photo.
|
||||
|
||||
The returned URL can be used for display, download, or further image processing.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| linkedin_profile_url | LinkedIn profile URL | str | Yes |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| profile_picture_url | LinkedIn profile picture URL | str (file) |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**CRM Enhancement**: Add profile photos to contact records for visual identification.
|
||||
|
||||
**Personalized Outreach**: Include profile pictures in personalized email or message templates.
|
||||
|
||||
**Identity Verification**: Retrieve profile photos for manual identity verification workflows.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Linkedin Person Lookup
|
||||
|
||||
### What it is
|
||||
Look up LinkedIn profiles by person information using Enrichlayer
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block finds LinkedIn profiles by matching person details like name, company, and title using Enrichlayer's API. Provide first name and company domain as minimum inputs, with optional last name, location, and title for better matching.
|
||||
|
||||
Enable similarity checks and profile enrichment for more detailed results.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| first_name | Person's first name | str | Yes |
|
||||
| last_name | Person's last name | str | No |
|
||||
| company_domain | Domain of the company they work for | str | Yes |
|
||||
| location | Person's location (optional) | str | No |
|
||||
| title | Person's job title (optional) | str | No |
|
||||
| include_similarity_checks | Include similarity checks | bool | No |
|
||||
| enrich_profile | Enrich the profile with additional data | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| lookup_result | LinkedIn profile lookup result | PersonLookupResponse |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Lead Discovery**: Find LinkedIn profiles for leads when you only have name and company.
|
||||
|
||||
**Contact Matching**: Match CRM contacts to their LinkedIn profiles for enrichment.
|
||||
|
||||
**Prospecting**: Discover LinkedIn profiles of people at target companies.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Linkedin Role Lookup
|
||||
|
||||
### What it is
|
||||
Look up LinkedIn profiles by role in a company using Enrichlayer
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block finds LinkedIn profiles by role title and company using Enrichlayer's API. Specify a role like CEO, CTO, or VP of Sales along with the company name to find matching profiles.
|
||||
|
||||
Enable enrich_profile to automatically fetch full profile data for the matched result.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| role | Role title (e.g., CEO, CTO) | str | Yes |
|
||||
| company_name | Name of the company | str | Yes |
|
||||
| enrich_profile | Enrich the profile with additional data | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| role_lookup_result | LinkedIn role lookup result | RoleLookupResponse |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Decision Maker Discovery**: Find key decision makers at target companies for sales outreach.
|
||||
|
||||
**Executive Research**: Look up C-suite executives for account-based marketing.
|
||||
|
||||
**Org Chart Building**: Map leadership at companies by looking up specific roles.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
36
docs/platform/blocks/exa/answers.md
Normal file
36
docs/platform/blocks/exa/answers.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Exa Answer
|
||||
|
||||
### What it is
|
||||
Get an LLM answer to a question informed by Exa search results
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block sends your question to the Exa Answer API, which performs a semantic search across billions of web pages to find relevant information. The API then uses an LLM to synthesize the search results into a coherent answer with citations.
|
||||
|
||||
The block returns both the generated answer and the source citations that informed it. You can optionally include full text content from the search results for more comprehensive answers.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| query | The question or query to answer | str | Yes |
|
||||
| text | Include full text content in the search results used for the answer | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the request failed | str |
|
||||
| answer | The generated answer based on search results | str |
|
||||
| citations | Search results used to generate the answer | List[AnswerCitation] |
|
||||
| citation | Individual citation from the answer | AnswerCitation |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Research Assistance**: Get quick, sourced answers to complex questions without manually searching multiple websites.
|
||||
|
||||
**Fact Verification**: Verify claims or statements by getting answers backed by real web sources with citations.
|
||||
|
||||
**Content Creation**: Generate research-backed content by asking questions about topics and using the cited sources.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
40
docs/platform/blocks/exa/code_context.md
Normal file
40
docs/platform/blocks/exa/code_context.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Exa Code Context
|
||||
|
||||
### What it is
|
||||
Search billions of GitHub repos, docs, and Stack Overflow for relevant code examples
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block uses Exa's specialized code search API to find relevant code examples from GitHub repositories, official documentation, and Stack Overflow. The search is optimized for code context, returning formatted snippets with source references.
|
||||
|
||||
The block returns code snippets along with metadata including the source URL, search time, and token counts. You can control response size with the tokens_num parameter to balance comprehensiveness with cost.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| query | Search query to find relevant code snippets. Describe what you're trying to do or what code you're looking for. | str | Yes |
|
||||
| tokens_num | Token limit for response. Use 'dynamic' for automatic sizing, 5000 for standard queries, or 10000 for comprehensive examples. | str \| int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| request_id | Unique identifier for this request | str |
|
||||
| query | The search query used | str |
|
||||
| response | Formatted code snippets and contextual examples with sources | str |
|
||||
| results_count | Number of code sources found and included | int |
|
||||
| cost_dollars | Cost of this request in dollars | str |
|
||||
| search_time | Time taken to search in milliseconds | float |
|
||||
| output_tokens | Number of tokens in the response | int |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**API Integration Examples**: Find real-world code examples showing how to integrate with specific APIs or libraries.
|
||||
|
||||
**Debugging Assistance**: Search for code patterns related to error messages or specific programming challenges.
|
||||
|
||||
**Learning New Technologies**: Discover implementation examples when learning a new framework or programming language.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
47
docs/platform/blocks/exa/contents.md
Normal file
47
docs/platform/blocks/exa/contents.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Exa Contents
|
||||
|
||||
### What it is
|
||||
Retrieves document contents using Exa's contents API
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves full content from web pages using Exa's contents API. You can provide URLs directly or document IDs from previous searches. The API supports live crawling to fetch fresh content and can extract text, highlights, and AI-generated summaries.
|
||||
|
||||
The block supports subpage crawling to gather related content and offers various content retrieval options including full text extraction, relevant highlights, and customizable summary generation. Results are formatted for easy use with LLMs.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| urls | Array of URLs to crawl (preferred over 'ids') | List[str] | No |
|
||||
| ids | [DEPRECATED - use 'urls' instead] Array of document IDs obtained from searches | List[str] | No |
|
||||
| text | Retrieve text content from pages | bool | No |
|
||||
| highlights | Text snippets most relevant from each page | HighlightSettings | No |
|
||||
| summary | LLM-generated summary of the webpage | SummarySettings | No |
|
||||
| livecrawl | Livecrawling options: never, fallback (default), always, preferred | "never" \| "fallback" \| "always" \| "preferred" | No |
|
||||
| livecrawl_timeout | Timeout for livecrawling in milliseconds | int | No |
|
||||
| subpages | Number of subpages to crawl | int | No |
|
||||
| subpage_target | Keyword(s) to find specific subpages of search results | str \| List[str] | No |
|
||||
| extras | Extra parameters for additional content | ExtrasSettings | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the request failed | str |
|
||||
| results | List of document contents with metadata | List[ExaSearchResults] |
|
||||
| result | Single document content result | ExaSearchResults |
|
||||
| context | A formatted string of the results ready for LLMs | str |
|
||||
| request_id | Unique identifier for the request | str |
|
||||
| statuses | Status information for each requested URL | List[ContentStatus] |
|
||||
| cost_dollars | Cost breakdown for the request | CostDollars |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Content Aggregation**: Retrieve full article content from multiple URLs for analysis or summarization.
|
||||
|
||||
**Competitive Research**: Crawl competitor websites to extract product information, pricing, or feature details.
|
||||
|
||||
**Data Enrichment**: Fetch detailed content from URLs discovered through Exa searches to build comprehensive datasets.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
173
docs/platform/blocks/exa/research.md
Normal file
173
docs/platform/blocks/exa/research.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Exa Create Research
|
||||
|
||||
### What it is
|
||||
Create research task with optional waiting - explores web and synthesizes findings with citations
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block creates an asynchronous research task using Exa's Research API. The API autonomously explores the web, searches for relevant information, and synthesizes findings into a comprehensive report with citations.
|
||||
|
||||
You can choose from different model tiers (fast, standard, pro) depending on your speed vs. depth requirements. The block supports structured output via JSON Schema and can optionally wait for completion to return results immediately.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| instructions | Research instructions - clearly define what information to find, how to conduct research, and desired output format. | str | Yes |
|
||||
| model | Research model: 'fast' for quick results, 'standard' for balanced quality, 'pro' for thorough analysis | "exa-research-fast" \| "exa-research" \| "exa-research-pro" | No |
|
||||
| output_schema | JSON Schema to enforce structured output. When provided, results are validated and returned as parsed JSON. | Dict[str, Any] | No |
|
||||
| wait_for_completion | Wait for research to complete before returning. Ensures you get results immediately. | bool | No |
|
||||
| polling_timeout | Maximum time to wait for completion in seconds (only if wait_for_completion is True) | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| research_id | Unique identifier for tracking this research request | str |
|
||||
| status | Final status of the research | str |
|
||||
| model | The research model used | str |
|
||||
| instructions | The research instructions provided | str |
|
||||
| created_at | When the research was created (Unix timestamp in ms) | int |
|
||||
| output_content | Research output as text (only if wait_for_completion was True and completed) | str |
|
||||
| output_parsed | Structured JSON output (only if wait_for_completion and outputSchema were provided) | Dict[str, Any] |
|
||||
| cost_total | Total cost in USD (only if wait_for_completion was True and completed) | float |
|
||||
| elapsed_time | Time taken to complete in seconds (only if wait_for_completion was True) | float |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Market Research**: Automatically research market trends, competitors, or industry developments with cited sources.
|
||||
|
||||
**Due Diligence**: Conduct comprehensive background research on companies, people, or technologies.
|
||||
|
||||
**Content Research**: Gather research on topics for articles, reports, or presentations with proper citations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Exa Get Research
|
||||
|
||||
### What it is
|
||||
Get status and results of a research task
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves the current status and results of a previously created research task. You can check whether the research is still running, completed, or failed.
|
||||
|
||||
When the research is complete, the block returns the full output content along with cost breakdown including searches performed, pages crawled, and tokens used. You can also optionally retrieve the detailed event log of research operations.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| research_id | The ID of the research task to retrieve | str | Yes |
|
||||
| include_events | Include detailed event log of research operations | bool | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| research_id | The research task identifier | str |
|
||||
| status | Current status: pending, running, completed, canceled, or failed | str |
|
||||
| instructions | The original research instructions | str |
|
||||
| model | The research model used | str |
|
||||
| created_at | When research was created (Unix timestamp in ms) | int |
|
||||
| finished_at | When research finished (Unix timestamp in ms, if completed/canceled/failed) | int |
|
||||
| output_content | Research output as text (if completed) | str |
|
||||
| output_parsed | Structured JSON output matching outputSchema (if provided and completed) | Dict[str, Any] |
|
||||
| cost_total | Total cost in USD (if completed) | float |
|
||||
| cost_searches | Number of searches performed (if completed) | int |
|
||||
| cost_pages | Number of pages crawled (if completed) | int |
|
||||
| cost_reasoning_tokens | AI tokens used for reasoning (if completed) | int |
|
||||
| error_message | Error message if research failed | str |
|
||||
| events | Detailed event log (if include_events was True) | List[Dict[str, Any]] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Status Monitoring**: Check progress of long-running research tasks that were started asynchronously.
|
||||
|
||||
**Result Retrieval**: Fetch completed research results from tasks started earlier in your workflow.
|
||||
|
||||
**Cost Tracking**: Review the cost breakdown of completed research for budgeting and optimization.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Exa List Research
|
||||
|
||||
### What it is
|
||||
List all research tasks with pagination support
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves a list of all your research tasks, ordered by creation time with newest first. It supports pagination for handling large numbers of tasks.
|
||||
|
||||
The block returns basic information about each task including its ID, status, instructions, and timestamps. Use this to find specific research tasks or monitor all ongoing research activities.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| cursor | Cursor for pagination through results | str | No |
|
||||
| limit | Number of research tasks to return (1-50) | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| research_tasks | List of research tasks ordered by creation time (newest first) | List[ResearchTaskModel] |
|
||||
| research_task | Individual research task (yielded for each task) | ResearchTaskModel |
|
||||
| has_more | Whether there are more tasks to paginate through | bool |
|
||||
| next_cursor | Cursor for the next page of results | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Research Management**: View all active and completed research tasks for project management.
|
||||
|
||||
**Task Discovery**: Find previously created research tasks to retrieve their results or check status.
|
||||
|
||||
**Activity Auditing**: Review research activity history for compliance or reporting purposes.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Exa Wait For Research
|
||||
|
||||
### What it is
|
||||
Wait for a research task to complete with configurable timeout
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block polls a research task until it completes or times out. It periodically checks the task status at configurable intervals and returns the final results when done.
|
||||
|
||||
The block is useful when you need to block workflow execution until research completes. It returns whether the operation timed out, allowing you to handle incomplete research gracefully.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| research_id | The ID of the research task to wait for | str | Yes |
|
||||
| timeout | Maximum time to wait in seconds | int | No |
|
||||
| check_interval | Seconds between status checks | int | No |
|
||||
|
||||
### Outputs
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| research_id | The research task identifier | str |
|
||||
| final_status | Final status when polling stopped | str |
|
||||
| output_content | Research output as text (if completed) | str |
|
||||
| output_parsed | Structured JSON output (if outputSchema was provided and completed) | Dict[str, Any] |
|
||||
| cost_total | Total cost in USD | float |
|
||||
| elapsed_time | Total time waited in seconds | float |
|
||||
| timed_out | Whether polling timed out before completion | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Sequential Workflows**: Ensure research completes before proceeding to dependent workflow steps.
|
||||
|
||||
**Synchronous Integration**: Convert asynchronous research into synchronous operations for simpler workflow logic.
|
||||
|
||||
**Timeout Handling**: Implement research with graceful timeout handling for time-sensitive applications.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user