Merge branch 'master' into zamilmajdy/code-validation

This commit is contained in:
Reinier van der Leer
2024-07-23 20:38:30 +02:00
150 changed files with 19679 additions and 1855 deletions


@@ -88,14 +88,16 @@ body:
- type: dropdown
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
label: What LLM Provider do you use?
description: >
If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
- GPT-4(32k)
- Azure
- Groq
- Anthropic
- Llamafile
- Other (detail in issue)
validations:
required: true
@@ -126,6 +128,13 @@ body:
label: Specify the area
description: Please specify the area you think is best related to the issue.
- type: input
attributes:
label: What commit or version are you using?
description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
validations:
required: true
- type: textarea
attributes:
label: Describe your issue.

.github/labeler.yml

@@ -17,3 +17,11 @@ Frontend:
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
Builder:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_builder/**
Server:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_server/**


@@ -34,6 +34,15 @@ jobs:
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER }}
password: ${{ secrets.DB_PASS }}
database: postgres
port: 5432
id: postgres
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
@@ -106,161 +115,34 @@ jobs:
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
run: poetry run prisma generate --schema postgres/schema.prisma
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
run: poetry run prisma migrate dev --schema postgres/schema.prisma --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
- name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
test
env:
CI: true
PLAIN_OUTPUT: True
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER }}
DB_PASS: ${{ secrets.DB_PASS }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER }}:${{ secrets.DB_PASS }}@localhost:5432/${{ secrets.DB_NAME }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}
build:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
- name: install rpm
if: matrix.platform-os == 'ubuntu'
run: sudo apt-get install -y alien fakeroot rpm
- name: Build distribution
run: |
case "${{ matrix.platform-os }}" in
"macos" | "macos-arm64")
${MAC_COMMAND}
;;
"windows")
${WINDOWS_COMMAND}
;;
*)
${LINUX_COMMAND}
;;
esac
env:
MAC_COMMAND: "poetry run poe dist_dmg"
WINDOWS_COMMAND: "poetry run poe dist_msi"
LINUX_COMMAND: "poetry run poe dist_appimage"
# break this into separate steps, each with their own name that matches the file
- name: Upload App artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-app-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.app
- name: Upload dmg artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-dmg-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/AutoGPTServer.dmg
- name: Upload msi artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-msi-${{ matrix.platform-os }}
path: D:\a\AutoGPT\AutoGPT\rnd\autogpt_server\dist\*.msi
- name: Upload deb artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-deb-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.deb
- name: Upload rpm artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-rpm-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.rpm
- name: Upload tar.gz artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-tar.gz-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.tar.gz
- name: Upload zip artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-zip-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.zip
- name: Upload pkg artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-pkg-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.pkg
- name: Upload AppImage artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-AppImage-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/dist/*.AppImage


@@ -0,0 +1,55 @@
import os
import requests
import sys
# GitHub API endpoint
api_url = os.environ["GITHUB_API_URL"]
repo = os.environ["GITHUB_REPOSITORY"]
sha = os.environ["GITHUB_SHA"]
# GitHub token for authentication
github_token = os.environ["GITHUB_TOKEN"]
# API endpoint for check runs for the specific SHA
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
# Set up headers for authentication
headers = {
"Authorization": f"token {github_token}",
"Accept": "application/vnd.github.v3+json"
}
# Make the API request
response = requests.get(endpoint, headers=headers)
if response.status_code != 200:
print(f"Error: Unable to fetch check runs data. Status code: {response.status_code}")
sys.exit(1)
check_runs = response.json()["check_runs"]
# Flag to track if all other check runs have passed
all_others_passed = True
# Current run id
current_run_id = os.environ["GITHUB_RUN_ID"]
for run in check_runs:
if str(run["id"]) != current_run_id:
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}")
else:
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)

.github/workflows/workflow-checker.yml

@@ -0,0 +1,51 @@
name: PR Status Checker
on:
workflow_run:
workflows: ["*"]
types:
- completed
jobs:
status-check:
name: Check Actions Status
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install requests
- name: Debug Information
run: |
echo "Event name: ${{ github.event_name }}"
echo "Workflow: ${{ github.workflow }}"
echo "Action: ${{ github.action }}"
echo "Actor: ${{ github.actor }}"
echo "Repository: ${{ github.repository }}"
echo "Ref: ${{ github.ref }}"
echo "Head ref: ${{ github.head_ref }}"
echo "Base ref: ${{ github.base_ref }}"
echo "Event payload:"
cat $GITHUB_EVENT_PATH
- name: Debug File Structure
run: |
echo "Current directory:"
pwd
echo "Directory contents:"
ls -R
echo "GitHub workspace:"
echo $GITHUB_WORKSPACE
echo "GitHub workspace contents:"
ls -R $GITHUB_WORKSPACE
- name: Check Actions Status
run: |
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python .github/scripts/check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -32,7 +32,6 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/

.vscode/all-projects.code-workspace

@@ -0,0 +1,47 @@
{
"folders": [
{
"name": "autogpt",
"path": "../autogpt"
},
{
"name": "benchmark",
"path": "../benchmark"
},
{
"name": "docs",
"path": "../docs"
},
{
"name": "forge",
"path": "../forge"
},
{
"name": "frontend",
"path": "../frontend"
},
{
"name": "autogpt_server",
"path": "../rnd/autogpt_server"
},
{
"name": "autogpt_builder",
"path": "../rnd/autogpt_builder"
},
{
"name": "[root]",
"path": ".."
}
],
"settings": {},
"extensions": {
"recommendations": [
"charliermarsh.ruff",
"dart-code.flutter",
"ms-python.black-formatter",
"ms-python.vscode-pylance",
"prisma.prisma",
"qwtel.sqlite-viewer"
]
}
}


@@ -1,17 +1,43 @@
# AutoGPT: build & use AI agents
# AutoGPT: Build & Use AI Agents
[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)  
[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT)  
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
**AutoGPT** is a generalist LLM-based AI agent that can autonomously accomplish minor tasks.
**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
**Examples**:
## How to Get Started
- Look up and summarize this research paper
- Write marketing copy for food supplements
- Write a blog post detailing the news in AI
https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
### 🧱 AutoGPT Builder
The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
### 💽 AutoGPT Server
The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
### 🐙 Example Agents
Here are two examples of what you can do with AutoGPT:
1. **Reddit Marketing Agent**
- This agent reads comments on Reddit.
- It looks for people asking about your product.
- It then automatically responds to them.
2. **YouTube Content Repurposing Agent**
- This agent subscribes to your YouTube channel.
- When you post a new video, it transcribes it.
- It uses AI to write a search engine optimized blog post.
- Then, it publishes this blog post to your Medium account.
These examples show just a glimpse of what you can achieve with AutoGPT!
---
Our mission is to provide the tools so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -23,11 +49,13 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
**📖 [Documentation](https://docs.agpt.co)**
 | 
**🚀 [Contributing](CONTRIBUTING.md)**
 | 
---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
## 🧱 Building blocks
### 🏗️ Forge
**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.


@@ -11,6 +11,9 @@
## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# GROQ_API_KEY=
## LLAMAFILE_API_BASE - Llamafile API base URL
# LLAMAFILE_API_BASE=http://localhost:8080/v1
## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true


@@ -19,7 +19,7 @@ logger = logging.getLogger(__name__)
class AgentProfileGeneratorConfiguration(SystemConfiguration):
model_classification: LanguageModelClassification = UserConfigurable(
llm_classification: LanguageModelClassification = UserConfigurable(
default=LanguageModelClassification.SMART_MODEL
)
_example_call: object = {
@@ -148,12 +148,12 @@ class AgentProfileGenerator(PromptStrategy):
def __init__(
self,
model_classification: LanguageModelClassification,
llm_classification: LanguageModelClassification,
system_prompt: str,
user_prompt_template: str,
create_agent_function: dict,
):
self._model_classification = model_classification
self._llm_classification = llm_classification
self._system_prompt_message = system_prompt
self._user_prompt_template = user_prompt_template
self._create_agent_function = CompletionModelFunction.model_validate(
@@ -161,8 +161,8 @@ class AgentProfileGenerator(PromptStrategy):
)
@property
def model_classification(self) -> LanguageModelClassification:
return self._model_classification
def llm_classification(self) -> LanguageModelClassification:
return self._llm_classification
def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
system_message = ChatMessage.system(self._system_prompt_message)


@@ -125,7 +125,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
llm_provider,
ActionHistoryConfiguration(
model_name=app_config.fast_llm, max_tokens=self.send_token_limit
llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
),
)
.run_after(WatchdogComponent)
@@ -181,13 +181,19 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
# Get messages
messages = await self.run_pipeline(MessageProvider.get_messages)
include_os_info = (
self.code_executor.config.execute_local_commands
if hasattr(self, "code_executor")
else False
)
prompt: ChatPrompt = self.prompt_strategy.build_prompt(
messages=messages,
task=self.state.task,
ai_profile=self.state.ai_profile,
ai_directives=directives,
commands=self.commands,
include_os_info=self.code_executor.config.execute_local_commands,
include_os_info=include_os_info,
)
logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")


@@ -102,8 +102,8 @@ class CodeFlowAgentPromptStrategy(PromptStrategy):
self.commands: Sequence[Command] = [] # Sequence -> disallow list modification
@property
def model_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.SMART_MODEL # FIXME: dynamic switching
def build_prompt(
self,


@@ -110,8 +110,8 @@ class OneShotAgentPromptStrategy(PromptStrategy):
self.logger = logger
@property
def model_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.SMART_MODEL # FIXME: dynamic switching
def build_prompt(
self,

autogpt/poetry.lock

@@ -327,6 +327,7 @@ gTTS = "^2.3.1"
jinja2 = "^3.1.2"
jsonschema = "*"
litellm = "^1.17.9"
numpy = ">=1.26.0,<2.0.0"
openai = "^1.7.2"
Pillow = "*"
playsound = "~1.2.2"
@@ -345,12 +346,12 @@ sqlalchemy = "^2.0.19"
tenacity = "^8.2.2"
tiktoken = ">=0.7.0,<1.0.0"
toml = "^0.10.2"
uvicorn = ">=0.23.2,<1"
uvicorn = {version = ">=0.23.2,<1", extras = ["standard"]}
watchdog = "4.0.0"
webdriver-manager = "^4.0.1"
[package.extras]
benchmark = ["agbenchmark @ file:///home/reinier/code/agpt/AutoGPT/benchmark"]
benchmark = ["agbenchmark @ file:///Users/czerwinski/Projects/AutoGPT/benchmark"]
[package.source]
type = "directory"

autogpt/scripts/llamafile/.gitignore

@@ -0,0 +1,3 @@
*.llamafile
*.llamafile.exe
llamafile.exe


@@ -0,0 +1,165 @@
#!/usr/bin/env python3
"""
Use llamafile to serve a (quantized) mistral-7b-instruct-v0.2 model
Usage:
cd <repo-root>/autogpt
./scripts/llamafile/serve.py
"""
import os
import platform
import subprocess
from pathlib import Path
from typing import Optional
import click
LLAMAFILE = Path("mistral-7b-instruct-v0.2.Q5_K_M.llamafile")
LLAMAFILE_URL = f"https://huggingface.co/jartine/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/{LLAMAFILE.name}" # noqa
LLAMAFILE_EXE = Path("llamafile.exe")
LLAMAFILE_EXE_URL = "https://github.com/Mozilla-Ocho/llamafile/releases/download/0.8.6/llamafile-0.8.6" # noqa
@click.command()
@click.option(
"--llamafile",
type=click.Path(dir_okay=False, path_type=Path),
help=f"Name of the llamafile to serve. Default: {LLAMAFILE.name}",
)
@click.option("--llamafile_url", help="Download URL for the llamafile you want to use")
@click.option(
"--host", help="Specify the address for the llamafile server to listen on"
)
@click.option(
"--port", type=int, help="Specify the port for the llamafile server to listen on"
)
@click.option(
"--force-gpu",
is_flag=True,
hidden=platform.system() != "Darwin",
help="Run the model using only the GPU (AMD or Nvidia). "
"Otherwise, both CPU and GPU may be (partially) used.",
)
def main(
llamafile: Optional[Path] = None,
llamafile_url: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
force_gpu: bool = False,
):
print(f"type(llamafile) = {type(llamafile)}")
if not llamafile:
if not llamafile_url:
llamafile = LLAMAFILE
else:
llamafile = Path(llamafile_url.rsplit("/", 1)[1])
if llamafile.suffix != ".llamafile":
click.echo(
click.style(
"The given URL does not end with '.llamafile' -> "
"can't get filename from URL. "
"Specify the filename using --llamafile.",
fg="red",
),
err=True,
)
return
if llamafile == LLAMAFILE and not llamafile_url:
llamafile_url = LLAMAFILE_URL
elif llamafile_url != LLAMAFILE_URL:
if not click.prompt(
click.style(
"You seem to have specified a different URL for the default model "
f"({llamafile.name}). Are you sure this is correct? "
"If you want to use a different model, also specify --llamafile.",
fg="yellow",
),
type=bool,
):
return
# Go to autogpt/scripts/llamafile/
os.chdir(Path(__file__).resolve().parent)
on_windows = platform.system() == "Windows"
if not llamafile.is_file():
if not llamafile_url:
click.echo(
click.style(
"Please use --lamafile_url to specify a download URL for "
f"'{llamafile.name}'. "
"This will only be necessary once, so we can download the model.",
fg="red",
),
err=True,
)
return
download_file(llamafile_url, llamafile)
if not on_windows:
llamafile.chmod(0o755)
subprocess.run([llamafile, "--version"], check=True)
if not on_windows:
base_command = [f"./{llamafile}"]
else:
# Windows does not allow executables over 4GB, so we have to download a
# model-less llamafile.exe and run that instead.
if not LLAMAFILE_EXE.is_file():
download_file(LLAMAFILE_EXE_URL, LLAMAFILE_EXE)
LLAMAFILE_EXE.chmod(0o755)
subprocess.run([f".\\{LLAMAFILE_EXE}", "--version"], check=True)
base_command = [f".\\{LLAMAFILE_EXE}", "-m", llamafile]
if host:
base_command.extend(["--host", host])
if port:
base_command.extend(["--port", str(port)])
if force_gpu:
base_command.extend(["-ngl", "9999"])
subprocess.run(
[
*base_command,
"--server",
"--nobrowser",
"--ctx-size",
"0",
"--n-predict",
"1024",
],
check=True,
)
# note: --ctx-size 0 means the prompt context size will be set directly from the
# underlying model configuration. This may cause slow response times or consume
# a lot of memory.
def download_file(url: str, to_file: Path) -> None:
print(f"Downloading {to_file.name}...")
import urllib.request
urllib.request.urlretrieve(url, to_file, reporthook=report_download_progress)
print()
def report_download_progress(chunk_number: int, chunk_size: int, total_size: int):
if total_size != -1:
downloaded_size = chunk_number * chunk_size
percent = min(1, downloaded_size / total_size)
bar = "#" * int(40 * percent)
print(
f"\rDownloading: [{bar:<40}] {percent:.0%}"
f" - {downloaded_size/1e6:.1f}/{total_size/1e6:.1f} MB",
end="",
)
if __name__ == "__main__":
main()


@@ -9,7 +9,7 @@ You can set configuration variables via the `.env` file. If you don't have a `.e
- `ANTHROPIC_API_KEY`: Set this if you want to use Anthropic models with AutoGPT
- `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the AutoGPT root directory. Default: azure.yaml
- `COMPONENT_CONFIG_FILE`: Path to the component configuration file (json) for an agent. Optional
- `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands. See the list of commands from built-in components [here](../components/components.md). Default: None
- `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands. See the list of commands from built-in components [here](../../forge/components/components.md). Default: None
- `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
- `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: `text-embedding-3-small`
@@ -22,6 +22,7 @@ You can set configuration variables via the `.env` file. If you don't have a `.e
- `GROQ_API_KEY`: Set this if you want to use Groq models with AutoGPT
- `HUGGINGFACE_API_TOKEN`: HuggingFace API, to be used for both image generation and audio to text. Optional.
- `HUGGINGFACE_IMAGE_MODEL`: HuggingFace model to use for image generation. Default: CompVis/stable-diffusion-v1-4
- `LLAMAFILE_API_BASE`: Llamafile API base URL. Default: `http://localhost:8080/v1`
- `OPENAI_API_KEY`: Set this if you want to use OpenAI models; [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False


@@ -198,3 +198,66 @@ If you don't know which to choose, you can safely go with OpenAI*.
[groq/api-keys]: https://console.groq.com/keys
[groq/models]: https://console.groq.com/docs/models
### Llamafile
With llamafile you can run models locally, which means there is no need to set up billing,
and data privacy is guaranteed.
For more information and in-depth documentation, check out the [llamafile documentation].
!!! warning
At the moment, llamafile only serves one model at a time. This means you cannot
set `SMART_LLM` and `FAST_LLM` to two different llamafile models.
!!! warning
Due to the issues linked below, llamafiles don't work on WSL. To use a llamafile
with AutoGPT in WSL, you will have to run the llamafile in Windows (outside WSL).
<details>
<summary>Instructions</summary>
1. Get the `llamafile/serve.py` script through one of these two ways:
1. Clone the AutoGPT repo somewhere in your Windows environment,
with the script located at `autogpt/scripts/llamafile/serve.py`
2. Download just the [serve.py] script somewhere in your Windows environment
2. Make sure you have `click` installed: `pip install click`
3. Run `ip route | grep default | awk '{print $3}'` *inside WSL* to get the address
of the WSL host machine
4. Run `python3 serve.py --host {WSL_HOST_ADDR}`, where `{WSL_HOST_ADDR}`
is the address you found at step 3.
If port 8080 is taken, also specify a different port using `--port {PORT}`.
5. In WSL, set `LLAMAFILE_API_BASE=http://{WSL_HOST_ADDR}:8080/v1` in your `.env`.
6. Follow the rest of the regular instructions below.
[serve.py]: https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt/scripts/llamafile/serve.py
</details>
* [Mozilla-Ocho/llamafile#356](https://github.com/Mozilla-Ocho/llamafile/issues/356)
* [Mozilla-Ocho/llamafile#100](https://github.com/Mozilla-Ocho/llamafile/issues/100)
!!! note
These instructions will download and use `mistral-7b-instruct-v0.2.Q5_K_M.llamafile`.
`mistral-7b-instruct-v0.2` is currently the only tested and supported model.
If you want to try other models, you'll have to add them to `LlamafileModelName` in
[`llamafile.py`][forge/llamafile.py].
For optimal results, you may also have to add some logic to adapt the message format,
like `LlamafileProvider._adapt_chat_messages_for_mistral_instruct(..)` does.
1. Run the llamafile serve script:
```shell
python3 ./scripts/llamafile/serve.py
```
The first time this is run, it will download a file containing the model + runtime,
which may take a while and a few gigabytes of disk space.
To force GPU acceleration, add `--force-gpu` to the command.
2. In `.env`, set `SMART_LLM`, `FAST_LLM`, or both to `mistral-7b-instruct-v0.2`.
3. If the server is running on a different address than `http://localhost:8080/v1`,
set `LLAMAFILE_API_BASE` in `.env` to the right base URL.
[llamafile documentation]: https://github.com/Mozilla-Ocho/llamafile#readme
[forge/llamafile.py]: https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/forge/llm/providers/llamafile/llamafile.py
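For orientation, adding another model would look roughly like this sketch of the `LlamafileModelName` enum from [`llamafile.py`][forge/llamafile.py] (the `LLAVA_V15_7B` entry is purely illustrative and untested):
```python
import enum

class LlamafileModelName(str, enum.Enum):
    MISTRAL_7B_INSTRUCT = "mistral-7b-instruct-v0.2"  # tested and supported
    LLAVA_V15_7B = "llava-v1.5-7b"  # hypothetical new entry; untested
```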


@@ -213,5 +213,5 @@ For example, to disable python coding features, set it to the value below:
DISABLED_COMMANDS=execute_python_code,execute_python_file
```
[components]: ./components/components.md
[commands]: ./components/built-in-components.md
[components]: ../forge/components/components.md
[commands]: ../forge/components/built-in-components.md


@@ -40,7 +40,7 @@ Necessary for saving and loading agent's state (preserving session).
| Config variable | Details | Type | Default |
| ---------------- | -------------------------------------- | ----- | ---------------------------------- |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |
[^1] This option is set dynamically during component construction, as opposed to by default inside the configuration model; `{agent_id}` is replaced with the agent's unique identifier.
@@ -84,7 +84,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the
| Config variable | Details | Type | Default |
| ---------------------- | ------------------------------------------------------- | ----------- | ------------------ |
| `model_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `max_tokens` | Maximum number of tokens to use for the history summary | `int` | `1024` |
| `spacy_language_model` | Language model used for summary chunking using spacy | `str` | `"en_core_web_sm"` |
| `full_message_count` | Number of cycles to include unsummarized in the prompt | `int` | `4` |
@@ -178,7 +178,7 @@ Allows agent to read websites using Selenium.
| Config variable | Details | Type | Default |
| ----------------------------- | ------------------------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
| `model_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `web_browser` | Web browser used by Selenium | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"` |
| `headless` | Run browser in headless mode | `bool` | `True` |
| `user_agent` | User agent used by the browser | `str` | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |


@@ -0,0 +1,149 @@
# Contributing to AutoGPT Agent Server: Creating and Testing Blocks
This guide will walk you through the process of creating and testing a new block for the AutoGPT Agent Server, using the WikipediaSummaryBlock as an example.
## Understanding Blocks and Testing
Blocks are reusable components that can be connected to form a graph representing an agent's behavior. Each block has inputs, outputs, and a specific function. Proper testing is crucial to ensure blocks work correctly and consistently.
## Creating and Testing a New Block
Follow these steps to create and test a new block:
1. **Create a new Python file** in the `autogpt_server/blocks` directory. Name it descriptively and use snake_case. For example: `get_wikipedia_summary.py`.
2. **Import necessary modules and create a class that inherits from `Block`**. Make sure to include all necessary imports for your block.
Every block should contain the following:
```python
from autogpt_server.data.block import Block, BlockSchema, BlockOutput
```
Example for the Wikipedia summary block:
```python
from autogpt_server.data.block import Block, BlockSchema, BlockOutput
from autogpt_server.utils.get_request import GetRequest
import requests
class WikipediaSummaryBlock(Block, GetRequest):
# Block implementation will go here
```
3. **Define the input and output schemas** using `BlockSchema`. These schemas specify the data structure that the block expects to receive (input) and produce (output).
- The input schema defines the structure of the data the block will process. Each field in the schema represents a required piece of input data.
- The output schema defines the structure of the data the block will return after processing. Each field in the schema represents a piece of output data.
Example:
```python
class Input(BlockSchema):
topic: str # The topic to get the Wikipedia summary for
class Output(BlockSchema):
summary: str # The summary of the topic from Wikipedia
error: str # Any error message if the request fails
```
4. **Implement the `__init__` method, including test data and mocks:**
```python
def __init__(self):
super().__init__(
# Unique ID for the block
# you can generate this with this python one liner
# print(__import__('uuid').uuid4())
id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
input_schema=WikipediaSummaryBlock.Input, # Assign input schema
output_schema=WikipediaSummaryBlock.Output, # Assign output schema
# Provide sample input, output and test mock for testing the block
test_input={"topic": "Artificial Intelligence"},
test_output=("summary", "summary content"),
test_mock={"get_request": lambda url, json: {"extract": "summary content"}},
)
```
- `id`: A unique identifier for the block.
- `input_schema` and `output_schema`: Define the structure of the input and output data.
Let's break down the testing components:
- `test_input`: This is a sample input that will be used to test the block. It should be a valid input according to your Input schema.
- `test_output`: This is the expected output when running the block with the `test_input`. It should match your Output schema. For non-deterministic outputs, or when you only want to assert the type, you can use Python types instead of specific values: for example, `("summary", str)` asserts that the output key is "summary" and its value is a string.
- `test_mock`: This is crucial for blocks that make network calls. It provides a mock function that replaces the actual network call during testing.
In this case, we're mocking the `get_request` method to always return a dictionary with an 'extract' key, simulating a successful API response. This allows us to test the block's logic without making actual network requests, which could be slow, unreliable, or rate-limited.
5. **Implement the `run` method with error handling.** This should contain the main logic of the block:
```python
def run(self, input_data: Input) -> BlockOutput:
try:
topic = input_data.topic
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
response = self.get_request(url, json=True)
yield "summary", response['extract']
except requests.exceptions.HTTPError as http_err:
yield "error", f"HTTP error occurred: {http_err}"
except requests.RequestException as e:
yield "error", f"Request to Wikipedia failed: {e}"
except KeyError as e:
yield "error", f"Error parsing Wikipedia response: {e}"
```
- **Try block**: Contains the main logic to fetch and process the Wikipedia summary.
- **API request**: Send a GET request to the Wikipedia API.
- **Error handling**: Handle various exceptions that might occur during the API request and data processing.
- **Yield**: Use `yield` to output the results.
## Key Points to Remember
- **Unique ID**: Give your block a unique ID in the `__init__` method.
- **Input and Output Schemas**: Define clear input and output schemas.
- **Error Handling**: Implement error handling in the `run` method.
- **Output Results**: Use `yield` to output results in the `run` method.
- **Testing**: Provide test input and output in the `__init__` method for automatic testing.
## Understanding the Testing Process
The testing of blocks is handled by `test_block.py`, which does the following:
1. It calls the block with the provided `test_input`.
2. If a `test_mock` is provided, it temporarily replaces the specified methods with the mock functions.
3. It then asserts that the output matches the `test_output`.
For the WikipediaSummaryBlock:
- The test will call the block with the topic "Artificial Intelligence".
- Instead of making a real API call, it will use the mock function, which returns `{"extract": "summary content"}`.
- It will then check if the output key is "summary" and its value is a string.
This approach allows us to test the block's logic comprehensively without relying on external services, while also accommodating non-deterministic outputs.
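To make this flow concrete, here is a minimal sketch of such a harness. This is not the actual `test_block.py` implementation; the attribute names (`test_mock`, `test_input`, `test_output`, `input_schema`) mirror the constructor arguments shown above, and the type-vs-value check is an assumption based on the description:
```python
def run_block_test(block):
    # Temporarily replace the mocked methods (e.g. get_request) with the mocks
    originals = {name: getattr(block, name) for name in block.test_mock}
    for name, mock_fn in block.test_mock.items():
        setattr(block, name, mock_fn)
    try:
        expected_key, expected = block.test_output
        # Call the block with the provided test_input
        for key, value in block.run(block.input_schema(**block.test_input)):
            assert key == expected_key
            # A type in test_output means "assert the type", not an exact value
            if isinstance(expected, type):
                assert isinstance(value, expected)
            else:
                assert value == expected
    finally:  # restore the real methods
        for name, original in originals.items():
            setattr(block, name, original)
```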
## Tips for Effective Block Testing
1. **Provide realistic test_input**: Ensure your test input covers typical use cases.
2. **Define appropriate test_output**:
- For deterministic outputs, use specific expected values.
- For non-deterministic outputs or when only the type matters, use Python types (e.g., `str`, `int`, `dict`).
- You can mix specific values and types, e.g., `("key1", str), ("key2", 42)`.
3. **Use test_mock for network calls**: This prevents tests from failing due to network issues or API changes.
4. **Consider omitting test_mock for blocks without external dependencies**: If your block doesn't make network calls or use external resources, you might not need a mock.
5. **Consider edge cases**: Include tests for potential error conditions in your `run` method.
6. **Update tests when changing block behavior**: If you modify your block, ensure the tests are updated accordingly.
By following these steps, you can create new blocks that extend the functionality of the AutoGPT Agent Server.


@@ -5,6 +5,9 @@ docs_dir: content
nav:
- Home: index.md
- The AutoGPT Server 🆕:
- Build your own Blocks: server/new_blocks.md
- AutoGPT Agent:
- Introduction: AutoGPT/index.md
- Setup:
@@ -40,7 +43,7 @@ nav:
- Readme: https://github.com/Significant-Gravitas/AutoGPT/blob/master/frontend/README.md
- Docs: docs/index.md
# - Challenges:
# - Introduction: challenges/introduction.md
# - List of Challenges:


@@ -116,7 +116,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"github_username": null
},
"ActionHistoryConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"max_tokens": 1024,
"spacy_language_model": "en_core_web_sm"
},
@@ -129,7 +129,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"duckduckgo_max_attempts": 3
},
"WebSeleniumConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"web_browser": "chrome",
"headless": true,
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",


@@ -16,7 +16,7 @@ from .model import ActionResult, AnyProposal, Episode, EpisodicActionHistory
class ActionHistoryConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3
"""Name of the llm model used to compress the history"""
max_tokens: int = 1024
"""Maximum number of tokens to use up with generated history messages"""
@@ -97,7 +97,7 @@ class ActionHistoryComponent(
async def after_execute(self, result: ActionResult) -> None:
self.event_history.register_result(result)
await self.event_history.handle_compression(
self.llm_provider, self.config.model_name, self.config.spacy_language_model
self.llm_provider, self.config.llm_name, self.config.spacy_language_model
)
@staticmethod


@@ -55,7 +55,7 @@ class BrowsingError(CommandExecutionError):
class WebSeleniumConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3
"""Name of the llm model used to read websites"""
web_browser: Literal["chrome", "firefox", "safari", "edge"] = "chrome"
"""Web browser used by Selenium"""
@@ -164,7 +164,7 @@ class WebSeleniumComponent(
elif get_raw_content:
if (
output_tokens := self.llm_provider.count_tokens(
text, self.config.model_name
text, self.config.llm_name
)
) > MAX_RAW_CONTENT_LENGTH:
oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1)
@@ -382,7 +382,7 @@ class WebSeleniumComponent(
text,
topics_of_interest=topics_of_interest,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return "\n".join(f"* {i}" for i in information)
@@ -391,7 +391,7 @@ class WebSeleniumComponent(
text,
question=question,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return result


@@ -10,7 +10,7 @@ from .schema import ChatPrompt, LanguageModelClassification
class PromptStrategy(abc.ABC):
@property
@abc.abstractmethod
def model_classification(self) -> LanguageModelClassification:
def llm_classification(self) -> LanguageModelClassification:
...
@abc.abstractmethod


@@ -236,7 +236,7 @@ class BaseOpenAIChatProvider(
tool_calls=tool_calls or None,
),
parsed_result=parsed_result,
model_info=self.CHAT_MODELS[model_name],
llm_info=self.CHAT_MODELS[model_name],
prompt_tokens_used=t_input,
completion_tokens_used=t_output,
)
@@ -469,7 +469,7 @@ class BaseOpenAIEmbeddingProvider(
return EmbeddingModelResponse(
embedding=embedding_parser(response.data[0].embedding),
model_info=self.EMBEDDING_MODELS[model_name],
llm_info=self.EMBEDDING_MODELS[model_name],
prompt_tokens_used=response.usage.prompt_tokens,
)


@@ -330,7 +330,7 @@ class AnthropicProvider(BaseChatModelProvider[AnthropicModelName, AnthropicSetti
return ChatModelResponse(
response=assistant_msg,
parsed_result=parsed_result,
model_info=ANTHROPIC_CHAT_MODELS[model_name],
llm_info=ANTHROPIC_CHAT_MODELS[model_name],
prompt_tokens_used=t_input,
completion_tokens_used=t_output,
)


@@ -0,0 +1,36 @@
# Llamafile Integration Notes
Tested with:
* Python 3.11
* Apple M2 Pro (32 GB), macOS 14.2.1
* quantized mistral-7b-instruct-v0.2
## Setup
Download a `mistral-7b-instruct-v0.2` llamafile:
```shell
wget -nc https://huggingface.co/jartine/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/mistral-7b-instruct-v0.2.Q5_K_M.llamafile
chmod +x mistral-7b-instruct-v0.2.Q5_K_M.llamafile
./mistral-7b-instruct-v0.2.Q5_K_M.llamafile --version
```
Run the llamafile server:
```shell
LLAMAFILE="./mistral-7b-instruct-v0.2.Q5_K_M.llamafile"
"${LLAMAFILE}" \
--server \
--nobrowser \
--ctx-size 0 \
--n-predict 1024
# note: ctx-size=0 means the prompt context size will be set directly from the
# underlying model configuration. This may cause slow response times or consume
# a lot of memory.
```
## TODOs
* `SMART_LLM`/`FAST_LLM` configuration: Currently, the llamafile server only serves one model at a time. However, there's no reason you can't start multiple llamafile servers on different ports. To support using different models for `smart_llm` and `fast_llm`, you could implement config vars like `LLAMAFILE_SMART_LLM_URL` and `LLAMAFILE_FAST_LLM_URL` that point to different llamafile servers (one serving a 'big model' and one serving a 'fast model').
* Authorization: the `serve.sh` script does not set up any authorization for the llamafile server; this can be turned on by adding the arg `--api-key <some-key>` to the server startup command. However, I haven't tested whether the integration with AutoGPT works when this feature is turned on.
* Test with other models
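As a rough sketch of the first TODO (nothing here exists yet; the env var names are the ones proposed above, and the default ports are arbitrary):
```python
import os

# Hypothetical config vars pointing at two separately started llamafile
# servers: one serving a 'big model', one serving a 'fast model'.
LLAMAFILE_SMART_LLM_URL = os.getenv("LLAMAFILE_SMART_LLM_URL", "http://localhost:8080/v1")
LLAMAFILE_FAST_LLM_URL = os.getenv("LLAMAFILE_FAST_LLM_URL", "http://localhost:8081/v1")

def llamafile_base_url(role: str) -> str:
    """Pick the server base URL for a given role ('smart' or 'fast')."""
    return LLAMAFILE_SMART_LLM_URL if role == "smart" else LLAMAFILE_FAST_LLM_URL
```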


@@ -0,0 +1,17 @@
from .llamafile import (
LLAMAFILE_CHAT_MODELS,
LLAMAFILE_EMBEDDING_MODELS,
LlamafileCredentials,
LlamafileModelName,
LlamafileProvider,
LlamafileSettings,
)
__all__ = [
"LLAMAFILE_CHAT_MODELS",
"LLAMAFILE_EMBEDDING_MODELS",
"LlamafileCredentials",
"LlamafileModelName",
"LlamafileProvider",
"LlamafileSettings",
]


@@ -0,0 +1,351 @@
import enum
import logging
import re
from pathlib import Path
from typing import Any, Iterator, Optional, Sequence
import requests
from openai.types.chat import (
ChatCompletionMessage,
ChatCompletionMessageParam,
CompletionCreateParams,
)
from pydantic import SecretStr
from forge.json.parsing import json_loads
from forge.models.config import UserConfigurable
from .._openai_base import BaseOpenAIChatProvider
from ..schema import (
AssistantToolCall,
AssistantToolCallDict,
ChatMessage,
ChatModelInfo,
CompletionModelFunction,
ModelProviderConfiguration,
ModelProviderCredentials,
ModelProviderName,
ModelProviderSettings,
ModelTokenizer,
)
class LlamafileModelName(str, enum.Enum):
MISTRAL_7B_INSTRUCT = "mistral-7b-instruct-v0.2"
LLAMAFILE_CHAT_MODELS = {
info.name: info
for info in [
ChatModelInfo(
name=LlamafileModelName.MISTRAL_7B_INSTRUCT,
provider_name=ModelProviderName.LLAMAFILE,
prompt_token_cost=0.0,
completion_token_cost=0.0,
max_tokens=32768,
has_function_call_api=False,
),
]
}
LLAMAFILE_EMBEDDING_MODELS = {}
class LlamafileConfiguration(ModelProviderConfiguration):
# TODO: implement 'seed' across forge.llm.providers
seed: Optional[int] = None
class LlamafileCredentials(ModelProviderCredentials):
api_key: Optional[SecretStr] = SecretStr("sk-no-key-required")
api_base: SecretStr = UserConfigurable( # type: ignore
default=SecretStr("http://localhost:8080/v1"), from_env="LLAMAFILE_API_BASE"
)
def get_api_access_kwargs(self) -> dict[str, str]:
return {
k: v.get_secret_value()
for k, v in {
"api_key": self.api_key,
"base_url": self.api_base,
}.items()
if v is not None
}
class LlamafileSettings(ModelProviderSettings):
configuration: LlamafileConfiguration # type: ignore
credentials: Optional[LlamafileCredentials] = None # type: ignore
class LlamafileTokenizer(ModelTokenizer[int]):
def __init__(self, credentials: LlamafileCredentials):
self._credentials = credentials
@property
def _tokenizer_base_url(self):
# The OpenAI-chat-compatible base url should look something like
# 'http://localhost:8080/v1' but the tokenizer endpoint is
# 'http://localhost:8080/tokenize'. So here we just strip off the '/v1'.
api_base = self._credentials.api_base.get_secret_value()
return api_base.removesuffix("/v1")
def encode(self, text: str) -> list[int]:
response = requests.post(
url=f"{self._tokenizer_base_url}/tokenize", json={"content": text}
)
response.raise_for_status()
return response.json()["tokens"]
def decode(self, tokens: list[int]) -> str:
response = requests.post(
url=f"{self._tokenizer_base_url}/detokenize", json={"tokens": tokens}
)
response.raise_for_status()
return response.json()["content"]
class LlamafileProvider(
BaseOpenAIChatProvider[LlamafileModelName, LlamafileSettings],
# TODO: add and test support for embedding models
# BaseOpenAIEmbeddingProvider[LlamafileModelName, LlamafileSettings],
):
EMBEDDING_MODELS = LLAMAFILE_EMBEDDING_MODELS
CHAT_MODELS = LLAMAFILE_CHAT_MODELS
MODELS = {**CHAT_MODELS, **EMBEDDING_MODELS}
default_settings = LlamafileSettings(
name="llamafile_provider",
description=(
"Provides chat completion and embedding services "
"through a llamafile instance"
),
configuration=LlamafileConfiguration(),
)
_settings: LlamafileSettings
_credentials: LlamafileCredentials
_configuration: LlamafileConfiguration
async def get_available_models(self) -> Sequence[ChatModelInfo[LlamafileModelName]]:
_models = (await self._client.models.list()).data
# note: at the moment, llamafile only serves one model at a time (so this
# list will only ever have one value). however, in the future, llamafile
# may support multiple models, so leaving this method as-is for now.
self._logger.debug(f"Retrieved llamafile models: {_models}")
clean_model_ids = [clean_model_name(m.id) for m in _models]
self._logger.debug(f"Cleaned llamafile model IDs: {clean_model_ids}")
return [
LLAMAFILE_CHAT_MODELS[id]
for id in clean_model_ids
if id in LLAMAFILE_CHAT_MODELS
]
def get_tokenizer(self, model_name: LlamafileModelName) -> LlamafileTokenizer:
return LlamafileTokenizer(self._credentials)
def count_message_tokens(
self,
messages: ChatMessage | list[ChatMessage],
model_name: LlamafileModelName,
) -> int:
if isinstance(messages, ChatMessage):
messages = [messages]
if model_name == LlamafileModelName.MISTRAL_7B_INSTRUCT:
# For mistral-instruct, num added tokens depends on if the message
# is a prompt/instruction or an assistant-generated message.
# - prompt gets [INST], [/INST] added and the first instruction
# begins with '<s>' ('beginning-of-sentence' token).
# - assistant-generated messages get '</s>' added
# see: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
#
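# Worked example: two user/system messages plus one assistant message add
# 1 ('<s>') + 2*5 ([INST]/[/INST]) + 1 ('</s>') = 12 tokens on top of the
# messages' content tokens.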
prompt_added = 1 # one for '<s>' token
assistant_num_added = 0
ntokens = 0
for message in messages:
if (
message.role == ChatMessage.Role.USER
# note that 'system' messages will get converted
# to 'user' messages before being sent to the model
or message.role == ChatMessage.Role.SYSTEM
):
# 5 tokens for [INST], [/INST], which actually get
# tokenized into "[, INST, ]" and "[, /, INST, ]"
# by the mistral tokenizer
prompt_added += 5
elif message.role == ChatMessage.Role.ASSISTANT:
assistant_num_added += 1 # for </s>
else:
raise ValueError(
f"{model_name} does not support role: {message.role}"
)
ntokens += self.count_tokens(message.content, model_name)
total_token_count = prompt_added + assistant_num_added + ntokens
return total_token_count
else:
raise NotImplementedError(
f"count_message_tokens not implemented for model {model_name}"
)
def _get_chat_completion_args(
self,
prompt_messages: list[ChatMessage],
model: LlamafileModelName,
functions: list[CompletionModelFunction] | None = None,
max_output_tokens: int | None = None,
**kwargs,
) -> tuple[
list[ChatCompletionMessageParam], CompletionCreateParams, dict[str, Any]
]:
messages, completion_kwargs, parse_kwargs = super()._get_chat_completion_args(
prompt_messages, model, functions, max_output_tokens, **kwargs
)
if model == LlamafileModelName.MISTRAL_7B_INSTRUCT:
messages = self._adapt_chat_messages_for_mistral_instruct(messages)
if "seed" not in kwargs and self._configuration.seed is not None:
completion_kwargs["seed"] = self._configuration.seed
# Convert all messages with content blocks to simple text messages
for message in messages:
if isinstance(content := message.get("content"), list):
message["content"] = "\n\n".join(
b["text"]
for b in content
if b["type"] == "text"
# FIXME: add support for images through image_data completion kwarg
)
return messages, completion_kwargs, parse_kwargs
def _adapt_chat_messages_for_mistral_instruct(
self, messages: list[ChatCompletionMessageParam]
) -> list[ChatCompletionMessageParam]:
"""
Munge the messages to be compatible with the mistral-7b-instruct chat
template, which:
- only supports 'user' and 'assistant' roles.
- expects messages to alternate between user/assistant roles.
See details here:
https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2#instruction-format
"""
adapted_messages: list[ChatCompletionMessageParam] = []
for message in messages:
# convert 'system' role to 'user' role as mistral-7b-instruct does
# not support 'system'
if message["role"] == ChatMessage.Role.SYSTEM:
message["role"] = ChatMessage.Role.USER
if (
len(adapted_messages) == 0
or message["role"] != (last_message := adapted_messages[-1])["role"]
):
adapted_messages.append(message)
else:
if not message.get("content"):
continue
# if the curr message has the same role as the previous one,
# concat the current message content to the prev message
if message["role"] == "user" and last_message["role"] == "user":
# user messages can contain other types of content blocks
if not isinstance(last_message["content"], list):
last_message["content"] = [
{"type": "text", "text": last_message["content"]}
]
last_message["content"].extend(
message["content"]
if isinstance(message["content"], list)
else [{"type": "text", "text": message["content"]}]
)
elif message["role"] != "user" and last_message["role"] != "user":
last_message["content"] = (
(last_message.get("content") or "")
+ "\n\n"
+ (message.get("content") or "")
).strip()
return adapted_messages
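# Illustration (an assumed example, consistent with the logic above):
#   [system("Be concise"), user("Hi"), user("Ready?"), assistant("Yes")]
# is adapted to
#   [user("Be concise" + "Hi" + "Ready?"), assistant("Yes")]
# i.e. consecutive same-role messages are merged so that roles strictly
# alternate user/assistant, as the mistral-instruct template requires.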
def _parse_assistant_tool_calls(
self,
assistant_message: ChatCompletionMessage,
compat_mode: bool = False,
**kwargs,
):
tool_calls: list[AssistantToolCall] = []
parse_errors: list[Exception] = []
if compat_mode and assistant_message.content:
try:
tool_calls = list(
_tool_calls_compat_extract_calls(assistant_message.content)
)
except Exception as e:
parse_errors.append(e)
return tool_calls, parse_errors
def clean_model_name(model_file: str) -> str:
"""
Clean up model names:
1. Remove file extension
2. Remove quantization info
Examples:
```
raw: 'mistral-7b-instruct-v0.2.Q5_K_M.gguf'
clean: 'mistral-7b-instruct-v0.2'
raw: '/Users/kate/models/mistral-7b-instruct-v0.2.Q5_K_M.gguf'
clean: 'mistral-7b-instruct-v0.2'
raw: 'llava-v1.5-7b-q4.gguf'
clean: 'llava-v1.5-7b'
```
"""
name_without_ext = Path(model_file).name.rsplit(".", 1)[0]
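# Keep leading name segments; stop at a '.'/'-' segment that looks like a
# quantization tag (e.g. Q5, q4, F16, BF16).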
name_without_Q = re.match(
r"^[a-zA-Z0-9]+([.\-](?!([qQ]|B?F)\d{1,2})[a-zA-Z0-9]+)*",
name_without_ext,
)
return name_without_Q.group() if name_without_Q else name_without_ext
def _tool_calls_compat_extract_calls(response: str) -> Iterator[AssistantToolCall]:
import re
import uuid
logging.debug(f"Trying to extract tool calls from response:\n{response}")
response = response.strip() # strip off any leading/trailing whitespace
if response.startswith("```"):
# attempt to remove any extraneous markdown artifacts like "```json"
response = response.strip("```")
if response.startswith("json"):
response = response.strip("json")
response = response.strip() # any remaining whitespace
if response[0] == "[":
tool_calls: list[AssistantToolCallDict] = json_loads(response)
else:
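# otherwise, look for a fenced ``` or ```tool_calls block at the end of the response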
block = re.search(r"```(?:tool_calls)?\n(.*)\n```\s*$", response, re.DOTALL)
if not block:
raise ValueError("Could not find tool_calls block in response")
tool_calls: list[AssistantToolCallDict] = json_loads(block.group(1))
for t in tool_calls:
t["id"] = str(uuid.uuid4())
# t["function"]["arguments"] = str(t["function"]["arguments"]) # HACK
yield AssistantToolCall.parse_obj(t)


@@ -16,6 +16,7 @@ from pydantic import ValidationError
from .anthropic import ANTHROPIC_CHAT_MODELS, AnthropicModelName, AnthropicProvider
from .groq import GROQ_CHAT_MODELS, GroqModelName, GroqProvider
from .llamafile import LLAMAFILE_CHAT_MODELS, LlamafileModelName, LlamafileProvider
from .openai import OPEN_AI_CHAT_MODELS, OpenAIModelName, OpenAIProvider
from .schema import (
AssistantChatMessage,
@@ -33,10 +34,15 @@ from .schema import (
_T = TypeVar("_T")
ModelName = AnthropicModelName | GroqModelName | OpenAIModelName
ModelName = AnthropicModelName | GroqModelName | LlamafileModelName | OpenAIModelName
EmbeddingModelProvider = OpenAIProvider
CHAT_MODELS = {**ANTHROPIC_CHAT_MODELS, **GROQ_CHAT_MODELS, **OPEN_AI_CHAT_MODELS}
CHAT_MODELS = {
**ANTHROPIC_CHAT_MODELS,
**GROQ_CHAT_MODELS,
**LLAMAFILE_CHAT_MODELS,
**OPEN_AI_CHAT_MODELS,
}
class MultiProvider(BaseChatModelProvider[ModelName, ModelProviderSettings]):
@@ -128,35 +134,52 @@ class MultiProvider(BaseChatModelProvider[ModelName, ModelProviderSettings]):
def get_available_providers(self) -> Iterator[ChatModelProvider]:
for provider_name in ModelProviderName:
self._logger.debug(f"Checking if {provider_name} is available...")
try:
yield self._get_provider(provider_name)
except Exception:
self._logger.debug(f"{provider_name} is available!")
except ValueError:
pass
def _get_provider(self, provider_name: ModelProviderName) -> ChatModelProvider:
_provider = self._provider_instances.get(provider_name)
if not _provider:
Provider = self._get_provider_class(provider_name)
self._logger.debug(
f"{Provider.__name__} not yet in cache, trying to init..."
)
settings = Provider.default_settings.model_copy(deep=True)
settings.budget = self._budget
settings.configuration.extra_request_headers.update(
self._settings.configuration.extra_request_headers
)
if settings.credentials is None:
credentials_field = settings.model_fields["credentials"]
Credentials = get_args( # Union[Credentials, None] -> Credentials
credentials_field.annotation
)[0]
self._logger.debug(f"Loading {Credentials.__name__}...")
try:
Credentials = get_args( # Union[Credentials, None] -> Credentials
settings.model_fields["credentials"].annotation
)[0]
settings.credentials = Credentials.from_env()
except ValidationError as e:
raise ValueError(
f"{provider_name} is unavailable: can't load credentials"
) from e
if credentials_field.is_required():
self._logger.debug(
f"Could not load (required) {Credentials.__name__}"
)
raise ValueError(
f"{Provider.__name__} is unavailable: "
"can't load credentials"
) from e
self._logger.debug(
f"Could not load {Credentials.__name__}, continuing without..."
)
self._provider_instances[provider_name] = _provider = Provider(
settings=settings, logger=self._logger # type: ignore
)
_provider._budget = self._budget # Object binding not preserved by Pydantic
self._logger.debug(f"Initialized {Provider.__name__}!")
return _provider
@classmethod
@@ -167,6 +190,7 @@ class MultiProvider(BaseChatModelProvider[ModelName, ModelProviderSettings]):
return {
ModelProviderName.ANTHROPIC: AnthropicProvider,
ModelProviderName.GROQ: GroqProvider,
ModelProviderName.LLAMAFILE: LlamafileProvider,
ModelProviderName.OPENAI: OpenAIProvider,
}[provider_name]
except KeyError:
@@ -176,4 +200,10 @@ class MultiProvider(BaseChatModelProvider[ModelName, ModelProviderSettings]):
return f"{self.__class__.__name__}()"
ChatModelProvider = AnthropicProvider | GroqProvider | OpenAIProvider | MultiProvider
ChatModelProvider = (
AnthropicProvider
| GroqProvider
| LlamafileProvider
| OpenAIProvider
| MultiProvider
)


@@ -56,6 +56,7 @@ class ModelProviderName(str, enum.Enum):
OPENAI = "openai"
ANTHROPIC = "anthropic"
GROQ = "groq"
LLAMAFILE = "llamafile"
class ChatMessage(BaseModel):
@@ -227,7 +228,7 @@ class ModelResponse(BaseModel):
prompt_tokens_used: int
completion_tokens_used: int
model_info: ModelInfo
llm_info: ModelInfo
class ModelProviderConfiguration(SystemConfiguration):

forge/poetry.lock

@@ -5642,41 +5642,41 @@ files = [
[[package]]
name = "spacy"
version = "3.7.4"
version = "3.7.5"
description = "Industrial-strength Natural Language Processing (NLP) in Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "spacy-3.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0f748625192f573c07ddea5fcd324919dbfbf4f4a2f7a1fc731e6dcba7321ea1"},
{file = "spacy-3.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6288dca7b3a5489b3d7ce68404bc432ca22f826c662a12af47ef7bdb264307fb"},
{file = "spacy-3.7.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef59db99b12a72d2646be3888d87f94c59e11cd07adc2f50a8130e83f07eb1cf"},
{file = "spacy-3.7.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f07477a4027711c22b3865e78dc9076335c03fcf318a6736159bf07e2a923125"},
{file = "spacy-3.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:787ce42a837f7edfbd4185356eea893a81b7dd75743d0047f2b9bf179775f970"},
{file = "spacy-3.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e82b9da21853d4aee46811804dc7e136895f087fda25c7585172d95eb9b70833"},
{file = "spacy-3.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07ffedf51899441070fb70432f8f873696f39e0e31c9ce7403101c459f8a1281"},
{file = "spacy-3.7.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba57bcc111eca7b086ee33a9636df775cfd4b14302f7d0ffbc11e95ac0fb3f0e"},
{file = "spacy-3.7.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7580d1565f4d1ccbee9a18531f993a5b9b37ced96f145153dd4e98ceec607a55"},
{file = "spacy-3.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:df99c6f0085b1ec8e88beb5fd96d4371cef6fc19c202c41fc4fadc2afd55a157"},
{file = "spacy-3.7.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b982ebab417189346acb4722637c573830d62e157ba336c3eb6c417249344be1"},
{file = "spacy-3.7.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e7c29e152d8ea060af60da9410fa8ef038f3c9068a206905ee5c704de78f6e87"},
{file = "spacy-3.7.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:023c9a008328f55c4717c56c4f8a28073b9961547f7d38a9405c967a52e66d59"},
{file = "spacy-3.7.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1969d3d0fd0c811b7485438460f0ae8cfe16d46b54bcb8d1c26e70914e67e3d"},
{file = "spacy-3.7.4-cp312-cp312-win_amd64.whl", hash = "sha256:040f7df5096c817450820eaaa426d54ed266254d16974e9a707a32f5b0f139ae"},
{file = "spacy-3.7.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6757e8fbfd35dc0ed830296d5756f46d5b8d4b0353925dbe2f9aa33b82c5308"},
{file = "spacy-3.7.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c500c1bad9e0488814a75077089aeef64a6b520ae8131578f266a08168106fa3"},
{file = "spacy-3.7.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c992e2c5c0cd06c7f3e74fe8d758885117090013931c7938277d1421660bf71f"},
{file = "spacy-3.7.4-cp37-cp37m-win_amd64.whl", hash = "sha256:2463c56ab1378f2b9a675340a2e3dfb618989d0da8cdce06429bc9b1dad4f294"},
{file = "spacy-3.7.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b43e92edfa99f34dbb9dd30175f41158d20945e3179055d0071fee19394add96"},
{file = "spacy-3.7.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c26a81d33c93e4a8e3360d61dcce0802fb886de79f666a487ea5abbd3ce4b30b"},
{file = "spacy-3.7.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d7910ca7a91bf423febd8a9a10ca6a4cfcb5c99abdec79df1eb7b67ea3e3c90"},
{file = "spacy-3.7.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b16768b9e5c350b8a383a6bd84cd0481ccdf10ae6231f568598890638065f69"},
{file = "spacy-3.7.4-cp38-cp38-win_amd64.whl", hash = "sha256:ed99fb176979b1e3cf6830161f8e881beae54e80147b05fca31d9a67cb12fbca"},
{file = "spacy-3.7.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ca8112330982dbeef125cc5eb40e0349493055835a0ebe29028a0953a25d8522"},
{file = "spacy-3.7.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:977f37493d7cf0b5dca155f0450d47890378703283c29919cdcc220db994a775"},
{file = "spacy-3.7.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ad5e931c294d100ec3edb40e40f2722ef505cea16312839dd6467e81d665740"},
{file = "spacy-3.7.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11ebf6054cd3ec3638801d7ff9b709e32fb9c15512b347b489bfe2ccb1102c9f"},
{file = "spacy-3.7.4-cp39-cp39-win_amd64.whl", hash = "sha256:f5b930753027ac599f70bb7e77d6a2256191fe582e6f3f0cd624d88f6c279fa4"},
{file = "spacy-3.7.4.tar.gz", hash = "sha256:525f2ced2e40761562c8cace93ef6a1e6e8c483f27bd564bc1b15f608efbe85b"},
{file = "spacy-3.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8002897701429ee2ab5ff6921ae43560f4cd17184cb1e10dad761901c12dcb85"},
{file = "spacy-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43acd19efc845e9126b61a05ed7508a0aff509e96e15563f30f810c19e636b7c"},
{file = "spacy-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f044522b1271ea54718dc43b6f593b5dad349cd31b3827764c501529b599e09a"},
{file = "spacy-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a7dbfbca42c1c128fefa6832631fe49e11c850e963af99229f14e2d0ae94f34"},
{file = "spacy-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:2a21b2a1e1e5d10d15c6f75990b7341d0fc9b454083dfd4222fdd75b9164831c"},
{file = "spacy-3.7.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cd93c34bf2a02bbed7df73d42aed8df5e3eb9688c4ea84ec576f740ba939cce5"},
{file = "spacy-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:190ba0032a5efdb138487c587c0ebb7a98f86adb917f464b252ee8766b8eec4a"},
{file = "spacy-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38de1c9bbb73b8cdfea2dd6e57450f093c1a1af47515870c1c8640b85b35ab16"},
{file = "spacy-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dad4853950a2fe6c7a0bdfd791a762d1f8cedd2915c4ae41b2e0ca3a850eefc"},
{file = "spacy-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:4e00d076871af784c2e43185a71ee676b58893853a05c5b81717b8af2b666c07"},
{file = "spacy-3.7.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bf54c3c2425428b328b53a65913d47eb4cb27a1429aa4e8ed979ffc97d4663e0"},
{file = "spacy-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4145cea7f9814fa7d86b2028c2dd83e02f13f80d5ac604a400b2f7d7b26a0e8c"},
{file = "spacy-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:262f8ebb71f7ed5ffe8e4f384b2594b7a296be50241ce9fbd9277b5da2f46f38"},
{file = "spacy-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:faa1e2b6234ae33c0b1f8dfa5a8dcb66fb891f19231725dfcff4b2666125c250"},
{file = "spacy-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:07677e270a6d729453cc04b5e2247a96a86320b8845e6428d9f90f217eff0f56"},
{file = "spacy-3.7.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e207dda0639818e2ef8f12e3df82a526de118cc09082b0eee3053ebcd9f8332"},
{file = "spacy-3.7.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5694dd3b2f6414c18e2a3f31111cd41ffd597e1d614b51c5779f85ff07f08f6c"},
{file = "spacy-3.7.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d211920ff73d68b8febb1d293f10accbd54f2b2228ecd3530548227b750252b1"},
{file = "spacy-3.7.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1171bf4d8541c18a83441be01feb6c735ffc02e9308810cd691c8900a6678cd5"},
{file = "spacy-3.7.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d9108f67675fb2078ed77cda61fd4cfc197f9256c28d35cfd946dcb080190ddc"},
{file = "spacy-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:12fdc01a4391299a47f16915505cc515fd059e71c7239904e216523354eeb9d9"},
{file = "spacy-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f8fbe9f6b9de1bf05d163a9dd88108b8f20b138986e6ed36f960832e3fcab33"},
{file = "spacy-3.7.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d244d524ab5a33530ac5c50fc92c9a41da6c3980f452048b9fc29e1ff1bdd03e"},
{file = "spacy-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:8b493a8b79a7f3754102fa5ef7e2615568a390fec7ea20db49af55e5f0841fcf"},
{file = "spacy-3.7.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fdbb667792d6ca93899645774d1db3fccc327088a92072029be1e4bc25d7cf15"},
{file = "spacy-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4cfb85309e11a39681c9d4941aebb95c1f5e2e3b77a61a5451e2c3849da4b92e"},
{file = "spacy-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b0bf1788ca397eef8e67e9c07cfd9287adac438512dd191e6e6ca0f36357201"},
{file = "spacy-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:591d90d8504e9bd5be5b482be7c6d6a974afbaeb62c3181e966f4e407e0ab300"},
{file = "spacy-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:713b56fe008c79df01617f3602a0b7e523292211337eb999bdffb910ea1f4825"},
{file = "spacy-3.7.5.tar.gz", hash = "sha256:a648c6cbf2acc7a55a69ee9e7fa4f22bdf69aa828a587a1bc5cfff08cf3c2dd3"},
]
[package.dependencies]
@@ -5691,15 +5691,14 @@ preshed = ">=3.0.2,<3.1.0"
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0"
requests = ">=2.13.0,<3.0.0"
setuptools = "*"
smart-open = ">=5.2.1,<7.0.0"
spacy-legacy = ">=3.0.11,<3.1.0"
spacy-loggers = ">=1.0.0,<2.0.0"
srsly = ">=2.4.3,<3.0.0"
thinc = ">=8.2.2,<8.3.0"
tqdm = ">=4.38.0,<5.0.0"
typer = ">=0.3.0,<0.10.0"
typer = ">=0.3.0,<1.0.0"
wasabi = ">=0.9.1,<1.2.0"
weasel = ">=0.1.0,<0.4.0"
weasel = ">=0.1.0,<0.5.0"
[package.extras]
apple = ["thinc-apple-ops (>=0.1.0.dev0,<1.0.0)"]
@@ -7085,4 +7084,4 @@ benchmark = ["agbenchmark"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "7523abd672967cbe924f045a00bf519ee08c8537fdf2f2191d2928201497d7b7"
content-hash = "acca6b5d67a64527f1d19f61e20a89eb228e066a80cd7701fd59cf19bb267eb8"


@@ -33,6 +33,7 @@ gTTS = "^2.3.1"
jinja2 = "^3.1.2"
jsonschema = "*"
litellm = "^1.17.9"
numpy = ">=1.26.0,<2.0.0"
openai = "^1.7.2"
Pillow = "*"
playsound = "~1.2.2"
@@ -51,7 +52,7 @@ spacy = "^3.0.0"
tenacity = "^8.2.2"
tiktoken = ">=0.7.0,<1.0.0"
toml = "^0.10.2"
uvicorn = ">=0.23.2,<1"
uvicorn = { extras = ["standard"], version = ">=0.23.2,<1" }
watchdog = "4.0.0"
webdriver-manager = "^4.0.1"

rnd/README.md Normal file

@@ -0,0 +1,36 @@
This is a guide to setting up and running the AutoGPT Server and Builder. It covers downloading the necessary files, setting up and starting the server, and testing the system from the Builder front-end.
https://github.com/user-attachments/assets/fd0d0f35-3155-4263-b575-ba3efb126cb4
1. Navigate to the AutoGPT GitHub repository.
2. Click the "Code" button, then select "Download ZIP".
3. Once downloaded, extract the ZIP file to a folder of your choice.
4. Open the extracted folder and navigate to the "rnd" directory.
5. Enter the "autogpt_server" folder.
6. Open a terminal window in this directory.
7. Locate and open the README file in the AutoGPT server folder: [doc](./autogpt_server/README.md#setup).
8. Copy and paste each command from the setup section in the README into your terminal.
- Important: Wait for each command to finish before running the next one.
9. If all commands run without errors, enter the final command: `poetry run app`
10. You should now see the server running in your terminal.
11. Navigate back to the "rnd" folder.
12. Open the "autogpt_builder" folder.
13. Open the README file in this folder: [doc](./autogpt_builder/README.md#getting-started).
14. In your terminal, run the following commands (wait for the install to finish before starting the dev server):
```
npm install
npm run dev
```
15. Once the front-end is running, click the link to navigate to `localhost:3000`.
16. Click on the "Build" option.
17. Add a few blocks to test the functionality.
18. Connect the blocks together.
19. Click "Run".
20. Check your terminal window - you should see that the server has received the request, is processing it, and has executed it.
And there you have it! You've successfully set up and tested AutoGPT.


@@ -1 +1 @@
AGPT_SERVER_URL=http://localhost:8000
AGPT_SERVER_URL=http://localhost:8000/api

rnd/autogpt_builder/.vscode/launch.json vendored Normal file

@@ -0,0 +1,30 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Next.js: debug server-side",
"type": "node-terminal",
"request": "launch",
"command": "yarn dev"
},
{
"name": "Next.js: debug client-side",
"type": "msedge",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Next.js: debug full stack",
"type": "node-terminal",
"request": "launch",
"command": "yarn dev",
"serverReadyAction": {
"pattern": "- Local:.+(https?://.+)",
"uriFormat": "%s",
"action": "debugWithEdge"
}
},
]
}


@@ -0,0 +1,17 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"style": "new-york",
"rsc": true,
"tsx": true,
"tailwind": {
"config": "tailwind.config.ts",
"css": "src/app/globals.css",
"baseColor": "neutral",
"cssVariables": false,
"prefix": ""
},
"aliases": {
"components": "@/components",
"utils": "@/lib/utils"
}
}


@@ -1,4 +1,22 @@
import dotenv from 'dotenv';
// Load environment variables
dotenv.config();
/** @type {import('next').NextConfig} */
const nextConfig = {};
const nextConfig = {
env: {
AGPT_SERVER_URL: process.env.AGPT_SERVER_URL,
},
async redirects() {
return [
{
source: '/',
destination: '/build',
permanent: false,
},
];
},
};
export default nextConfig;
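
With the `env` mapping above, `AGPT_SERVER_URL` is inlined into the client bundle at build time. A minimal sketch of how client code might read it (the fallback value mirrors the `.env` default shown earlier; only the variable name itself is guaranteed by this diff):

```
// Sketch: consuming the env var exposed via next.config.mjs (assumed usage).
const serverUrl: string =
  process.env.AGPT_SERVER_URL ?? "http://localhost:8000/api"; // fallback matches the .env default
```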


@@ -9,11 +9,35 @@
"lint": "next lint"
},
"dependencies": {
"@hookform/resolvers": "^3.9.0",
"@radix-ui/react-avatar": "^1.1.0",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-popover": "^1.1.1",
"@radix-ui/react-slot": "^1.1.0",
"@radix-ui/react-switch": "^1.1.0",
"@radix-ui/react-tooltip": "^1.1.2",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"date-fns": "^3.6.0",
"dotenv": "^16.4.5",
"lucide-react": "^0.407.0",
"moment": "^2.30.1",
"next": "14.2.4",
"next-themes": "^0.3.0",
"react": "^18",
"react-day-picker": "^8.10.1",
"react-dom": "^18",
"react-hook-form": "^7.52.1",
"react-markdown": "^9.0.1",
"react-modal": "^3.16.1",
"reactflow": "^11.11.4"
"reactflow": "^11.11.4",
"recharts": "^2.12.7",
"tailwind-merge": "^2.3.0",
"tailwindcss-animate": "^1.0.7",
"zod": "^3.23.8"
},
"devDependencies": {
"@types/node": "^20",

(Binary image file added: 29 KiB; contents not shown.)

(Binary image file added: 28 KiB; contents not shown.)


@@ -1,72 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.8.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="AUTOgpt_logo" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px"
y="0px" viewBox="0 0 2000 2000" style="enable-background:new 0 0 2000 2000;" xml:space="preserve">
<style type="text/css">
.st0{fill:url(#SVGID_1_);}
.st1{fill:url(#SVGID_00000044859330063917736280000017916509329539228544_);}
.st2{fill:url(#SVGID_00000140714777961496567230000017473346511890493859_);}
.st3{fill:url(#SVGID_00000016043459524955834950000015278934287808704695_);}
.st4{fill:url(#SVGID_00000133526441615091004900000013561443639704575621_);}
.st5{fill:#000030;}
.st6{fill:#669CF6;}
</style>
<g>
<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="17241.2793" y1="15058.8164" x2="17241.2793" y2="16623.8047" gradientTransform="matrix(7.200000e-02 0 0 7.200000e-02 0.928 1.072)">
<stop offset="0" style="stop-color:#000030"/>
<stop offset="1" style="stop-color:#9900FF"/>
</linearGradient>
<path class="st0" d="M1216.7,1078.8v86.8c0,6.4-5.2,11.6-11.6,11.6c-6.9,0-12.6-4.4-12.6-11.6V1036c0-27.5,22.3-49.8,49.8-49.8
s49.8,22.3,49.8,49.8c0,27.5-22.3,49.8-49.8,49.8C1233,1085.8,1224.2,1083.2,1216.7,1078.8L1216.7,1078.8z M1226.9,1020.6
c8.5,0,15.4,6.9,15.4,15.4s-6.9,15.4-15.4,15.4c-1.6,0-3.1-0.2-4.5-0.7c4.5,6.1,11.8,10.1,19.9,10.1c13.7,0,24.8-11.1,24.8-24.8
s-11.1-24.8-24.8-24.8c-8.2,0-15.4,4-19.9,10.1C1223.8,1020.9,1225.3,1020.6,1226.9,1020.6L1226.9,1020.6z"/>
<linearGradient id="SVGID_00000085938981603410528570000012380000869662973629_" gradientUnits="userSpaceOnUse" x1="15312.8066" y1="15057.3965" x2="15312.8066" y2="16624.1172" gradientTransform="matrix(7.200000e-02 0 0 7.200000e-02 0.928 1.072)">
<stop offset="0" style="stop-color:#000030"/>
<stop offset="1" style="stop-color:#4285F4"/>
</linearGradient>
<path style="fill:url(#SVGID_00000085938981603410528570000012380000869662973629_);" d="M1154.5,1078.8v55.8c0,5.1-2.1,9.7-5.4,13
c-7.3,7.3-20.9,7.3-28.2,0c-9.6-9.6-0.5-25.9-17.7-43.1c-16.7-16.7-45.8-16.7-62.5,0c-7.7,7.7-12.5,18.4-12.5,30.1
c0,6.4,5.2,11.6,11.6,11.6c6.9,0,12.6-4.4,12.6-11.6c0-5.1,2.1-9.7,5.4-13c7.3-7.3,20.9-7.3,28.2,0c10.5,10.5-0.1,25.3,17.7,43.1
c16.7,16.7,45.8,16.7,62.5,0c7.7-7.7,12.5-18.4,12.5-30.1v-98.2v-0.3c0-27.5-22.3-49.8-49.8-49.8c-27.5,0-49.8,22.3-49.8,49.8
c0,27.5,22.3,49.8,49.8,49.8C1138.3,1085.8,1147,1083.2,1154.5,1078.8z M1128.9,1060.8c-8.2,0-15.4-4-19.9-10.1
c1.4,0.4,3,0.7,4.5,0.7c8.5,0,15.4-6.9,15.4-15.4s-6.9-15.4-15.4-15.4c-1.6,0-3.1,0.2-4.5,0.7c4.5-6.1,11.8-10.1,19.9-10.1
c13.7,0,24.8,11.1,24.8,24.8C1153.7,1049.7,1142.6,1060.8,1128.9,1060.8L1128.9,1060.8z"/>
<linearGradient id="SVGID_00000127739374497564837560000013534033995177318078_" gradientUnits="userSpaceOnUse" x1="18088.9141" y1="13182.8672" x2="15383.333" y2="11899.5996" gradientTransform="matrix(7.200000e-02 0 0 7.200000e-02 0.928 1.072)">
<stop offset="0" style="stop-color:#4285F4"/>
<stop offset="1" style="stop-color:#9900FF"/>
</linearGradient>
<path style="fill:url(#SVGID_00000127739374497564837560000013534033995177318078_);" d="M1328.4,937.5c0-30.6-12.2-59.7-33.8-81.3
c-21.6-21.6-50.7-33.8-81.3-33.8c-30.6,0-59.7,12.2-81.3,33.8c-21.6,21.6-33.8,50.7-33.8,81.3v5.2c0,6.7,5.4,12.1,12.1,12.1
c6.7,0,12.1-5.4,12.1-12.1v-5.2c0-24.2,9.7-47.2,26.7-64.2c17.1-17.1,40.1-26.7,64.2-26.7s47.2,9.7,64.2,26.7
c17.1,17.1,26.7,40.1,26.7,64.2c0,6.7,5.4,12.1,12.1,12.1C1323,949.5,1328.4,944.1,1328.4,937.5z"/>
<linearGradient id="SVGID_00000026880830724572405890000002574533588083035832_" gradientUnits="userSpaceOnUse" x1="18708.3613" y1="14393.377" x2="18708.3613" y2="16782.8711" gradientTransform="matrix(7.200000e-02 0 0 7.200000e-02 0.928 1.072)">
<stop offset="0" style="stop-color:#000030"/>
<stop offset="1" style="stop-color:#4285F4"/>
</linearGradient>
<path style="fill:url(#SVGID_00000026880830724572405890000002574533588083035832_);" d="M1328.4,973.9v14.9h19.4
c6.5,0,11.8,5.3,11.8,11.8c0,6.8-4.6,12.4-11.8,12.4h-19.4v122c0,5.1,2.1,9.7,5.4,13c7.3,7.3,20.9,7.3,28.2,0
c3.3-3.3,5.4-7.9,5.4-13v-4.1c0-7.2,5.7-11.6,12.6-11.6c6.4,0,11.6,5.2,11.6,11.6v4.1c0,11.8-4.8,22.4-12.5,30.1
c-16.7,16.7-45.7,16.7-62.4,0c-7.7-7.7-12.5-18.4-12.5-30.1V973.9c0-7,5.6-11.8,12.4-11.8C1323.1,962.2,1328.3,967.4,1328.4,973.9
L1328.4,973.9z"/>
<linearGradient id="SVGID_00000018229338295230736120000011477717140636842910_" gradientUnits="userSpaceOnUse" x1="17447.4375" y1="15469.0166" x2="17540.1348" y2="16329.7832" gradientTransform="matrix(7.200000e-02 0 0 7.200000e-02 0.928 1.072)">
<stop offset="0" style="stop-color:#4285F4"/>
<stop offset="1" style="stop-color:#9900FF"/>
</linearGradient>
<path style="fill:url(#SVGID_00000018229338295230736120000011477717140636842910_);" d="M1272.6,1165.5c0,6.4-5.2,11.6-11.6,11.6
c-6.9,0-12.6-4.4-12.6-11.6c0-35.5,0-3.9,0-39.4c0-6.4,5.2-11.6,11.6-11.6c6.9,0,12.6,4.4,12.6,11.6
C1272.6,1161.6,1272.6,1130.1,1272.6,1165.5z"/>
<path class="st5" d="M707.2,1020.3v82.9h-25.1v-41.6h-54.3v41.6h-25.1v-82.9C602.7,952,707.2,951.1,707.2,1020.3z M996.8,1103.2
c37.1,0,67.2-30.1,67.2-67.2s-30.1-67.2-67.2-67.2s-67.2,30.1-67.2,67.2C929.6,1073.2,959.7,1103.2,996.8,1103.2z M996.8,1077.5
c-22.9,0-41.5-18.6-41.5-41.5c0-22.9,18.6-41.5,41.5-41.5s41.5,18.6,41.5,41.5C1038.3,1058.9,1019.8,1077.5,996.8,1077.5z
M934.1,968.8V993h-36.5v110.3h-24.2V993h-36.5v-24.2C869.3,968.8,901.7,968.8,934.1,968.8z M824.8,1051.7v-82.9h-25.1v82.9
c0,37.3-54.3,36.7-54.3,0v-82.9h-25.1v82.9C720.3,1120,824.8,1120.9,824.8,1051.7z M682.1,1037.4v-17.1c0-37.3-54.3-36.7-54.3,0
v17.1H682.1z"/>
<circle class="st6" cx="1379.5" cy="1096.4" r="12.4"/>
<circle class="st6" cx="1039.8" cy="1164.7" r="12.4"/>
</g>
</svg>

(The SVG file shown above, 5.8 KiB, was deleted.)

(Binary image file changed: 15 KiB before and after; contents not shown.)


@@ -0,0 +1,44 @@
"use client";
import Image from "next/image";
import { useSearchParams } from "next/navigation";
import FlowEditor from '@/components/Flow';
export default function Home() {
const query = useSearchParams();
return (
<div className="flex flex-col items-center min-h-screen">
<div className="z-10 w-full flex items-center justify-between font-mono text-sm relative">
<p className="border border-gray-600 rounded-xl pb-4 pt-4 p-4">
Get started by adding a&nbsp;
<code className="font-mono font-bold">block</code>
</p>
<div className="absolute top-0 right-0 p-4">
<a
className="pointer-events-auto flex place-items-center gap-2"
href="https://news.agpt.co/"
target="_blank"
rel="noopener noreferrer"
>
By{" "}
<Image
src="/AUTOgpt_Logo_dark.png"
alt="AutoGPT Logo"
width={100}
height={24}
priority
/>
</a>
</div>
</div>
<div className="w-full flex justify-center mt-10">
<FlowEditor
className="flow-container w-full min-h-[75vh] border border-gray-300 dark:border-gray-700 rounded-lg"
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
template={!!query.get("templateID")}
/>
</div>
</div>
);
}


@@ -2,30 +2,6 @@
@tailwind components;
@tailwind utilities;
:root {
--foreground-rgb: 0, 0, 0;
--background-start-rgb: 214, 219, 220;
--background-end-rgb: 255, 255, 255;
}
@media (prefers-color-scheme: dark) {
:root {
--foreground-rgb: 255, 255, 255;
--background-start-rgb: 0, 0, 0;
--background-end-rgb: 0, 0, 0;
}
}
body {
color: rgb(var(--foreground-rgb));
background: linear-gradient(
to bottom,
transparent,
rgb(var(--background-end-rgb))
)
rgb(var(--background-start-rgb));
}
@layer utilities {
.text-balance {
text-wrap: balance;


@@ -1,7 +1,19 @@
import React from 'react';
import type { Metadata } from "next";
import { ThemeProvider as NextThemeProvider } from "next-themes";
import { type ThemeProviderProps } from "next-themes/dist/types";
import { Inter } from "next/font/google";
import Link from "next/link";
import { CubeIcon, Pencil1Icon, ReaderIcon, TimerIcon } from "@radix-ui/react-icons";
import "./globals.css";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import { Button, buttonVariants } from "@/components/ui/button";
import {
DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger
} from "@/components/ui/dropdown-menu";
const inter = Inter({ subsets: ["latin"] });
export const metadata: Metadata = {
@@ -9,6 +21,39 @@ export const metadata: Metadata = {
description: "Your one stop shop to creating AI Agents",
};
function ThemeProvider({ children, ...props }: ThemeProviderProps) {
return <NextThemeProvider {...props}>{children}</NextThemeProvider>
}
const NavBar = () => (
<nav className="bg-white dark:bg-slate-800 p-4 flex justify-between items-center shadow">
<div className="flex space-x-4">
<Link href="/monitor" className={buttonVariants({ variant: "ghost" })}>
<TimerIcon className="mr-1" /> Monitor
</Link>
<Link href="/build" className={buttonVariants({ variant: "ghost" })}>
<Pencil1Icon className="mr-1" /> Build
</Link>
</div>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" className="h-8 w-8 rounded-full">
<Avatar>
<AvatarImage src="https://github.com/shadcn.png" alt="@shadcn" />
<AvatarFallback>CN</AvatarFallback>
</Avatar>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuItem>Profile</DropdownMenuItem>
<DropdownMenuItem>Settings</DropdownMenuItem>
<DropdownMenuItem>Switch Workspace</DropdownMenuItem>
<DropdownMenuItem>Log out</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</nav>
);
export default function RootLayout({
children,
}: Readonly<{
@@ -16,7 +61,20 @@ export default function RootLayout({
}>) {
return (
<html lang="en">
<body className={inter.className}>{children}</body>
<body className={inter.className}>
<ThemeProvider
attribute="class"
defaultTheme="light"
disableTransitionOnChange
>
<div className="min-h-screen bg-gray-200 text-gray-900">
<NavBar />
<main className="mx-auto p-4">
{children}
</main>
</div>
</ThemeProvider>
</body>
</html>
);
}


@@ -0,0 +1,712 @@
"use client";
import React, { useEffect, useState } from 'react';
import Link from 'next/link';
import moment from 'moment';
import {
ComposedChart,
DefaultLegendContentProps,
Legend,
Line,
ResponsiveContainer,
Scatter,
Tooltip,
XAxis,
YAxis,
} from 'recharts';
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuLabel,
DropdownMenuRadioGroup,
DropdownMenuRadioItem,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu"
import AutoGPTServerAPI, {
Graph,
GraphMeta,
NodeExecutionResult,
safeCopyGraph,
} from '@/lib/autogpt-server-api';
import { ChevronDownIcon, ClockIcon, EnterIcon, ExitIcon, Pencil2Icon } from '@radix-ui/react-icons';
import { cn, exportAsJSONFile, hashString } from '@/lib/utils';
import { Badge } from "@/components/ui/badge";
import { Button, buttonVariants } from "@/components/ui/button";
import { Calendar } from "@/components/ui/calendar";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { Dialog, DialogContent, DialogHeader, DialogTrigger } from '@/components/ui/dialog';
import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table";
import { AgentImportForm } from '@/components/agent-import-form';
const Monitor = () => {
const [flows, setFlows] = useState<GraphMeta[]>([]);
const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
const [selectedFlow, setSelectedFlow] = useState<GraphMeta | null>(null);
const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
const api = new AutoGPTServerAPI();
useEffect(() => fetchFlowsAndRuns(), []);
useEffect(() => {
// Poll for updates every 5 seconds. `flows` is a dependency so the interval
// is re-created with a fresh closure whenever the flow list changes;
// with an empty array it would only ever see the initial (empty) list.
const intervalId = setInterval(() => flows.map(f => refreshFlowRuns(f.id)), 5000);
return () => clearInterval(intervalId);
}, [flows]);
function fetchFlowsAndRuns() {
api.listGraphs()
.then(flows => {
setFlows(flows);
flows.map(flow => refreshFlowRuns(flow.id));
});
}
function refreshFlowRuns(flowID: string) {
// Fetch flow run IDs
api.listGraphRunIDs(flowID)
.then(runIDs => runIDs.map(runID => {
let run;
if (
(run = flowRuns.find(fr => fr.id == runID))
&& !["waiting", "running"].includes(run.status)
) {
return
}
// Fetch flow run
api.getGraphExecutionInfo(flowID, runID)
.then(execInfo => setFlowRuns(flowRuns => {
if (execInfo.length == 0) return flowRuns;
const flowRunIndex = flowRuns.findIndex(fr => fr.id == runID);
const flowRun = flowRunFromNodeExecutionResults(execInfo);
if (flowRunIndex > -1) {
flowRuns.splice(flowRunIndex, 1, flowRun)
}
else {
flowRuns.push(flowRun)
}
return [...flowRuns]
}));
}));
}
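
The early `return` inside the map above is the polling optimization: a run in a terminal state can never change again, so only `waiting` and `running` runs are re-fetched. Condensed into a predicate (a sketch, not code from the diff):

```
// Sketch: a run only needs re-fetching while it can still change state.
const needsRefresh = (run: FlowRun | undefined): boolean =>
  !run || ["waiting", "running"].includes(run.status);
```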
const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
return (
<div className="grid grid-cols-1 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10 gap-4">
<AgentFlowList
className={column1}
flows={flows}
flowRuns={flowRuns}
selectedFlow={selectedFlow}
onSelectFlow={f => {
setSelectedRun(null);
setSelectedFlow(f.id == selectedFlow?.id ? null : f);
}}
/>
<FlowRunsList
className={column2}
flows={flows}
runs={
(
selectedFlow
? flowRuns.filter(v => v.graphID == selectedFlow.id)
: flowRuns
)
.toSorted((a, b) => Number(a.startTime) - Number(b.startTime))
}
selectedRun={selectedRun}
onSelectRun={r => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>
{selectedRun && (
<FlowRunInfo
flow={selectedFlow || flows.find(f => f.id == selectedRun.graphID)!}
flowRun={selectedRun}
className={column3}
/>
) || selectedFlow && (
<FlowInfo
flow={selectedFlow}
flowRuns={flowRuns.filter(r => r.graphID == selectedFlow.id)}
className={column3}
/>
) || (
<Card className={`p-6 ${column3}`}>
<FlowRunsStats flows={flows} flowRuns={flowRuns} />
</Card>
)}
</div>
);
};
type FlowRun = {
id: string
graphID: string
graphVersion: number
status: 'running' | 'waiting' | 'success' | 'failed'
startTime: number // unix timestamp (ms)
endTime: number // unix timestamp (ms)
duration: number // seconds
totalRunTime: number // seconds
nodeExecutionResults: NodeExecutionResult[]
};
function flowRunFromNodeExecutionResults(
nodeExecutionResults: NodeExecutionResult[]
): FlowRun {
// Determine overall status
let status: 'running' | 'waiting' | 'success' | 'failed' = 'success';
for (const execution of nodeExecutionResults) {
if (execution.status === 'FAILED') {
status = 'failed';
break;
} else if (['QUEUED', 'RUNNING'].includes(execution.status)) {
status = 'running';
break;
} else if (execution.status === 'INCOMPLETE') {
status = 'waiting';
}
}
// Determine aggregate startTime, endTime, and totalRunTime
const now = Date.now();
const startTime = Math.min(
...nodeExecutionResults.map(ner => ner.add_time.getTime()), now
);
const endTime = (
['success', 'failed'].includes(status)
? Math.max(
...nodeExecutionResults.map(ner => ner.end_time?.getTime() || 0), startTime
)
: now
);
const duration = (endTime - startTime) / 1000; // Convert to seconds
const totalRunTime = nodeExecutionResults.reduce((cum, node) => (
cum + ((node.end_time?.getTime() ?? now) - (node.start_time?.getTime() ?? now))
), 0) / 1000;
return {
id: nodeExecutionResults[0].graph_exec_id,
graphID: nodeExecutionResults[0].graph_id,
graphVersion: nodeExecutionResults[0].graph_version,
status,
startTime,
endTime,
duration,
totalRunTime,
nodeExecutionResults: nodeExecutionResults,
};
}
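
To make the aggregation concrete, here is a minimal sketch of how two node executions roll up into one `FlowRun`. The mock objects carry only the fields the function actually reads, and the `"COMPLETED"` status string is an assumption: the function only special-cases `FAILED`, `QUEUED`, `RUNNING`, and `INCOMPLETE`, so any other value counts toward `success`.

```
// Sketch: one finished node and one still-running node from the same run.
const nodes = [
  {
    graph_exec_id: "exec-1", graph_id: "graph-1", graph_version: 1,
    status: "COMPLETED", // assumed terminal status; not one of the special-cased values
    add_time: new Date("2024-07-23T10:00:00Z"),
    start_time: new Date("2024-07-23T10:00:01Z"),
    end_time: new Date("2024-07-23T10:00:05Z"),
  },
  {
    graph_exec_id: "exec-1", graph_id: "graph-1", graph_version: 1,
    status: "RUNNING",
    add_time: new Date("2024-07-23T10:00:05Z"),
    start_time: new Date("2024-07-23T10:00:05Z"),
  },
] as NodeExecutionResult[];

const run = flowRunFromNodeExecutionResults(nodes);
// run.status    === "running"   (any QUEUED/RUNNING node wins over completed ones)
// run.startTime === the earliest add_time; run.endTime === Date.now() while running
// run.totalRunTime sums per-node times, so with parallel nodes it can exceed run.duration
```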
const AgentFlowList = (
{ flows, flowRuns, selectedFlow, onSelectFlow, className }: {
flows: GraphMeta[],
flowRuns?: FlowRun[],
selectedFlow: GraphMeta | null,
onSelectFlow: (f: GraphMeta) => void,
className?: string,
}
) => {
const [templates, setTemplates] = useState<GraphMeta[]>([]);
const api = new AutoGPTServerAPI();
useEffect(() => {
api.listTemplates().then(templates => setTemplates(templates))
}, []);
return <Card className={className}>
<CardHeader className="flex-row justify-between items-center space-x-3 space-y-0">
<CardTitle>Agents</CardTitle>
<div className="flex items-center">{/* Split "Create" button */}
<Button variant="outline" className="rounded-r-none" asChild>
<Link href="/build">Create</Link>
</Button>
<Dialog>{/* https://ui.shadcn.com/docs/components/dialog#notes */}
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="outline" className={"rounded-l-none border-l-0 px-2"}>
<ChevronDownIcon />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
<DialogTrigger asChild>
<DropdownMenuItem>
<EnterIcon className="mr-2" /> Import from file
</DropdownMenuItem>
</DialogTrigger>
{templates.length > 0 && <>{/* List of templates */}
<DropdownMenuSeparator />
<DropdownMenuLabel>Use a template</DropdownMenuLabel>
{templates.map(template => (
<DropdownMenuItem
key={template.id}
onClick={() => {
api.createGraph(template.id, template.version)
.then(newGraph => {
window.location.href = `/build?flowID=${newGraph.id}`;
});
}}
>
{template.name}
</DropdownMenuItem>
))}
</>}
</DropdownMenuContent>
</DropdownMenu>
<DialogContent>
<DialogHeader className="text-lg">
Import an Agent (template) from a file
</DialogHeader>
<AgentImportForm />
</DialogContent>
</Dialog>
</div>
</CardHeader>
<CardContent>
<Table>
<TableHeader>
<TableRow>
<TableHead>Name</TableHead>
{/* <TableHead>Status</TableHead> */}
{/* <TableHead>Last updated</TableHead> */}
{flowRuns && <TableHead className="md:hidden lg:table-cell"># of runs</TableHead>}
{flowRuns && <TableHead>Last run</TableHead>}
</TableRow>
</TableHeader>
<TableBody>
{flows
.map((flow) => {
let runCount = 0, lastRun: FlowRun | null = null;
if (flowRuns) {
const _flowRuns = flowRuns.filter(r => r.graphID == flow.id);
runCount = _flowRuns.length;
lastRun = runCount == 0 ? null : _flowRuns.reduce(
(a, c) => a.startTime > c.startTime ? a : c
);
}
return { flow, runCount, lastRun };
})
.sort((a, b) => {
if (!a.lastRun && !b.lastRun) return 0;
if (!a.lastRun) return 1;
if (!b.lastRun) return -1;
return b.lastRun.startTime - a.lastRun.startTime;
})
.map(({ flow, runCount, lastRun }) => (
<TableRow
key={flow.id}
className="cursor-pointer"
onClick={() => onSelectFlow(flow)}
data-state={selectedFlow?.id == flow.id ? "selected" : null}
>
<TableCell>{flow.name}</TableCell>
{/* <TableCell><FlowStatusBadge status={flow.status ?? "active"} /></TableCell> */}
{/* <TableCell>
{flow.updatedAt ?? "???"}
</TableCell> */}
{flowRuns && <TableCell className="md:hidden lg:table-cell">{runCount}</TableCell>}
{flowRuns && (!lastRun ? <TableCell /> :
<TableCell title={moment(lastRun.startTime).toString()}>
{moment(lastRun.startTime).fromNow()}
</TableCell>)}
</TableRow>
))
}
</TableBody>
</Table>
</CardContent>
</Card>
};
const FlowStatusBadge = ({ status }: { status: "active" | "disabled" | "failing" }) => (
<Badge
variant="default"
className={
status === 'active' ? 'bg-green-500 dark:bg-green-600' :
status === 'failing' ? 'bg-red-500 dark:bg-red-700' :
'bg-gray-500 dark:bg-gray-600'
}
>
{status}
</Badge>
);
const FlowRunsList: React.FC<{
flows: GraphMeta[];
runs: FlowRun[];
className?: string;
selectedRun?: FlowRun | null;
onSelectRun: (r: FlowRun) => void;
}> = ({ flows, runs, selectedRun, onSelectRun, className }) => (
<Card className={className}>
<CardHeader>
<CardTitle>Runs</CardTitle>
</CardHeader>
<CardContent>
<Table>
<TableHeader>
<TableRow>
<TableHead>Agent</TableHead>
<TableHead>Started</TableHead>
<TableHead>Status</TableHead>
<TableHead>Duration</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{runs.map((run) => (
<TableRow
key={run.id}
className="cursor-pointer"
onClick={() => onSelectRun(run)}
data-state={selectedRun?.id == run.id ? "selected" : null}
>
<TableCell>{flows.find(f => f.id == run.graphID)!.name}</TableCell>
<TableCell>{moment(run.startTime).format("HH:mm")}</TableCell>
<TableCell><FlowRunStatusBadge status={run.status} /></TableCell>
<TableCell>{formatDuration(run.duration)}</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</CardContent>
</Card>
);
const FlowRunStatusBadge: React.FC<{
status: FlowRun['status'];
className?: string;
}> = ({ status, className }) => (
<Badge
variant="default"
className={cn(
status === 'running' ? 'bg-blue-500 dark:bg-blue-700' :
status === 'waiting' ? 'bg-yellow-500 dark:bg-yellow-600' :
status === 'success' ? 'bg-green-500 dark:bg-green-600' :
'bg-red-500 dark:bg-red-700',
className,
)}
>
{status}
</Badge>
);
const FlowInfo: React.FC<React.HTMLAttributes<HTMLDivElement> & {
flow: GraphMeta;
flowRuns: FlowRun[];
flowVersion?: number | "all";
}> = ({ flow, flowRuns, flowVersion, ...props }) => {
const api = new AutoGPTServerAPI();
const [flowVersions, setFlowVersions] = useState<Graph[] | null>(null);
const [selectedVersion, setSelectedFlowVersion] = useState(flowVersion ?? "all");
const selectedFlowVersion: Graph | undefined = flowVersions?.find(v => (
v.version == (selectedVersion == "all" ? flow.version : selectedVersion)
));
useEffect(() => {
api.getGraphAllVersions(flow.id).then(result => setFlowVersions(result));
}, [flow.id]);
return <Card {...props}>
<CardHeader className="flex-row justify-between space-y-0 space-x-3">
<div>
<CardTitle>
{flow.name} <span className="font-light">v{flow.version}</span>
</CardTitle>
<p className="mt-2">Agent ID: <code>{flow.id}</code></p>
</div>
<div className="flex items-start space-x-2">
{(flowVersions?.length ?? 0) > 1 &&
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="outline">
<ClockIcon className="mr-2" />
{selectedVersion == "all" ? "All versions" : `Version ${selectedVersion}`}
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent className="w-56">
<DropdownMenuLabel>Choose a version</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuRadioGroup
value={String(selectedVersion)}
onValueChange={choice => setSelectedFlowVersion(
choice == "all" ? choice : Number(choice)
)}
>
<DropdownMenuRadioItem value="all">All versions</DropdownMenuRadioItem>
{flowVersions?.map(v =>
<DropdownMenuRadioItem key={v.version} value={v.version.toString()}>
Version {v.version}{v.is_active ? " (active)" : ""}
</DropdownMenuRadioItem>
)}
</DropdownMenuRadioGroup>
</DropdownMenuContent>
</DropdownMenu>}
<Link className={buttonVariants({ variant: "outline" })} href={`/build?flowID=${flow.id}`}>
<Pencil2Icon className="mr-2" /> Edit
</Link>
<Button
variant="outline"
className="px-2.5"
title="Export to a JSON-file"
onClick={async () => exportAsJSONFile(
safeCopyGraph(
flowVersions!.find(v => v.version == selectedFlowVersion!.version)!,
await api.getBlocks(),
),
`${flow.name}_v${selectedFlowVersion!.version}.json`
)}
>
<ExitIcon />
</Button>
</div>
</CardHeader>
<CardContent>
<FlowRunsStats
flows={[selectedFlowVersion ?? flow]}
flowRuns={flowRuns.filter(r =>
r.graphID == flow.id
&& (selectedVersion == "all" || r.graphVersion == selectedVersion)
)}
/>
</CardContent>
</Card>;
};
const FlowRunInfo: React.FC<React.HTMLAttributes<HTMLDivElement> & {
flow: GraphMeta;
flowRun: FlowRun;
}> = ({ flow, flowRun, ...props }) => {
if (flowRun.graphID != flow.id) {
throw new Error(`FlowRunInfo can't be used with non-matching flowRun.graphID and flow.id`)
}
return <Card {...props}>
<CardHeader className="flex-row items-center justify-between space-y-0 space-x-3">
<div>
<CardTitle>
{flow.name} <span className="font-light">v{flow.version}</span>
</CardTitle>
<p className="mt-2">Agent ID: <code>{flow.id}</code></p>
<p className="mt-1">Run ID: <code>{flowRun.id}</code></p>
</div>
<Link className={buttonVariants({ variant: "outline" })} href={`/build?flowID=${flow.id}`}>
<Pencil2Icon className="mr-2" /> Edit Agent
</Link>
</CardHeader>
<CardContent>
<p><strong>Status:</strong> <FlowRunStatusBadge status={flowRun.status} /></p>
<p><strong>Started:</strong> {moment(flowRun.startTime).format('YYYY-MM-DD HH:mm:ss')}</p>
<p><strong>Finished:</strong> {moment(flowRun.endTime).format('YYYY-MM-DD HH:mm:ss')}</p>
<p><strong>Duration (run time):</strong> {flowRun.duration} ({flowRun.totalRunTime}) seconds</p>
{/* <p><strong>Total cost:</strong> €1,23</p> */}
</CardContent>
</Card>;
};
const FlowRunsStats: React.FC<{
flows: GraphMeta[],
flowRuns: FlowRun[],
title?: string,
className?: string,
}> = ({ flows, flowRuns, title, className }) => {
/* "dateMin": since the first flow in the dataset
* number > 0: custom date (unix timestamp)
* number < 0: offset relative to Date.now() (in seconds) */
const [statsSince, setStatsSince] = useState<number | "dataMin">(-24*3600)
const statsSinceTimestamp = ( // unix timestamp or null
typeof(statsSince) == "string"
? null
: statsSince < 0
? Date.now() + (statsSince*1000)
: statsSince
)
const filteredFlowRuns = statsSinceTimestamp != null
? flowRuns.filter(fr => fr.startTime > statsSinceTimestamp)
: flowRuns;
return (
<div className={className}>
<div className="flex flex-row items-center justify-between">
<CardTitle>{ title || "Stats" }</CardTitle>
<div className="flex space-x-2">
<Button variant="outline" size="sm" onClick={() => setStatsSince(-2*3600)}>2h</Button>
<Button variant="outline" size="sm" onClick={() => setStatsSince(-8*3600)}>8h</Button>
<Button variant="outline" size="sm" onClick={() => setStatsSince(-24*3600)}>24h</Button>
<Button variant="outline" size="sm" onClick={() => setStatsSince(-7*24*3600)}>7d</Button>
<Popover>
<PopoverTrigger asChild>
<Button variant={"outline"} size="sm">Custom</Button>
</PopoverTrigger>
<PopoverContent className="w-auto p-0" align="start">
<Calendar
mode="single"
onSelect={(_, selectedDay) => setStatsSince(selectedDay.getTime())}
initialFocus
/>
</PopoverContent>
</Popover>
<Button variant="outline" size="sm" onClick={() => setStatsSince("dataMin")}>All</Button>
</div>
</div>
<FlowRunsTimeline flows={flows} flowRuns={flowRuns} dataMin={statsSince} className="mt-3" />
<hr className="my-4" />
<div>
<p><strong>Total runs:</strong> {filteredFlowRuns.length}</p>
<p>
<strong>Total run time:</strong> {
filteredFlowRuns.reduce((total, run) => total + run.totalRunTime, 0)
} seconds
</p>
{/* <p><strong>Total cost:</strong> €1,23</p> */}
</div>
</div>
)
}
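
The `statsSince` encoding above is compact but easy to misread; resolving it into a cutoff timestamp looks like this (a standalone sketch of the inline ternary):

```
// Sketch: "dataMin" means no cutoff; negative numbers are offsets from now, in seconds.
function resolveStatsSince(statsSince: number | "dataMin"): number | null {
  if (typeof statsSince === "string") return null;  // show all data
  return statsSince < 0
    ? Date.now() + statsSince * 1000                // e.g. -24*3600 -> 24h ago
    : statsSince;                                   // absolute unix timestamp (ms)
}
```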
const FlowRunsTimeline = (
{ flows, flowRuns, dataMin, className }: {
flows: GraphMeta[],
flowRuns: FlowRun[],
dataMin: "dataMin" | number,
className?: string,
}
) => (
/* TODO: make logarithmic? */
<ResponsiveContainer width="100%" height={120} className={className}>
<ComposedChart>
<XAxis
dataKey="time"
type="number"
domain={[
typeof(dataMin) == "string"
? dataMin
: dataMin < 0
? Date.now() + (dataMin*1000)
: dataMin,
Date.now()
]}
allowDataOverflow={true}
tickFormatter={(unixTime) => {
const now = moment();
const time = moment(unixTime);
return now.diff(time, 'hours') < 24
? time.format('HH:mm')
: time.format('YYYY-MM-DD HH:mm');
}}
name="Time"
scale="time"
/>
<YAxis
dataKey="_duration"
name="Duration (s)"
tickFormatter={s => s > 90 ? `${Math.round(s / 60)}m` : `${s}s`}
/>
<Tooltip
content={({ payload, label }) => {
if (payload && payload.length) {
const data: FlowRun & { time: number, _duration: number } = payload[0].payload;
const flow = flows.find(f => f.id === data.graphID);
return (
<Card className="p-2 text-xs leading-normal">
<p><strong>Agent:</strong> {flow ? flow.name : 'Unknown'}</p>
<p>
<strong>Status:</strong>&nbsp;
<FlowRunStatusBadge status={data.status} className="px-1.5 py-0" />
</p>
<p><strong>Started:</strong> {moment(data.startTime).format('YYYY-MM-DD HH:mm:ss')}</p>
<p><strong>Duration / run time:</strong> {
formatDuration(data.duration)} / {formatDuration(data.totalRunTime)
}</p>
</Card>
);
}
return null;
}}
/>
{flows.map((flow) => (
<Scatter
key={flow.id}
data={flowRuns.filter(fr => fr.graphID == flow.id).map(fr => ({
...fr,
time: fr.startTime + (fr.totalRunTime * 1000),
_duration: fr.totalRunTime,
}))}
name={flow.name}
fill={`hsl(${hashString(flow.id) * 137.5 % 360}, 70%, 50%)`}
/>
))}
{flowRuns.map((run) => (
<Line
key={run.id}
type="linear"
dataKey="_duration"
data={[
{ ...run, time: run.startTime, _duration: 0 },
{ ...run, time: run.endTime, _duration: run.totalRunTime }
]}
stroke={`hsl(${hashString(run.graphID) * 137.5 % 360}, 70%, 50%)`}
strokeWidth={2}
dot={false}
legendType="none"
/>
))}
<Legend
content={<ScrollableLegend />}
wrapperStyle={{
bottom: 0,
left: 0,
right: 0,
width: "100%",
display: "flex",
justifyContent: "center",
}}
/>
</ComposedChart>
</ResponsiveContainer>
);
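
Both the `Scatter` series and the run `Line`s derive their color from `hashString(id) * 137.5 % 360`: spacing hues by roughly the golden angle (137.5°) gives each agent a stable, well-separated color. A standalone sketch with a placeholder hash (the real `hashString` lives in `@/lib/utils` and may be implemented differently):

```
// Sketch only: placeholder hash; the actual hashString in @/lib/utils may differ.
function hashString(s: string): number {
  let h = 0;
  for (const c of s) h = (h * 31 + c.charCodeAt(0)) | 0;
  return Math.abs(h);
}

// Deterministic per-agent hue, spaced by roughly the golden angle.
const colorFor = (id: string) => `hsl(${(hashString(id) * 137.5) % 360}, 70%, 50%)`;
```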
const ScrollableLegend: React.FC<DefaultLegendContentProps & { className?: string }> = (
{ payload, className }
) => {
return (
<div
className={cn(
"whitespace-nowrap px-4 text-sm overflow-x-auto space-x-3",
className,
)}
style={{ scrollbarWidth: "none" }}
>
{payload.map((entry, index) => {
if (entry.type == "none") return;
return (
<span key={`item-${index}`} className="inline-flex items-center">
<span
className="size-2.5 inline-block mr-1 rounded-full"
style={{backgroundColor: entry.color}}
/>
<span>{entry.value}</span>
</span>
)
})}
</div>
);
};
function formatDuration(seconds: number): string {
return (
seconds < 100
? seconds.toPrecision(2)
: Math.round(seconds)
).toString() + "s";
}
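
For reference, a few sample outputs (values under 100 seconds keep two significant digits; longer ones round to whole seconds):

```
formatDuration(1.234); // "1.2s"
formatDuration(42.3);  // "42s"
formatDuration(312.7); // "313s"
```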
export default Monitor;


@@ -1,40 +0,0 @@
import Image from "next/image";
import Flow from '../components/Flow';
export default function Home() {
return (
<main className="flex min-h-screen flex-col items-center justify-between p-24">
<div className="z-10 w-full max-w-5xl items-center justify-between font-mono text-sm lg:flex">
<p className="fixed left-0 top-0 flex w-full justify-center border-b border-gray-300 bg-gradient-to-b from-zinc-200 pb-6 pt-8 backdrop-blur-2xl dark:border-neutral-800 dark:bg-zinc-800/30 dark:from-inherit lg:static lg:w-auto lg:rounded-xl lg:border lg:bg-gray-200 lg:p-4 lg:dark:bg-zinc-800/30">
Get started by adding a&nbsp;
<code className="font-mono font-bold">node</code>
</p>
<div
className="fixed bottom-0 left-0 flex h-48 w-full items-end justify-center bg-gradient-to-t from-white via-white dark:from-black dark:via-black lg:static lg:size-auto lg:bg-none">
<a
className="pointer-events-none flex place-items-center gap-2 p-8 lg:pointer-events-auto lg:p-0"
href="https://news.agpt.co/"
target="_blank"
rel="noopener noreferrer"
>
By{" "}
<Image
src="/autogpt.svg"
alt="AutoGPT Logo"
className="dark:invert"
width={100}
height={24}
priority
/>
</a>
</div>
</div>
<div className="w-full flex justify-center mt-10">
<div className="flow-container w-full h-full">
<Flow/>
</div>
</div>
</main>
);
}


@@ -0,0 +1,22 @@
import { BaseEdge, ConnectionLineComponentProps, getBezierPath, Position } from "reactflow";
const ConnectionLine: React.FC<ConnectionLineComponentProps> = ({ fromPosition, fromHandle, fromX, fromY, toPosition, toX, toY }) => {
const sourceX = fromPosition === Position.Right ?
fromX + (fromHandle?.width! / 2 - 5) : fromX - (fromHandle?.width! / 2 - 5);
const [path] = getBezierPath({
sourceX: sourceX,
sourceY: fromY,
sourcePosition: fromPosition,
targetX: toX,
targetY: toY,
targetPosition: toPosition,
});
return (
<BaseEdge path={path} style={{ strokeWidth: 2, stroke: '#555' }} />
);
};
export default ConnectionLine;


@@ -0,0 +1,37 @@
import { FC, memo, useMemo } from "react";
import { BaseEdge, EdgeProps, getBezierPath, XYPosition } from "reactflow";
export type CustomEdgeData = {
edgeColor: string
sourcePos: XYPosition
}
const CustomEdgeFC: FC<EdgeProps<CustomEdgeData>> = ({ data, selected, source, sourcePosition, sourceX, sourceY, target, targetPosition, targetX, targetY, markerEnd }) => {
const [path] = getBezierPath({
sourceX: sourceX - 5,
sourceY,
sourcePosition,
targetX: targetX + 4,
targetY,
targetPosition,
});
// Calculate y difference between source and source node, to adjust self-loop edge
const yDifference = useMemo(() => sourceY - data!.sourcePos.y, [data!.sourcePos.y]);
// Define special edge path for self-loop
const edgePath = source === target ?
`M ${sourceX - 5} ${sourceY} C ${sourceX + 128} ${sourceY - yDifference - 128} ${targetX - 128} ${sourceY - yDifference - 128} ${targetX + 3}, ${targetY}` :
path;
return (
<BaseEdge
style={{ strokeWidth: 2, stroke: (data?.edgeColor ?? '#555555') + (selected ? '' : '80') }}
path={edgePath}
markerEnd={markerEnd}
/>
)
};
export const CustomEdge = memo(CustomEdgeFC);


@@ -1,30 +1,34 @@
import React, { useState, useEffect, FC, memo } from 'react';
import { Handle, Position, NodeProps } from 'reactflow';
import { NodeProps } from 'reactflow';
import 'reactflow/dist/style.css';
import './customnode.css';
import ModalComponent from './ModalComponent';
type Schema = {
type: string;
properties: { [key: string]: any };
required?: string[];
};
import { Button } from './ui/button';
import { Input } from './ui/input';
import { BlockSchema } from '@/lib/types';
import { beautifyString } from '@/lib/utils';
import { Switch } from "@/components/ui/switch"
import NodeHandle from './NodeHandle';
type CustomNodeData = {
blockType: string;
title: string;
inputSchema: Schema;
outputSchema: Schema;
inputSchema: BlockSchema;
outputSchema: BlockSchema;
hardcodedValues: { [key: string]: any };
setHardcodedValues: (values: { [key: string]: any }) => void;
connections: Array<{ source: string; sourceHandle: string; target: string; targetHandle: string }>;
isPropertiesOpen: boolean;
isOutputOpen: boolean;
status?: string;
output_data?: any;
};
const CustomNode: FC<NodeProps<CustomNodeData>> = ({ data, id }) => {
const [isPropertiesOpen, setIsPropertiesOpen] = useState(data.isPropertiesOpen || false);
const [isOutputOpen, setIsOutputOpen] = useState(data.isOutputOpen || false);
const [isAdvancedOpen, setIsAdvancedOpen] = useState(false);
const [keyValuePairs, setKeyValuePairs] = useState<{ key: string, value: string }[]>([]);
const [newKey, setNewKey] = useState<string>('');
const [newValue, setNewValue] = useState<string>('');
const [isModalOpen, setIsModalOpen] = useState(false);
const [activeKey, setActiveKey] = useState<string | null>(null);
const [modalValue, setModalValue] = useState<string>('');
@@ -32,7 +36,7 @@ const CustomNode: FC<NodeProps<CustomNodeData>> = ({ data, id }) => {
useEffect(() => {
if (data.output_data || data.status) {
setIsPropertiesOpen(true);
setIsOutputOpen(true);
}
}, [data.output_data, data.status]);
@@ -40,196 +44,299 @@ const CustomNode: FC<NodeProps<CustomNodeData>> = ({ data, id }) => {
console.log(`Node ${id} data:`, data);
}, [id, data]);
const toggleProperties = () => {
setIsPropertiesOpen(!isPropertiesOpen);
const toggleOutput = (checked: boolean) => {
setIsOutputOpen(checked);
};
const generateHandles = (schema: Schema, type: 'source' | 'target') => {
const toggleAdvancedSettings = (checked: boolean) => {
setIsAdvancedOpen(checked);
};
const hasOptionalFields = () => {
return data.inputSchema && Object.keys(data.inputSchema.properties).some((key) => {
return !(data.inputSchema.required?.includes(key));
});
};
const generateOutputHandles = (schema: BlockSchema) => {
if (!schema?.properties) return null;
const keys = Object.keys(schema.properties);
return keys.map((key) => (
<div key={key} className="handle-container">
{type === 'target' && (
<>
<Handle
type={type}
position={Position.Left}
id={key}
style={{ background: '#555', borderRadius: '50%' }}
/>
<span className="handle-label">{key}</span>
</>
)}
{type === 'source' && (
<>
<span className="handle-label">{key}</span>
<Handle
type={type}
position={Position.Right}
id={key}
style={{ background: '#555', borderRadius: '50%' }}
/>
</>
)}
<div key={key}>
<NodeHandle keyName={key} isConnected={isHandleConnected(key)} schema={schema.properties[key]} side="right" />
</div>
));
};
const handleInputChange = (key: string, value: any) => {
const newValues = { ...data.hardcodedValues, [key]: value };
const keys = key.split('.');
const newValues = JSON.parse(JSON.stringify(data.hardcodedValues));
let current = newValues;
for (let i = 0; i < keys.length - 1; i++) {
if (!current[keys[i]]) current[keys[i]] = {};
current = current[keys[i]];
}
current[keys[keys.length - 1]] = value;
console.log(`Updating hardcoded values for node ${id}:`, newValues);
data.setHardcodedValues(newValues);
setErrors((prevErrors) => ({ ...prevErrors, [key]: null }));
};
const validateInput = (key: string, value: any, schema: any) => {
switch (schema.type) {
case 'string':
if (schema.enum && !schema.enum.includes(value)) {
return `Invalid value for ${key}`;
}
break;
case 'boolean':
if (typeof value !== 'boolean') {
return `Invalid value for ${key}`;
}
break;
case 'number':
if (typeof value !== 'number') {
return `Invalid value for ${key}`;
}
break;
case 'array':
if (!Array.isArray(value) || value.some((item: any) => typeof item !== 'string')) {
return `Invalid value for ${key}`;
}
if (schema.minItems && value.length < schema.minItems) {
return `${key} requires at least ${schema.minItems} items`;
}
break;
default:
return null;
}
return null;
const getValue = (key: string) => {
const keys = key.split('.');
return keys.reduce((acc, k) => (acc && acc[k] !== undefined) ? acc[k] : '', data.hardcodedValues);
};
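
Together, `handleInputChange` and `getValue` treat a dotted key such as `expected_format.name` as a path into the nested `hardcodedValues` object. A minimal standalone sketch of the same round trip (helpers written here for illustration, not part of the component):

```
// Sketch: dot-path set, mirroring handleInputChange's deep-copy-and-walk approach.
function setPath(obj: Record<string, any>, key: string, value: any): Record<string, any> {
  const copy = JSON.parse(JSON.stringify(obj)); // deep copy, as the component does
  const keys = key.split(".");
  let cur = copy;
  for (let i = 0; i < keys.length - 1; i++) {
    if (!cur[keys[i]]) cur[keys[i]] = {};
    cur = cur[keys[i]];
  }
  cur[keys[keys.length - 1]] = value;
  return copy;
}

const values = setPath({}, "expected_format.name", "string");
// values => { expected_format: { name: "string" } }
// and the getValue-style read walks back down:
// "expected_format.name".split(".").reduce((acc, k) => acc?.[k] ?? "", values) // "string"
```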
const isHandleConnected = (key: string) => {
return data.connections && data.connections.some((conn: any) => {
if (typeof conn === 'string') {
const [source, target] = conn.split(' -> ');
return target.includes(key) && target.includes(data.title);
return (target.includes(key) && target.includes(data.title)) ||
(source.includes(key) && source.includes(data.title));
}
return conn.target === id && conn.targetHandle === key;
return (conn.target === id && conn.targetHandle === key) ||
(conn.source === id && conn.sourceHandle === key);
});
};
const handleAddProperty = () => {
if (newKey && newValue) {
const newPairs = [...keyValuePairs, { key: newKey, value: newValue }];
setKeyValuePairs(newPairs);
setNewKey('');
setNewValue('');
const expectedFormat = newPairs.reduce((acc, pair) => ({ ...acc, [pair.key]: pair.value }), {});
handleInputChange('expected_format', expectedFormat);
}
};
const handleInputClick = (key: string) => {
setActiveKey(key);
setModalValue(data.hardcodedValues[key] || '');
const value = getValue(key);
setModalValue(typeof value === 'object' ? JSON.stringify(value, null, 2) : value);
setIsModalOpen(true);
};
const handleModalSave = (value: string) => {
if (activeKey) {
handleInputChange(activeKey, value);
try {
const parsedValue = JSON.parse(value);
handleInputChange(activeKey, parsedValue);
} catch (error) {
handleInputChange(activeKey, value);
}
}
setIsModalOpen(false);
setActiveKey(null);
};
const addArrayItem = (key: string) => {
const currentValues = data.hardcodedValues[key] || [];
handleInputChange(key, [...currentValues, '']);
};
const renderInputField = (key: string, schema: any, parentKey: string = '', displayKey: string = ''): JSX.Element => {
const fullKey = parentKey ? `${parentKey}.${key}` : key;
const error = errors[fullKey];
const value = getValue(fullKey);
if (displayKey === '') {
displayKey = key;
}
const removeArrayItem = (key: string, index: number) => {
const currentValues = data.hardcodedValues[key] || [];
currentValues.splice(index, 1);
handleInputChange(key, [...currentValues]);
};
if (isHandleConnected(fullKey)) {
return <></>;
}
const handleArrayItemChange = (key: string, index: number, value: string) => {
const currentValues = data.hardcodedValues[key] || [];
currentValues[index] = value;
handleInputChange(key, [...currentValues]);
};
const renderClickableInput = (value: string | null = null, placeholder: string = "", secret: boolean = false) => {
const addDynamicTextInput = () => {
const dynamicKeyPrefix = 'texts_$_';
const currentKeys = Object.keys(data.hardcodedValues).filter(key => key.startsWith(dynamicKeyPrefix));
const nextIndex = currentKeys.length + 1;
const newKey = `${dynamicKeyPrefix}${nextIndex}`;
handleInputChange(newKey, '');
};
// If "secret" is true, mask any existing value with asterisks instead of rendering the raw text
return secret ? (
<div className="clickable-input" onClick={() => handleInputClick(fullKey)}>
{value ? <i className="text-gray-500">********</i> : <i className="text-gray-500">{placeholder}</i>}
</div>
) : (
<div className="clickable-input" onClick={() => handleInputClick(fullKey)}>
{value || <i className="text-gray-500">{placeholder}</i>}
</div>
)
};
const removeDynamicTextInput = (key: string) => {
const newValues = { ...data.hardcodedValues };
delete newValues[key];
data.setHardcodedValues(newValues);
};
const handleDynamicTextInputChange = (key: string, value: string) => {
handleInputChange(key, value);
};
const renderInputField = (key: string, schema: any) => {
const error = errors[key];
switch (schema.type) {
case 'string':
return schema.enum ? (
<div key={key} className="input-container">
<select
value={data.hardcodedValues[key] || ''}
onChange={(e) => handleInputChange(key, e.target.value)}
className="select-input"
>
{schema.enum.map((option: string) => (
<option key={option} value={option}>
{option}
</option>
))}
</select>
{error && <span className="error-message">{error}</span>}
</div>
) : (
<div key={key} className="input-container">
<div className="clickable-input" onClick={() => handleInputClick(key)}>
{data.hardcodedValues[key] || `Enter ${key}`}
if (schema.type === 'object' && schema.properties) {
return (
<div key={fullKey} className="object-input">
<strong>{displayKey}:</strong>
{Object.entries(schema.properties).map(([propKey, propSchema]: [string, any]) => (
<div key={`${fullKey}.${propKey}`} className="nested-input">
{renderInputField(propKey, propSchema, fullKey, propSchema.title || beautifyString(propKey))}
</div>
))}
</div>
);
}
if (schema.type === 'object' && schema.additionalProperties) {
const objectValue = value || {};
return (
<div key={fullKey} className="object-input">
<strong>{displayKey}:</strong>
{Object.entries(objectValue).map(([propKey, propValue]: [string, any]) => (
<div key={`${fullKey}.${propKey}`} className="nested-input">
<div className="clickable-input" onClick={() => handleInputClick(`${fullKey}.${propKey}`)}>
{beautifyString(propKey)}: {typeof propValue === 'object' ? JSON.stringify(propValue, null, 2) : propValue}
</div>
<Button onClick={() => handleInputChange(`${fullKey}.${propKey}`, undefined)} className="array-item-remove">
&times;
</Button>
</div>
))}
{key === 'expected_format' && (
<div className="nested-input">
{keyValuePairs.map((pair, index) => (
<div key={index} className="key-value-input">
<Input
type="text"
placeholder="Key"
value={beautifyString(pair.key)}
onChange={(e) => {
const newPairs = [...keyValuePairs];
newPairs[index].key = e.target.value;
setKeyValuePairs(newPairs);
const expectedFormat = newPairs.reduce((acc, pair) => ({ ...acc, [pair.key]: pair.value }), {});
handleInputChange('expected_format', expectedFormat);
}}
/>
<Input
type="text"
placeholder="Value"
value={beautifyString(pair.value)}
onChange={(e) => {
const newPairs = [...keyValuePairs];
newPairs[index].value = e.target.value;
setKeyValuePairs(newPairs);
const expectedFormat = newPairs.reduce((acc, pair) => ({ ...acc, [pair.key]: pair.value }), {});
handleInputChange('expected_format', expectedFormat);
}}
/>
</div>
))}
<div className="key-value-input">
<Input
type="text"
placeholder="Key"
value={newKey}
onChange={(e) => setNewKey(e.target.value)}
/>
<Input
type="text"
placeholder="Value"
value={newValue}
onChange={(e) => setNewValue(e.target.value)}
/>
</div>
<Button onClick={handleAddProperty}>Add Property</Button>
</div>
)}
{error && <span className="error-message">{error}</span>}
</div>
);
}
if (schema.anyOf) {
const types = schema.anyOf.map((s: any) => s.type);
if (types.includes('string') && types.includes('null')) {
return (
<div key={fullKey} className="input-container">
{renderClickableInput(value, schema.placeholder || `Enter ${displayKey} (optional)`)}
{error && <span className="error-message">{error}</span>}
</div>
);
}
}
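  // anyOf with ['string', 'null'] is the usual JSON-schema encoding of an
  // optional string field (e.g. what Pydantic emits for Optional[str]), so it
  // is rendered as a plain clickable input with an '(optional)' placeholder.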
if (schema.allOf) {
return (
<div key={fullKey} className="object-input">
<strong>{displayKey}:</strong>
{schema.allOf[0].properties && Object.entries(schema.allOf[0].properties).map(([propKey, propSchema]: [string, any]) => (
<div key={`${fullKey}.${propKey}`} className="nested-input">
{renderInputField(propKey, propSchema, fullKey, propSchema.title || beautifyString(propKey))}
</div>
))}
</div>
);
}
if (schema.oneOf) {
return (
<div key={fullKey} className="object-input">
<strong>{displayKey}:</strong>
{schema.oneOf[0].properties && Object.entries(schema.oneOf[0].properties).map(([propKey, propSchema]: [string, any]) => (
<div key={`${fullKey}.${propKey}`} className="nested-input">
{renderInputField(propKey, propSchema, fullKey, propSchema.title || beautifyString(propKey))}
</div>
))}
</div>
);
}
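  // allOf/oneOf are handled shallowly: only the first subschema's properties
  // are rendered. Schemas that carry meaningful fields in later subschemas
  // would need a merge step here.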
switch (schema.type) {
case 'string':
if (schema.enum) {
return (
<div key={fullKey} className="input-container">
<select
value={value || ''}
onChange={(e) => handleInputChange(fullKey, e.target.value)}
className="select-input"
>
<option value="">Select {displayKey}</option>
{schema.enum.map((option: string) => (
<option key={option} value={option}>
{beautifyString(option)}
</option>
))}
</select>
{error && <span className="error-message">{error}</span>}
</div>
)
}
else if (schema.secret) {
return (<div key={fullKey} className="input-container">
{renderClickableInput(value, schema.placeholder || `Enter ${displayKey}`, true)}
{error && <span className="error-message">{error}</span>}
</div>)
}
else {
return (
<div key={fullKey} className="input-container">
{renderClickableInput(value, schema.placeholder || `Enter ${displayKey}`)}
{error && <span className="error-message">{error}</span>}
</div>
);
}
case 'boolean':
return (
<div key={key} className="input-container">
<label className="radio-label">
<input
type="radio"
value="true"
checked={data.hardcodedValues[key] === true}
onChange={() => handleInputChange(key, true)}
/>
True
</label>
<label className="radio-label">
<input
type="radio"
value="false"
checked={data.hardcodedValues[key] === false}
onChange={() => handleInputChange(key, false)}
/>
False
</label>
<div key={fullKey} className="input-container">
<select
value={value === undefined ? '' : value.toString()}
onChange={(e) => handleInputChange(fullKey, e.target.value === 'true')}
className="select-input"
>
<option value="">Select {displayKey}</option>
<option value="true">True</option>
<option value="false">False</option>
</select>
{error && <span className="error-message">{error}</span>}
</div>
);
case 'number':
case 'integer':
return (
<div key={key} className="input-container">
<div key={fullKey} className="input-container">
<input
type="number"
value={data.hardcodedValues[key] || ''}
onChange={(e) => handleInputChange(key, parseFloat(e.target.value))}
value={value || ''}
onChange={(e) => handleInputChange(fullKey, parseFloat(e.target.value))}
className="number-input"
/>
{error && <span className="error-message">{error}</span>}
@@ -237,143 +344,86 @@ const CustomNode: FC<NodeProps<CustomNodeData>> = ({ data, id }) => {
);
case 'array':
if (schema.items && schema.items.type === 'string') {
const arrayValues = data.hardcodedValues[key] || [];
const arrayValues = value || [];
return (
<div key={key} className="input-container">
<div key={fullKey} className="input-container">
{arrayValues.map((item: string, index: number) => (
<div key={`${key}-${index}`} className="array-item-container">
<div key={`${fullKey}.${index}`} className="array-item-container">
<input
type="text"
value={item}
onChange={(e) => handleArrayItemChange(key, index, e.target.value)}
onChange={(e) => handleInputChange(`${fullKey}.${index}`, e.target.value)}
className="array-item-input"
/>
<button onClick={() => removeArrayItem(key, index)} className="array-item-remove">
<Button onClick={() => handleInputChange(`${fullKey}.${index}`, '')} className="array-item-remove">
&times;
</button>
</Button>
</div>
))}
<button onClick={() => addArrayItem(key)} className="array-item-add">
<Button onClick={() => handleInputChange(fullKey, [...arrayValues, ''])} className="array-item-add">
Add Item
</button>
</Button>
{error && <span className="error-message">{error}</span>}
</div>
);
}
return null;
default:
return null;
return (
<div key={fullKey} className="input-container">
{renderClickableInput(value, schema.placeholder || `Enter ${beautifyString(displayKey)} (Complex)`)}
{error && <span className="error-message">{error}</span>}
</div>
);
}
};
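  // Any type without a dedicated editor above (objects without declared
  // properties, arrays of non-strings, etc.) falls through to the default
  // case and is edited as raw text via the '(Complex)' clickable input.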
const renderDynamicTextFields = () => {
const dynamicKeyPrefix = 'texts_$_';
const dynamicKeys = Object.keys(data.hardcodedValues).filter(key => key.startsWith(dynamicKeyPrefix));
return dynamicKeys.map((key, index) => (
<div key={key} className="input-container">
<div className="handle-container">
<Handle
type="target"
position={Position.Left}
id={key}
style={{ background: '#555', borderRadius: '50%' }}
/>
<span className="handle-label">{key}</span>
{!isHandleConnected(key) && (
<>
<input
type="text"
value={data.hardcodedValues[key]}
onChange={(e) => handleDynamicTextInputChange(key, e.target.value)}
className="dynamic-text-input"
/>
<button onClick={() => removeDynamicTextInput(key)} className="array-item-remove">
&times;
</button>
</>
)}
</div>
</div>
));
};
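  // Each dynamic text field gets its own left-side target handle, so a value
  // can be typed inline or supplied by an upstream connection; the inline
  // editor is hidden once the handle is connected.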
const validateInputs = () => {
const newErrors: { [key: string]: string | null } = {};
Object.keys(data.inputSchema.properties).forEach((key) => {
const value = data.hardcodedValues[key];
const schema = data.inputSchema.properties[key];
const error = validateInput(key, value, schema);
if (error) {
newErrors[key] = error;
}
});
const validateRecursive = (schema: any, parentKey: string = '') => {
Object.entries(schema.properties).forEach(([key, propSchema]: [string, any]) => {
const fullKey = parentKey ? `${parentKey}.${key}` : key;
const value = getValue(fullKey);
if (propSchema.type === 'object' && propSchema.properties) {
validateRecursive(propSchema, fullKey);
} else {
if (propSchema.required && !value) {
newErrors[fullKey] = `${fullKey} is required`;
}
}
});
};
validateRecursive(data.inputSchema);
setErrors(newErrors);
return Object.values(newErrors).every((error) => error === null);
};
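  // Nested fields are validated with dot-separated paths ('parent.child'),
  // matching the fullKey scheme used by renderInputField. Note that this
  // reads a per-property `required` flag rather than the JSON-schema
  // convention of a `required` array on the parent object.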
const handleSubmit = () => {
if (validateInputs()) {
console.log("Valid data:", data.hardcodedValues);
} else {
console.log("Invalid data:", errors);
}
};
return (
<div className="custom-node">
<div className="node-header">
<div className="node-title">{data.blockType || data.title}</div>
<button onClick={toggleProperties} className="toggle-button">
&#9776;
</button>
<div className={`custom-node dark-theme ${data.status === 'RUNNING' ? 'running' : data.status === 'COMPLETED' ? 'completed' : data.status === 'FAILED' ? 'failed' : ''}`}>
<div className="mb-2">
<div className="text-lg font-bold">{beautifyString(data.blockType?.replace(/Block$/, '') || data.title)}</div>
</div>
<div className="node-content">
<div className="input-section">
<div>
{data.inputSchema &&
Object.keys(data.inputSchema.properties).map((key) => (
<div key={key}>
{key !== 'texts' ? (
<div>
<div className="handle-container">
<Handle
type="target"
position={Position.Left}
id={key}
style={{ background: '#555', borderRadius: '50%' }}
/>
<span className="handle-label">{key}</span>
</div>
{!isHandleConnected(key) && renderInputField(key, data.inputSchema.properties[key])}
</div>
) : (
<div key={key} className="input-container">
<div className="handle-container">
<Handle
type="target"
position={Position.Left}
id={key}
style={{ background: '#555', borderRadius: '50%' }}
/>
<span className="handle-label">{key}</span>
</div>
{renderDynamicTextFields()}
<button onClick={addDynamicTextInput} className="array-item-add">
Add Text Input
</button>
</div>
)}
</div>
))}
Object.entries(data.inputSchema.properties).map(([key, schema]) => {
const isRequired = data.inputSchema.required?.includes(key);
return (isRequired || isAdvancedOpen) && (
<div key={key}>
<NodeHandle keyName={key} isConnected={isHandleConnected(key)} schema={schema} side="left" />
{renderInputField(key, schema, '', schema.title || beautifyString(key))}
</div>
);
})}
</div>
<div className="output-section">
{data.outputSchema && generateHandles(data.outputSchema, 'source')}
<div>
{data.outputSchema && generateOutputHandles(data.outputSchema)}
</div>
</div>
{isPropertiesOpen && (
<div className="node-properties">
<h4>Node Output</h4>
{isOutputOpen && (
<div className="node-output">
<p>
<strong>Status:</strong>{' '}
{typeof data.status === 'object' ? JSON.stringify(data.status) : data.status || 'N/A'}
@@ -386,15 +436,25 @@ const CustomNode: FC<NodeProps<CustomNodeData>> = ({ data, id }) => {
</p>
</div>
)}
<button onClick={handleSubmit}>Submit</button>
<div className="flex items-center mt-2.5">
<Switch onCheckedChange={toggleOutput} className='custom-switch' />
<span className='m-1 mr-4'>Output</span>
{hasOptionalFields() && (
<>
<Switch onCheckedChange={toggleAdvancedSettings} className='custom-switch' />
<span className='m-1'>Advanced</span>
</>
)}
</div>
<ModalComponent
isOpen={isModalOpen}
onClose={() => setIsModalOpen(false)}
onSave={handleModalSave}
value={modalValue}
key={activeKey}
/>
</div>
);
};
export default memo(CustomNode);

View File

@@ -11,103 +11,116 @@ import ReactFlow, {
OnConnect,
NodeTypes,
Connection,
EdgeTypes,
MarkerType,
} from 'reactflow';
import 'reactflow/dist/style.css';
import CustomNode from './CustomNode';
import './flow.css';
import AutoGPTServerAPI, { Block, Graph, ObjectSchema } from '@/lib/autogpt-server-api';
import { Button } from './ui/button';
import { Input } from './ui/input';
import { ChevronRight, ChevronLeft } from "lucide-react";
import { deepEquals, getTypeColor } from '@/lib/utils';
import { beautifyString } from '@/lib/utils';
import { CustomEdge, CustomEdgeData } from './CustomEdge';
import ConnectionLine from './ConnectionLine';
type Schema = {
type: string;
properties: { [key: string]: any };
required?: string[];
};
type CustomNodeData = {
blockType: string;
title: string;
inputSchema: Schema;
outputSchema: Schema;
inputSchema: ObjectSchema;
outputSchema: ObjectSchema;
hardcodedValues: { [key: string]: any };
setHardcodedValues: (values: { [key: string]: any }) => void;
connections: Array<{ source: string; sourceHandle: string; target: string; targetHandle: string }>;
isPropertiesOpen: boolean;
isOutputOpen: boolean;
status?: string;
output_data?: any;
block_id: string;
backend_id?: string;
};
type AvailableNode = {
id: string;
name: string;
description: string;
inputSchema: Schema;
outputSchema: Schema;
};
const Sidebar: React.FC<{ isOpen: boolean, availableNodes: Block[], addNode: (id: string, name: string) => void }> =
({ isOpen, availableNodes, addNode }) => {
const [searchQuery, setSearchQuery] = useState('');
interface ExecData {
node_id: string;
status: string;
output_data: any;
}
if (!isOpen) return null;
const Sidebar: React.FC<{isOpen: boolean, availableNodes: AvailableNode[], addNode: (id: string, name: string) => void}> =
({isOpen, availableNodes, addNode}) => {
const [searchQuery, setSearchQuery] = useState('');
const filteredNodes = availableNodes.filter(node =>
node.name.toLowerCase().includes(searchQuery.toLowerCase())
);
if (!isOpen) return null;
return (
<div className={`sidebar dark-theme ${isOpen ? 'open' : ''}`}>
<h3>Nodes</h3>
<Input
type="text"
placeholder="Search nodes..."
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
/>
{filteredNodes.map((node) => (
<div key={node.id} className="sidebarNodeRowStyle dark-theme">
<span>{beautifyString(node.name).replace(/Block$/, '')}</span>
<Button onClick={() => addNode(node.id, node.name)}>Add</Button>
</div>
))}
</div>
);
};
const filteredNodes = availableNodes.filter(node =>
node.name.toLowerCase().includes(searchQuery.toLowerCase())
);
return (
<div style={{
position: 'absolute',
left: 0,
top: 0,
bottom: 0,
width: '250px',
backgroundColor: '#333',
padding: '20px',
zIndex: 4,
overflowY: 'auto'
}}>
<h3 style={{color: '#fff'}}>Nodes</h3>
<input
type="text"
placeholder="Search nodes..."
style={{width: '100%', marginBottom: '10px', padding: '5px'}}
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
/>
{filteredNodes.map((node) => (
<div key={node.id} style={{marginBottom: '10px', display: 'flex', justifyContent: 'space-between', alignItems: 'center'}}>
<span style={{color: '#fff'}}>{node.name}</span>
<button onClick={() => addNode(node.id, node.name)}>Add</button>
</div>
))}
</div>
);
};
const Flow: React.FC = () => {
const FlowEditor: React.FC<{
flowID?: string;
template?: boolean;
className?: string;
}> = ({ flowID, template, className }) => {
const [nodes, setNodes] = useState<Node<CustomNodeData>[]>([]);
const [edges, setEdges] = useState<Edge[]>([]);
const [edges, setEdges] = useState<Edge<CustomEdgeData>[]>([]);
const [nodeId, setNodeId] = useState<number>(1);
const [availableNodes, setAvailableNodes] = useState<AvailableNode[]>([]);
const [agentId, setAgentId] = useState<string | null>(null);
const [availableNodes, setAvailableNodes] = useState<Block[]>([]);
const [isSidebarOpen, setIsSidebarOpen] = useState(true);
const [savedAgent, setSavedAgent] = useState<Graph | null>(null);
const [agentDescription, setAgentDescription] = useState<string>('');
const [agentName, setAgentName] = useState<string>('');
const apiUrl = 'http://localhost:8000';
const apiUrl = process.env.AGPT_SERVER_URL!;
const api = useMemo(() => new AutoGPTServerAPI(apiUrl), [apiUrl]);
useEffect(() => {
fetch(`${apiUrl}/blocks`)
.then(response => response.json())
.then(data => setAvailableNodes(data))
.catch(error => console.error('Error fetching available blocks:', error));
api.connectWebSocket()
.then(() => {
console.log('WebSocket connected');
api.onWebSocketMessage('execution_event', (data) => {
updateNodesWithExecutionData([data]);
});
})
.catch((error) => {
console.error('Failed to connect WebSocket:', error);
});
return () => {
api.disconnectWebSocket();
};
}, [api]);
useEffect(() => {
api.getBlocks()
.then(blocks => setAvailableNodes(blocks))
      .catch(error => console.error('Error fetching available blocks:', error));
}, []);
// Load existing graph
useEffect(() => {
    if (!flowID || availableNodes.length === 0) return;
(template ? api.getTemplate(flowID) : api.getGraph(flowID))
.then(graph => loadGraph(graph));
}, [flowID, template, availableNodes]);
const nodeTypes: NodeTypes = useMemo(() => ({ custom: CustomNode }), []);
const edgeTypes: EdgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
const onNodesChange: OnNodesChange = useCallback(
(changes) => setNodes((nds) => applyNodeChanges(changes, nds)),
@@ -119,33 +132,80 @@ const Flow: React.FC = () => {
[]
);
const onConnect: OnConnect = useCallback(
(connection: Connection) => {
setEdges((eds) => addEdge(connection, eds));
const getOutputType = (id: string, handleId: string) => {
const node = nodes.find((node) => node.id === id);
if (!node) return 'unknown';
const outputSchema = node.data.outputSchema;
if (!outputSchema) return 'unknown';
const outputType = outputSchema.properties[handleId].type;
return outputType;
}
const getNodePos = (id: string) => {
const node = nodes.find((node) => node.id === id);
if (!node) return 0;
return node.position;
}
const onConnect: OnConnect = (connection: Connection) => {
const edgeColor = getTypeColor(getOutputType(connection.source!, connection.sourceHandle!));
const sourcePos = getNodePos(connection.source!)
console.log('sourcePos', sourcePos);
setEdges((eds) => addEdge({
type: 'custom',
markerEnd: { type: MarkerType.ArrowClosed, strokeWidth: 2, color: edgeColor },
data: { edgeColor, sourcePos },
...connection
}, eds));
setNodes((nds) =>
nds.map((node) => {
if (node.id === connection.target || node.id === connection.source) {
return {
...node,
data: {
...node.data,
connections: [
...node.data.connections,
{
source: connection.source,
sourceHandle: connection.sourceHandle,
target: connection.target,
targetHandle: connection.targetHandle,
} as { source: string; sourceHandle: string; target: string; targetHandle: string },
],
},
};
}
return node;
})
);
}
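  // New edges are typed: the color comes from the source handle's output
  // type, and the connection is mirrored into both endpoints'
  // data.connections so inputs can detect connected handles.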
const onEdgesDelete = useCallback(
(edgesToDelete: Edge<CustomEdgeData>[]) => {
setNodes((nds) =>
nds.map((node) => {
if (node.id === connection.target) {
return {
...node,
data: {
...node.data,
connections: [
...node.data.connections,
{
source: connection.source,
sourceHandle: connection.sourceHandle,
target: connection.target,
targetHandle: connection.targetHandle,
} as { source: string; sourceHandle: string; target: string; targetHandle: string },
],
},
};
}
return node;
})
nds.map((node) => ({
...node,
data: {
...node.data,
connections: node.data.connections.filter(
(conn: any) =>
!edgesToDelete.some(
(edge) =>
edge.source === conn.source &&
edge.target === conn.target &&
edge.sourceHandle === conn.sourceHandle &&
edge.targetHandle === conn.targetHandle
)
),
},
}))
);
},
[setEdges, setNodes]
[setNodes]
);
const addNode = (blockId: string, nodeType: string) => {
@@ -173,7 +233,7 @@ const Flow: React.FC = () => {
));
},
connections: [],
isPropertiesOpen: false,
isOutputOpen: false,
block_id: blockId,
},
};
@@ -182,7 +242,53 @@ const Flow: React.FC = () => {
setNodeId((prevId) => prevId + 1);
};
const prepareNodeInputData = (node: Node<CustomNodeData>, allNodes: Node<CustomNodeData>[], allEdges: Edge[]) => {
function loadGraph(graph: Graph) {
setSavedAgent(graph);
setAgentName(graph.name);
setAgentDescription(graph.description);
setNodes(graph.nodes.map(node => {
const block = availableNodes.find(block => block.id === node.block_id)!;
const newNode = {
id: node.id,
type: 'custom',
position: { x: node.metadata.position.x, y: node.metadata.position.y },
data: {
block_id: block.id,
blockType: block.name,
title: `${block.name} ${node.id}`,
inputSchema: block.inputSchema,
outputSchema: block.outputSchema,
hardcodedValues: node.input_default,
setHardcodedValues: (values: { [key: string]: any; }) => {
setNodes((nds) => nds.map((node) => node.id === newNode.id
? { ...node, data: { ...node.data, hardcodedValues: values } }
: node
));
},
connections: [],
isOutputOpen: false,
},
};
return newNode;
}));
setEdges(graph.links.map(link => ({
id: `${link.source_id}_${link.source_name}_${link.sink_id}_${link.sink_name}`,
type: 'custom',
data: {
edgeColor: getTypeColor(getOutputType(link.source_id, link.source_name!)),
sourcePos: getNodePos(link.source_id)
},
markerEnd: { type: MarkerType.ArrowClosed, strokeWidth: 2, color: getTypeColor(getOutputType(link.source_id, link.source_name!)) },
source: link.source_id,
target: link.sink_id,
sourceHandle: link.source_name || undefined,
targetHandle: link.sink_name || undefined
}) as Edge<CustomEdgeData>));
}
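  // Edge ids are derived from the link itself
  // (`${source_id}_${source_name}_${sink_id}_${sink_name}`), so reloading a
  // graph yields stable ids; edges drawn in the editor instead get ids
  // generated by React Flow's addEdge().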
const prepareNodeInputData = (node: Node<CustomNodeData>, allNodes: Node<CustomNodeData>[], allEdges: Edge<CustomEdgeData>[]) => {
console.log("Preparing input data for node:", node.id, node.data.blockType);
const blockSchema = availableNodes.find(n => n.id === node.data.block_id)?.inputSchema;
@@ -192,130 +298,155 @@ const Flow: React.FC = () => {
return {};
}
let inputData: { [key: string]: any } = { ...node.data.hardcodedValues };
const getNestedData = (schema: ObjectSchema, values: { [key: string]: any }): { [key: string]: any } => {
let inputData: { [key: string]: any } = {};
// Get data from connected nodes
const incomingEdges = allEdges.filter(edge => edge.target === node.id);
incomingEdges.forEach(edge => {
const sourceNode = allNodes.find(n => n.id === edge.source);
if (sourceNode && sourceNode.data.output_data) {
const outputKey = Object.keys(sourceNode.data.output_data)[0]; // Assuming single output
inputData[edge.targetHandle as string] = sourceNode.data.output_data[outputKey];
if (schema.properties) {
Object.keys(schema.properties).forEach((key) => {
if (values[key] !== undefined) {
if (schema.properties[key].type === 'object') {
inputData[key] = getNestedData(schema.properties[key], values[key]);
} else {
inputData[key] = values[key];
}
}
});
}
});
// Filter out any inputs that are not in the block's schema
Object.keys(inputData).forEach(key => {
if (!blockSchema.properties[key]) {
delete inputData[key];
if (schema.additionalProperties) {
inputData = { ...inputData, ...values };
}
});
return inputData;
};
let inputData = getNestedData(blockSchema, node.data.hardcodedValues);
console.log(`Final prepared input for ${node.data.blockType} (${node.id}):`, inputData);
return inputData;
};
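  // getNestedData keeps only values the block's schema knows about, recursing
  // into typed objects and passing free-form (additionalProperties) objects
  // through unchanged. For example, with schema
  // { properties: { a: { type: 'string' } } } and values { a: 'x', stray: 'y' },
  // the prepared input is { a: 'x' }.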
async function saveAgent (asTemplate: boolean = false) {
setNodes((nds) =>
nds.map((node) => ({
...node,
data: {
...node.data,
status: undefined,
},
}))
);
await new Promise((resolve) => setTimeout(resolve, 100));
console.log("All nodes before formatting:", nodes);
    const blockIdToNodeIdMap: Record<string, string> = {};
const formattedNodes = nodes.map(node => {
nodes.forEach(node => {
const key = `${node.data.block_id}_${node.position.x}_${node.position.y}`;
blockIdToNodeIdMap[key] = node.id;
});
const inputDefault = prepareNodeInputData(node, nodes, edges);
const inputNodes = edges
.filter(edge => edge.target === node.id)
.map(edge => ({
name: edge.targetHandle || '',
node_id: edge.source,
}));
const outputNodes = edges
.filter(edge => edge.source === node.id)
.map(edge => ({
name: edge.sourceHandle || '',
node_id: edge.target,
}));
return {
id: node.id,
block_id: node.data.block_id,
input_default: inputDefault,
input_nodes: inputNodes,
output_nodes: outputNodes,
metadata: { position: node.position }
};
});
const links = edges.map(edge => ({
source_id: edge.source,
sink_id: edge.target,
source_name: edge.sourceHandle || '',
sink_name: edge.targetHandle || ''
}));
const payload = {
id: savedAgent?.id!,
name: agentName || 'Agent Name',
description: agentDescription || 'Agent Description',
nodes: formattedNodes,
links: links // Ensure this field is included
};
if (savedAgent && deepEquals(payload, savedAgent)) {
console.debug("No need to save: Graph is the same as version on server");
return;
} else {
console.debug("Saving new Graph version; old vs new:", savedAgent, payload);
}
const newSavedAgent = savedAgent
? await (savedAgent.is_template
? api.updateTemplate(savedAgent.id, payload)
: api.updateGraph(savedAgent.id, payload))
: await (asTemplate
? api.createTemplate(payload)
: api.createGraph(payload));
console.debug('Response from the API:', newSavedAgent);
setSavedAgent(newSavedAgent);
// Update the node IDs in the frontend
const updatedNodes = newSavedAgent.nodes.map(backendNode => {
const key = `${backendNode.block_id}_${backendNode.metadata.position.x}_${backendNode.metadata.position.y}`;
const frontendNodeId = blockIdToNodeIdMap[key];
const frontendNode = nodes.find(node => node.id === frontendNodeId);
return frontendNode
? {
...frontendNode,
position: backendNode.metadata.position,
data: {
...frontendNode.data,
backend_id: backendNode.id,
},
}
: null;
}).filter(node => node !== null);
setNodes(updatedNodes);
return newSavedAgent.id;
};
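  // Frontend node ids are re-mapped to backend ids by keying on
  // `${block_id}_${x}_${y}`, which assumes no two nodes of the same block
  // share an exact position at save time.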
const runAgent = async () => {
try {
console.log("All nodes before formatting:", nodes);
const formattedNodes = nodes.map(node => {
console.log("Formatting node:", node.id, node.data.blockType);
const inputDefault = prepareNodeInputData(node, nodes, edges);
const inputNodes = edges
.filter(edge => edge.target === node.id)
.map(edge => ({
name: edge.targetHandle || '',
node_id: edge.source,
}));
const outputNodes = edges
.filter(edge => edge.source === node.id)
.map(edge => ({
name: edge.sourceHandle || '',
node_id: edge.target,
}));
return {
id: node.id,
block_id: node.data.block_id,
input_default: inputDefault,
input_nodes: inputNodes,
output_nodes: outputNodes,
metadata: { position: node.position }
};
});
const payload = {
id: agentId || '',
name: 'Agent Name',
description: 'Agent Description',
nodes: formattedNodes,
};
console.log("Payload being sent to the API:", JSON.stringify(payload, null, 2));
const createResponse = await fetch(`${apiUrl}/graphs`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(payload),
});
if (!createResponse.ok) {
throw new Error(`HTTP error! Status: ${createResponse.status}`);
const newAgentId = await saveAgent();
if (!newAgentId) {
console.error('Error saving agent; aborting run');
return;
}
const createData = await createResponse.json();
const newAgentId = createData.id;
setAgentId(newAgentId);
console.log('Response from the API:', JSON.stringify(createData, null, 2));
const executeResponse = await fetch(`${apiUrl}/graphs/${newAgentId}/execute`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({}),
});
if (!executeResponse.ok) {
throw new Error(`HTTP error! Status: ${executeResponse.status}`);
}
const executeData = await executeResponse.json();
const runId = executeData.id;
const pollExecution = async () => {
const response = await fetch(`${apiUrl}/graphs/${newAgentId}/executions/${runId}`);
if (!response.ok) {
throw new Error(`HTTP error! Status: ${response.status}`);
}
const data = await response.json();
data.forEach(updateNodeData);
if (data.every((node: any) => node.status === 'COMPLETED')) {
console.log('All nodes completed execution');
} else {
setTimeout(pollExecution, 1000);
}
};
pollExecution();
api.subscribeToExecution(newAgentId);
api.runGraph(newAgentId);
} catch (error) {
console.error('Error running agent:', error);
}
};
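  // Running is save-then-execute: saveAgent() persists the graph,
  // subscribeToExecution() registers for 'execution_event' messages on the
  // WebSocket opened in the effect above, and runGraph() starts the run;
  // node status and output then arrive via updateNodesWithExecutionData
  // instead of polling.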
const updateNodesWithExecutionData = (executionData: any[]) => {
setNodes((nds) =>
nds.map((node) => {
const nodeExecution = executionData.find((exec) => exec.node_id === node.id);
const nodeExecution = executionData.find((exec) => exec.node_id === node.data.backend_id);
if (nodeExecution) {
return {
...node,
@@ -323,7 +454,7 @@ const Flow: React.FC = () => {
...node.data,
status: nodeExecution.status,
output_data: nodeExecution.output_data,
isPropertiesOpen: true,
isOutputOpen: true,
},
};
}
@@ -334,39 +465,22 @@ const Flow: React.FC = () => {
const toggleSidebar = () => setIsSidebarOpen(!isSidebarOpen);
const updateNodeData = (execData: ExecData) => {
setNodes((nds) =>
nds.map((node) => {
if (node.id === execData.node_id) {
return {
...node,
data: {
...node.data,
status: execData.status,
output_data: execData.output_data,
isPropertiesOpen: true, // Open the properties
},
};
}
return node;
})
);
};
return (
<div style={{ height: '100vh', width: '100%' }}>
<button
<div className={className}>
<Button
variant="outline"
size="icon"
onClick={toggleSidebar}
style={{
position: 'absolute',
left: isSidebarOpen ? '260px' : '10px',
top: '10px',
zIndex: 5,
transition: 'left 0.3s'
position: 'fixed',
left: isSidebarOpen ? '350px' : '10px',
zIndex: 10000,
backgroundColor: 'black',
color: 'white',
}}
>
{isSidebarOpen ? 'Hide Sidebar' : 'Show Sidebar'}
</button>
{isSidebarOpen ? <ChevronLeft className="h-4 w-4" /> : <ChevronRight className="h-4 w-4" />}
</Button>
<Sidebar isOpen={isSidebarOpen} availableNodes={availableNodes} addNode={addNode} />
<ReactFlow
nodes={nodes}
@@ -375,13 +489,39 @@ const Flow: React.FC = () => {
onEdgesChange={onEdgesChange}
onConnect={onConnect}
nodeTypes={nodeTypes}
edgeTypes={edgeTypes}
connectionLineComponent={ConnectionLine}
onEdgesDelete={onEdgesDelete}
deleteKeyCode={["Backspace", "Delete"]}
>
<div style={{ position: 'absolute', right: 10, top: 10, zIndex: 4 }}>
<button onClick={runAgent}>Run Agent</button>
<div style={{ position: 'absolute', right: 10, zIndex: 4 }}>
<Input
type="text"
placeholder="Agent Name"
value={agentName}
onChange={(e) => setAgentName(e.target.value)}
/>
<Input
type="text"
placeholder="Agent Description"
value={agentDescription}
onChange={(e) => setAgentDescription(e.target.value)}
/>
<div style={{ display: 'flex', flexDirection: 'column', gap: '10px' }}> {/* Added gap for spacing */}
<Button onClick={() => saveAgent(savedAgent?.is_template)}>
Save {savedAgent?.is_template ? "Template" : "Agent"}
</Button>
{!savedAgent?.is_template &&
<Button onClick={runAgent}>Save & Run Agent</Button>
}
{!savedAgent &&
<Button onClick={() => saveAgent(true)}>Save as Template</Button>
}
</div>
</div>
</ReactFlow>
</div>
);
};
export default Flow;
export default FlowEditor;

View File

@@ -1,5 +1,6 @@
import React, { FC } from 'react';
import './modal.css';
import React, { FC, useEffect, useRef } from 'react';
import { Button } from './ui/button';
import { Textarea } from './ui/textarea';
interface ModalProps {
isOpen: boolean;
@@ -10,6 +11,16 @@ interface ModalProps {
const ModalComponent: FC<ModalProps> = ({ isOpen, onClose, onSave, value }) => {
const [tempValue, setTempValue] = React.useState(value);
const textAreaRef = useRef<HTMLTextAreaElement>(null);
useEffect(() => {
if (isOpen) {
setTempValue(value);
if (textAreaRef.current) {
textAreaRef.current.select();
}
}
}, [isOpen, value]);
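  // Re-sync the draft value and select the existing text whenever the modal
  // opens, so a click-through edit can immediately overwrite the old value.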
const handleSave = () => {
onSave(tempValue);
@@ -21,16 +32,18 @@ const ModalComponent: FC<ModalProps> = ({ isOpen, onClose, onSave, value }) => {
}
return (
<div className="modal-overlay">
<div className="modal">
<textarea
className="modal-textarea"
<div className="nodrag fixed inset-0 bg-white bg-opacity-60 flex justify-center items-center">
<div className="bg-white p-5 rounded-lg w-[500px] max-w-[90%]">
        <h1 className="text-center">Enter input text</h1>
<Textarea
ref={textAreaRef}
className="w-full h-[200px] p-2.5 rounded border border-[#dfdfdf] text-black bg-[#dfdfdf]"
value={tempValue}
onChange={(e) => setTempValue(e.target.value)}
/>
<div className="modal-actions">
<button onClick={onClose}>Cancel</button>
<button onClick={handleSave}>Save</button>
<div className="flex justify-end gap-2.5 mt-2.5">
<Button onClick={onClose}>Cancel</Button>
<Button onClick={handleSave}>Save</Button>
</div>
</div>
</div>

View File

@@ -0,0 +1,74 @@
import { BlockSchema } from "@/lib/types";
import { beautifyString, getTypeBgColor, getTypeTextColor } from "@/lib/utils";
import { FC } from "react";
import { Handle, Position } from "reactflow";
import SchemaTooltip from "./SchemaTooltip";
type HandleProps = {
keyName: string,
schema: BlockSchema,
isConnected: boolean,
side: 'left' | 'right'
}
const NodeHandle: FC<HandleProps> = ({ keyName, isConnected, schema, side }) => {
const typeName: Record<string, string> = {
string: 'text',
number: 'number',
boolean: 'true/false',
object: 'complex',
array: 'list',
null: 'null',
};
const typeClass = `text-sm ${getTypeTextColor(schema.type)} ${side === 'left' ? 'text-left' : 'text-right'}`;
const label = (
<div className="flex flex-col flex-grow">
<span className="text-m text-gray-900 -mb-1 green">{schema.title || beautifyString(keyName)}</span>
<span className={typeClass}>{typeName[schema.type]}</span>
</div>
);
const dot = (
<div className={`w-4 h-4 m-1 ${isConnected ? getTypeBgColor(schema.type) : 'bg-gray-600'} rounded-full transition-colors duration-100 group-hover:bg-gray-300`} />
);
if (side === 'left') {
return (
<div key={keyName} className="handle-container">
<Handle
type="target"
position={Position.Left}
id={keyName}
className='group -ml-[29px]'
>
<div className="pointer-events-none flex items-center">
{dot}
{label}
</div>
</Handle>
<SchemaTooltip schema={schema} />
</div>
)
} else {
return (
<div key={keyName} className="handle-container justify-end">
<Handle
type="source"
position={Position.Right}
id={keyName}
className='group -mr-[29px]'
>
<div className="pointer-events-none flex items-center">
{label}
{dot}
</div>
</Handle>
</div >
)
}
}
export default NodeHandle;

View File

@@ -0,0 +1,30 @@
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
import { BlockSchema } from "@/lib/types";
import { Info } from 'lucide-react';
import ReactMarkdown from 'react-markdown';
const SchemaTooltip: React.FC<{ schema: BlockSchema }> = ({ schema }) => {
if (!schema.description) return null;
return (
<TooltipProvider delayDuration={400}>
<Tooltip>
<TooltipTrigger asChild>
<Info className="p-1 rounded-full hover:bg-gray-300" size={24} />
</TooltipTrigger>
<TooltipContent className="max-w-xs tooltip-content">
<ReactMarkdown components={{
a: ({ node, ...props }) => <a className="text-blue-400 underline" {...props} />,
}}>{schema.description}</ReactMarkdown>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)
}
export default SchemaTooltip;

View File

@@ -0,0 +1,180 @@
import { z } from "zod"
import { useForm } from "react-hook-form"
import { zodResolver } from "@hookform/resolvers/zod"
import React, { useState } from "react"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { Button } from "@/components/ui/button"
import { Switch } from "@/components/ui/switch"
import { Textarea } from "@/components/ui/textarea"
import AutoGPTServerAPI, { Graph, GraphCreatable } from "@/lib/autogpt-server-api"
import { cn } from "@/lib/utils"
import { EnterIcon } from "@radix-ui/react-icons"
const formSchema = z.object({
agentFile: z.instanceof(File),
agentName: z.string().min(1, "Agent name is required"),
agentDescription: z.string(),
importAsTemplate: z.boolean(),
})
export const AgentImportForm: React.FC<React.FormHTMLAttributes<HTMLFormElement>> = (
{ className, ...props }
) => {
const [agentObject, setAgentObject] = useState<GraphCreatable | null>(null)
const api = new AutoGPTServerAPI()
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
defaultValues: {
agentName: "",
agentDescription: "",
importAsTemplate: false,
},
})
function onSubmit(values: z.infer<typeof formSchema>) {
if (!agentObject) {
form.setError("root", { message: "No Agent object to save" })
return
}
const payload: GraphCreatable = {
...agentObject,
name: values.agentName,
description: values.agentDescription,
is_active: !values.importAsTemplate,
is_template: values.importAsTemplate,
};
(values.importAsTemplate ? api.createTemplate(payload) : api.createGraph(payload))
.then((response) => {
const qID = values.importAsTemplate ? "templateID" : "flowID";
window.location.href = `/build?${qID}=${response.id}`;
})
.catch(error => {
const entity_type = values.importAsTemplate ? 'template' : 'agent';
form.setError("root", { message: `Could not create ${entity_type}: ${error}` });
})
}
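  // On success the user is redirected straight into the builder with the new
  // entity preloaded, using `templateID` or `flowID` as the query parameter
  // depending on the import mode.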
return (
<Form {...form}>
<form
onSubmit={form.handleSubmit(onSubmit)}
className={cn("space-y-4", className)}
{...props}
>
<FormField
control={form.control}
name="agentFile"
render={({ field }) => (
<FormItem>
<FormLabel>Agent file</FormLabel>
<FormControl className="cursor-pointer">
<Input
type="file"
accept="application/json"
onChange={(e) => {
const file = e.target.files?.[0];
if (file) {
field.onChange(file)
const reader = new FileReader();
// Attach parser to file reader
reader.onload = (event) => {
try {
const obj = JSON.parse(
event.target?.result as string
);
if (
!["name", "description", "nodes", "links"]
.every(key => !!obj[key])
) {
throw new Error(
"Invalid agent object in file: "
+ JSON.stringify(obj, null, 2)
);
}
const agent = obj as Graph;
setAgentObject(agent);
form.setValue("agentName", agent.name);
form.setValue("agentDescription", agent.description);
form.setValue("importAsTemplate", agent.is_template);
} catch (error) {
console.error("Error loading agent file:", error);
}
};
// Load file
reader.readAsText(file);
}
}}
/>
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="agentName"
disabled={!agentObject}
render={({ field }) => (
<FormItem>
<FormLabel>Agent name</FormLabel>
<FormControl>
<Input {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="agentDescription"
disabled={!agentObject}
render={({ field }) => (
<FormItem>
<FormLabel>Agent description</FormLabel>
<FormControl>
<Textarea {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="importAsTemplate"
disabled={!agentObject}
render={({ field }) => (
<FormItem>
<FormLabel>Import as</FormLabel>
<FormControl>
<div className="flex space-x-2 items-center">
<span className={field.value ? "text-gray-400 dark:text-gray-600" : ""}>Agent</span>
<Switch
disabled={field.disabled}
checked={field.value}
onCheckedChange={field.onChange}
/>
<span className={field.value ? "" : "text-gray-400 dark:text-gray-600"}>Template</span>
</div>
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<Button type="submit" className="w-full" disabled={!agentObject}>
<EnterIcon className="mr-2" /> Import & Edit
</Button>
</form>
</Form>
)
}

View File

@@ -1,56 +1,38 @@
.custom-node {
padding: 20px;
border: 2px solid #fff;
border-radius: 20px;
background: #333;
color: #e0e0e0;
width: 250px;
}
.node-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 10px;
}
.node-title {
font-size: 18px;
font-weight: bold;
}
.toggle-button {
background: transparent;
border: none;
cursor: pointer;
color: #e0e0e0;
padding: 15px;
border: 3px solid #4b5563;
border-radius: 12px;
background: #ffffff; /* White background */
color: #000000;
width: 500px;
box-sizing: border-box;
transition: border-color 0.3s ease-in-out;
}
.node-content {
display: flex;
flex-direction: row;
justify-content: space-between;
gap: 20px;
}
.input-section {
flex: 1;
}
.output-section {
flex: 1;
align-items: flex-start;
gap: 1px;
}
.handle-container {
display: flex;
align-items: center;
position: relative;
margin-bottom: 5px;
margin-bottom: 0px;
padding: 5px;
min-height: 44px;
width: 100%;
height: 100%;
}
.handle-label {
color: #e0e0e0;
margin-left: 10px;
.react-flow__handle {
background: transparent;
width: auto;
height: auto;
border: 0;
position: relative;
transform: none;
}
.input-container {
@@ -59,35 +41,50 @@
.clickable-input {
padding: 5px;
width: 325px;
border-radius: 4px;
border: 1px solid #555;
background: #444;
color: #e0e0e0;
background: #ffffff;
border: 1px solid #d1d1d1;
color: #000000;
cursor: pointer;
word-break: break-all;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
position: relative;
}
.clickable-input span {
display: inline-block;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
max-width: calc(100% - 100px);
vertical-align: middle;
}
.select-input {
width: 100%;
padding: 5px;
border-radius: 4px;
border: 1px solid #555;
background: #444;
color: #e0e0e0;
border: 1px solid #000;
background: #fff;
color: #000;
}
.radio-label {
display: block;
margin: 5px 0;
color: #e0e0e0;
color: #000;
}
.number-input {
width: 100%;
padding: 5px;
border-radius: 4px;
border: 1px solid #555;
background: #444;
color: #e0e0e0;
border: 1px solid #000;
background: #fff;
color: #000;
}
.array-item-container {
@@ -100,9 +97,9 @@
flex-grow: 1;
padding: 5px;
border-radius: 4px;
border: 1px solid #555;
background: #444;
color: #e0e0e0;
border: 1px solid #000;
background: #fff;
color: #000;
}
.array-item-remove {
@@ -125,11 +122,14 @@
margin-top: 5px;
}
.node-properties {
margin-top: 20px;
background: #444;
.node-output {
margin-top: 5px;
margin-bottom: 5px;
background: #fff;
border: 1px solid #000; /* Border for output section */
padding: 10px;
border-radius: 10px;
width: 100%;
}
.error-message {
@@ -137,3 +137,41 @@
font-size: 12px;
margin-top: 5px;
}
.object-input {
margin-left: 10px;
border-left: 1px solid #000; /* Border for nested inputs */
padding-left: 10px;
}
.nested-input {
margin-top: 5px;
}
.key-value-input {
display: flex;
gap: 5px;
align-items: center;
margin-bottom: 5px;
}
.key-value-input input {
flex-grow: 1;
}
/* Styles for node states */
.completed {
border-color: #27ae60; /* Green border for completed nodes */
}
.running {
border-color: #f39c12; /* Orange border for running nodes */
}
.failed {
border-color: #c0392b; /* Red border for failed nodes */
}
.custom-switch {
padding-left: 2px;
}

View File

@@ -17,8 +17,8 @@ code {
}
button {
background-color: #444;
color: #e0e0e0;
background-color: #ffffff;
color: #000000;
padding: 10px;
border: none;
border-radius: 4px;
@@ -31,8 +31,8 @@ button:hover {
}
input, textarea {
background-color: #333;
color: #e0e0e0;
background-color: #ffffff;
color: #000000;
border: 1px solid #555;
padding: 8px;
border-radius: 4px;
@@ -53,11 +53,11 @@ input::placeholder, textarea::placeholder {
bottom: auto;
margin-right: -50%;
transform: translate(-50%, -50%);
background: #333;
background: #ffffff;
padding: 20px;
border: 1px solid #ccc;
border-radius: 4px;
color: #e0e0e0;
color: #000000;
}
.overlay {
@@ -91,13 +91,14 @@ input::placeholder, textarea::placeholder {
top: 0;
left: -600px;
width: 350px;
height: 100%;
background-color: #333;
color: #fff;
height: calc(100vh - 68px); /* Full height minus top offset */
background-color: #ffffff;
color: #000000;
padding: 20px;
transition: left 0.3s ease;
z-index: 1000;
overflow-y: auto;
margin-top: 68px; /* Margin to push content below the top fixed area */
}
.sidebar.open {
@@ -105,16 +106,21 @@ input::placeholder, textarea::placeholder {
}
.sidebar h3 {
margin: 0 0 20px;
margin: 0 0 10px;
}
.sidebar input {
margin: 0 0 10px;
}
.sidebarNodeRowStyle {
display: flex;
justify-content: space-between;
align-items: center;
background-color: #444;
background-color: #e2e2e2;
padding: 10px;
border-radius: 4px;
margin-bottom: 10px;
border-radius: 10px;
cursor: grab;
}

View File

@@ -1,34 +0,0 @@
.modal-overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(0, 0, 0, 0.6);
display: flex;
justify-content: center;
align-items: center;
}
.modal {
background: #fff;
padding: 20px;
border-radius: 8px;
width: 500px;
max-width: 90%;
}
.modal-textarea {
width: 100%;
height: 200px;
padding: 10px;
border-radius: 4px;
border: 1px solid #ccc;
}
.modal-actions {
display: flex;
justify-content: flex-end;
gap: 10px;
margin-top: 10px;
}

View File

@@ -0,0 +1,50 @@
"use client"
import * as React from "react"
import * as AvatarPrimitive from "@radix-ui/react-avatar"
import { cn } from "@/lib/utils"
const Avatar = React.forwardRef<
React.ElementRef<typeof AvatarPrimitive.Root>,
React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>
>(({ className, ...props }, ref) => (
<AvatarPrimitive.Root
ref={ref}
className={cn(
"relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
className
)}
{...props}
/>
))
Avatar.displayName = AvatarPrimitive.Root.displayName
const AvatarImage = React.forwardRef<
React.ElementRef<typeof AvatarPrimitive.Image>,
React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Image>
>(({ className, ...props }, ref) => (
<AvatarPrimitive.Image
ref={ref}
className={cn("aspect-square h-full w-full", className)}
{...props}
/>
))
AvatarImage.displayName = AvatarPrimitive.Image.displayName
const AvatarFallback = React.forwardRef<
React.ElementRef<typeof AvatarPrimitive.Fallback>,
React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>
>(({ className, ...props }, ref) => (
<AvatarPrimitive.Fallback
ref={ref}
className={cn(
"flex h-full w-full items-center justify-center rounded-full bg-neutral-100 dark:bg-neutral-800",
className
)}
{...props}
/>
))
AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName
export { Avatar, AvatarImage, AvatarFallback }

View File

@@ -0,0 +1,36 @@
import * as React from "react"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
const badgeVariants = cva(
"inline-flex items-center rounded-md border border-neutral-200 px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-neutral-950 focus:ring-offset-2 dark:border-neutral-800 dark:focus:ring-neutral-300 cursor-default",
{
variants: {
variant: {
default:
"border-transparent bg-neutral-900 text-neutral-50 shadow dark:bg-neutral-50 dark:text-neutral-900",
secondary:
"border-transparent bg-neutral-100 text-neutral-900 dark:bg-neutral-800 dark:text-neutral-50",
destructive:
"border-transparent bg-red-500 text-neutral-50 shadow dark:bg-red-900 dark:text-neutral-50",
outline: "text-neutral-950 dark:text-neutral-50",
},
},
defaultVariants: {
variant: "default",
},
}
)
export interface BadgeProps
extends React.HTMLAttributes<HTMLDivElement>,
VariantProps<typeof badgeVariants> {}
function Badge({ className, variant, ...props }: BadgeProps) {
return (
<div className={cn(badgeVariants({ variant }), className)} {...props} />
)
}
export { Badge, badgeVariants }

View File

@@ -0,0 +1,57 @@
import * as React from "react"
import { Slot } from "@radix-ui/react-slot"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
const buttonVariants = cva(
"inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-gray-950 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-gray-300",
{
variants: {
variant: {
default:
"bg-gray-900 text-gray-50 shadow hover:bg-gray-900/90 dark:bg-gray-50 dark:text-gray-900 dark:hover:bg-gray-50/90",
destructive:
"bg-red-500 text-gray-50 shadow-sm hover:bg-red-500/90 dark:bg-red-900 dark:text-gray-50 dark:hover:bg-red-900/90",
outline:
"border border-gray-200 bg-white shadow-sm hover:bg-gray-100 hover:text-gray-900 dark:border-gray-800 dark:bg-gray-950 dark:hover:bg-gray-800 dark:hover:text-gray-50",
secondary:
"bg-gray-100 text-gray-900 shadow-sm hover:bg-gray-100/80 dark:bg-gray-800 dark:text-gray-50 dark:hover:bg-gray-800/80",
ghost: "hover:bg-gray-100 hover:text-gray-900 dark:hover:bg-gray-800 dark:hover:text-gray-50 dark:text-white",
link: "text-gray-900 underline-offset-4 hover:underline dark:text-gray-50",
},
size: {
default: "h-9 px-4 py-2",
sm: "h-8 rounded-md px-3 text-xs",
lg: "h-10 rounded-md px-8",
icon: "h-9 w-9",
},
},
defaultVariants: {
variant: "default",
size: "default",
},
}
)
export interface ButtonProps
extends React.ButtonHTMLAttributes<HTMLButtonElement>,
VariantProps<typeof buttonVariants> {
asChild?: boolean
}
const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
({ className, variant, size, asChild = false, ...props }, ref) => {
const Comp = asChild ? Slot : "button"
return (
<Comp
className={cn(buttonVariants({ variant, size, className }))}
ref={ref}
{...props}
/>
)
}
)
Button.displayName = "Button"
export { Button, buttonVariants }

View File

@@ -0,0 +1,72 @@
"use client"
import * as React from "react"
import { ChevronLeftIcon, ChevronRightIcon } from "@radix-ui/react-icons"
import { DayPicker } from "react-day-picker"
import { cn } from "@/lib/utils"
import { buttonVariants } from "@/components/ui/button"
export type CalendarProps = React.ComponentProps<typeof DayPicker>
function Calendar({
className,
classNames,
showOutsideDays = true,
...props
}: CalendarProps) {
return (
<DayPicker
showOutsideDays={showOutsideDays}
className={cn("p-3", className)}
classNames={{
months: "flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",
month: "space-y-4",
caption: "flex justify-center pt-1 relative items-center",
caption_label: "text-sm font-medium",
nav: "space-x-1 flex items-center",
nav_button: cn(
buttonVariants({ variant: "outline" }),
"h-7 w-7 bg-transparent p-0 opacity-50 hover:opacity-100"
),
nav_button_previous: "absolute left-1",
nav_button_next: "absolute right-1",
table: "w-full border-collapse space-y-1",
head_row: "flex",
head_cell:
"text-neutral-500 rounded-md w-8 font-normal text-[0.8rem] dark:text-neutral-400",
row: "flex w-full mt-2",
cell: cn(
"relative p-0 text-center text-sm focus-within:relative focus-within:z-20 [&:has([aria-selected])]:bg-neutral-100 [&:has([aria-selected].day-outside)]:bg-neutral-100/50 [&:has([aria-selected].day-range-end)]:rounded-r-md dark:[&:has([aria-selected])]:bg-neutral-800 dark:[&:has([aria-selected].day-outside)]:bg-neutral-800/50",
props.mode === "range"
? "[&:has(>.day-range-end)]:rounded-r-md [&:has(>.day-range-start)]:rounded-l-md first:[&:has([aria-selected])]:rounded-l-md last:[&:has([aria-selected])]:rounded-r-md"
: "[&:has([aria-selected])]:rounded-md"
),
day: cn(
buttonVariants({ variant: "ghost" }),
"h-8 w-8 p-0 font-normal aria-selected:opacity-100"
),
day_range_start: "day-range-start",
day_range_end: "day-range-end",
day_selected:
"bg-neutral-900 text-neutral-50 hover:bg-neutral-900 hover:text-neutral-50 focus:bg-neutral-900 focus:text-neutral-50 dark:bg-neutral-50 dark:text-neutral-900 dark:hover:bg-neutral-50 dark:hover:text-neutral-900 dark:focus:bg-neutral-50 dark:focus:text-neutral-900",
day_today: "bg-neutral-100 text-neutral-900 dark:bg-neutral-800 dark:text-neutral-50",
day_outside:
"day-outside text-neutral-500 opacity-50 aria-selected:bg-neutral-100/50 aria-selected:text-neutral-500 aria-selected:opacity-30 dark:text-neutral-400 dark:aria-selected:bg-neutral-800/50 dark:aria-selected:text-neutral-400",
day_disabled: "text-neutral-500 opacity-50 dark:text-neutral-400",
day_range_middle:
"aria-selected:bg-neutral-100 aria-selected:text-neutral-900 dark:aria-selected:bg-neutral-800 dark:aria-selected:text-neutral-50",
day_hidden: "invisible",
...classNames,
}}
components={{
IconLeft: ({ ...props }) => <ChevronLeftIcon className="h-4 w-4" />,
IconRight: ({ ...props }) => <ChevronRightIcon className="h-4 w-4" />,
}}
{...props}
/>
)
}
Calendar.displayName = "Calendar"
export { Calendar }

View File

@@ -0,0 +1,76 @@
import * as React from "react"
import { cn } from "@/lib/utils"
const Card = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(({ className, ...props }, ref) => (
<div
ref={ref}
className={cn(
"rounded-xl border border-neutral-200 bg-white text-neutral-950 shadow dark:border-neutral-800 dark:bg-neutral-950 dark:text-neutral-50",
className
)}
{...props}
/>
))
Card.displayName = "Card"
const CardHeader = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(({ className, ...props }, ref) => (
<div
ref={ref}
className={cn("flex flex-col space-y-1.5 p-6", className)}
{...props}
/>
))
CardHeader.displayName = "CardHeader"
const CardTitle = React.forwardRef<
HTMLParagraphElement,
React.HTMLAttributes<HTMLHeadingElement>
>(({ className, ...props }, ref) => (
<h3
ref={ref}
className={cn("font-semibold leading-none tracking-tight", className)}
{...props}
/>
))
CardTitle.displayName = "CardTitle"
const CardDescription = React.forwardRef<
HTMLParagraphElement,
React.HTMLAttributes<HTMLParagraphElement>
>(({ className, ...props }, ref) => (
<p
ref={ref}
className={cn("text-sm text-neutral-500 dark:text-neutral-400", className)}
{...props}
/>
))
CardDescription.displayName = "CardDescription"
const CardContent = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(({ className, ...props }, ref) => (
<div ref={ref} className={cn("p-6 pt-0", className)} {...props} />
))
CardContent.displayName = "CardContent"
const CardFooter = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(({ className, ...props }, ref) => (
<div
ref={ref}
className={cn("flex items-center p-6 pt-0", className)}
{...props}
/>
))
CardFooter.displayName = "CardFooter"
export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }

View File

@@ -0,0 +1,122 @@
"use client"
import * as React from "react"
import * as DialogPrimitive from "@radix-ui/react-dialog"
import { Cross2Icon } from "@radix-ui/react-icons"
import { cn } from "@/lib/utils"
const Dialog = DialogPrimitive.Root
const DialogTrigger = DialogPrimitive.Trigger
const DialogPortal = DialogPrimitive.Portal
const DialogClose = DialogPrimitive.Close
const DialogOverlay = React.forwardRef<
React.ElementRef<typeof DialogPrimitive.Overlay>,
React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
>(({ className, ...props }, ref) => (
<DialogPrimitive.Overlay
ref={ref}
className={cn(
"fixed inset-0 z-50 bg-black/80 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
className
)}
{...props}
/>
))
DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
const DialogContent = React.forwardRef<
React.ElementRef<typeof DialogPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
>(({ className, children, ...props }, ref) => (
<DialogPortal>
<DialogOverlay />
<DialogPrimitive.Content
ref={ref}
className={cn(
"fixed left-[50%] top-[50%] z-50 grid w-full max-w-lg translate-x-[-50%] translate-y-[-50%] gap-4 border border-neutral-200 bg-white p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%] data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%] sm:rounded-lg dark:border-neutral-800 dark:bg-neutral-950",
className
)}
{...props}
>
{children}
<DialogPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-white transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-neutral-950 focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-neutral-100 data-[state=open]:text-neutral-500 dark:ring-offset-neutral-950 dark:focus:ring-neutral-300 dark:data-[state=open]:bg-neutral-800 dark:data-[state=open]:text-neutral-400">
<Cross2Icon className="h-4 w-4" />
<span className="sr-only">Close</span>
</DialogPrimitive.Close>
</DialogPrimitive.Content>
</DialogPortal>
))
DialogContent.displayName = DialogPrimitive.Content.displayName
const DialogHeader = ({
className,
...props
}: React.HTMLAttributes<HTMLDivElement>) => (
<div
className={cn(
"flex flex-col space-y-1.5 text-center sm:text-left",
className
)}
{...props}
/>
)
DialogHeader.displayName = "DialogHeader"
const DialogFooter = ({
className,
...props
}: React.HTMLAttributes<HTMLDivElement>) => (
<div
className={cn(
"flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2",
className
)}
{...props}
/>
)
DialogFooter.displayName = "DialogFooter"
const DialogTitle = React.forwardRef<
React.ElementRef<typeof DialogPrimitive.Title>,
React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
>(({ className, ...props }, ref) => (
<DialogPrimitive.Title
ref={ref}
className={cn(
"text-lg font-semibold leading-none tracking-tight",
className
)}
{...props}
/>
))
DialogTitle.displayName = DialogPrimitive.Title.displayName
const DialogDescription = React.forwardRef<
React.ElementRef<typeof DialogPrimitive.Description>,
React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
>(({ className, ...props }, ref) => (
<DialogPrimitive.Description
ref={ref}
className={cn("text-sm text-neutral-500 dark:text-neutral-400", className)}
{...props}
/>
))
DialogDescription.displayName = DialogPrimitive.Description.displayName
export {
Dialog,
DialogPortal,
DialogOverlay,
DialogTrigger,
DialogClose,
DialogContent,
DialogHeader,
DialogFooter,
DialogTitle,
DialogDescription,
}

View File

@@ -0,0 +1,205 @@
"use client"
import * as React from "react"
import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu"
import {
CheckIcon,
ChevronRightIcon,
DotFilledIcon,
} from "@radix-ui/react-icons"
import { cn } from "@/lib/utils"
const DropdownMenu = DropdownMenuPrimitive.Root
const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger
const DropdownMenuGroup = DropdownMenuPrimitive.Group
const DropdownMenuPortal = DropdownMenuPrimitive.Portal
const DropdownMenuSub = DropdownMenuPrimitive.Sub
const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup
const DropdownMenuSubTrigger = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.SubTrigger>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubTrigger> & {
inset?: boolean
}
>(({ className, inset, children, ...props }, ref) => (
<DropdownMenuPrimitive.SubTrigger
ref={ref}
className={cn(
"flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none focus:bg-neutral-100 data-[state=open]:bg-neutral-100 dark:focus:bg-neutral-800 dark:data-[state=open]:bg-neutral-800",
inset && "pl-8",
className
)}
{...props}
>
{children}
<ChevronRightIcon className="ml-auto h-4 w-4" />
</DropdownMenuPrimitive.SubTrigger>
))
DropdownMenuSubTrigger.displayName =
DropdownMenuPrimitive.SubTrigger.displayName
const DropdownMenuSubContent = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.SubContent>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent>
>(({ className, ...props }, ref) => (
<DropdownMenuPrimitive.SubContent
ref={ref}
className={cn(
"z-50 min-w-[8rem] overflow-hidden rounded-md border border-neutral-200 bg-white p-1 text-neutral-950 shadow-lg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:border-neutral-800 dark:bg-neutral-950 dark:text-neutral-50",
className
)}
{...props}
/>
))
DropdownMenuSubContent.displayName =
DropdownMenuPrimitive.SubContent.displayName
const DropdownMenuContent = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
>(({ className, sideOffset = 4, ...props }, ref) => (
<DropdownMenuPrimitive.Portal>
<DropdownMenuPrimitive.Content
ref={ref}
sideOffset={sideOffset}
className={cn(
"z-50 min-w-[8rem] overflow-hidden rounded-md border border-neutral-200 bg-white p-1 text-neutral-950 shadow-md dark:border-neutral-800 dark:bg-neutral-950 dark:text-neutral-50",
"data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
className
)}
{...props}
/>
</DropdownMenuPrimitive.Portal>
))
DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName
const DropdownMenuItem = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Item>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & {
inset?: boolean
}
>(({ className, inset, ...props }, ref) => (
<DropdownMenuPrimitive.Item
ref={ref}
className={cn(
"relative flex cursor-pointer select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-neutral-100 focus:text-neutral-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-neutral-800 dark:focus:text-neutral-50",
inset && "pl-8",
className
)}
{...props}
/>
))
DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName
const DropdownMenuCheckboxItem = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.CheckboxItem>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.CheckboxItem>
>(({ className, children, checked, ...props }, ref) => (
<DropdownMenuPrimitive.CheckboxItem
ref={ref}
className={cn(
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-neutral-100 focus:text-neutral-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-neutral-800 dark:focus:text-neutral-50",
className
)}
checked={checked}
{...props}
>
<span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
<DropdownMenuPrimitive.ItemIndicator>
<CheckIcon className="h-4 w-4" />
</DropdownMenuPrimitive.ItemIndicator>
</span>
{children}
</DropdownMenuPrimitive.CheckboxItem>
))
DropdownMenuCheckboxItem.displayName =
DropdownMenuPrimitive.CheckboxItem.displayName
const DropdownMenuRadioItem = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.RadioItem>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.RadioItem>
>(({ className, children, ...props }, ref) => (
<DropdownMenuPrimitive.RadioItem
ref={ref}
className={cn(
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-neutral-100 focus:text-neutral-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-neutral-800 dark:focus:text-neutral-50",
className
)}
{...props}
>
<span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
<DropdownMenuPrimitive.ItemIndicator>
<DotFilledIcon className="h-4 w-4 fill-current" />
</DropdownMenuPrimitive.ItemIndicator>
</span>
{children}
</DropdownMenuPrimitive.RadioItem>
))
DropdownMenuRadioItem.displayName = DropdownMenuPrimitive.RadioItem.displayName
const DropdownMenuLabel = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Label>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & {
inset?: boolean
}
>(({ className, inset, ...props }, ref) => (
<DropdownMenuPrimitive.Label
ref={ref}
className={cn(
"px-2 py-1.5 text-sm font-semibold",
inset && "pl-8",
className
)}
{...props}
/>
))
DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName
const DropdownMenuSeparator = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Separator>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator>
>(({ className, ...props }, ref) => (
<DropdownMenuPrimitive.Separator
ref={ref}
className={cn("-mx-1 my-1 h-px bg-neutral-100 dark:bg-neutral-800", className)}
{...props}
/>
))
DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName
const DropdownMenuShortcut = ({
className,
...props
}: React.HTMLAttributes<HTMLSpanElement>) => {
return (
<span
className={cn("ml-auto text-xs tracking-widest opacity-60", className)}
{...props}
/>
)
}
DropdownMenuShortcut.displayName = "DropdownMenuShortcut"
export {
DropdownMenu,
DropdownMenuTrigger,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuCheckboxItem,
DropdownMenuRadioItem,
DropdownMenuLabel,
DropdownMenuSeparator,
DropdownMenuShortcut,
DropdownMenuGroup,
DropdownMenuPortal,
DropdownMenuSub,
DropdownMenuSubContent,
DropdownMenuSubTrigger,
DropdownMenuRadioGroup,
}

View File

@@ -0,0 +1,178 @@
"use client"
import * as React from "react"
import * as LabelPrimitive from "@radix-ui/react-label"
import { Slot } from "@radix-ui/react-slot"
import {
Controller,
ControllerProps,
FieldPath,
FieldValues,
FormProvider,
useFormContext,
} from "react-hook-form"
import { cn } from "@/lib/utils"
import { Label } from "@/components/ui/label"
const Form = FormProvider
type FormFieldContextValue<
TFieldValues extends FieldValues = FieldValues,
TName extends FieldPath<TFieldValues> = FieldPath<TFieldValues>
> = {
name: TName
}
const FormFieldContext = React.createContext<FormFieldContextValue>(
{} as FormFieldContextValue
)
const FormField = <
TFieldValues extends FieldValues = FieldValues,
TName extends FieldPath<TFieldValues> = FieldPath<TFieldValues>
>({
...props
}: ControllerProps<TFieldValues, TName>) => {
return (
<FormFieldContext.Provider value={{ name: props.name }}>
<Controller {...props} />
</FormFieldContext.Provider>
)
}
const useFormField = () => {
  const fieldContext = React.useContext(FormFieldContext)
  const itemContext = React.useContext(FormItemContext)
  const { getFieldState, formState } = useFormContext()
  if (!fieldContext) {
    throw new Error("useFormField should be used within <FormField>")
  }
  const fieldState = getFieldState(fieldContext.name, formState)
const { id } = itemContext
return {
id,
name: fieldContext.name,
formItemId: `${id}-form-item`,
formDescriptionId: `${id}-form-item-description`,
formMessageId: `${id}-form-item-message`,
...fieldState,
}
}
type FormItemContextValue = {
id: string
}
const FormItemContext = React.createContext<FormItemContextValue>(
{} as FormItemContextValue
)
const FormItem = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(({ className, ...props }, ref) => {
const id = React.useId()
return (
<FormItemContext.Provider value={{ id }}>
<div ref={ref} className={cn("space-y-2", className)} {...props} />
</FormItemContext.Provider>
)
})
FormItem.displayName = "FormItem"
const FormLabel = React.forwardRef<
React.ElementRef<typeof LabelPrimitive.Root>,
React.ComponentPropsWithoutRef<typeof LabelPrimitive.Root>
>(({ className, ...props }, ref) => {
const { error, formItemId } = useFormField()
return (
<Label
ref={ref}
className={cn(error && "text-red-500 dark:text-red-900", className)}
htmlFor={formItemId}
{...props}
/>
)
})
FormLabel.displayName = "FormLabel"
const FormControl = React.forwardRef<
React.ElementRef<typeof Slot>,
React.ComponentPropsWithoutRef<typeof Slot>
>(({ ...props }, ref) => {
const { error, formItemId, formDescriptionId, formMessageId } = useFormField()
return (
<Slot
ref={ref}
id={formItemId}
aria-describedby={
!error
? `${formDescriptionId}`
: `${formDescriptionId} ${formMessageId}`
}
aria-invalid={!!error}
{...props}
/>
)
})
FormControl.displayName = "FormControl"
const FormDescription = React.forwardRef<
HTMLParagraphElement,
React.HTMLAttributes<HTMLParagraphElement>
>(({ className, ...props }, ref) => {
const { formDescriptionId } = useFormField()
return (
<p
ref={ref}
id={formDescriptionId}
className={cn("text-[0.8rem] text-neutral-500 dark:text-neutral-400", className)}
{...props}
/>
)
})
FormDescription.displayName = "FormDescription"
const FormMessage = React.forwardRef<
HTMLParagraphElement,
React.HTMLAttributes<HTMLParagraphElement>
>(({ className, children, ...props }, ref) => {
const { error, formMessageId } = useFormField()
const body = error ? String(error?.message) : children
if (!body) {
return null
}
return (
<p
ref={ref}
id={formMessageId}
className={cn("text-[0.8rem] font-medium text-red-500 dark:text-red-900", className)}
{...props}
>
{body}
</p>
)
})
FormMessage.displayName = "FormMessage"
export {
useFormField,
Form,
FormItem,
FormLabel,
FormControl,
FormDescription,
FormMessage,
FormField,
}

View File

@@ -0,0 +1,26 @@
import * as React from "react"
import { cn } from "@/lib/utils"
export interface InputProps
extends React.InputHTMLAttributes<HTMLInputElement> {}
const Input = React.forwardRef<HTMLInputElement, InputProps>(
({ className, type, ...props }, ref) => {
return (
<input
type={type}
className={cn(
"flex h-9 w-full rounded-md border border-gray-200 bg-transparent px-3 py-1 text-sm shadow-sm transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-gray-500 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-gray-950 disabled:cursor-not-allowed disabled:opacity-50 dark:border-gray-800 dark:placeholder:text-gray-400 dark:focus-visible:ring-gray-300",
type == "file" ? "pt-1.5 pb-0.5" : "", // fix alignment
className
)}
ref={ref}
{...props}
/>
)
}
)
Input.displayName = "Input"
export { Input }

View File

@@ -0,0 +1,26 @@
"use client"
import * as React from "react"
import * as LabelPrimitive from "@radix-ui/react-label"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
const labelVariants = cva(
"text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
)
const Label = React.forwardRef<
React.ElementRef<typeof LabelPrimitive.Root>,
React.ComponentPropsWithoutRef<typeof LabelPrimitive.Root> &
VariantProps<typeof labelVariants>
>(({ className, ...props }, ref) => (
<LabelPrimitive.Root
ref={ref}
className={cn(labelVariants(), className)}
{...props}
/>
))
Label.displayName = LabelPrimitive.Root.displayName
export { Label }

View File

@@ -0,0 +1,33 @@
"use client"
import * as React from "react"
import * as PopoverPrimitive from "@radix-ui/react-popover"
import { cn } from "@/lib/utils"
const Popover = PopoverPrimitive.Root
const PopoverTrigger = PopoverPrimitive.Trigger
const PopoverAnchor = PopoverPrimitive.Anchor
const PopoverContent = React.forwardRef<
React.ElementRef<typeof PopoverPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
>(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
<PopoverPrimitive.Portal>
<PopoverPrimitive.Content
ref={ref}
align={align}
sideOffset={sideOffset}
className={cn(
"z-50 w-72 rounded-md border border-neutral-200 bg-white p-4 text-neutral-950 shadow-md outline-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:border-neutral-800 dark:bg-neutral-950 dark:text-neutral-50",
className
)}
{...props}
/>
</PopoverPrimitive.Portal>
))
PopoverContent.displayName = PopoverPrimitive.Content.displayName
export { Popover, PopoverTrigger, PopoverContent, PopoverAnchor }

View File

@@ -0,0 +1,29 @@
"use client"
import * as React from "react"
import * as SwitchPrimitives from "@radix-ui/react-switch"
import { cn } from "@/lib/utils"
const Switch = React.forwardRef<
React.ElementRef<typeof SwitchPrimitives.Root>,
React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
>(({ className, ...props }, ref) => (
<SwitchPrimitives.Root
className={cn(
"peer inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-950 focus-visible:ring-offset-2 focus-visible:ring-offset-white disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-neutral-900 data-[state=unchecked]:bg-neutral-200 dark:focus-visible:ring-neutral-300 dark:focus-visible:ring-offset-neutral-950 dark:data-[state=checked]:bg-neutral-50 dark:data-[state=unchecked]:bg-neutral-800",
className
)}
{...props}
ref={ref}
>
<SwitchPrimitives.Thumb
className={cn(
"pointer-events-none block h-4 w-4 rounded-full bg-white shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-4 data-[state=unchecked]:translate-x-0 dark:bg-neutral-950"
)}
/>
</SwitchPrimitives.Root>
))
Switch.displayName = SwitchPrimitives.Root.displayName
export { Switch }

View File

@@ -0,0 +1,120 @@
import * as React from "react"
import { cn } from "@/lib/utils"
const Table = React.forwardRef<
HTMLTableElement,
React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
<div className="relative w-full overflow-auto">
<table
ref={ref}
className={cn("w-full caption-bottom text-sm", className)}
{...props}
/>
</div>
))
Table.displayName = "Table"
const TableHeader = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
))
TableHeader.displayName = "TableHeader"
const TableBody = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tbody
ref={ref}
className={cn("[&_tr:last-child]:border-0", className)}
{...props}
/>
))
TableBody.displayName = "TableBody"
const TableFooter = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tfoot
ref={ref}
className={cn(
"border-t bg-neutral-100/50 font-medium [&>tr]:last:border-b-0 dark:bg-neutral-800/50",
className
)}
{...props}
/>
))
TableFooter.displayName = "TableFooter"
const TableRow = React.forwardRef<
HTMLTableRowElement,
React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
<tr
ref={ref}
className={cn(
"border-b transition-colors hover:bg-neutral-100/50 data-[state=selected]:bg-neutral-100 dark:hover:bg-neutral-800/50 dark:data-[state=selected]:bg-neutral-800",
className
)}
{...props}
/>
))
TableRow.displayName = "TableRow"
const TableHead = React.forwardRef<
HTMLTableCellElement,
React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<th
ref={ref}
className={cn(
"h-10 px-2 text-left align-middle font-medium text-neutral-500 [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px] dark:text-neutral-400",
className
)}
{...props}
/>
))
TableHead.displayName = "TableHead"
const TableCell = React.forwardRef<
HTMLTableCellElement,
React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<td
ref={ref}
className={cn(
"p-2 align-middle [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className
)}
{...props}
/>
))
TableCell.displayName = "TableCell"
const TableCaption = React.forwardRef<
HTMLTableCaptionElement,
React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
<caption
ref={ref}
className={cn("mt-4 text-sm text-neutral-500 dark:text-neutral-400", className)}
{...props}
/>
))
TableCaption.displayName = "TableCaption"
export {
Table,
TableHeader,
TableBody,
TableFooter,
TableHead,
TableRow,
TableCell,
TableCaption,
}

View File

@@ -0,0 +1,24 @@
import * as React from "react"
import { cn } from "@/lib/utils"
export interface TextareaProps
extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
({ className, ...props }, ref) => {
return (
<textarea
className={cn(
"flex min-h-[80px] w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50",
className
)}
ref={ref}
{...props}
/>
)
}
)
Textarea.displayName = "Textarea"
export { Textarea }

View File

@@ -0,0 +1,34 @@
"use client"
import * as React from "react"
import * as TooltipPrimitive from "@radix-ui/react-tooltip"
import { cn } from "@/lib/utils"
const TooltipProvider = TooltipPrimitive.Provider
const Tooltip = ({
  children,
  delayDuration = 10,
}: React.ComponentProps<typeof TooltipPrimitive.Root>) => (
  <TooltipPrimitive.Root delayDuration={delayDuration}>
    {children}
  </TooltipPrimitive.Root>
)
const TooltipTrigger = TooltipPrimitive.Trigger
const TooltipContent = React.forwardRef<
React.ElementRef<typeof TooltipPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
>(({ className, sideOffset = 4, ...props }, ref) => (
<TooltipPrimitive.Content
ref={ref}
sideOffset={sideOffset}
className={cn(
"z-50 overflow-hidden rounded-md bg-neutral-900 px-3 py-1.5 text-xs text-neutral-50 animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:bg-neutral-50 dark:text-neutral-900",
className
)}
{...props}
/>
))
TooltipContent.displayName = TooltipPrimitive.Content.displayName
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }

View File

@@ -0,0 +1,214 @@
import {
Block,
Graph,
GraphCreatable,
GraphUpdateable,
GraphMeta,
GraphExecuteResponse,
NodeExecutionResult,
} from "./types"
export default class AutoGPTServerAPI {
private baseUrl: string;
private wsUrl: string;
private socket: WebSocket | null = null;
private messageHandlers: { [key: string]: (data: any) => void } = {};
constructor(
baseUrl: string = process.env.AGPT_SERVER_URL || "http://localhost:8000/api"
) {
this.baseUrl = baseUrl;
this.wsUrl = `ws://${new URL(this.baseUrl).host}/ws`;
}
async getBlocks(): Promise<Block[]> {
return await this._get("/blocks");
}
async listGraphs(): Promise<GraphMeta[]> {
return this._get("/graphs")
}
async listTemplates(): Promise<GraphMeta[]> {
return this._get("/templates")
}
async getGraph(id: string, version?: number): Promise<Graph> {
const query = version !== undefined ? `?version=${version}` : "";
return this._get(`/graphs/${id}` + query);
}
async getTemplate(id: string, version?: number): Promise<Graph> {
const query = version !== undefined ? `?version=${version}` : "";
return this._get(`/templates/${id}` + query);
}
async getGraphAllVersions(id: string): Promise<Graph[]> {
return this._get(`/graphs/${id}/versions`);
}
async getTemplateAllVersions(id: string): Promise<Graph[]> {
return this._get(`/templates/${id}/versions`);
}
async createGraph(graphCreateBody: GraphCreatable): Promise<Graph>;
async createGraph(fromTemplateID: string, templateVersion: number): Promise<Graph>;
async createGraph(
graphOrTemplateID: GraphCreatable | string, templateVersion?: number
): Promise<Graph> {
let requestBody: GraphCreateRequestBody;
    if (typeof graphOrTemplateID === "string") {
      if (templateVersion === undefined) {
throw new Error("templateVersion not specified")
}
requestBody = {
template_id: graphOrTemplateID,
template_version: templateVersion,
}
} else {
requestBody = { graph: graphOrTemplateID }
}
return this._request("POST", "/graphs", requestBody);
}
async createTemplate(templateCreateBody: GraphCreatable): Promise<Graph> {
const requestBody: GraphCreateRequestBody = { graph: templateCreateBody };
return this._request("POST", "/templates", requestBody);
}
async updateGraph(id: string, graph: GraphUpdateable): Promise<Graph> {
return await this._request("PUT", `/graphs/${id}`, graph);
}
async updateTemplate(id: string, template: GraphUpdateable): Promise<Graph> {
return await this._request("PUT", `/templates/${id}`, template);
}
async setGraphActiveVersion(id: string, version: number): Promise<Graph> {
return this._request(
"PUT", `/graphs/${id}/versions/active`, { active_graph_version: version }
);
}
async executeGraph(
id: string, inputData: { [key: string]: any } = {}
): Promise<GraphExecuteResponse> {
return this._request("POST", `/graphs/${id}/execute`, inputData);
}
async listGraphRunIDs(graphID: string, graphVersion?: number): Promise<string[]> {
const query = graphVersion !== undefined ? `?graph_version=${graphVersion}` : "";
return this._get(`/graphs/${graphID}/executions` + query);
}
async getGraphExecutionInfo(graphID: string, runID: string): Promise<NodeExecutionResult[]> {
return (await this._get(`/graphs/${graphID}/executions/${runID}`))
.map((result: any) => ({
...result,
add_time: new Date(result.add_time),
queue_time: result.queue_time ? new Date(result.queue_time) : undefined,
start_time: result.start_time ? new Date(result.start_time) : undefined,
end_time: result.end_time ? new Date(result.end_time) : undefined,
}));
}
private async _get(path: string) {
return this._request("GET", path);
}
private async _request(
method: "GET" | "POST" | "PUT" | "PATCH",
path: string,
payload?: { [key: string]: any },
) {
    if (method !== "GET") {
console.debug(`${method} ${path} payload:`, payload);
}
const response = await fetch(
this.baseUrl + path,
method != "GET" ? {
method,
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(payload),
} : undefined
);
const response_data = await response.json();
if (!response.ok) {
console.warn(
`${method} ${path} returned non-OK response:`, response_data.detail, response
);
throw new Error(`HTTP error ${response.status}! ${response_data.detail}`);
}
return response_data;
}
connectWebSocket(): Promise<void> {
return new Promise((resolve, reject) => {
this.socket = new WebSocket(this.wsUrl);
this.socket.onopen = () => {
console.log('WebSocket connection established');
resolve();
};
this.socket.onclose = (event) => {
console.log('WebSocket connection closed', event);
this.socket = null;
};
this.socket.onerror = (error) => {
console.error('WebSocket error:', error);
reject(error);
};
this.socket.onmessage = (event) => {
const message = JSON.parse(event.data);
if (this.messageHandlers[message.method]) {
this.messageHandlers[message.method](message.data);
}
};
});
}
disconnectWebSocket() {
if (this.socket && this.socket.readyState === WebSocket.OPEN) {
this.socket.close();
}
}
sendWebSocketMessage(method: string, data: any) {
if (this.socket && this.socket.readyState === WebSocket.OPEN) {
this.socket.send(JSON.stringify({ method, data }));
} else {
console.error('WebSocket is not connected');
}
}
onWebSocketMessage(method: string, handler: (data: any) => void) {
this.messageHandlers[method] = handler;
}
subscribeToExecution(graphId: string) {
this.sendWebSocketMessage('subscribe', { graph_id: graphId });
}
runGraph(graphId: string, data: any = {}) {
this.sendWebSocketMessage('run_graph', { graph_id: graphId, data });
}
}
/* *** UTILITY TYPES *** */
type GraphCreateRequestBody = {
template_id: string;
template_version: number;
} | {
graph: GraphCreatable;
}

View File

@@ -0,0 +1,5 @@
import AutoGPTServerAPI from "./client";
export default AutoGPTServerAPI;
export * from "./types";
export * from "./utils";

View File

@@ -0,0 +1,93 @@
/* Mirror of autogpt_server/data/block.py:Block */
export type Block = {
id: string;
name: string;
description: string;
inputSchema: ObjectSchema;
outputSchema: ObjectSchema;
};
export type ObjectSchema = {
type: string;
properties: { [key: string]: any };
additionalProperties?: { type: string };
required?: string[];
};
/* Mirror of autogpt_server/data/graph.py:Node */
export type Node = {
id: string;
block_id: string;
input_default: { [key: string]: any };
input_nodes: Array<{ name: string, node_id: string }>;
output_nodes: Array<{ name: string, node_id: string }>;
metadata: {
position: { x: number; y: number; };
[key: string]: any;
};
};
/* Mirror of autogpt_server/data/graph.py:Link */
export type Link = {
id: string;
source_id: string;
sink_id: string;
source_name: string;
sink_name: string;
}
export type LinkCreatable = Omit<Link, "id"> & {
id?: string;
}
/* Mirror of autogpt_server/data/graph.py:GraphMeta */
export type GraphMeta = {
id: string;
version: number;
is_active: boolean;
is_template: boolean;
name: string;
description: string;
}
/* Mirror of autogpt_server/data/graph.py:Graph */
export type Graph = GraphMeta & {
nodes: Array<Node>;
links: Array<Link>;
};
export type GraphUpdateable = Omit<
Graph,
"version" | "is_active" | "is_template" | "links"
> & {
version?: number;
is_active?: boolean;
is_template?: boolean;
links: Array<LinkCreatable>;
}
export type GraphCreatable = Omit<GraphUpdateable, "id"> & { id?: string }
/* Derived from autogpt_server/executor/manager.py:ExecutionManager.add_execution */
export type GraphExecuteResponse = {
/** ID of the initiated run */
id: string;
/** List of node executions */
executions: Array<{ id: string, node_id: string }>;
};
/* Mirror of autogpt_server/data/execution.py:ExecutionResult */
export type NodeExecutionResult = {
graph_exec_id: string;
node_exec_id: string;
graph_id: string;
graph_version: number;
node_id: string;
status: 'INCOMPLETE' | 'QUEUED' | 'RUNNING' | 'COMPLETED' | 'FAILED';
input_data: { [key: string]: any };
output_data: { [key: string]: Array<any> };
add_time: Date;
queue_time?: Date;
start_time?: Date;
end_time?: Date;
};

View File

@@ -0,0 +1,20 @@
import { Graph, Block, Node } from "./types";
/** Creates a copy of the graph with all secrets removed */
export function safeCopyGraph(graph: Graph, block_defs: Block[]): Graph {
return {
...graph,
nodes: graph.nodes.map(node => {
const block = block_defs.find(b => b.id == node.block_id)!;
return {
...node,
input_default: Object.keys(node.input_default)
.filter(k => !block.inputSchema.properties[k].secret)
.reduce((obj: Node['input_default'], key) => {
obj[key] = node.input_default[key];
return obj;
}, {}),
}
}),
}
}

View File

@@ -0,0 +1,14 @@
export type BlockSchema = {
type: string;
properties: { [key: string]: any };
required?: string[];
enum?: string[];
items?: BlockSchema;
additionalProperties?: { type: string };
title?: string;
description?: string;
placeholder?: string;
allOf?: any[];
anyOf?: any[];
oneOf?: any[];
};

View File

@@ -0,0 +1,121 @@
import { type ClassValue, clsx } from "clsx"
import { twMerge } from "tailwind-merge"
export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs))
}
/** Derived from https://stackoverflow.com/a/7616484 */
export function hashString(str: string): number {
let hash = 0, chr: number;
if (str.length === 0) return hash;
for (let i = 0; i < str.length; i++) {
chr = str.charCodeAt(i);
hash = ((hash << 5) - hash) + chr;
hash |= 0; // Convert to 32bit integer
}
return hash;
}
/** Derived from https://stackoverflow.com/a/32922084 */
export function deepEquals(x: any, y: any): boolean {
const ok = Object.keys, tx = typeof x, ty = typeof y;
return x && y && tx === ty && (
tx === 'object'
? (
ok(x).length === ok(y).length &&
ok(x).every(key => deepEquals(x[key], y[key]))
)
: (x === y)
);
}
/** Get tailwind text color class from type name */
export function getTypeTextColor(type: string | null): string {
  if (type === null) return 'text-gray-500';
return {
string: 'text-green-500',
number: 'text-blue-500',
boolean: 'text-yellow-500',
object: 'text-purple-500',
array: 'text-indigo-500',
null: 'text-gray-500',
'': 'text-gray-500',
}[type] || 'text-gray-500';
}
/** Get tailwind bg color class from type name */
export function getTypeBgColor(type: string | null): string {
if (type === null) return 'bg-gray-500';
return {
string: 'bg-green-500',
number: 'bg-blue-500',
boolean: 'bg-yellow-500',
object: 'bg-purple-500',
array: 'bg-indigo-500',
null: 'bg-gray-500',
'': 'bg-gray-500',
}[type] || 'bg-gray-500';
}
export function getTypeColor(type: string | null): string {
  if (type === null) return '#6b7280';
return {
string: '#22c55e',
number: '#3b82f6',
boolean: '#eab308',
object: '#a855f7',
array: '#6366f1',
null: '#6b7280',
'': '#6b7280',
}[type] || '#6b7280';
}
export function beautifyString(name: string): string {
// Regular expression to identify places to split, considering acronyms
const result = name
.replace(/([a-z])([A-Z])/g, '$1 $2') // Add space before capital letters
.replace(/([A-Z])([A-Z][a-z])/g, '$1 $2') // Add space between acronyms and next word
.replace(/_/g, ' ') // Replace underscores with spaces
.replace(/\b\w/g, char => char.toUpperCase()); // Capitalize the first letter of each word
return applyExceptions(result);
};
const exceptionMap: Record<string, string> = {
'Auto GPT': 'AutoGPT',
'Gpt': 'GPT',
'Creds': 'Credentials',
'Id': 'ID',
'Openai': 'OpenAI',
'Api': 'API',
'Url': 'URL',
'Http': 'HTTP',
'Json': 'JSON',
};
const applyExceptions = (str: string): string => {
Object.keys(exceptionMap).forEach(key => {
const regex = new RegExp(`\\b${key}\\b`, 'g');
str = str.replace(regex, exceptionMap[key]);
});
return str;
};
export function exportAsJSONFile(obj: object, filename: string): void {
// Create downloadable blob
const jsonString = JSON.stringify(obj, null, 2);
const blob = new Blob([jsonString], { type: 'application/json' });
const url = URL.createObjectURL(blob);
// Trigger the browser to download the blob to a file
const link = document.createElement('a');
link.href = url;
link.download = filename;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
// Clean up
URL.revokeObjectURL(url);
}

View File

@@ -1,20 +1,37 @@
import type { Config } from "tailwindcss";
const config: Config = {
const config = {
darkMode: ["class"],
content: [
"./src/pages/**/*.{js,ts,jsx,tsx,mdx}",
"./src/components/**/*.{js,ts,jsx,tsx,mdx}",
"./src/app/**/*.{js,ts,jsx,tsx,mdx}",
'./src/**/*.{ts,tsx}',
],
prefix: "",
theme: {
container: {
center: true,
padding: "2rem",
screens: {
"2xl": "1400px",
},
},
extend: {
backgroundImage: {
"gradient-radial": "radial-gradient(var(--tw-gradient-stops))",
"gradient-conic":
"conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))",
keyframes: {
"accordion-down": {
from: { height: "0" },
to: { height: "var(--radix-accordion-content-height)" },
},
"accordion-up": {
from: { height: "var(--radix-accordion-content-height)" },
to: { height: "0" },
},
},
animation: {
"accordion-down": "accordion-down 0.2s ease-out",
"accordion-up": "accordion-up 0.2s ease-out",
},
},
},
plugins: [],
};
plugins: [require("tailwindcss-animate")],
} satisfies Config;
export default config;

View File

@@ -0,0 +1,7 @@
module.exports = {
devServer: {
proxy: {
'/graphs': 'http://localhost:8000'
}
}
};

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}"
DB_USER=agpt_user
DB_PASS=pass123
DB_NAME=agpt_local
DB_PORT=5432
PRISMA_SCHEMA="schema_postgres.prisma"

View File

@@ -0,0 +1,10 @@
# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
# Reddit
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USERNAME=
REDDIT_PASSWORD=

View File

@@ -0,0 +1,67 @@
# AutoGPT Agent Server Advanced Setup
This guide walks you through a dockerized setup with an external database (PostgreSQL).
## Setup
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
```sh
poetry install
```
4. Generate the Prisma client
```sh
poetry run prisma generate --schema postgres/schema.prisma
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/autogpt-server-TQIRSwR6-py3.12/bin/prisma`
5. Run the PostgreSQL database from the `/rnd` folder
```sh
docker compose up -d
```
6. Run the migrations
```sh
poetry run prisma migrate dev --schema postgres/schema.prisma
```
## Running The Server
### Starting the server directly
Run the following command:
```sh
poetry run app
```

View File

@@ -1,74 +1,152 @@
# Next Gen AutoGPT
# AutoGPT Agent Server
This is a research project into creating the next generation of AutoGPT: an AutoGPT agent server.
The agent server will enable the creation of composite multi-agent systems that utilize an AutoGPT agent as the default agent.
## Project Outline
Currently, the project mainly consists of these components:
*agent_api*
A component that will expose API endpoints for the creation & execution of agents.
This component will make connections to the database to persist and read the agents.
It will also trigger the agent execution by pushing its execution request to the ExecutionQueue.
*agent_executor*
A component that will execute the agents.
This component will be a pool of processes/threads that will consume the ExecutionQueue and execute the agent accordingly.
The result and progress of its execution will be persisted in the database.
This is an initial project for creating the next generation of agent execution, which is an AutoGPT agent server.
The agent server will enable the creation of composite multi-agent systems that utilize AutoGPT agents and other non-agent components as their primitives.
## Setup
This setup is for macOS/Linux.
To set up the project, follow these steps inside the project directory:
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
1. Install dependencies
```
```sh
poetry install
```
4. Generate the Prisma client
```sh
poetry run prisma generate
```
In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to uninstall the global Prisma package:
```
pip uninstall prisma
```
And then run the generation again.
The path *should* look something like this:
`<some path>/pypoetry/virtualenvs/autogpt-server-TQIRSwR6-py3.12/bin/prisma`
1. Migrate the database. Be careful: this deletes the current data in the database.
```
poetry run prisma migrate dev
```
1. Start the server. This starts the server in the background.
```
poetry run python ./autogpt_server/cli.py start
```
You may need to change the permissions of the file to make it executable
```
chmod +x autogpt_server/cli.py
```
1. Stop the server
```
poetry run python ./autogpt_server/cli.py stop
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/autogpt-server-TQIRSwR6-py3.12/bin/prisma`
1. To run the tests
```
poetry run pytest
```
5. Migrate the database. Be careful because this deletes current data in the database.
```sh
poetry run prisma migrate dev
```
## Running The Server
### Starting the server directly
Run the following command:
```sh
poetry run app
```
## Testing
To run the tests:
```sh
poetry run pytest
```
## Development
### Formatting & Linting
An auto-formatter and a linter are set up in the project. To run them:
Install:
```sh
poetry install --with dev
```
Format the code:
```sh
poetry run format
```
Lint the code:
```sh
poetry run lint
```
## Project Outline
The current project has the following main modules:
### **blocks**
This module stores all the Agent Blocks, which are reusable components to build a graph that represents the agent's behavior.
### **data**
This module stores the logical model that is persisted in the database.
It abstracts the database operations into functions that can be called by the service layer.
Any code that interacts with Prisma objects or the database should reside in this module (a sketch of this pattern follows the list below).
The main models are:
* `block`: anything related to the block used in the graph
* `execution`: anything related to the graph execution
* `graph`: anything related to the graph, its nodes, and their relations
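For illustration, a data-layer function following this pattern could look like the sketch below, using the Prisma Python client; the model and field names here are assumptions for the example, not the project's actual schema:
```python
# Hypothetical sketch of a data-layer function; model/field names are assumptions.
from prisma import Prisma

async def get_graph_name(graph_id: str) -> str | None:
    db = Prisma()
    await db.connect()
    try:
        # The service layer calls functions like this instead of touching Prisma directly.
        graph = await db.agentgraph.find_first(where={"id": graph_id})
        return graph.name if graph else None
    finally:
        await db.disconnect()
```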
### **execution**
This module stores the business logic of executing the graph.
It currently has the following main modules:
* `manager`: A service that consumes the graph execution queue and executes the graphs. It contains both the queue-consumption and the graph-execution logic (see the sketch after this list).
* `scheduler`: A service that triggers scheduled graph execution based on a cron expression. It pushes an execution request to the manager.
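A minimal sketch of the scheduler-to-manager hand-off described above; the queue type and function names are assumptions for illustration, not the project's actual interfaces:
```python
# Illustrative queue hand-off between scheduler and manager; names are assumptions.
from multiprocessing import Queue

execution_queue: Queue = Queue()

def schedule_execution(graph_id: str, graph_version: int) -> None:
    # The scheduler pushes an execution request onto the queue...
    execution_queue.put({"graph_id": graph_id, "graph_version": graph_version})

def manager_loop() -> None:
    # ...and the manager consumes requests and executes the graphs.
    while True:
        request = execution_queue.get()
        print("executing", request["graph_id"], "v", request["graph_version"])
```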
### **server**
This module stores the logic for the server API.
It contains all the logic used for the API that allows the client to create, execute, and monitor the graph and its execution.
This API service interacts with other services like those defined in `manager` and `scheduler`.
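As a concrete (hypothetical) illustration, the REST surface that the frontend client talks to could expose an endpoint like the sketch below; the framework choice is an assumption, and only the `/graphs` path mirrors the client code shown earlier:
```python
# Hypothetical sketch of one API endpoint; the framework choice is an assumption.
from fastapi import FastAPI

app = FastAPI()

@app.get("/graphs")
def list_graphs() -> list[dict]:
    # In the real server this would call into the data layer.
    return [{"id": "example", "version": 1, "name": "Example graph"}]
```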
### **utils**
This module stores utility functions that are used across the project.
Currently, it has two main modules:
* `process`: A module that contains the logic to spawn a new process.
* `service`: A module that serves as a parent class for all the services in the project.
## Service Communication
Currently, there are only 3 active services:
- AgentServer (the API, defined in `server.py`)
- ExecutionManager (the executor, defined in `manager.py`)
- ExecutionScheduler (the scheduler, defined in `scheduler.py`)
The services run in independent Python processes and communicate through IPC.
A communication layer (`service.py`) is created to decouple the communication library from the implementation.
Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process.
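As an illustration of this pattern, here is a minimal Pyro5 sketch of a method being exposed and called across processes. This is a simplification, not the project's actual `service.py` abstraction; the class and method names are made up for the example:
```python
# Minimal Pyro5 sketch of cross-process calls; illustrative only.
import Pyro5.api

@Pyro5.api.expose
class ExampleService:
    def add_execution(self, graph_id: str) -> str:
        return f"queued {graph_id}"

def serve() -> None:
    daemon = Pyro5.api.Daemon()                       # listens for remote calls
    uri = daemon.register(ExampleService, "example")  # register under an object id
    print("Service available at", uri)
    daemon.requestLoop()                              # serve requests forever

def call(uri: str) -> None:
    with Pyro5.api.Proxy(uri) as service:             # client-side stub
        print(service.add_execution("graph-123"))     # looks like a local call
```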
## Adding a New Agent Block
To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information (a minimal sketch follows this list):
* All the block code should live in the `blocks` (`autogpt_server.blocks`) module.
* `input_schema`: the schema of the input data, represented by a Pydantic object.
* `output_schema`: the schema of the output data, represented by a Pydantic object.
* `run` method: the main logic of the block.
* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block.
* You can mock the functions declared in the block using the `test_mock` field for your unit tests.
* Once you finish creating the block, you can test it by running `pytest -s test/block/test_block.py`.
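For reference, here is a minimal sketch of such a block, modeled on the existing blocks in this module (e.g. `PrintingBlock`); the name, ID, and logic are illustrative only, so generate a fresh UUID for a real block:
```python
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema

class ReverseTextBlock(Block):
    """Illustrative example block; not part of the repo."""

    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        reversed_text: str

    def __init__(self):
        super().__init__(
            id="00000000-0000-4000-8000-000000000000",  # placeholder; use a real UUID
            description="Reverses the input text.",
            categories={BlockCategory.BASIC},
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
            test_input={"text": "abc"},
            test_output=("reversed_text", "cba"),
        )

    def run(self, input_data: Input) -> BlockOutput:
        yield "reversed_text", input_data.text[::-1]
```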

View File

@@ -1,4 +1,5 @@
from multiprocessing import freeze_support, set_start_method
from autogpt_server.executor import ExecutionManager, ExecutionScheduler
from autogpt_server.server import AgentServer
from autogpt_server.util.process import AppProcess

View File

@@ -0,0 +1,34 @@
import glob
import importlib
import os
from pathlib import Path
from autogpt_server.data.block import Block
# Dynamically load all modules under autogpt_server.blocks
AVAILABLE_MODULES = []
current_dir = os.path.dirname(__file__)
modules = glob.glob(os.path.join(current_dir, "*.py"))
modules = [
Path(f).stem
for f in modules
if os.path.isfile(f) and f.endswith(".py") and not f.endswith("__init__.py")
]
for module in modules:
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS = {}
for cls in Block.__subclasses__():
block = cls()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
AVAILABLE_BLOCKS[block.id] = block
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]

View File

@@ -0,0 +1,190 @@
from __future__ import annotations
import asyncio
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Iterator
from autogpt.agents.agent import Agent, AgentSettings
from autogpt.app.config import ConfigBuilder
from forge.agent.components import AgentComponent
from forge.agent.protocols import CommandProvider
from forge.command import command
from forge.command.command import Command
from forge.file_storage import FileStorageBackendName, get_storage
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider
from forge.llm.providers.openai import OpenAICredentials, OpenAIProvider
from forge.llm.providers.schema import ModelProviderName
from forge.models.json_schema import JSONSchema
from pydantic import Field, SecretStr
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
if TYPE_CHECKING:
from autogpt.app.config import AppConfig
logger = logging.getLogger(__name__)
class BlockAgentSettings(AgentSettings):
enabled_components: list[str] = Field(default_factory=list)
class OutputComponent(CommandProvider):
def get_commands(self) -> Iterator[Command]:
yield self.output
@command(
parameters={
"output": JSONSchema(
type=JSONSchema.Type.STRING,
description="Output data to be returned.",
required=True,
),
},
)
def output(self, output: str) -> str:
"""Use this to output the result."""
return output
class BlockAgent(Agent):
def __init__(
self,
settings: BlockAgentSettings,
llm_provider: MultiProvider,
file_storage: FileStorage,
app_config: AppConfig,
):
super().__init__(settings, llm_provider, file_storage, app_config)
self.output = OutputComponent()
# Disable components
for attr_name in list(self.__dict__.keys()):
attr_value = getattr(self, attr_name)
if not isinstance(attr_value, AgentComponent):
continue
component_name = type(attr_value).__name__
if (
component_name != "SystemComponent"
and component_name not in settings.enabled_components
):
delattr(self, attr_name)
class AutoGPTAgentBlock(Block):
class Input(BlockSchema):
task: str = SchemaField(
description="Task description for the agent.",
placeholder="Calculate and use Output command",
)
input: str = SchemaField(
description="Input data for the task",
placeholder="8 + 5",
)
openai_api_key: BlockSecret = SecretField(
key="openai_api_key", description="OpenAI API key"
)
enabled_components: list[str] = Field(
default_factory=lambda: [OutputComponent.__name__],
description="List of [AgentComponents](https://docs.agpt.co/forge/components/built-in-components/) enabled for the agent.",
)
disabled_commands: list[str] = Field(
default_factory=list,
description="List of commands from enabled components to disable.",
)
fast_mode: bool = Field(
False,
description="If true uses fast llm, otherwise uses smart and slow llm.",
)
class Output(BlockSchema):
result: str
def __init__(self):
super().__init__(
id="d2e2ecd2-9ae6-422d-8dfe-ceca500ce6a6",
description="AutoGPT agent, it utilizes a Large Language Model and enabled components/tools to perform a task.",
categories={BlockCategory.LLM},
input_schema=AutoGPTAgentBlock.Input,
output_schema=AutoGPTAgentBlock.Output,
test_input={
"task": "Make calculations and use output command to output the result",
"input": "5 + 3",
"openai_api_key": "openai_api_key",
"enabled_components": [OutputComponent.__name__],
"disabled_commands": ["finish"],
"fast_mode": True,
},
test_output=[
("result", "8"),
],
test_mock={
"get_provider": lambda _: MultiProvider(),
"get_result": lambda _: "8",
},
)
@staticmethod
def get_provider(openai_api_key: str) -> MultiProvider:
# LLM provider
settings = OpenAIProvider.default_settings.model_copy()
settings.credentials = OpenAICredentials(api_key=SecretStr(openai_api_key))
openai_provider = OpenAIProvider(settings=settings)
multi_provider = MultiProvider()
# HACK: Add OpenAI provider to the multi provider with api key
multi_provider._provider_instances[ModelProviderName.OPENAI] = openai_provider
return multi_provider
@staticmethod
def get_result(agent: BlockAgent) -> str:
error: Exception | None = None
        for _ in range(3):  # retry the proposal/execution loop up to 3 times
try:
proposal = asyncio.run(agent.propose_action())
result = asyncio.run(agent.execute(proposal))
return str(result)
except Exception as e:
error = e
raise error or Exception("Failed to get result")
def run(self, input_data: Input) -> BlockOutput:
# Set up configuration
config = ConfigBuilder.build_config_from_env()
# Disable commands
config.disabled_commands.extend(input_data.disabled_commands)
# Storage
local = config.file_storage_backend == FileStorageBackendName.LOCAL
restrict_to_root = not local or config.restrict_to_workspace
file_storage = get_storage(
config.file_storage_backend,
root_path=Path("data"),
restrict_to_root=restrict_to_root,
)
file_storage.initialize()
# State
state = BlockAgentSettings(
agent_id="TemporaryAgentID",
name="WrappedAgent",
description="Wrapped agent for the Agent Server.",
task=f"Your task: {input_data.task}\n" f"Input data: {input_data.input}",
enabled_components=input_data.enabled_components,
)
# Switch big brain mode
state.config.big_brain = not input_data.fast_mode
provider = self.get_provider(input_data.openai_api_key.get_secret_value())
agent = BlockAgent(state, provider, file_storage, config)
result = self.get_result(agent)
yield "result", result

View File

@@ -0,0 +1,141 @@
from typing import Any
from pydantic import Field
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
class ValueBlock(Block):
"""
    This block provides a constant value as a block, in a stateless manner.
    The common use case is to simply pass the `input` data; the block will `output` the same data.
    However, this does not retain state: once the block is executed, the output is consumed.
    To retain state, you can feed the `output` back into the `data` input, so that the data
    is retained in the block for the next execution. You can then trigger the block by
    feeding the `input` pin any data, and the block will produce the value of `data`.
Ex:
<constant_data> <any_trigger>
|| ||
=====> `data` `input`
|| \\ //
|| ValueBlock
|| ||
========= `output`
"""
class Input(BlockSchema):
input: Any = Field(
description="Trigger the block to produce the output. "
"The value is only used when `data` is None."
)
data: Any = Field(
description="The constant data to be retained in the block. "
"This value is passed as `output`.",
default=None,
)
class Output(BlockSchema):
output: Any
def __init__(self):
super().__init__(
id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
description="This block forwards the `input` pin to `output` pin. "
"If the `data` is provided, it will prioritize forwarding `data` "
"over `input`. By connecting the `output` pin to `data` pin, "
"you can retain a constant value for the next executions.",
categories={BlockCategory.BASIC},
input_schema=ValueBlock.Input,
output_schema=ValueBlock.Output,
test_input=[
{"input": "Hello, World!"},
{"input": "Hello, World!", "data": "Existing Data"},
],
test_output=[
("output", "Hello, World!"), # No data provided, so trigger is returned
("output", "Existing Data"), # Data is provided, so data is returned.
],
)
def run(self, input_data: Input) -> BlockOutput:
yield "output", input_data.data or input_data.input
class PrintingBlock(Block):
class Input(BlockSchema):
text: str
class Output(BlockSchema):
status: str
def __init__(self):
super().__init__(
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
description="Print the given text to the console, this is used for a debugging purpose.",
categories={BlockCategory.BASIC},
input_schema=PrintingBlock.Input,
output_schema=PrintingBlock.Output,
test_input={"text": "Hello, World!"},
test_output=("status", "printed"),
)
def run(self, input_data: Input) -> BlockOutput:
print(">>>>> Print: ", input_data.text)
yield "status", "printed"
class ObjectLookupBlock(Block):
class Input(BlockSchema):
input: Any = Field(description="Dictionary to lookup from")
key: str | int = Field(description="Key to lookup in the dictionary")
class Output(BlockSchema):
output: Any = Field(description="Value found for the given key")
        missing: Any = Field(description="Value of the input when the key is missing")
def __init__(self):
super().__init__(
id="b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6",
description="Lookup the given key in the input dictionary/object/list and return the value.",
categories={BlockCategory.BASIC},
input_schema=ObjectLookupBlock.Input,
output_schema=ObjectLookupBlock.Output,
test_input=[
{"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"},
{"input": {"x": 10, "y": 20, "z": 30}, "key": "w"},
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": ObjectLookupBlock.Input(input="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", 2),
("missing", {"x": 10, "y": 20, "z": 30}),
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
)
def run(self, input_data: Input) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:
yield "missing", input_data.input

View File

@@ -0,0 +1,63 @@
import os
import re
from typing import Type
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.util.test import execute_block_test
class BlockInstallationBlock(Block):
"""
This block allows the verification and installation of other blocks in the system.
NOTE:
This block allows remote code execution on the server, and it should be used
for development purposes only.
"""
class Input(BlockSchema):
code: str
class Output(BlockSchema):
success: str
error: str
def __init__(self):
super().__init__(
id="45e78db5-03e9-447f-9395-308d712f5f08",
description="Given a code string, this block allows the verification and installation of a block code into the system.",
categories={BlockCategory.BASIC},
input_schema=BlockInstallationBlock.Input,
output_schema=BlockInstallationBlock.Output,
)
def run(self, input_data: Input) -> BlockOutput:
code = input_data.code
if search := re.search(r"class (\w+)\(Block\):", code):
class_name = search.group(1)
else:
yield "error", "No class found in the code."
return
if search := re.search(r"id=\"(\w+-\w+-\w+-\w+-\w+)\"", code):
file_name = search.group(1)
else:
yield "error", "No UUID found in the code."
return
block_dir = os.path.dirname(__file__)
file_path = f"{block_dir}/{file_name}.py"
module_name = f"autogpt_server.blocks.{file_name}"
with open(file_path, "w") as f:
f.write(code)
try:
module = __import__(module_name, fromlist=[class_name])
block_class: Type[Block] = getattr(module, class_name)
block = block_class()
execute_block_test(block)
yield "success", "Block installed successfully."
except Exception as e:
os.remove(file_path)
yield "error", f"[Code]\n{code}\n\n[Error]\n{str(e)}"

View File

@@ -0,0 +1,169 @@
from typing import List
import requests
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
class CreateMediumPostBlock(Block):
class Input(BlockSchema):
author_id: BlockSecret = SecretField(
key="medium_author_id",
description="""The Medium AuthorID of the user. You can get this by calling the /me endpoint of the Medium API.\n\ncurl -H "Authorization: Bearer YOUR_ACCESS_TOKEN" https://api.medium.com/v1/me" the response will contain the authorId field.""",
placeholder="Enter the author's Medium AuthorID",
)
title: str = SchemaField(
description="The title of your Medium post",
placeholder="Enter your post title",
)
content: str = SchemaField(
description="The main content of your Medium post",
placeholder="Enter your post content",
)
content_format: str = SchemaField(
description="The format of the content: 'html' or 'markdown'",
placeholder="html",
)
tags: List[str] = SchemaField(
description="List of tags for your Medium post (up to 5)",
placeholder="['technology', 'AI', 'blogging']",
)
canonical_url: str | None = SchemaField(
default=None,
description="The original home of this content, if it was originally published elsewhere",
placeholder="https://yourblog.com/original-post",
)
publish_status: str = SchemaField(
description="The publish status: 'public', 'draft', or 'unlisted'",
placeholder="public",
)
license: str = SchemaField(
default="all-rights-reserved",
description="The license of the post: 'all-rights-reserved', 'cc-40-by', 'cc-40-by-sa', 'cc-40-by-nd', 'cc-40-by-nc', 'cc-40-by-nc-nd', 'cc-40-by-nc-sa', 'cc-40-zero', 'public-domain'",
placeholder="all-rights-reserved",
)
notify_followers: bool = SchemaField(
default=False,
description="Whether to notify followers that the user has published",
placeholder="False",
)
api_key: BlockSecret = SecretField(
key="medium_api_key",
description="""The API key for the Medium integration. You can get this from https://medium.com/me/settings/security and scrolling down to "integration Tokens".""",
placeholder="Enter your Medium API key",
)
class Output(BlockSchema):
post_id: str = SchemaField(description="The ID of the created Medium post")
post_url: str = SchemaField(description="The URL of the created Medium post")
author_id: str = SchemaField(description="The Medium user ID of the author")
published_at: int = SchemaField(
description="The timestamp when the post was published"
)
error: str = SchemaField(
description="Error message if the post creation failed"
)
def __init__(self):
super().__init__(
id="3f7b2dcb-4a78-4e3f-b0f1-88132e1b89df",
input_schema=CreateMediumPostBlock.Input,
output_schema=CreateMediumPostBlock.Output,
test_input={
"author_id": "1234567890abcdef",
"title": "Test Post",
"content": "<h1>Test Content</h1><p>This is a test post.</p>",
"content_format": "html",
"tags": ["test", "automation"],
"license": "all-rights-reserved",
"notify_followers": False,
"publish_status": "draft",
"api_key": "your_test_api_key",
},
test_output=[
("post_id", "e6f36a"),
("post_url", "https://medium.com/@username/test-post-e6f36a"),
("author_id", "1234567890abcdef"),
("published_at", 1626282600),
],
test_mock={
"create_post": lambda *args, **kwargs: {
"data": {
"id": "e6f36a",
"url": "https://medium.com/@username/test-post-e6f36a",
"authorId": "1234567890abcdef",
"publishedAt": 1626282600,
}
}
},
)
def create_post(
self,
api_key,
author_id,
title,
content,
content_format,
tags,
canonical_url,
publish_status,
license,
notify_followers,
):
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"Accept": "application/json",
}
data = {
"title": title,
"content": content,
"contentFormat": content_format,
"tags": tags,
"canonicalUrl": canonical_url,
"publishStatus": publish_status,
"license": license,
"notifyFollowers": notify_followers,
}
response = requests.post(
f"https://api.medium.com/v1/users/{author_id}/posts",
headers=headers,
json=data,
)
return response.json()
def run(self, input_data: Input) -> BlockOutput:
try:
response = self.create_post(
input_data.api_key.get_secret_value(),
input_data.author_id.get_secret_value(),
input_data.title,
input_data.content,
input_data.content_format,
input_data.tags,
input_data.canonical_url,
input_data.publish_status,
input_data.license,
input_data.notify_followers,
)
if "data" in response:
yield "post_id", response["data"]["id"]
yield "post_url", response["data"]["url"]
yield "author_id", response["data"]["authorId"]
yield "published_at", response["data"]["publishedAt"]
else:
error_message = response.get("errors", [{}])[0].get(
"message", "Unknown error occurred"
)
yield "error", f"Failed to create Medium post: {error_message}"
except requests.RequestException as e:
yield "error", f"Network error occurred while creating Medium post: {str(e)}"
except Exception as e:
yield "error", f"Error occurred while creating Medium post: {str(e)}"

View File

@@ -0,0 +1,35 @@
from typing import Any, List, Tuple
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.model import SchemaField
class ForEachBlock(Block):
class Input(BlockSchema):
items: List[Any] = SchemaField(
description="The list of items to iterate over",
placeholder="[1, 2, 3, 4, 5]",
)
class Output(BlockSchema):
item: Tuple[int, Any] = SchemaField(
description="A tuple with the index and current item in the iteration"
)
def __init__(self):
super().__init__(
id="f8e7d6c5-b4a3-2c1d-0e9f-8g7h6i5j4k3l",
input_schema=ForEachBlock.Input,
output_schema=ForEachBlock.Output,
test_input={"items": [1, "two", {"three": 3}, [4, 5]]},
test_output=[
("item", (0, 1)),
("item", (1, "two")),
("item", (2, {"three": 3})),
("item", (3, [4, 5])),
],
)
def run(self, input_data: Input) -> BlockOutput:
for index, item in enumerate(input_data.items):
yield "item", (index, item)

View File

@@ -0,0 +1,53 @@
from enum import Enum
import requests
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class HttpRequestBlock(Block):
class Input(BlockSchema):
url: str
method: HttpMethod = HttpMethod.POST
headers: dict[str, str] = {}
body: object = {}
class Output(BlockSchema):
response: object
client_error: object
server_error: object
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.BASIC},
input_schema=HttpRequestBlock.Input,
output_schema=HttpRequestBlock.Output,
)
def run(self, input_data: Input) -> BlockOutput:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=input_data.body,
)
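        # Bucket the reply by status family: 2xx -> response, 4xx -> client_error,
        # 5xx -> server_error. requests follows redirects by default, so a 3xx
        # rarely survives to hit the ValueError below.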
if response.status_code // 100 == 2:
yield "response", response.json()
elif response.status_code // 100 == 4:
yield "client_error", response.json()
elif response.status_code // 100 == 5:
yield "server_error", response.json()
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
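# A minimal sketch of direct invocation (hypothetical; the URL is illustrative
# and blocks normally run inside the executor):
#   block = HttpRequestBlock()
#   for name, payload in block.run(HttpRequestBlock.Input(url="https://example.com/api")):
#       print(name, payload)  # ("response", {...}) on a 2xx JSON reply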

View File

@@ -0,0 +1,409 @@
import logging
from enum import Enum
from typing import NamedTuple
import anthropic
import ollama
import openai
from groq import Groq
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SecretField
from autogpt_server.util import json
logger = logging.getLogger(__name__)
LlmApiKeys = {
"openai": BlockSecret("openai_api_key"),
"anthropic": BlockSecret("anthropic_api_key"),
"groq": BlockSecret("groq_api_key"),
"ollama": BlockSecret(value=""),
}
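# Provider-level fallback keys, used when a block input omits api_key
# (see ObjectLlmCallBlock.run); Ollama runs locally, so its entry is empty.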
class ModelMetadata(NamedTuple):
provider: str
context_window: int
class LlmModel(str, Enum):
# OpenAI models
GPT4O_MINI = "gpt-4o-mini"
GPT4O = "gpt-4o"
GPT4_TURBO = "gpt-4-turbo"
GPT3_5_TURBO = "gpt-3.5-turbo"
# Anthropic models
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
# Groq models
LLAMA3_8B = "llama3-8b-8192"
LLAMA3_70B = "llama3-70b-8192"
MIXTRAL_8X7B = "mixtral-8x7b-32768"
GEMMA_7B = "gemma-7b-it"
GEMMA2_9B = "gemma2-9b-it"
# New Groq models (Preview)
LLAMA3_1_405B = "llama-3.1-405b-reasoning"
LLAMA3_1_70B = "llama-3.1-70b-versatile"
LLAMA3_1_8B = "llama-3.1-8b-instant"
# Ollama models
OLLAMA_LLAMA3_8B = "llama3"
@property
def metadata(self) -> ModelMetadata:
return MODEL_METADATA[self]
MODEL_METADATA = {
LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000),
LlmModel.GPT4O: ModelMetadata("openai", 128000),
LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000),
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385),
LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000),
LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000),
LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192),
LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192),
LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768),
LlmModel.GEMMA_7B: ModelMetadata("groq", 8192),
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192),
    LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192),  # context limited during preview; set conservatively to 8k here
LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072),
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
}
class ObjectLlmCallBlock(Block):
class Input(BlockSchema):
prompt: str
expected_format: dict[str, str]
model: LlmModel = LlmModel.GPT4_TURBO
api_key: BlockSecret = SecretField(value="")
sys_prompt: str = ""
retry: int = 3
class Output(BlockSchema):
response: dict[str, str]
error: str
def __init__(self):
super().__init__(
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
categories={BlockCategory.LLM},
input_schema=ObjectLlmCallBlock.Input,
output_schema=ObjectLlmCallBlock.Output,
test_input={
"model": LlmModel.GPT4_TURBO,
"api_key": "fake-api",
"expected_format": {
"key1": "value1",
"key2": "value2",
},
"prompt": "User prompt",
},
test_output=("response", {"key1": "key1Value", "key2": "key2Value"}),
test_mock={
"llm_call": lambda *args, **kwargs: json.dumps(
{
"key1": "key1Value",
"key2": "key2Value",
}
)
},
)
@staticmethod
def llm_call(
api_key: str, model: LlmModel, prompt: list[dict], json_format: bool
) -> str:
provider = model.metadata.provider
if provider == "openai":
openai.api_key = api_key
response_format = {"type": "json_object"} if json_format else None
response = openai.chat.completions.create(
model=model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
)
return response.choices[0].message.content or ""
elif provider == "anthropic":
sysprompt = "".join([p["content"] for p in prompt if p["role"] == "system"])
usrprompt = [p for p in prompt if p["role"] == "user"]
client = anthropic.Anthropic(api_key=api_key)
response = client.messages.create(
model=model.value,
max_tokens=4096,
system=sysprompt,
messages=usrprompt, # type: ignore
)
return response.content[0].text if response.content else ""
elif provider == "groq":
client = Groq(api_key=api_key)
response_format = {"type": "json_object"} if json_format else None
response = client.chat.completions.create(
model=model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
)
return response.choices[0].message.content or ""
elif provider == "ollama":
response = ollama.generate(
model=model.value,
prompt=prompt[0]["content"],
)
return response["response"]
else:
raise ValueError(f"Unsupported LLM provider: {provider}")
def run(self, input_data: Input) -> BlockOutput:
prompt = []
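        # Multi-line prompts below use a leading '|' to mark the left margin;
        # trim_prompt strips that margin along with surrounding whitespace.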
def trim_prompt(s: str) -> str:
lines = s.strip().split("\n")
return "\n".join([line.strip().lstrip("|") for line in lines])
if input_data.sys_prompt:
prompt.append({"role": "system", "content": input_data.sys_prompt})
if input_data.expected_format:
expected_format = [
f'"{k}": "{v}"' for k, v in input_data.expected_format.items()
]
format_prompt = ",\n ".join(expected_format)
sys_prompt = trim_prompt(
f"""
|Reply in json format:
|{{
| {format_prompt}
|}}
"""
)
prompt.append({"role": "system", "content": sys_prompt})
prompt.append({"role": "user", "content": input_data.prompt})
def parse_response(resp: str) -> tuple[dict[str, str], str | None]:
try:
parsed = json.loads(resp)
miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys())
if miss_keys:
return parsed, f"Missing keys: {miss_keys}"
return parsed, None
except Exception as e:
return {}, f"JSON decode error: {e}"
logger.warning(f"LLM request: {prompt}")
retry_prompt = ""
model = input_data.model
api_key = (
input_data.api_key.get_secret_value()
or LlmApiKeys[model.metadata.provider].get_secret_value()
)
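        # Retry loop: on a malformed or incomplete JSON reply, feed the raw
        # response and the parse error back to the model and ask again.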
for retry_count in range(input_data.retry):
try:
response_text = self.llm_call(
api_key=api_key,
model=model,
prompt=prompt,
json_format=bool(input_data.expected_format),
)
logger.warning(f"LLM attempt-{retry_count} response: {response_text}")
if input_data.expected_format:
parsed_dict, parsed_error = parse_response(response_text)
if not parsed_error:
yield "response", {k: str(v) for k, v in parsed_dict.items()}
return
else:
yield "response", {"response": response_text}
return
retry_prompt = trim_prompt(
f"""
|This is your previous error response:
|--
|{response_text}
|--
|
|And this is the error:
|--
|{parsed_error}
|--
"""
)
prompt.append({"role": "user", "content": retry_prompt})
except Exception as e:
logger.error(f"Error calling LLM: {e}")
retry_prompt = f"Error calling LLM: {e}"
yield "error", retry_prompt
class TextLlmCallBlock(Block):
class Input(BlockSchema):
prompt: str
model: LlmModel = LlmModel.GPT4_TURBO
api_key: BlockSecret = SecretField(value="")
sys_prompt: str = ""
retry: int = 3
class Output(BlockSchema):
response: str
error: str
def __init__(self):
super().__init__(
id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
categories={BlockCategory.LLM},
input_schema=TextLlmCallBlock.Input,
output_schema=TextLlmCallBlock.Output,
test_input={"prompt": "User prompt"},
test_output=("response", "Response text"),
test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
)
@staticmethod
def llm_call(input_data: ObjectLlmCallBlock.Input) -> str:
object_block = ObjectLlmCallBlock()
for output_name, output_data in object_block.run(input_data):
if output_name == "response":
return output_data["response"]
            else:
                # output_data is an error message string here; wrap it in an exception.
                raise RuntimeError(output_data)
raise ValueError("Failed to get a response from the LLM.")
def run(self, input_data: Input) -> BlockOutput:
try:
object_input_data = ObjectLlmCallBlock.Input(
**{attr: getattr(input_data, attr) for attr in input_data.model_fields},
expected_format={},
)
yield "response", self.llm_call(object_input_data)
except Exception as e:
yield "error", str(e)
class TextSummarizerBlock(Block):
class Input(BlockSchema):
text: str
model: LlmModel = LlmModel.GPT4_TURBO
api_key: BlockSecret = SecretField(value="")
# TODO: Make this dynamic
max_tokens: int = 4000 # Adjust based on the model's context window
chunk_overlap: int = 100 # Overlap between chunks to maintain context
class Output(BlockSchema):
summary: str
error: str
def __init__(self):
super().__init__(
id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
description="Utilize a Large Language Model (LLM) to summarize a long text.",
categories={BlockCategory.LLM, BlockCategory.TEXT},
input_schema=TextSummarizerBlock.Input,
output_schema=TextSummarizerBlock.Output,
test_input={"text": "Lorem ipsum..." * 100},
test_output=("summary", "Final summary of a long text"),
test_mock={
"llm_call": lambda input_data: (
{"final_summary": "Final summary of a long text"}
if "final_summary" in input_data.expected_format
else {"summary": "Summary of a chunk of text"}
)
},
)
def run(self, input_data: Input) -> BlockOutput:
try:
for output in self._run(input_data):
yield output
except Exception as e:
yield "error", str(e)
def _run(self, input_data: Input) -> BlockOutput:
chunks = self._split_text(
input_data.text, input_data.max_tokens, input_data.chunk_overlap
)
summaries = []
for chunk in chunks:
chunk_summary = self._summarize_chunk(chunk, input_data)
summaries.append(chunk_summary)
final_summary = self._combine_summaries(summaries, input_data)
yield "summary", final_summary
@staticmethod
def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]:
words = text.split()
chunks = []
chunk_size = max_tokens - overlap
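        # Words stand in for tokens here. Advancing by (max_tokens - overlap)
        # while slicing max_tokens words leaves `overlap` shared words between
        # consecutive chunks, preserving context across chunk boundaries.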
for i in range(0, len(words), chunk_size):
chunk = " ".join(words[i : i + max_tokens])
chunks.append(chunk)
return chunks
@staticmethod
def llm_call(input_data: ObjectLlmCallBlock.Input) -> dict[str, str]:
llm_block = ObjectLlmCallBlock()
for output_name, output_data in llm_block.run(input_data):
if output_name == "response":
return output_data
raise ValueError("Failed to get a response from the LLM.")
def _summarize_chunk(self, chunk: str, input_data: Input) -> str:
prompt = f"Summarize the following text concisely:\n\n{chunk}"
llm_response = self.llm_call(
ObjectLlmCallBlock.Input(
prompt=prompt,
api_key=input_data.api_key,
model=input_data.model,
expected_format={"summary": "The summary of the given text."},
)
)
return llm_response["summary"]
def _combine_summaries(self, summaries: list[str], input_data: Input) -> str:
combined_text = " ".join(summaries)
if len(combined_text.split()) <= input_data.max_tokens:
prompt = (
"Provide a final, concise summary of the following summaries:\n\n"
+ combined_text
)
llm_response = self.llm_call(
ObjectLlmCallBlock.Input(
prompt=prompt,
api_key=input_data.api_key,
model=input_data.model,
expected_format={
"final_summary": "The final summary of all provided summaries."
},
)
)
return llm_response["final_summary"]
else:
# If combined summaries are still too long, recursively summarize
return self._run(
TextSummarizerBlock.Input(
text=combined_text,
api_key=input_data.api_key,
model=input_data.model,
max_tokens=input_data.max_tokens,
chunk_overlap=input_data.chunk_overlap,
)
            ).send(None)[1]  # .send(None) returns the first yielded ("summary", text) tuple; [1] takes the text

View File

@@ -0,0 +1,172 @@
import operator
from enum import Enum
from typing import Any, Union
import pydantic
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.model import SchemaField
class Operation(Enum):
ADD = "Add"
SUBTRACT = "Subtract"
MULTIPLY = "Multiply"
DIVIDE = "Divide"
POWER = "Power"
class MathsResult(pydantic.BaseModel):
result: Union[float, int] | None = None
explanation: str
class CounterResult(pydantic.BaseModel):
count: int | None = None
type: str
explanation: str
class MathsBlock(Block):
class Input(BlockSchema):
operation: Operation = SchemaField(
description="Choose the math operation you want to perform",
placeholder="Select an operation",
)
a: float = SchemaField(
description="Enter the first number (A)", placeholder="For example: 10"
)
b: float = SchemaField(
description="Enter the second number (B)", placeholder="For example: 5"
)
round_result: bool = SchemaField(
description="Do you want to round the result to a whole number?",
default=False,
)
class Output(BlockSchema):
result: MathsResult = SchemaField(
description="The result of your calculation with an explanation"
)
def __init__(self):
super().__init__(
id="b1ab9b19-67a6-406d-abf5-2dba76d00c79",
input_schema=MathsBlock.Input,
output_schema=MathsBlock.Output,
test_input={
"operation": Operation.ADD.value,
"a": 10.0,
"b": 5.0,
"round_result": False,
},
test_output=[
(
"result",
MathsResult(
result=15.0, explanation="Added 10.0 and 5.0 to get 15.0"
),
),
],
)
def run(self, input_data: Input) -> BlockOutput:
operation = input_data.operation
a = input_data.a
b = input_data.b
operations = {
Operation.ADD: (operator.add, "Added"),
Operation.SUBTRACT: (operator.sub, "Subtracted"),
Operation.MULTIPLY: (operator.mul, "Multiplied"),
Operation.DIVIDE: (operator.truediv, "Divided"),
Operation.POWER: (operator.pow, "Raised"),
}
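        # Dispatch table: each operation maps to its operator function and the
        # past-tense verb used when building the explanation string.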
op_func, op_word = operations[operation]
try:
if operation == Operation.DIVIDE and b == 0:
raise ZeroDivisionError("Cannot divide by zero")
result = op_func(a, b)
if operation == Operation.POWER:
explanation = f"{op_word} {a} to the power of {b} to get {result}"
elif operation == Operation.DIVIDE:
explanation = f"{op_word} {a} by {b} to get {result}"
else:
explanation = f"{op_word} {a} and {b} to get {result}"
if input_data.round_result:
result = round(result)
explanation += " (rounded to the nearest whole number)"
yield "result", MathsResult(result=result, explanation=explanation)
except ZeroDivisionError:
yield "result", MathsResult(
result=None, explanation="Cannot divide by zero"
)
except Exception as e:
yield "result", MathsResult(
result=None, explanation=f"An error occurred: {str(e)}"
)
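# A minimal sketch of expected behavior (hypothetical invocation):
#   MathsBlock().run(MathsBlock.Input(operation=Operation.DIVIDE, a=1.0, b=0.0))
#   yields ("result", MathsResult(result=None, explanation="Cannot divide by zero")).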
class CounterBlock(Block):
class Input(BlockSchema):
collection: Any = SchemaField(
description="Enter the collection you want to count. This can be a list, dictionary, string, or any other iterable.",
placeholder="For example: [1, 2, 3] or {'a': 1, 'b': 2} or 'hello'",
)
class Output(BlockSchema):
result: CounterResult = SchemaField(description="The result of the count")
def __init__(self):
super().__init__(
id="3c9c2f42-b0c3-435f-ba35-05f7a25c772a",
input_schema=CounterBlock.Input,
output_schema=CounterBlock.Output,
test_input={"collection": [1, 2, 3, 4, 5]},
test_output=[
(
"result",
CounterResult(
count=5, type="list", explanation="Counted 5 items in a list"
),
),
],
)
def run(self, input_data: Input) -> BlockOutput:
collection = input_data.collection
try:
if isinstance(collection, (str, list, tuple, set, dict)):
count = len(collection)
collection_type = type(collection).__name__
elif hasattr(collection, "__iter__"):
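                # Counting a generic iterator consumes it; acceptable here
                # since the collection is not iterated again afterwards.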
count = sum(1 for _ in collection)
collection_type = "iterable"
else:
raise ValueError("Input is not a countable collection")
if isinstance(collection, str):
item_word = "character" if count == 1 else "characters"
elif isinstance(collection, dict):
item_word = "key-value pair" if count == 1 else "key-value pairs"
else:
item_word = "item" if count == 1 else "items"
explanation = f"Counted {count} {item_word} in a {collection_type}"
yield "result", CounterResult(
count=count, type=collection_type, explanation=explanation
)
except Exception as e:
yield "result", CounterResult(
count=None, type="error", explanation=f"An error occurred: {str(e)}"
)

Some files were not shown because too many files have changed in this diff.