Compare commits

..

58 Commits

Author SHA1 Message Date
Reinier van der Leer
7e0b1156cc feat(agent): Improve history format for code flow execution results 2024-07-23 23:15:50 +02:00
Reinier van der Leer
da9360fdeb feat(agent/api): Pretty-print execute_code_flow proposal in Agent Protocol output 2024-07-23 22:55:16 +02:00
Reinier van der Leer
9dea6a273e Merge branch 'master' into zamilmajdy/code-validation 2024-07-23 22:42:32 +02:00
Reinier van der Leer
e19636ac3e feat(agent/cli): Pretty-print code flow proposal
- Amend `main.py:update_user(..)` to improve displaying of existing + new prompt strategy output
- Replace `"execute_code_flow"` literals with references to the command definition
2024-07-23 22:18:54 +02:00
Reinier van der Leer
f03c6546b8 Merge branch 'master' into zamilmajdy/code-validation 2024-07-23 20:38:30 +02:00
Reinier van der Leer
2c4afd4458 Migrate autogpt/agents/prompt_strategies/code_flow.py to Pydantic v2 2024-07-04 01:07:16 -06:00
Reinier van der Leer
8b1d416de3 Merge branch 'master' into zamilmajdy/code-validation 2024-07-04 01:05:40 -06:00
Reinier van der Leer
7f6b7d6d7e remove unused import in forge/llm/providers/openai.py 2024-07-02 13:05:53 -06:00
Reinier van der Leer
736ac778cc Merge branch 'master' into zamilmajdy/code-validation 2024-07-02 13:05:31 -06:00
Reinier van der Leer
38eafdbb66 Update CodeFlowPromptStrategy with upstream changes (#7223) 2024-07-02 04:29:55 +02:00
Reinier van der Leer
6d9f564dc5 Merge branch 'master' into zamilmajdy/code-validation 2024-07-01 20:16:31 -06:00
Reinier van der Leer
3e675123d7 Merge branch 'master' into zamilmajdy/code-validation 2024-06-27 14:12:19 -06:00
Reinier van der Leer
37cc047656 lint-fix + minor refactor 2024-06-25 09:55:13 -07:00
Reinier van der Leer
9f804080ed address feedback: pass commands getter to CodeFlowExecutionComponent(..) 2024-06-25 09:30:26 -07:00
Reinier van der Leer
680fbf49aa Merge branch 'master' into zamilmajdy/code-validation 2024-06-24 20:42:52 -07:00
Krzysztof Czerwinski
901dadefc3 Merge branch 'master' into zamilmajdy/code-validation 2024-06-19 13:05:40 +02:00
Nicholas Tindle
e204491c6c Merge branch 'master' into zamilmajdy/code-validation 2024-06-13 17:33:44 -05:00
Zamil Majdy
3597f801a7 Merge branch 'master' of github.com:Significant-Gravitas/AutoGPT into zamilmajdy/code-validation 2024-06-10 13:05:05 +07:00
Zamil Majdy
b59862c402 Address comment 2024-06-10 13:04:54 +07:00
Reinier van der Leer
81bac301e8 fix type issues 2024-06-08 23:44:45 +02:00
Reinier van der Leer
a9eb49d54e Merge branch 'master' into zamilmajdy/code-validation 2024-06-08 21:52:36 +02:00
Reinier van der Leer
2c6e1eb4c8 fix type issue in test_code_flow_strategy.py 2024-06-08 21:38:22 +02:00
Reinier van der Leer
3e8849b08e fix linting and type issues 2024-06-08 21:32:10 +02:00
Reinier van der Leer
111e8585b5 feat(forge/llm): allow async completion parsers 2024-06-08 21:29:35 +02:00
Reinier van der Leer
8144d26cef fix type issues 2024-06-08 21:02:44 +02:00
Reinier van der Leer
e264bf7764 forge.llm.providers.schema + code_flow_executor lint-fix and cleanup 2024-06-08 15:28:52 +02:00
Reinier van der Leer
6dd0975236 clean up & improve @command decorator
- add ability to extract parameter descriptions from docstring
- add ability to determine parameter JSON schemas from function signature
- add `JSONSchema.from_python_type` factory
2024-06-08 15:05:45 +02:00
Reinier van der Leer
c3acb99314 clean up forge.command.command 2024-06-08 15:01:00 +02:00
Reinier van der Leer
0578fb0246 fix async issues with code flow execution 2024-06-08 02:03:22 +02:00
Reinier van der Leer
731d0345f0 implement annotation expansion for non-builtin types 2024-06-08 02:01:20 +02:00
Reinier van der Leer
b4cd735f26 fix name collision with type in Command.return_type 2024-06-07 12:57:30 +02:00
Reinier van der Leer
6e715b6c71 simplify function header generation 2024-06-07 12:56:41 +02:00
Reinier van der Leer
fcca4cc893 clarify execute_code_flow 2024-06-03 22:00:34 +02:00
Reinier van der Leer
5c7c276c10 Merge branch 'master' into zamilmajdy/code-validation 2024-06-03 21:43:59 +02:00
Zamil Majdy
ae63aa8ebb Merge remote-tracking branch 'origin/zamilmajdy/code-validation' into zamilmajdy/code-validation 2024-05-20 22:39:47 +07:00
Zamil Majdy
fdd9f9b5ec Log fix 2024-05-20 22:39:30 +07:00
Zamil Majdy
a825aa8515 Merge branch 'master' into zamilmajdy/code-validation 2024-05-20 16:53:52 +02:00
Zamil Majdy
ae43136c2c Fix linting 2024-05-20 18:48:44 +07:00
Zamil Majdy
c8e16f3fe1 Fix linting 2024-05-20 18:42:36 +07:00
Zamil Majdy
3a60504138 isort 2024-05-20 18:21:17 +07:00
Zamil Majdy
dfa77739c3 Remove unnecessary changes 2024-05-20 18:14:39 +07:00
Zamil Majdy
9f6e25664c Debug Log changes 2024-05-20 18:11:53 +07:00
Zamil Majdy
3c4ff60e11 Add unit tests 2024-05-20 18:09:16 +07:00
Zamil Majdy
47eeaf0325 Revert dumb changes 2024-05-20 17:07:55 +07:00
Zamil Majdy
81ad3cb69a Merge conflicts 2024-05-20 17:00:25 +07:00
Zamil Majdy
834eb6c6e0 Some quality polishing 2024-05-20 15:56:18 +07:00
Zamil Majdy
fb802400ba Add return type 2024-05-17 17:10:54 +02:00
Zamil Majdy
922e643737 Fix Await fiasco 2024-05-17 00:57:29 +02:00
Zamil Majdy
7b5272f1f2 Fix Await fiasco 2024-05-17 00:50:11 +02:00
Zamil Majdy
ea134c7dbd Benchmark test 2024-05-16 20:09:10 +02:00
Zamil Majdy
f7634524fa More prompt engineering 2024-05-16 19:53:42 +02:00
Zamil Majdy
0eccbe1483 Prompt change 2024-05-15 21:04:52 +02:00
Zamil Majdy
0916df4df7 Fix async fiasco 2024-05-15 19:30:29 +02:00
Zamil Majdy
22e2373a0b Add code flow as a loop 2024-05-15 17:10:51 +02:00
Zamil Majdy
40426e4646 Merge master 2024-05-14 23:36:31 +02:00
Zamil Majdy
ef1fe7c4e8 Update notebook 2024-05-11 12:12:30 +02:00
Reinier van der Leer
ca7ca226ff one_shot_flow.ipynb + edits to make it work 2024-05-10 20:03:40 +02:00
Zamil Majdy
ed5f12c02b Add code validation 2024-05-10 16:43:53 +02:00
448 changed files with 12092 additions and 41615 deletions

View File

@@ -23,18 +23,6 @@
# Frontend
!frontend/build/web/
# rnd
!rnd/
# Explicitly re-ignore some folders
.*
**/__pycache__
# rnd
rnd/autogpt_builder/.next/
rnd/autogpt_builder/node_modules
rnd/autogpt_builder/.env.example
rnd/autogpt_builder/.env.local
rnd/autogpt_server/.env
rnd/autogpt_server/.venv/
rnd/market/.env

2
.gitattributes vendored
View File

@@ -6,5 +6,3 @@ docs/_javascript/** linguist-vendored
# Exclude VCR cassettes from stats
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
* text=auto

12
.github/CODEOWNERS vendored
View File

@@ -1,7 +1,5 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
rnd/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins
.github/workflows/ @Significant-Gravitas/devops
autogpt/ @Significant-Gravitas/maintainers
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers

View File

@@ -17,7 +17,6 @@ defaults:
working-directory: rnd/autogpt_builder
jobs:
lint:
runs-on: ubuntu-latest
@@ -32,10 +31,6 @@ jobs:
run: |
npm install
- name: Check formatting with Prettier
run: |
npx prettier --check .
- name: Run lint
run: |
npm run lint

View File

@@ -1,56 +0,0 @@
name: AutoGPT Builder Infra
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
defaults:
run:
shell: bash
working-directory: rnd/infra
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false
- name: Set up Helm
uses: azure/setup-helm@v4.2.0
with:
version: v3.14.4
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}

View File

@@ -37,8 +37,8 @@ jobs:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER || 'postgres' }}
password: ${{ secrets.DB_PASS || 'postgres' }}
username: ${{ secrets.DB_USER }}
password: ${{ secrets.DB_PASS }}
database: postgres
port: 5432
id: postgres
@@ -115,38 +115,31 @@ jobs:
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
run: poetry run prisma generate --schema postgres/schema.prisma
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
run: poetry run prisma migrate dev --schema postgres/schema.prisma --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
- id: lint
name: Run Linter
- name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
poetry run pytest -vv \
test
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
DB_USER: ${{ secrets.DB_USER }}
DB_PASS: ${{ secrets.DB_PASS }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
DATABASE_URL: postgresql://${{ secrets.DB_USER }}:${{ secrets.DB_PASS }}@localhost:5432/${{ secrets.DB_NAME }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4

View File

@@ -0,0 +1,55 @@
import os
import requests
import sys
# GitHub API endpoint
api_url = os.environ["GITHUB_API_URL"]
repo = os.environ["GITHUB_REPOSITORY"]
sha = os.environ["GITHUB_SHA"]
# GitHub token for authentication
github_token = os.environ["GITHUB_TOKEN"]
# API endpoint for check runs for the specific SHA
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
# Set up headers for authentication
headers = {
"Authorization": f"token {github_token}",
"Accept": "application/vnd.github.v3+json"
}
# Make the API request
response = requests.get(endpoint, headers=headers)
if response.status_code != 200:
print(f"Error: Unable to fetch check runs data. Status code: {response.status_code}")
sys.exit(1)
check_runs = response.json()["check_runs"]
# Flag to track if all other check runs have passed
all_others_passed = True
# Current run id
current_run_id = os.environ["GITHUB_RUN_ID"]
for run in check_runs:
if str(run["id"]) != current_run_id:
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}")
else:
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)

View File

@@ -1,31 +1,51 @@
name: PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
workflow_run:
workflows: ["*"]
types:
- completed
jobs:
status-check:
name: Check PR Status
name: Check Actions Status
runs-on: ubuntu-latest
steps:
# - name: Wait some time for all actions to start
# run: sleep 30
- uses: actions/checkout@v4
# with:
# fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install requests
- name: Check PR Status
run: |
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install requests
- name: Debug Information
run: |
echo "Event name: ${{ github.event_name }}"
echo "Workflow: ${{ github.workflow }}"
echo "Action: ${{ github.action }}"
echo "Actor: ${{ github.actor }}"
echo "Repository: ${{ github.repository }}"
echo "Ref: ${{ github.ref }}"
echo "Head ref: ${{ github.head_ref }}"
echo "Base ref: ${{ github.base_ref }}"
echo "Event payload:"
cat $GITHUB_EVENT_PATH
- name: Debug File Structure
run: |
echo "Current directory:"
pwd
echo "Directory contents:"
ls -R
echo "GitHub workspace:"
echo $GITHUB_WORKSPACE
echo "GitHub workspace contents:"
ls -R $GITHUB_WORKSPACE
- name: Check Actions Status
run: |
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python .github/scripts/check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -28,26 +28,12 @@
"name": "autogpt_builder",
"path": "../rnd/autogpt_builder"
},
{
"name": "market",
"path": "../rnd/market"
},
{
"name": "lib",
"path": "../rnd/autogpt_libs"
},
{
"name": "infra",
"path": "../rnd/infra"
},
{
"name": "[root]",
"path": ".."
}
],
"settings": {
"python.analysis.typeCheckingMode": "basic"
},
"settings": {},
"extensions": {
"recommendations": [
"charliermarsh.ruff",

View File

@@ -105,7 +105,6 @@
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=
### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)

View File

@@ -1,3 +0,0 @@
{
"python.analysis.typeCheckingMode": "basic",
}

View File

@@ -23,6 +23,7 @@ from forge.components.code_executor.code_executor import (
CodeExecutorComponent,
CodeExecutorConfiguration,
)
from forge.components.code_flow_executor import CodeFlowExecutionComponent
from forge.components.context.context import AgentContext, ContextComponent
from forge.components.file_manager import FileManagerComponent
from forge.components.git_operations import GitOperationsComponent
@@ -40,7 +41,6 @@ from forge.llm.providers import (
ChatModelResponse,
MultiProvider,
)
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import (
ActionErrorResult,
ActionInterruptedByHuman,
@@ -56,6 +56,7 @@ from forge.utils.exceptions import (
)
from pydantic import Field
from .prompt_strategies.code_flow import CodeFlowAgentPromptStrategy
from .prompt_strategies.one_shot import (
OneShotAgentActionProposal,
OneShotAgentPromptStrategy,
@@ -96,11 +97,14 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
llm_provider: MultiProvider,
file_storage: FileStorage,
app_config: AppConfig,
prompt_strategy_class: type[
OneShotAgentPromptStrategy | CodeFlowAgentPromptStrategy
] = CodeFlowAgentPromptStrategy,
):
super().__init__(settings)
self.llm_provider = llm_provider
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
prompt_config = prompt_strategy_class.default_configuration.model_copy(
deep=True
)
prompt_config.use_functions_api = (
@@ -108,7 +112,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
# Anthropic currently doesn't support tools + prefilling :(
and self.llm.provider_name != "anthropic"
)
self.prompt_strategy = OneShotAgentPromptStrategy(prompt_config, logger)
self.prompt_strategy = prompt_strategy_class(prompt_config, logger)
self.commands: list[Command] = []
# Components
@@ -145,6 +149,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
ContextComponent
)
self.code_flow_executor = CodeFlowExecutionComponent(lambda: self.commands)
self.event_history = settings.history
self.app_config = app_config
@@ -185,7 +190,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
task=self.state.task,
ai_profile=self.state.ai_profile,
ai_directives=directives,
commands=function_specs_from_commands(self.commands),
commands=self.commands,
include_os_info=include_os_info,
)
@@ -201,9 +206,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
if exception:
prompt.messages.append(ChatMessage.system(f"Error: {exception}"))
response: ChatModelResponse[
OneShotAgentActionProposal
] = await self.llm_provider.create_chat_completion(
response: ChatModelResponse = await self.llm_provider.create_chat_completion(
prompt.messages,
model_name=self.llm.name,
completion_parser=self.prompt_strategy.parse_response_content,
@@ -281,7 +284,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
except AgentException:
raise
except Exception as e:
raise CommandExecutionError(str(e))
raise CommandExecutionError(str(e)) from e
def _get_command(self, command_name: str) -> Command:
for command in reversed(self.commands):

View File

@@ -0,0 +1,355 @@
import inspect
import re
from logging import Logger
from typing import Callable, Iterable, Sequence, get_args, get_origin
from forge.command import Command
from forge.components.code_flow_executor import CodeFlowExecutionComponent
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.json.parsing import extract_dict_from_json
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.prompting.utils import indent
from forge.llm.providers.schema import (
AssistantChatMessage,
AssistantFunctionCall,
ChatMessage,
)
from forge.models.config import SystemConfiguration
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import InvalidAgentResponseError
from forge.utils.function.code_validation import CodeValidator
from forge.utils.function.model import FunctionDef
from pydantic import BaseModel, Field
from autogpt.agents.prompt_strategies.one_shot import (
AssistantThoughts,
OneShotAgentActionProposal,
OneShotAgentPromptConfiguration,
)
_RESPONSE_INTERFACE_NAME = "AssistantResponse"
class CodeFlowAgentActionProposal(BaseModel):
thoughts: AssistantThoughts
immediate_plan: str = Field(
...,
description="We will be running an iterative process to execute the plan, "
"Write the partial / immediate plan to execute your plan as detailed and "
"efficiently as possible without the help of the reasoning/intelligence. "
"The plan should describe the output of the immediate plan, so that the next "
"iteration can be executed by taking the output into account. "
"Try to do as much as possible without making any assumption or uninformed "
"guesses. Avoid large output at all costs!!!\n"
"Format: Objective[Objective of this iteration, explain what's the use of this "
"iteration for the next one] Plan[Plan that does not require any reasoning or "
"intelligence] Output[Output of the plan / should be small, avoid whole file "
"output]",
)
python_code: str = Field(
...,
description=(
"Write the fully-functional Python code of the immediate plan. "
"The output will be an `async def main() -> str` function of the immediate "
"plan that return the string output, the output will be passed into the "
"LLM context window so avoid returning the whole content!. "
"Use ONLY the listed available functions and built-in Python features. "
"Leverage the given magic functions to implement function calls for which "
"the arguments can't be determined yet. "
"Example:`async def main() -> str:\n"
" return await provided_function('arg1', 'arg2').split('\\n')[0]`"
),
)
FINAL_INSTRUCTION: str = (
"You have to give the answer in the from of JSON schema specified previously. "
"For the `python_code` field, you have to write Python code to execute your plan "
"as efficiently as possible. Your code will be executed directly without any "
"editing, if it doesn't work you will be held responsible. "
"Use ONLY the listed available functions and built-in Python features. "
"Do not make uninformed assumptions "
"(e.g. about the content or format of an unknown file). Leverage the given magic "
"functions to implement function calls for which the arguments can't be determined "
"yet. Reduce the amount of unnecessary data passed into these magic functions "
"where possible, because magic costs money and magically processing large amounts "
"of data is expensive. If you think are done with the task, you can simply call "
"finish(reason='your reason') to end the task, "
"a function that has one `finish` command, don't mix finish with other functions! "
"If you still need to do other functions, "
"let the next cycle execute the `finish` function. "
"Avoid hard-coding input values as input, and avoid returning large outputs. "
"The code that you have been executing in the past cycles can also be buggy, "
"so if you see undesired output, you can always try to re-plan, and re-code. "
)
class CodeFlowAgentPromptStrategy(PromptStrategy):
default_configuration: OneShotAgentPromptConfiguration = (
OneShotAgentPromptConfiguration()
)
def __init__(
self,
configuration: SystemConfiguration,
logger: Logger,
):
self.config = configuration
self.response_schema = JSONSchema.from_dict(
CodeFlowAgentActionProposal.model_json_schema()
)
self.logger = logger
self.commands: Sequence[Command] = [] # Sequence -> disallow list modification
@property
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.SMART_MODEL # FIXME: dynamic switching
def build_prompt(
self,
*,
messages: list[ChatMessage],
task: str,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: Sequence[Command],
**extras,
) -> ChatPrompt:
"""Constructs and returns a prompt with the following structure:
1. System prompt
3. `cycle_instruction`
"""
system_prompt, response_prefill = self.build_system_prompt(
ai_profile=ai_profile,
ai_directives=ai_directives,
commands=commands,
)
self.commands = commands
final_instruction_msg = ChatMessage.system(FINAL_INSTRUCTION)
return ChatPrompt(
messages=[
ChatMessage.system(system_prompt),
ChatMessage.user(f'"""{task}"""'),
*messages,
*(
[final_instruction_msg]
if not any(m.role == "assistant" for m in messages)
else []
),
],
prefill_response=response_prefill,
)
def build_system_prompt(
self,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: Iterable[Command],
) -> tuple[str, str]:
"""
Builds the system prompt.
Returns:
str: The system prompt body
str: The desired start for the LLM's response; used to steer the output
"""
response_fmt_instruction, response_prefill = self.response_format_instruction()
system_prompt_parts = (
self._generate_intro_prompt(ai_profile)
+ [
"## Your Task\n"
"The user will specify a task for you to execute, in triple quotes,"
" in the next message. Your job is to complete the task, "
"and terminate when your task is done."
]
+ ["## Available Functions\n" + self._generate_function_headers(commands)]
+ ["## RESPONSE FORMAT\n" + response_fmt_instruction]
)
# Join non-empty parts together into paragraph format
return (
"\n\n".join(filter(None, system_prompt_parts)).strip("\n"),
response_prefill,
)
def response_format_instruction(self) -> tuple[str, str]:
response_schema = self.response_schema.model_copy(deep=True)
assert response_schema.properties
# Unindent for performance
response_format = re.sub(
r"\n\s+",
"\n",
response_schema.to_typescript_object_interface(_RESPONSE_INTERFACE_NAME),
)
response_prefill = f'{{\n "{list(response_schema.properties.keys())[0]}":'
return (
(
f"YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
f"{response_format}"
),
response_prefill,
)
def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
"""Generates the introduction part of the prompt.
Returns:
list[str]: A list of strings forming the introduction part of the prompt.
"""
return [
f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
# "Your decisions must always be made independently without seeking "
# "user assistance. Play to your strengths as an LLM and pursue "
# "simple strategies with no legal complications.",
]
def _generate_function_headers(self, commands: Iterable[Command]) -> str:
function_stubs: list[str] = []
annotation_types_in_context: set[type] = set()
for f in commands:
# Add source code of non-builtin types from function signatures
new_annotation_types = extract_annotation_types(f.method).difference(
annotation_types_in_context
)
new_annotation_types_src = [
f"# {a.__module__}.{a.__qualname__}\n{inspect.getsource(a)}"
for a in new_annotation_types
]
annotation_types_in_context.update(new_annotation_types)
param_descriptions = "\n".join(
f"{param.name}: {param.spec.description}"
for param in f.parameters
if param.spec.description
)
full_function_stub = (
("\n".join(new_annotation_types_src) + "\n" + f.header).strip()
+ "\n"
+ indent(
(
'"""\n'
f"{f.description}\n\n"
f"Params:\n{indent(param_descriptions)}\n"
'"""\n'
"pass"
),
)
)
function_stubs.append(full_function_stub)
return "\n\n\n".join(function_stubs)
async def parse_response_content(
self,
response: AssistantChatMessage,
) -> OneShotAgentActionProposal:
if not response.content:
raise InvalidAgentResponseError("Assistant response has no text content")
self.logger.debug(
"LLM response content:"
+ (
f"\n{response.content}"
if "\n" in response.content
else f" '{response.content}'"
)
)
assistant_reply_dict = extract_dict_from_json(response.content)
parsed_response = CodeFlowAgentActionProposal.model_validate(
assistant_reply_dict
)
if not parsed_response.python_code:
raise ValueError("python_code is empty")
available_functions = {
c.name: FunctionDef(
name=c.name,
arg_types=[(p.name, p.spec.python_type) for p in c.parameters],
arg_descs={p.name: p.spec.description for p in c.parameters},
arg_defaults={
p.name: p.spec.default or "None"
for p in c.parameters
if p.spec.default or not p.spec.required
},
return_type=c.return_type,
return_desc="Output of the function",
function_desc=c.description,
is_async=c.is_async,
)
for c in self.commands
}
available_functions.update(
{
"main": FunctionDef(
name="main",
arg_types=[],
arg_descs={},
return_type="str",
return_desc="Output of the function",
function_desc="The main function to execute the plan",
is_async=True,
)
}
)
code_validation = await CodeValidator(
function_name="main",
available_functions=available_functions,
).validate_code(parsed_response.python_code)
clean_response = response.model_copy()
clean_response.content = parsed_response.model_dump_json(indent=4)
# TODO: prevent combining finish with other functions
if _finish_call := re.search(
r"finish\((reason=)?(.*?)\)", code_validation.functionCode
):
finish_reason = _finish_call.group(2)[1:-1] # remove quotes
result = OneShotAgentActionProposal(
thoughts=parsed_response.thoughts,
use_tool=AssistantFunctionCall(
name="finish",
arguments={"reason": finish_reason},
),
raw_message=clean_response,
)
else:
result = OneShotAgentActionProposal(
thoughts=parsed_response.thoughts,
use_tool=AssistantFunctionCall(
name=CodeFlowExecutionComponent.execute_code_flow.name,
arguments={
"python_code": code_validation.functionCode,
"plan_text": parsed_response.immediate_plan,
},
),
raw_message=clean_response,
)
return result
def extract_annotation_types(func: Callable) -> set[type]:
annotation_types = set()
for annotation in inspect.get_annotations(func).values():
annotation_types.update(_get_nested_types(annotation))
return annotation_types
def _get_nested_types(annotation: type) -> Iterable[type]:
if _args := get_args(annotation):
for a in _args:
yield from _get_nested_types(a)
if not _is_builtin_type(_a := get_origin(annotation) or annotation):
yield _a
def _is_builtin_type(_type: type):
"""Check if a given type is a built-in type."""
import sys
return _type.__module__ in sys.stdlib_module_names

View File

@@ -6,6 +6,7 @@ import re
from logging import Logger
import distro
from forge.command import Command
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.json.parsing import extract_dict_from_json
@@ -16,6 +17,7 @@ from forge.llm.providers.schema import (
ChatMessage,
CompletionModelFunction,
)
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import ActionProposal
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
@@ -27,13 +29,21 @@ _RESPONSE_INTERFACE_NAME = "AssistantResponse"
class AssistantThoughts(ModelWithSummary):
past_action_summary: str = Field(
...,
description="Summary of the last action you took, if there is none, "
"you can leave it empty",
)
observations: str = Field(
description="Relevant observations from your last action (if any)"
description="Relevant observations from your last actions (if any)"
)
text: str = Field(description="Thoughts")
reasoning: str = Field(description="Reasoning behind the thoughts")
self_criticism: str = Field(description="Constructive self-criticism")
plan: list[str] = Field(description="Short list that conveys the long-term plan")
plan: list[str] = Field(
description="Short list that conveys the long-term plan, "
"considering the progress on your task so far",
)
speak: str = Field(description="Summary of thoughts, to say to user")
def summary(self) -> str:
@@ -101,7 +111,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
@property
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching
return LanguageModelClassification.SMART_MODEL # FIXME: dynamic switching
def build_prompt(
self,
@@ -110,7 +120,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
task: str,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
commands: list[Command],
include_os_info: bool,
**extras,
) -> ChatPrompt:
@@ -118,10 +128,11 @@ class OneShotAgentPromptStrategy(PromptStrategy):
1. System prompt
3. `cycle_instruction`
"""
functions = function_specs_from_commands(commands)
system_prompt, response_prefill = self.build_system_prompt(
ai_profile=ai_profile,
ai_directives=ai_directives,
commands=commands,
functions=functions,
include_os_info=include_os_info,
)
@@ -135,14 +146,14 @@ class OneShotAgentPromptStrategy(PromptStrategy):
final_instruction_msg,
],
prefill_response=response_prefill,
functions=commands if self.config.use_functions_api else [],
functions=functions if self.config.use_functions_api else [],
)
def build_system_prompt(
self,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
functions: list[CompletionModelFunction],
include_os_info: bool,
) -> tuple[str, str]:
"""
@@ -162,7 +173,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
self.config.body_template.format(
constraints=format_numbered_list(ai_directives.constraints),
resources=format_numbered_list(ai_directives.resources),
commands=self._generate_commands_list(commands),
commands=self._generate_commands_list(functions),
best_practices=format_numbered_list(ai_directives.best_practices),
)
]

View File

@@ -23,6 +23,7 @@ from forge.agent_protocol.models import (
TaskRequestBody,
TaskStepsListResponse,
)
from forge.components.code_flow_executor import CodeFlowExecutionComponent
from forge.file_storage import FileStorage
from forge.llm.providers import ModelProviderBudget, MultiProvider
from forge.models.action import ActionErrorResult, ActionSuccessResult
@@ -298,11 +299,16 @@ class AgentProtocolServer:
else ""
)
output += f"{assistant_response.thoughts.speak}\n\n"
output += (
f"Next Command: {next_tool_to_use}"
if next_tool_to_use.name != ASK_COMMAND
else next_tool_to_use.arguments["question"]
)
if next_tool_to_use.name == CodeFlowExecutionComponent.execute_code_flow.name:
code = next_tool_to_use.arguments["python_code"]
plan = next_tool_to_use.arguments["plan_text"]
output += f"Code for next step:\n```py\n# {plan}\n\n{code}\n```"
else:
output += (
f"Next Command: {next_tool_to_use}"
if next_tool_to_use.name != ASK_COMMAND
else next_tool_to_use.arguments["question"]
)
additional_output = {
**(

View File

@@ -630,6 +630,9 @@ def update_user(
command_args: The arguments for the command.
assistant_reply_dict: The assistant's reply.
"""
from forge.components.code_flow_executor import CodeFlowExecutionComponent
from forge.llm.prompting.utils import indent
logger = logging.getLogger(__name__)
print_assistant_thoughts(
@@ -644,15 +647,29 @@ def update_user(
# First log new-line so user can differentiate sections better in console
print()
safe_tool_name = remove_ansi_escape(action_proposal.use_tool.name)
logger.info(
f"COMMAND = {Fore.CYAN}{safe_tool_name}{Style.RESET_ALL} "
f"ARGUMENTS = {Fore.CYAN}{action_proposal.use_tool.arguments}{Style.RESET_ALL}",
extra={
"title": "NEXT ACTION:",
"title_color": Fore.CYAN,
"preserve_color": True,
},
)
if safe_tool_name == CodeFlowExecutionComponent.execute_code_flow.name:
plan = action_proposal.use_tool.arguments["plan_text"]
code = action_proposal.use_tool.arguments["python_code"]
logger.info(
f"\n{indent(code, f'{Fore.GREEN}>>> {Fore.RESET}')}\n",
extra={
"title": "PROPOSED ACTION:",
"title_color": Fore.GREEN,
"preserve_color": True,
},
)
logger.debug(
f"{plan}\n", extra={"title": "EXPLANATION:", "title_color": Fore.YELLOW}
)
else:
logger.info(
str(action_proposal.use_tool),
extra={
"title": "PROPOSED ACTION:",
"title_color": Fore.GREEN,
"preserve_color": True,
},
)
async def get_user_feedback(
@@ -732,6 +749,12 @@ def print_assistant_thoughts(
)
if isinstance(thoughts, AssistantThoughts):
if thoughts.observations:
print_attribute(
"OBSERVATIONS",
remove_ansi_escape(thoughts.observations),
title_color=Fore.YELLOW,
)
print_attribute(
"REASONING", remove_ansi_escape(thoughts.reasoning), title_color=Fore.YELLOW
)
@@ -753,7 +776,7 @@ def print_assistant_thoughts(
line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}
)
print_attribute(
"CRITICISM",
"SELF-CRITICISM",
remove_ansi_escape(thoughts.self_criticism),
title_color=Fore.YELLOW,
)
@@ -764,7 +787,7 @@ def print_assistant_thoughts(
speak(assistant_thoughts_speak)
else:
print_attribute(
"SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW
"TL;DR", assistant_thoughts_speak, title_color=Fore.YELLOW
)
else:
speak(thoughts_text)

30
autogpt/poetry.lock generated
View File

@@ -4216,7 +4216,7 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
optional = true
optional = false
python-versions = "*"
files = [
{file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
@@ -5212,6 +5212,32 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
version = "0.4.4"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.4.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:29d44ef5bb6a08e235c8249294fa8d431adc1426bfda99ed493119e6f9ea1bf6"},
{file = "ruff-0.4.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c4efe62b5bbb24178c950732ddd40712b878a9b96b1d02b0ff0b08a090cbd891"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c8e2f1e8fc12d07ab521a9005d68a969e167b589cbcaee354cb61e9d9de9c15"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60ed88b636a463214905c002fa3eaab19795679ed55529f91e488db3fe8976ab"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b90fc5e170fc71c712cc4d9ab0e24ea505c6a9e4ebf346787a67e691dfb72e85"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8e7e6ebc10ef16dcdc77fd5557ee60647512b400e4a60bdc4849468f076f6eef"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9ddb2c494fb79fc208cd15ffe08f32b7682519e067413dbaf5f4b01a6087bcd"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c51c928a14f9f0a871082603e25a1588059b7e08a920f2f9fa7157b5bf08cfe9"},
{file = "ruff-0.4.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5eb0a4bfd6400b7d07c09a7725e1a98c3b838be557fee229ac0f84d9aa49c36"},
{file = "ruff-0.4.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b1867ee9bf3acc21778dcb293db504692eda5f7a11a6e6cc40890182a9f9e595"},
{file = "ruff-0.4.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1aecced1269481ef2894cc495647392a34b0bf3e28ff53ed95a385b13aa45768"},
{file = "ruff-0.4.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9da73eb616b3241a307b837f32756dc20a0b07e2bcb694fec73699c93d04a69e"},
{file = "ruff-0.4.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:958b4ea5589706a81065e2a776237de2ecc3e763342e5cc8e02a4a4d8a5e6f95"},
{file = "ruff-0.4.4-py3-none-win32.whl", hash = "sha256:cb53473849f011bca6e754f2cdf47cafc9c4f4ff4570003a0dad0b9b6890e876"},
{file = "ruff-0.4.4-py3-none-win_amd64.whl", hash = "sha256:424e5b72597482543b684c11def82669cc6b395aa8cc69acc1858b5ef3e5daae"},
{file = "ruff-0.4.4-py3-none-win_arm64.whl", hash = "sha256:39df0537b47d3b597293edbb95baf54ff5b49589eb7ff41926d8243caa995ea6"},
{file = "ruff-0.4.4.tar.gz", hash = "sha256:f87ea42d5cdebdc6a69761a9d0bc83ae9b3b30d0ad78952005ba6568d6c022af"},
]
[[package]]
name = "s3transfer"
version = "0.10.0"
@@ -6758,4 +6784,4 @@ benchmark = ["agbenchmark"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "b3d4efee5861b32152024dada1ec61f4241122419cb538012c00a6ed55ac8a4b"
content-hash = "c729e10fd5ac85400d2499397974d1b1831fed3b591657a2fea9e86501b96e19"

View File

@@ -30,9 +30,12 @@ gitpython = "^3.1.32"
hypercorn = "^0.14.4"
openai = "^1.7.2"
orjson = "^3.8.10"
ptyprocess = "^0.7.0"
pydantic = "^2.7.2"
pyright = "^1.1.364"
python-dotenv = "^1.0.0"
requests = "*"
ruff = "^0.4.4"
sentry-sdk = "^1.40.4"
# Benchmarking
@@ -47,7 +50,6 @@ black = "^23.12.1"
flake8 = "^7.0.0"
isort = "^5.13.1"
pre-commit = "*"
pyright = "^1.1.364"
# Type stubs
types-colorama = "*"

View File

@@ -0,0 +1,126 @@
import logging
from typing import Optional
import pytest
from forge.agent.protocols import CommandProvider
from forge.command import Command, command
from forge.components.code_flow_executor import CodeFlowExecutionComponent
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.llm.providers import AssistantChatMessage
from forge.llm.providers.schema import JSONSchema
from autogpt.agents.prompt_strategies.code_flow import CodeFlowAgentPromptStrategy
logger = logging.getLogger(__name__)
config = CodeFlowAgentPromptStrategy.default_configuration.copy(deep=True)
prompt_strategy = CodeFlowAgentPromptStrategy(config, logger)
class MockWebSearchProvider(CommandProvider):
def get_commands(self):
yield self.mock_web_search
@command(
description="Searches the web",
parameters={
"query": JSONSchema(
type=JSONSchema.Type.STRING,
description="The search query",
required=True,
),
"num_results": JSONSchema(
type=JSONSchema.Type.INTEGER,
description="The number of results to return",
minimum=1,
maximum=10,
required=False,
),
},
)
def mock_web_search(self, query: str, num_results: Optional[int] = None) -> str:
return "results"
@pytest.mark.asyncio
async def test_code_flow_build_prompt():
commands = list(MockWebSearchProvider().get_commands())
ai_profile = AIProfile()
ai_profile.ai_name = "DummyGPT"
ai_profile.ai_goals = ["A model for testing purposes"]
ai_profile.ai_role = "Help Testing"
ai_directives = AIDirectives()
ai_directives.resources = ["resource_1"]
ai_directives.constraints = ["constraint_1"]
ai_directives.best_practices = ["best_practice_1"]
prompt = str(
prompt_strategy.build_prompt(
task="Figure out from file.csv how much was spent on utilities",
messages=[],
ai_profile=ai_profile,
ai_directives=ai_directives,
commands=commands,
)
)
assert "DummyGPT" in prompt
assert (
"def mock_web_search(query: str, num_results: Optional[int] = None)" in prompt
)
@pytest.mark.asyncio
async def test_code_flow_parse_response():
response_content = """
{
"thoughts": {
"past_action_summary": "This is the past action summary.",
"observations": "This is the observation.",
"text": "Some text on the AI's thoughts.",
"reasoning": "This is the reasoning.",
"self_criticism": "This is the self-criticism.",
"plan": [
"Plan 1",
"Plan 2",
"Plan 3"
],
"speak": "This is what the AI would say."
},
"immediate_plan": "Objective[objective1] Plan[plan1] Output[out1]",
"python_code": "async def main() -> str:\n return 'You passed the test.'",
}
"""
response = await CodeFlowAgentPromptStrategy(config, logger).parse_response_content(
AssistantChatMessage(content=response_content)
)
assert "This is the observation." == response.thoughts.observations
assert "This is the reasoning." == response.thoughts.reasoning
assert CodeFlowExecutionComponent.execute_code_flow.name == response.use_tool.name
assert "async def main() -> str" in response.use_tool.arguments["python_code"]
assert (
"Objective[objective1] Plan[plan1] Output[out1]"
in response.use_tool.arguments["plan_text"]
)
@pytest.mark.asyncio
async def test_code_flow_execution():
executor = CodeFlowExecutionComponent(
lambda: [
Command(
names=["test_func"],
description="",
parameters=[],
method=lambda: "You've passed the test!",
)
]
)
result = await executor.execute_code_flow(
python_code="async def main() -> str:\n return test_func()",
plan_text="This is the plan text.",
)
assert "You've passed the test!" in result

View File

@@ -0,0 +1,75 @@
import pytest
from forge.utils.function.code_validation import CodeValidator, FunctionDef
@pytest.mark.asyncio
async def test_code_validation():
validator = CodeValidator(
available_functions={
"read_webpage": FunctionDef(
name="read_webpage",
arg_types=[("url", "str"), ("query", "str")],
arg_descs={
"url": "URL to read",
"query": "Query to search",
"return_type": "Type of return value",
},
return_type="str",
return_desc="Information matching the query",
function_desc="Read a webpage and return the info matching the query",
is_async=True,
),
"web_search": FunctionDef(
name="web_search",
arg_types=[("query", "str")],
arg_descs={"query": "Query to search"},
return_type="list[(str,str)]",
return_desc="List of tuples with title and URL",
function_desc="Search the web and return the search results",
is_async=True,
),
"main": FunctionDef(
name="main",
arg_types=[],
arg_descs={},
return_type="str",
return_desc="Answer in the text format",
function_desc="Get the num of contributors to the autogpt github repo",
is_async=False,
),
},
available_objects={},
)
response = await validator.validate_code(
raw_code="""
def crawl_info(url: str, query: str) -> str | None:
info = await read_webpage(url, query)
if info:
return info
urls = await read_webpage(url, "autogpt github contributor page")
for url in urls.split('\\n'):
info = await crawl_info(url, query)
if info:
return info
return None
def hehe():
return 'hehe'
def main() -> str:
query = "Find the number of contributors to the autogpt github repository"
for title, url in ("autogpt github contributor page"):
info = await crawl_info(url, query)
if info:
return info
x = await hehe()
return "No info found"
""",
packages=[],
)
assert response.functionCode is not None
assert "async def crawl_info" in response.functionCode # async is added
assert "async def main" in response.functionCode
assert "x = hehe()" in response.functionCode # await is removed

View File

@@ -2,5 +2,5 @@
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "none"
}

View File

@@ -1,109 +0,0 @@
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple
def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
try:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)
sha = event["pull_request"]["head"]["sha"]
return (
os.environ["GITHUB_API_URL"],
os.environ["GITHUB_REPOSITORY"],
sha,
os.environ["GITHUB_TOKEN"],
os.environ["GITHUB_RUN_ID"],
)
except KeyError as e:
print(f"Error: Missing required environment variable or event data: {e}")
sys.exit(1)
def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
"""Make an API request and return the JSON response."""
try:
print("Making API request to:", url)
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
print(f"Error: API request failed. {e}")
sys.exit(1)
def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
"""Process check runs and return their status."""
runs_in_progress = False
all_others_passed = True
for run in check_runs:
if str(run["name"]) != "Check PR Status":
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(
f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
)
else:
runs_in_progress = True
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
else:
print(
f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
)
return runs_in_progress, all_others_passed
def main():
api_url, repo, sha, github_token, current_run_id = get_environment_variables()
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
headers = {
"Accept": "application/vnd.github.v3+json",
}
if github_token:
headers["Authorization"] = f"token {github_token}"
print(f"Current run ID: {current_run_id}")
while True:
data = make_api_request(endpoint, headers)
check_runs = data["check_runs"]
print("Processing check runs...")
print(check_runs)
runs_in_progress, all_others_passed = process_check_runs(check_runs)
if not runs_in_progress:
break
print(
"Some check runs are still in progress. Waiting 3 minutes before checking again..."
)
time.sleep(180)
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)
if __name__ == "__main__":
main()

2
cli.py
View File

@@ -69,8 +69,6 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
bold=True,
)
)
else:
click.echo(click.style("🎉 Setup completed!\n", fg="green"))
@cli.group()

View File

@@ -69,8 +69,6 @@ Lets the agent execute non-interactive Shell commands and Python code. Python ex
| `shell_denylist` | List of prohibited shell commands | `List[str]` | `[]` |
| `docker_container_name` | Name of the Docker container used for code execution | `str` | `"agent_sandbox"` |
All shell command configurations are expected to be for convience only. This component is not secure and should not be used in production environments. It is recommended to use more appropriate sandboxing.
### CommandProvider
- `execute_shell` execute shell command
@@ -157,12 +155,11 @@ Allows agent to search the web. Google credentials aren't required for DuckDuckG
### `WebSearchConfiguration`
| Config variable | Details | Type | Default |
| -------------------------------- | ----------------------------------------------------------------------- | --------------------------- | ------- |
| `google_api_key` | Google API key, *ENV:* `GOOGLE_API_KEY` | `str` | `None` |
| `google_custom_search_engine_id` | Google Custom Search Engine ID, *ENV:* `GOOGLE_CUSTOM_SEARCH_ENGINE_ID` | `str` | `None` |
| `duckduckgo_max_attempts` | Maximum number of attempts to search using DuckDuckGo | `int` | `3` |
| `duckduckgo_backend` | Backend to be used for DDG sdk | `"api" \| "html" \| "lite"` | `"api"` |
| Config variable | Details | Type | Default |
| -------------------------------- | ----------------------------------------------------------------------- | ----- | ------- |
| `google_api_key` | Google API key, *ENV:* `GOOGLE_API_KEY` | `str` | `None` |
| `google_custom_search_engine_id` | Google Custom Search Engine ID, *ENV:* `GOOGLE_CUSTOM_SEARCH_ENGINE_ID` | `str` | `None` |
| `duckduckgo_max_attempts` | Maximum number of attempts to search using DuckDuckGo | `int` | `3` |
### DirectiveProvider
@@ -186,7 +183,6 @@ Allows agent to read websites using Selenium.
| `headless` | Run browser in headless mode | `bool` | `True` |
| `user_agent` | User agent used by the browser | `str` | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
| `browse_spacy_language_model` | Spacy language model used for chunking text | `str` | `"en_core_web_sm"` |
| `selenium_proxy` | Http proxy to use with Selenium | `str` | `None` |
### DirectiveProvider

View File

@@ -4,26 +4,13 @@ Welcome to the AutoGPT Documentation.
The AutoGPT project consists of four main components:
- The [Server](#server) – known as the "AutoGPT Platform"
- The [Agent](#agent) – also known as just "AutoGPT"
- The [Benchmark](#benchmark) – AKA `agbenchmark`
- The [Forge](#forge)
- The [Frontend](#frontend)
* The [Agent](#agent) – also known as just "AutoGPT"
* The [Benchmark](#benchmark) – AKA `agbenchmark`
* The [Forge](#forge)
* The [Frontend](#frontend)
To tie these together, we also have a [CLI] at the root of the project.
## 🌐 Server
<!-- Setup, then Advanced, then New Blocks -->
**[📖 Setup](server/setup.md)**
&ensp;|&ensp;
**[📖 Advanced Setup](server/advanced_setup.md)**
&ensp;|&ensp;
**[📖 Making New Blocks](server/new_blocks.md)**
The server is the backbone of the New AutoGPT project. It provides the infrastructure for the agents to run, and the UI for you to interact with them. It integrates with the Forge, Agent, and a bespoke UI to provide a seamless experience.
---
## 🤖 Agent

View File

@@ -1,69 +0,0 @@
# Advanced Setup
The advanced steps below are intended for people with sysadmin experience. If you are not comfortable with these steps, please refer to the [basic setup guide](setup.md).
## Introduction
For the advanced setup, first follow the [basic setup guide](setup.md) to get the server up and running. Once you have the server running, you can follow the steps below to configure the server for your specific needs.
## Configuration
### Setting config via environment variables
The server uses environment variables to store configs. You can set these environment variables in a `.env` file in the root of the project. The `.env` file should look like this:
```bash
# .env
KEY1=value1
KEY2=value2
```
The server will automatically load the `.env` file when it starts. You can also set the environment variables directly in your shell. Refer to your operating system's documentation on how to set environment variables in the current session.
The valid options are listed in `.env.example` in the root of the builder and server directories. You can copy the `.env.example` file to `.env` and modify the values as needed.
```bash
# Copy the .env.example file to .env
cp .env.example .env
```
### Secrets directory
The secret directory is located at `./secrets`. You can store any secrets you need in this directory. The server will automatically load the secrets when it starts.
An example for a secret called `my_secret` would look like this:
```bash
# ./secrets/my_secret
my_secret_value
```
This is useful when running on docker so you can copy the secrets into the container without exposing them in the Dockerfile.
## Database selection
### SQLite
By default, the server uses SQLite as the database. SQLite is a file-based database that is easy to set up and use. However, it is not recommended for production usecases where auth is required because that subsystem requires Postgres.
### PostgreSQL
For production use, it is recommended to use PostgreSQL as the database. You will swap the commands you use to generate and run prisma to the following
```bash
poetry run prisma generate --schema postgres/schema.prisma
```
This will generate the Prisma client for PostgreSQL. You will also need to run the PostgreSQL database in a separate container. You can use the `docker-compose.yml` file in the `rnd` directory to run the PostgreSQL database.
```bash
cd rnd/
docker compose up -d
```
You can then run the migrations from the `autogpt_server` directory.
```bash
cd ../autogpt_server
prisma migrate dev --schema postgres/schema.prisma
```

View File

@@ -1,17 +0,0 @@
# Find available voices for D-ID
1. **ElevenLabs**
- Select any voice from the voice list: https://api.elevenlabs.io/v1/voices
- Copy the voice_id
- Use it as a string in the voice_id field in the CreateTalkingAvatarClip Block
2. **Microsoft Azure Voices**
- Select any voice from the voice gallery: https://speech.microsoft.com/portal/voicegallery
- Click on the "Sample code" tab on the right
- Copy the voice name, for example: config.SpeechSynthesisVoiceName ="en-GB-AbbiNeural"
- Use this string en-GB-AbbiNeural in the voice_id field in the CreateTalkingAvatarClip Block
3. **Amazon Polly Voices**
- Select any voice from the voice list: https://docs.aws.amazon.com/polly/latest/dg/available-voices.html
- Copy the voice name / ID
- Use it as string in the voice_id field in the CreateTalkingAvatarClip Block

View File

@@ -52,7 +52,7 @@ Follow these steps to create and test a new block:
```python
def __init__(self):
super().__init__(
# Unique ID for the block, used across users for templates
# Unique ID for the block
# you can generate this with this python one liner
# print(__import__('uuid').uuid4())
id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
@@ -147,78 +147,3 @@ This approach allows us to test the block's logic comprehensively without relyin
6. **Update tests when changing block behavior**: If you modify your block, ensure the tests are updated accordingly.
By following these steps, you can create new blocks that extend the functionality of the AutoGPT Agent Server.
## Blocks we want to see
Below is a list of blocks that we would like to see implemented in the AutoGPT Agent Server. If you're interested in contributing, feel free to pick one of these blocks or suggest your own by editing [docs/content/server/new_blocks.md](https://github.com/Significant-Gravitas/AutoGPT/edit/master/docs/content/server/new_blocks.md) and opening a pull request.
If you would like to implement one of these blocks, open a pull request and we will start the review process.
### Consumer Services/Platforms
- Google sheets - Read/Append [Read in Progress](https://github.com/Significant-Gravitas/AutoGPT/pull/7521)
- Email - Read/Send with Gmail, Outlook, Yahoo, Proton, etc
- Calendar - Read/Write with Google Calendar, Outlook Calendar, etc
- Home Assistant - Call Service, Get Status
- Dominos - Order Pizza, Track Order
- Uber - Book Ride, Track Ride
- Notion - Create/Read Page, Create/Append/Read DB
- Google drive - read/write/overwrite file/folder
### Social Media
- Twitter - Post, Reply, Get Replies, Get Comments, Get Followers, Get Following, Get Tweets, Get Mentions
- Instagram - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- TikTok - Post, Reply, Get Comments, Get Followers, Get Following, Get Videos, Get Mentions, Get Trending Videos
- LinkedIn - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- YouTube - Transcribe Videos/Shorts, Post Videos/Shorts, Read/Reply/React to Comments, Update Thumbnails, Update Description, Update Tags, Update Titles, Get Views, Get Likes, Get Dislikes, Get Subscribers, Get Comments, Get Shares, Get Watch Time, Get Revenue, Get Trending Videos, Get Top Videos, Get Top Channels
- Reddit - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- Treatwell (and related Platforms) - Book, Cancel, Review, Get Recommendations
- Substack - Read/Subscribe/Unsubscribe, Post/Reply, Get Recommendations
- Discord - Read/Post/Reply, Moderation actions
- GoodReads - Read/Post/Reply, Get Recommendations
### E-commerce
- Airbnb - Book, Cancel, Review, Get Recommendations
- Amazon - Order, Track Order, Return, Review, Get Recommendations
- eBay - Order, Track Order, Return, Review, Get Recommendations
- Upwork - Post Jobs, Hire Freelancer, Review Freelancer, Fire Freelancer
### Business Tools
- External Agents - Call other agents similar to AutoGPT
- Trello - Create/Read/Update/Delete Cards, Lists, Boards
- Jira - Create/Read/Update/Delete Issues, Projects, Boards
- Linear - Create/Read/Update/Delete Issues, Projects, Boards
- Excel - Read/Write/Update/Delete Rows, Columns, Sheets
- Slack - Read/Post/Reply to Messages, Create Channels, Invite Users
- ERPNext - Create/Read/Update/Delete Invoices, Orders, Customers, Products
- Salesforce - Create/Read/Update/Delete Leads, Opportunities, Accounts
- HubSpot - Create/Read/Update/Delete Contacts, Deals, Companies
- Zendesk - Create/Read/Update/Delete Tickets, Users, Organizations
- Odoo - Create/Read/Update/Delete Sales Orders, Invoices, Customers
- Shopify - Create/Read/Update/Delete Products, Orders, Customers
- WooCommerce - Create/Read/Update/Delete Products, Orders, Customers
- Squarespace - Create/Read/Update/Delete Pages, Products, Orders
## Agent Templates we want to see
### Data/Information
- Summarize top news of today, of this week, this month via Apple News or other large media outlets BBC, TechCrunch, hackernews, etc
- Create, read, and summarize substack newsletters or any newsletters (blog writer vs blog reader)
- Get/read/summarize the most viral Twitter, Instagram, TikTok (general social media accounts) of the day, week, month
- Get/Read any LinkedIn posts or profile that mention AI Agents
- Read/Summarize discord (might not be able to do this because you need access)
- Read / Get most read books in a given month, year, etc from GoodReads or Amazon Books, etc
- Get dates for specific shows across all streaming services
- Suggest/Recommend/Get most watched shows in a given month, year, etc across all streaming platforms
- Data analysis from xlsx data set
- Gather via Excel or Google Sheets data > Sample the data randomly (sample block takes top X, bottom X, randomly, etc) > pass that to LLM Block to generate a script for analysis of the full data > Python block to run the script> making a loop back through LLM Fix Block on error > create chart/visualization (potentially in the code block?) > show the image as output (this may require frontend changes to show)
- Tiktok video search and download
### Marketing
- Portfolio site design and enhancements

View File

@@ -1,37 +0,0 @@
# Running Ollama with AutoGPT
Follow these steps to set up and run Ollama and your AutoGPT project:
1. **Run Ollama**
- Open a terminal
- Execute the following command:
```
ollama run llama3
```
- Leave this terminal running
2. **Run the Backend**
- Open a new terminal
- Navigate to the backend directory in the AutoGPT project:
```
cd rnd/autogpt_server/
```
- Start the backend using Poetry:
```
poetry run app
```
3. **Run the Frontend**
- Open another terminal
- Navigate to the frontend directory in the AutoGPT project:
```
cd rnd/autogpt_builder/
```
- Start the frontend development server:
```
npm run dev
```
4. **Choose the Ollama Model**
- Add LLMBlock in the UI
- Choose the last option in the model selection dropdown

View File

@@ -1,137 +0,0 @@
# Setting up the server
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
## Introduction
This guide will help you setup the server and builder for the project.
<!-- The video is listed in the root Readme.md of the repo -->
We also offer this in video format. You can check it out [here](https://github.com/Significant-Gravitas/AutoGPT#how-to-get-started).
!!! warning
**DO NOT FOLLOW ANY OUTSIDE TUTORIALS AS THEY WILL LIKELY BE OUT OF DATE**
## Prerequisites
To setup the server, you need to have the following installed:
- [Node.js](https://nodejs.org/en/)
- [Python 3.10](https://www.python.org/downloads/)
### Checking if you have Node.js and Python installed
You can check if you have Node.js installed by running the following command:
```bash
node -v
```
You can check if you have Python installed by running the following command:
```bash
python --version
```
Once you have node and python installed, you can proceed to the next step.
### Installing the package managers
In order to install the dependencies, you need to have the appropriate package managers installed.
- Installing Yarn
Yarn is a package manager for Node.js. You can install it by running the following command:
```bash
npm install -g yarn
```
- Installing Poetry
Poetry is a package manager for Python. You can install it by running the following command:
```bash
pip install poetry
```
- Installing Docker and Docker Compose
Docker containerizes applications, while Docker Compose orchestrates multi-container Docker applications.
You can follow the steps here:
If you need assistance installing docker:
https://docs.docker.com/desktop/
If you need assistance installing docker compose:
https://docs.docker.com/compose/install/
### Installing the dependencies
Once you have installed Yarn and Poetry, you can run the following command to install the dependencies:
```bash
cd rnd/autogpt_server
cp .env.example .env
poetry install
```
**In another terminal**, run the following command to install the dependencies for the frontend:
```bash
cd rnd/autogpt_builder
yarn install
```
Once you have installed the dependencies, you can proceed to the next step.
### Setting up the database
In order to setup the database, you need to run the following commands, in the same terminal you ran the `poetry install` command:
```sh
docker compose up postgres redis -d
poetry run prisma migrate dev
```
After deploying the migration, to ensure that the database schema is correctly mapped to your codebase, allowing the application to interact with the database properly, you need to generate the Prisma database model:
```bash
poetry run prisma generate
```
Without running this command, the necessary Python modules (prisma.models) won't be available, leading to a `ModuleNotFoundError`.
### Running the server without Docker
To run the server, you can run the following commands in the same terminal you ran the `poetry install` command:
```bash
poetry run app
```
### Running the server within Docker
To run the server, you can run the following commands in the same terminal you ran the `poetry install` command:
```bash
docker compose build
docker compose up
```
In the other terminal from autogpt_builder, you can run the following command to start the frontend:
```bash
yarn dev
```
### Checking if the server is running
You can check if the server is running by visiting [http://localhost:3000](http://localhost:3000) in your browser.
### Notes:
By default the daemons for different services run on the following ports:
Execution Manager Daemon: 8002
Execution Scheduler Daemon: 8003
Rest Server Daemon: 8004

View File

@@ -7,10 +7,6 @@ nav:
- The AutoGPT Server 🆕:
- Build your own Blocks: server/new_blocks.md
- Setup: server/setup.md
- Advanced Setup: server/advanced_setup.md
- Using Ollama: server/ollama.md
- Using D-ID: serveer/d_id.md
- AutoGPT Agent:
- Introduction: AutoGPT/index.md

View File

@@ -1,3 +0,0 @@
{
"python.analysis.typeCheckingMode": "basic",
}

View File

@@ -1,17 +1,13 @@
from __future__ import annotations
import inspect
from typing import Callable, Concatenate, Generic, ParamSpec, TypeVar, cast
from forge.agent.protocols import CommandProvider
from typing import Callable, Generic, ParamSpec, TypeVar
from .parameter import CommandParameter
P = ParamSpec("P")
CO = TypeVar("CO") # command output
_CP = TypeVar("_CP", bound=CommandProvider)
class Command(Generic[P, CO]):
"""A class representing a command.
@@ -26,37 +22,60 @@ class Command(Generic[P, CO]):
self,
names: list[str],
description: str,
method: Callable[Concatenate[_CP, P], CO],
method: Callable[P, CO],
parameters: list[CommandParameter],
):
# Check if all parameters are provided
if not self._parameters_match(method, parameters):
raise ValueError(
f"Command {names[0]} has different parameters than provided schema"
)
self.names = names
self.description = description
# Method technically has a `self` parameter, but we can ignore that
# since Python passes it internally.
self.method = cast(Callable[P, CO], method)
self.method = method
self.parameters = parameters
# Check if all parameters are provided
if not self._parameters_match_signature():
raise ValueError(
f"Command {self.name} has different parameters than provided schema"
)
@property
def name(self) -> str:
return self.names[0] # TODO: fallback to other name if first one is taken
@property
def is_async(self) -> bool:
return inspect.iscoroutinefunction(self.method)
def _parameters_match(
self, func: Callable, parameters: list[CommandParameter]
) -> bool:
@property
def return_type(self) -> str:
_type = inspect.signature(self.method).return_annotation
if _type == inspect.Signature.empty:
return "None"
return _type.__name__
@property
def header(self) -> str:
"""Returns a function header representing the command's signature
Examples:
```py
def execute_python_code(code: str) -> str:
async def extract_info_from_content(content: str, instruction: str, output_type: type[~T]) -> ~T:
""" # noqa
return (
f"{'async ' if self.is_async else ''}"
f"def {self.name}{inspect.signature(self.method)}:"
)
def _parameters_match_signature(self) -> bool:
# Get the function's signature
signature = inspect.signature(func)
signature = inspect.signature(self.method)
# Extract parameter names, ignoring 'self' for methods
func_param_names = [
param.name
for param in signature.parameters.values()
if param.name != "self"
]
names = [param.name for param in parameters]
names = [param.name for param in self.parameters]
# Check if sorted lists of names/keys are equal
return sorted(func_param_names) == sorted(names)
@@ -71,7 +90,7 @@ class Command(Generic[P, CO]):
for param in self.parameters
]
return (
f"{self.names[0]}: {self.description.rstrip('.')}. "
f"{self.name}: {self.description.rstrip('.')}. "
f"Params: ({', '.join(params)})"
)

View File

@@ -1,21 +1,28 @@
import inspect
import logging
import re
from typing import Callable, Concatenate, Optional, TypeVar
from typing import Callable, Concatenate, Optional, TypeVar, cast
from forge.agent.protocols import CommandProvider
from forge.models.json_schema import JSONSchema
from .command import CO, Command, CommandParameter, P
logger = logging.getLogger(__name__)
_CP = TypeVar("_CP", bound=CommandProvider)
def command(
names: list[str] = [],
names: Optional[list[str]] = None,
description: Optional[str] = None,
parameters: dict[str, JSONSchema] = {},
) -> Callable[[Callable[Concatenate[_CP, P], CO]], Command[P, CO]]:
parameters: Optional[dict[str, JSONSchema]] = None,
) -> Callable[[Callable[Concatenate[_CP, P], CO] | Callable[P, CO]], Command[P, CO]]:
"""
The command decorator is used to make a Command from a function.
Make a `Command` from a function or a method on a `CommandProvider`.
All parameters are optional if the decorated function has a fully featured
docstring. For the requirements of such a docstring,
see `get_param_descriptions_from_docstring`.
Args:
names (list[str]): The names of the command.
@@ -27,34 +34,141 @@ def command(
that the command executes.
"""
def decorator(func: Callable[Concatenate[_CP, P], CO]) -> Command[P, CO]:
doc = func.__doc__ or ""
def decorator(
func: Callable[Concatenate[_CP, P], CO] | Callable[P, CO]
) -> Command[P, CO]:
# If names is not provided, use the function name
command_names = names or [func.__name__]
# If description is not provided, use the first part of the docstring
if not (command_description := description):
if not func.__doc__:
raise ValueError("Description is required if function has no docstring")
# Return the part of the docstring before double line break or everything
command_description = re.sub(r"\s+", " ", doc.split("\n\n")[0].strip())
_names = names or [func.__name__]
# If description is not provided, use the first part of the docstring
docstring = inspect.getdoc(func)
if not (_description := description):
if not docstring:
raise ValueError(
"'description' is required if function has no docstring"
)
_description = get_clean_description_from_docstring(docstring)
if not (_parameters := parameters):
if not docstring:
raise ValueError(
"'parameters' is required if function has no docstring"
)
# Combine descriptions from docstring with JSONSchemas from annotations
param_descriptions = get_param_descriptions_from_docstring(docstring)
_parameters = get_params_json_schemas(func)
for param, param_schema in _parameters.items():
if desc := param_descriptions.get(param):
param_schema.description = desc
# Parameters
typed_parameters = [
CommandParameter(
name=param_name,
spec=spec,
)
for param_name, spec in parameters.items()
for param_name, spec in _parameters.items()
]
# Wrap func with Command
command = Command(
names=command_names,
description=command_description,
method=func,
names=_names,
description=_description,
# Method technically has a `self` parameter, but we can ignore that
# since Python passes it internally.
method=cast(Callable[P, CO], func),
parameters=typed_parameters,
)
return command
return decorator
def get_clean_description_from_docstring(docstring: str) -> str:
    """Return the part of the docstring before double line break or everything"""
    first_paragraph = docstring.split("\n\n")[0]
    # Collapse internal line breaks / runs of whitespace into single spaces.
    return re.sub(r"\s+", " ", first_paragraph.strip())
def get_params_json_schemas(func: Callable) -> dict[str, JSONSchema]:
    """
    Gets the annotations of the given function and converts them to JSONSchemas.

    Args:
        func: the function (or unbound method) to inspect

    Returns:
        dict[str, JSONSchema]: a schema per parameter, keyed by parameter name.
            Parameters with a default value are marked not-required and have
            their default recorded on the schema.
    """
    result: dict[str, JSONSchema] = {}
    for name, parameter in inspect.signature(func).parameters.items():
        # Skip the implicit instance/class argument of (unbound) methods;
        # `Command` likewise ignores 'self' when matching signatures.
        if name in ("self", "cls"):
            continue
        param_schema = result[name] = JSONSchema.from_python_type(parameter.annotation)
        # `inspect.Parameter.empty` is a (truthy!) sentinel, so it must be
        # compared with `is not` — a plain truthiness check would treat every
        # parameter as having a default, and would miss falsy defaults (0, False).
        if parameter.default is not inspect.Parameter.empty:
            param_schema.default = parameter.default
            param_schema.required = False
    return result
def get_param_descriptions_from_docstring(docstring: str) -> dict[str, str]:
    """
    Get parameter descriptions from a docstring. Requirements for the docstring:

    - The section describing the parameters MUST start with `Params:` or `Args:`, in any
      capitalization.
    - An entry describing a parameter MUST be indented by 4 spaces.
    - An entry describing a parameter MUST start with the parameter name, an optional
      type annotation, followed by a colon `:`.
    - Continuations of parameter descriptions MUST be indented relative to the first
      line of the entry.
    - The docstring must not be indented as a whole. To get a docstring with the uniform
      indentation stripped off, use `inspect.getdoc(func)`.

    Example:
    ```python
    \"\"\"
    This is the description. This will be ignored.
    The description can span multiple lines,
    or contain any number of line breaks.

    Params:
        param1: This is a simple parameter description.
        param2 (list[str]): This parameter also has a type annotation.
        param3: This parameter has a long description. This means we will have to let it
            continue on the next line. The continuation is indented relative to the first
            line of the entry.

        param4: Extra line breaks to group parameters are allowed. This will not break
            the algorithm.

    This text is
    is indented by
    less than 4 spaces
    and is interpreted as the end of the `Params:` section.
    \"\"\"
    ```

    Returns:
        dict[str, str]: parameter descriptions, keyed by parameter name.
    """
    param_descriptions: dict[str, str] = {}
    # Becomes True once a `Params:`/`Args:` header line has been seen.
    param_section = False
    # Most recently parsed parameter name; continuation lines attach to it.
    last_param_name = ""
    for line in docstring.split("\n"):
        if not line.strip():  # ignore empty lines
            continue
        if line.lower().startswith(("params:", "args:")):
            param_section = True
            continue
        if param_section:
            if line.strip() and not line.startswith(" " * 4):  # end of section
                break
            # Strip the 4-space entry indentation.
            line = line[4:]
            if line.startswith(" ") and last_param_name:  # continuation of description
                param_descriptions[last_param_name] += " " + line.strip()
            else:
                # Entry line: "name[ (type annotation)]: description"
                if _match := re.match(r"^(\w+).*?: (.*)", line):
                    param_name = _match.group(1)
                    param_desc = _match.group(2).strip()
                else:
                    logger.warning(
                        f"Invalid line in docstring's parameter section: {repr(line)}"
                    )
                    continue
                param_descriptions[param_name] = param_desc
                last_param_name = param_name
    return param_descriptions

View File

@@ -102,6 +102,8 @@ class ActionHistoryComponent(
@staticmethod
def _make_result_message(episode: Episode, result: ActionResult) -> ChatMessage:
from forge.components.code_flow_executor import CodeFlowExecutionComponent
if result.status == "success":
return (
ToolResultMessage(
@@ -110,11 +112,18 @@ class ActionHistoryComponent(
)
if episode.action.raw_message.tool_calls
else ChatMessage.user(
f"{episode.action.use_tool.name} returned: "
(
"Execution result:"
if (
episode.action.use_tool.name
== CodeFlowExecutionComponent.execute_code_flow.name
)
else f"{episode.action.use_tool.name} returned:"
)
+ (
f"```\n{result.outputs}\n```"
f"\n```\n{result.outputs}\n```"
if "\n" in str(result.outputs)
else f"`{result.outputs}`"
else f" `{result.outputs}`"
)
)
)

View File

@@ -0,0 +1,3 @@
from .code_flow_executor import CodeFlowExecutionComponent
__all__ = ["CodeFlowExecutionComponent"]

View File

@@ -0,0 +1,69 @@
"""Commands to generate images based on text input"""
import inspect
import logging
from typing import Any, Callable, Iterable, Iterator
from forge.agent.protocols import CommandProvider
from forge.command import Command, command
from forge.models.json_schema import JSONSchema
MAX_RESULT_LENGTH = 1000
logger = logging.getLogger(__name__)
class CodeFlowExecutionComponent(CommandProvider):
"""A component that provides commands to execute code flow."""
def __init__(self, get_available_commands: Callable[[], Iterable[Command]]):
self.get_available_commands = get_available_commands
def get_commands(self) -> Iterator[Command]:
yield self.execute_code_flow
@command(
parameters={
"python_code": JSONSchema(
type=JSONSchema.Type.STRING,
description="The Python code to execute",
required=True,
),
"plan_text": JSONSchema(
type=JSONSchema.Type.STRING,
description="The plan to written in a natural language",
required=False,
),
},
)
async def execute_code_flow(self, python_code: str, plan_text: str) -> str:
"""Execute the code flow.
Args:
python_code: The Python code to execute
plan_text: The plan implemented by the given Python code
Returns:
str: The result of the code execution
"""
locals: dict[str, Any] = {}
locals.update(self._get_available_functions())
code = f"{python_code}" "\n\n" "exec_output = main()"
logger.debug(f"Code-Flow Execution code:\n```py\n{code}\n```")
exec(code, locals)
result = await locals["exec_output"]
logger.debug(f"Code-Flow Execution result:\n{result}")
if inspect.isawaitable(result):
result = await result
# limit the result to limit the characters
if len(result) > MAX_RESULT_LENGTH:
result = result[:MAX_RESULT_LENGTH] + "...[Truncated, Content is too long]"
return result
def _get_available_functions(self) -> dict[str, Callable]:
return {
name: command
for command in self.get_available_commands()
for name in command.names
if name != self.execute_code_flow.name
}

View File

@@ -169,7 +169,8 @@ class FileManagerComponent(
parameters={
"folder": JSONSchema(
type=JSONSchema.Type.STRING,
description="The folder to list files in",
description="The folder to list files in. "
"Pass an empty string to list files in the workspace.",
required=True,
)
},

View File

@@ -25,7 +25,7 @@ class UserInteractionComponent(CommandProvider):
},
)
def ask_user(self, question: str) -> str:
"""If you need more details or information regarding the given goals,
"""If you need more details or information regarding the given task,
you can ask the user for input."""
print(f"\nQ: {question}")
resp = click.prompt("A")

View File

@@ -1,7 +1,7 @@
import json
import logging
import time
from typing import Iterator, Literal, Optional
from typing import Iterator, Optional
from duckduckgo_search import DDGS
from pydantic import BaseModel, SecretStr
@@ -24,7 +24,6 @@ class WebSearchConfiguration(BaseModel):
None, from_env="GOOGLE_CUSTOM_SEARCH_ENGINE_ID", exclude=True
)
duckduckgo_max_attempts: int = 3
duckduckgo_backend: Literal["api", "html", "lite"] = "api"
class WebSearchComponent(
@@ -90,9 +89,7 @@ class WebSearchComponent(
if not query:
return json.dumps(search_results)
search_results = DDGS().text(
query, max_results=num_results, backend=self.config.duckduckgo_backend
)
search_results = DDGS().text(query, max_results=num_results)
if search_results:
break

View File

@@ -68,8 +68,6 @@ class WebSeleniumConfiguration(BaseModel):
"""User agent used by the browser"""
browse_spacy_language_model: str = "en_core_web_sm"
"""Spacy language model used for chunking text"""
selenium_proxy: Optional[str] = None
"""Http proxy to use with Selenium"""
class WebSeleniumComponent(
@@ -303,9 +301,6 @@ class WebSeleniumComponent(
options.add_argument("--headless=new")
options.add_argument("--disable-gpu")
if self.config.selenium_proxy:
options.add_argument(f"--proxy-server={self.config.selenium_proxy}")
self._sideload_chrome_extensions(options, self.data_dir / "assets" / "crx")
if (chromium_driver_path := Path("/usr/bin/chromedriver")).exists():

View File

@@ -1,3 +1,4 @@
import inspect
import logging
from typing import (
Any,
@@ -154,7 +155,10 @@ class BaseOpenAIChatProvider(
self,
model_prompt: list[ChatMessage],
model_name: _ModelName,
completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None,
completion_parser: (
Callable[[AssistantChatMessage], Awaitable[_T]]
| Callable[[AssistantChatMessage], _T]
) = lambda _: None,
functions: Optional[list[CompletionModelFunction]] = None,
max_output_tokens: Optional[int] = None,
prefill_response: str = "",
@@ -208,7 +212,15 @@ class BaseOpenAIChatProvider(
parsed_result: _T = None # type: ignore
if not parse_errors:
try:
parsed_result = completion_parser(assistant_msg)
parsed_result = (
await _result
if inspect.isawaitable(
_result := completion_parser(assistant_msg)
)
# cast(..) needed because inspect.isawaitable(..) loses type:
# https://github.com/microsoft/pyright/issues/3690
else cast(_T, _result)
)
except Exception as e:
parse_errors.append(e)

View File

@@ -1,8 +1,19 @@
from __future__ import annotations
import enum
import inspect
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional, ParamSpec, Sequence, TypeVar
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Optional,
ParamSpec,
Sequence,
TypeVar,
cast,
)
import sentry_sdk
import tenacity
@@ -171,7 +182,10 @@ class AnthropicProvider(BaseChatModelProvider[AnthropicModelName, AnthropicSetti
self,
model_prompt: list[ChatMessage],
model_name: AnthropicModelName,
completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None,
completion_parser: (
Callable[[AssistantChatMessage], Awaitable[_T]]
| Callable[[AssistantChatMessage], _T]
) = lambda _: None,
functions: Optional[list[CompletionModelFunction]] = None,
max_output_tokens: Optional[int] = None,
prefill_response: str = "",
@@ -237,7 +251,14 @@ class AnthropicProvider(BaseChatModelProvider[AnthropicModelName, AnthropicSetti
+ "\n".join(str(e) for e in tool_call_errors)
)
parsed_result = completion_parser(assistant_msg)
# cast(..) needed because inspect.isawaitable(..) loses type info:
# https://github.com/microsoft/pyright/issues/3690
parsed_result = cast(
_T,
await _result
if inspect.isawaitable(_result := completion_parser(assistant_msg))
else _result,
)
break
except Exception as e:
self._logger.debug(

View File

@@ -1,7 +1,16 @@
from __future__ import annotations
import logging
from typing import Any, AsyncIterator, Callable, Optional, Sequence, TypeVar, get_args
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Optional,
Sequence,
TypeVar,
get_args,
)
from pydantic import ValidationError
@@ -99,7 +108,10 @@ class MultiProvider(BaseChatModelProvider[ModelName, ModelProviderSettings]):
self,
model_prompt: list[ChatMessage],
model_name: ModelName,
completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None,
completion_parser: (
Callable[[AssistantChatMessage], Awaitable[_T]]
| Callable[[AssistantChatMessage], _T]
) = lambda _: None,
functions: Optional[list[CompletionModelFunction]] = None,
max_output_tokens: Optional[int] = None,
prefill_response: str = "",

View File

@@ -6,6 +6,7 @@ from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
ClassVar,
Generic,
@@ -135,6 +136,8 @@ class CompletionModelFunction(BaseModel):
name: str
description: str
parameters: dict[str, "JSONSchema"]
return_type: str | None = None
is_async: bool = False
def fmt_line(self) -> str:
params = ", ".join(
@@ -143,6 +146,44 @@ class CompletionModelFunction(BaseModel):
)
return f"{self.name}: {self.description}. Params: ({params})"
def fmt_function_stub(self, impl: str = "pass") -> str:
    """
    Formats and returns a Python function stub with typed parameters, default
    values, and a docstring built from this spec's descriptions.

    Args:
        impl: the body to put into the stub (default: `pass`)

    Returns:
        str: the formatted function stub (signature + docstring + impl)
    """
    from forge.llm.prompting.utils import indent

    def _param_fragment(name: str, p: "JSONSchema") -> str:
        # repr() the default so e.g. string defaults render as valid Python
        # (`= 'abc'`, not `= abc`) — matches the repr() convention used by
        # JSONSchema.to_dict. `is not None` (instead of truthiness) keeps
        # falsy defaults like 0 or False from being dropped.
        if p.default is not None:
            default = f" = {p.default!r}"
        elif not p.required:
            default = " = None"
        else:
            default = ""
        return f"{name}: {p.python_type}{default}"

    params = ", ".join(
        _param_fragment(name, p) for name, p in self.parameters.items()
    )
    _def = "async def" if self.is_async else "def"
    _return = f" -> {self.return_type}" if self.return_type else ""
    return f"{_def} {self.name}({params}){_return}:\n" + indent(
        '"""\n'
        f"{self.description}\n\n"
        "Params:\n"
        + indent(
            "\n".join(
                f"{name}: {param.description}"
                for name, param in self.parameters.items()
                if param.description
            )
        )
        + "\n"
        '"""\n'
        f"{impl}"
    )
def validate_call(
self, function_call: AssistantFunctionCall
) -> tuple[bool, list["ValidationError"]]:
@@ -415,7 +456,10 @@ class BaseChatModelProvider(BaseModelProvider[_ModelName, _ModelProviderSettings
self,
model_prompt: list[ChatMessage],
model_name: _ModelName,
completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None,
completion_parser: (
Callable[[AssistantChatMessage], Awaitable[_T]]
| Callable[[AssistantChatMessage], _T]
) = lambda _: None,
functions: Optional[list[CompletionModelFunction]] = None,
max_output_tokens: Optional[int] = None,
prefill_response: str = "",

View File

@@ -80,7 +80,7 @@ def function_specs_from_commands(
"""Get LLM-consumable function specs for the agent's available commands."""
return [
CompletionModelFunction(
name=command.names[0],
name=command.name,
description=command.description,
parameters={param.name: param.spec for param in command.parameters},
)

View File

@@ -1,6 +1,9 @@
import ast
import enum
import typing
from textwrap import indent
from typing import Optional, overload
from types import NoneType
from typing import Any, Optional, is_typeddict, overload
from jsonschema import Draft7Validator, ValidationError
from pydantic import BaseModel
@@ -14,14 +17,17 @@ class JSONSchema(BaseModel):
NUMBER = "number"
INTEGER = "integer"
BOOLEAN = "boolean"
TYPE = "type"
# TODO: add docstrings
description: Optional[str] = None
type: Optional[Type] = None
enum: Optional[list] = None
required: bool = False
default: Any = None
items: Optional["JSONSchema"] = None
properties: Optional[dict[str, "JSONSchema"]] = None
additional_properties: Optional["JSONSchema"] = None
minimum: Optional[int | float] = None
maximum: Optional[int | float] = None
minItems: Optional[int] = None
@@ -31,6 +37,7 @@ class JSONSchema(BaseModel):
schema: dict = {
"type": self.type.value if self.type else None,
"description": self.description,
"default": repr(self.default),
}
if self.type == "array":
if self.items:
@@ -45,6 +52,8 @@ class JSONSchema(BaseModel):
schema["required"] = [
name for name, prop in self.properties.items() if prop.required
]
if self.additional_properties:
schema["additionalProperties"] = self.additional_properties.to_dict()
elif self.enum:
schema["enum"] = self.enum
else:
@@ -63,11 +72,15 @@ class JSONSchema(BaseModel):
return JSONSchema(
description=schema.get("description"),
type=schema["type"],
default=ast.literal_eval(d) if (d := schema.get("default")) else None,
enum=schema.get("enum"),
items=JSONSchema.from_dict(schema["items"]) if "items" in schema else None,
items=JSONSchema.from_dict(i) if (i := schema.get("items")) else None,
properties=JSONSchema.parse_properties(schema)
if schema["type"] == "object"
else None,
additional_properties=JSONSchema.from_dict(ap)
if schema["type"] == "object" and (ap := schema.get("additionalProperties"))
else None,
minimum=schema.get("minimum"),
maximum=schema.get("maximum"),
minItems=schema.get("minItems"),
@@ -123,6 +136,82 @@ class JSONSchema(BaseModel):
f"interface {interface_name} " if interface_name else ""
) + f"{{\n{indent(attributes_string, ' ')}\n}}"
# Mapping of Python primitive types to their JSON schema type.
_PYTHON_TO_JSON_TYPE: dict[typing.Type, Type] = {
    int: Type.INTEGER,
    str: Type.STRING,
    bool: Type.BOOLEAN,
    float: Type.NUMBER,
}

@classmethod
def from_python_type(cls, T: typing.Type) -> "JSONSchema":
    """
    Create a JSONSchema from a Python type annotation.

    Supports primitives, `Optional[...]`, `list`/`dict` (bare or
    parameterized), TypedDicts, and Pydantic models.

    Args:
        T: the Python type to convert

    Returns:
        JSONSchema: the equivalent schema, marked `required=True`
            (except for Optional types, which are marked `required=False`)

    Raises:
        NotImplementedError: for unions other than Optional
        TypeError: for unsupported types
    """
    if _t := cls._PYTHON_TO_JSON_TYPE.get(T):
        partial_schema = cls(type=_t, required=True)
    elif (
        typing.get_origin(T) is typing.Union and typing.get_args(T)[-1] is NoneType
    ):
        # Optional[X] == Union[X, None]: unwrap X and mark as not required.
        if len(typing.get_args(T)[:-1]) > 1:
            raise NotImplementedError("Union types are currently not supported")
        partial_schema = cls.from_python_type(typing.get_args(T)[0])
        partial_schema.required = False
        return partial_schema
    elif T is list or typing.get_origin(T) is list:
        # Checked before the BaseModel branch: issubclass() raises TypeError
        # on parameterized generics like list[int].
        item_args = typing.get_args(T)
        partial_schema = cls(
            type=cls.Type.ARRAY,
            # Bare `list` has no type args -> untyped items.
            items=cls.from_python_type(item_args[0]) if item_args else None,
        )
    elif T is dict or typing.get_origin(T) is dict:
        value_args = typing.get_args(T)
        partial_schema = cls(
            type=cls.Type.OBJECT,
            # Bare `dict` has no type args -> untyped values.
            additional_properties=(
                cls.from_python_type(value_args[1]) if len(value_args) > 1 else None
            ),
        )
    elif is_typeddict(T):
        partial_schema = cls(
            type=cls.Type.OBJECT,
            properties={
                k: cls.from_python_type(v) for k, v in T.__annotations__.items()
            },
        )
    elif isinstance(T, type) and issubclass(T, BaseModel):
        partial_schema = cls.from_dict(T.schema())
    else:
        raise TypeError(f"JSONSchema.from_python_type is not implemented for {T}")

    partial_schema.required = True
    return partial_schema
# Inverse of _PYTHON_TO_JSON_TYPE: JSON schema type -> Python primitive.
_JSON_TO_PYTHON_TYPE: dict[Type, typing.Type] = {
    j: p for p, j in _PYTHON_TO_JSON_TYPE.items()
}

@property
def python_type(self) -> str:
    """Python source representation of this schema's type, e.g. `list[str]`."""
    if self.type in self._JSON_TO_PYTHON_TYPE:
        return self._JSON_TO_PYTHON_TYPE[self.type].__name__
    if self.type == JSONSchema.Type.ARRAY:
        if not self.items:
            return "list"
        return f"list[{self.items.python_type}]"
    if self.type == JSONSchema.Type.OBJECT:
        if self.properties:
            raise NotImplementedError(
                "JSONSchema.python_type doesn't support TypedDicts yet"
            )
        return "dict"
    if self.enum:
        members = ", ".join(repr(v) for v in self.enum)
        return f"Union[{members}]"
    if self.type == JSONSchema.Type.TYPE:
        return "type"
    if self.type is None:
        return "Any"
    raise NotImplementedError(
        f"JSONSchema.python_type does not support Type.{self.type.name} yet"
    )
@property
def typescript_type(self) -> str:
if not self.type:
@@ -141,6 +230,10 @@ class JSONSchema(BaseModel):
return self.to_typescript_object_interface()
if self.enum:
return " | ".join(repr(v) for v in self.enum)
elif self.type == JSONSchema.Type.TYPE:
return "type"
elif self.type is None:
return "any"
raise NotImplementedError(
f"JSONSchema.typescript_type does not support Type.{self.type.name} yet"

View File

@@ -0,0 +1,515 @@
import ast
import collections
import datetime
import json
import logging
import pathlib
import re
import typing
import black
import isort
from forge.utils.function.model import FunctionDef, ObjectType, ValidationResponse
from forge.utils.function.visitor import FunctionVisitor
from forge.utils.function.util import (
genererate_line_error,
generate_object_code,
generate_compiled_code,
validate_matching_function,
)
from forge.utils.function.exec import (
exec_external_on_contents,
ExecError,
PROJECT_TEMP_DIR,
DEFAULT_DEPS,
execute_command,
setup_if_required,
)
logger = logging.getLogger(__name__)
class CodeValidator:
    """
    Validates and reformats generated Python code: parses it, deduplicates and
    re-assembles its entities, checks the main function against its expected
    signature, and runs static analyzers (ruff, pyright) with auto-fix retries.
    """

    def __init__(
        self,
        function_name: str | None = None,
        available_functions: dict[str, FunctionDef] | None = None,
        available_objects: dict[str, ObjectType] | None = None,
    ):
        # Name of the "main" function the validated code must implement.
        # An empty string disables main-function validation.
        self.func_name: str = function_name or ""
        # Entities declared elsewhere; validated code must not re-declare them.
        self.available_functions: dict[str, FunctionDef] = available_functions or {}
        self.available_objects: dict[str, ObjectType] = available_objects or {}

    async def reformat_code(
        self,
        code: str,
        packages: list[str] = [],
    ) -> str:
        """
        Reformat the code snippet

        Args:
            code (str): The code snippet to reformat
            packages (list[str]): The list of packages to validate

        Returns:
            str: The reformatted code snippet

        Raises:
            Exception: if validation/compilation of the code fails
        """
        try:
            code = (
                await self.validate_code(
                    raw_code=code,
                    packages=packages,
                    raise_validation_error=False,
                    add_code_stubs=False,
                )
            ).get_compiled_code()
        except Exception as e:
            # NOTE(review): the original comment said "move on with unfixed
            # code", but the error is re-raised here — confirm intended behavior.
            logger.warning(f"Error formatting code for route #{self.func_name}: {e}")
            raise e

        for formatter in [
            lambda code: isort.code(code),
            lambda code: black.format_str(code, mode=black.FileMode()),
        ]:
            try:
                code = formatter(code)
            except Exception as e:
                # We move on with unformatted code if there's an error
                logger.warning(
                    f"Error formatting code for route #{self.func_name}: {e}"
                )

        return code

    async def validate_code(
        self,
        raw_code: str,
        packages: list[str] = [],
        raise_validation_error: bool = True,
        add_code_stubs: bool = True,
        call_cnt: int = 0,
    ) -> ValidationResponse:
        """
        Validate the code snippet for any error

        Args:
            raw_code (str): The code snippet to validate
            packages (list[str]): The list of packages to validate
            raise_validation_error (bool): Raise on validation errors
            add_code_stubs (bool): Include stubs of known entities when compiling
            call_cnt (int): Internal recursion counter for the auto-fix retry loop

        Returns:
            ValidationResponse: The response of the validation

        Raises:
            Exception: listing the validation errors found in the code snippet
        """
        validation_errors: list[str] = []

        # Parse the code and collect its functions, objects, and globals.
        try:
            tree = ast.parse(raw_code)
            visitor = FunctionVisitor()
            visitor.visit(tree)
            validation_errors.extend(visitor.errors)
        except Exception as e:
            # parse invalid code line and add it to the error message
            error = f"Error parsing code: {e}"
            if "async lambda" in raw_code:
                # Parenthesized so both literals concatenate; previously the
                # second string was a dangling no-op statement.
                error += (
                    "\nAsync lambda is not supported in Python. "
                    "Use async def instead!"
                )
            if line := re.search(r"line (\d+)", error):
                raise Exception(
                    genererate_line_error(error, raw_code, int(line.group(1)))
                )
            else:
                raise Exception(error)

        # Eliminate duplicate visitor.functions and visitor.objects, prefer the last one
        # dict.fromkeys (not set) keeps the result deterministic and ordered.
        visitor.imports = list(dict.fromkeys(visitor.imports))
        visitor.functions = list({f.name: f for f in visitor.functions}.values())
        visitor.objects = list(
            {
                o.name: o
                for o in visitor.objects
                if o.name not in self.available_objects
            }.values()
        )

        # Add implemented functions into the main function, only link the stub functions
        deps_funcs = [f for f in visitor.functions if f.is_implemented]
        stub_funcs = [f for f in visitor.functions if not f.is_implemented]
        objects_block = zip(
            ["\n\n" + generate_object_code(obj) for obj in visitor.objects],
            visitor.objectsIdx,
        )
        functions_block = zip(
            ["\n\n" + fun.function_code for fun in deps_funcs], visitor.functionsIdx
        )
        globals_block = zip(
            ["\n\n" + glob for glob in visitor.globals], visitor.globalsIdx
        )
        # Re-assemble the entities in their original order of appearance.
        function_code = "".join(
            code
            for code, _ in sorted(
                list(objects_block) + list(functions_block) + list(globals_block),
                key=lambda x: x[1],
            )
        ).strip()

        # No need to validate main function if it's not provided
        if self.func_name:
            main_func = self.__validate_main_function(
                deps_funcs=deps_funcs,
                function_code=function_code,
                validation_errors=validation_errors,
            )
            function_template = main_func.function_template
        else:
            function_template = None

        # Validate that code is not re-declaring any existing entities.
        # NOTE(review): visitor.objects was already filtered against
        # available_objects above, so in practice only functions can match here.
        already_declared_entities = set(
            [
                obj.name
                for obj in visitor.objects
                if obj.name in self.available_objects.keys()
            ]
            + [
                func.name
                for func in visitor.functions
                if func.name in self.available_functions.keys()
            ]
        )
        # FIX: the condition was inverted (`if not ...`), which reported
        # re-declaration errors exactly when there were none.
        if already_declared_entities:
            validation_errors.append(
                "These class/function names have already been declared in the code, "
                "no need to declare them again: " + ", ".join(already_declared_entities)
            )

        result = ValidationResponse(
            function_name=self.func_name,
            available_objects=self.available_objects,
            available_functions=self.available_functions,
            rawCode=function_code,
            imports=visitor.imports.copy(),
            objects=[],  # Objects will be bundled in the function_code instead.
            template=function_template or "",
            functionCode=function_code,
            functions=stub_funcs,
            packages=packages,
        )

        # Execute static validators and fixers.
        old_compiled_code = generate_compiled_code(result, add_code_stubs)
        validation_errors.extend(await static_code_analysis(result))
        new_compiled_code = result.get_compiled_code()

        # Auto-fixer works, retry validation (limit to 5 times, to avoid infinite loop)
        if old_compiled_code != new_compiled_code and call_cnt < 5:
            return await self.validate_code(
                packages=packages,
                raw_code=new_compiled_code,
                raise_validation_error=raise_validation_error,
                add_code_stubs=add_code_stubs,
                call_cnt=call_cnt + 1,
            )

        if validation_errors:
            if raise_validation_error:
                error_message = "".join("\n * " + e for e in validation_errors)
                raise Exception("Error validating code: " + error_message)
            else:
                # This should happen only on `reformat_code` call
                logger.warning("Error validating code: %s", validation_errors)

        return result

    def __validate_main_function(
        self,
        deps_funcs: list[FunctionDef],
        function_code: str,
        validation_errors: list[str],
    ) -> FunctionDef:
        """
        Validate the main function body and signature

        Args:
            deps_funcs: implemented functions found in the validated code
            function_code: the re-assembled source, attached to the main function
            validation_errors: mutated in place with signature-mismatch errors

        Returns:
            FunctionDef: the main function object
        """
        # Validate that the main function is implemented.
        func_obj = next((f for f in deps_funcs if f.name == self.func_name), None)
        if not func_obj or not func_obj.is_implemented:
            raise Exception(
                f"Main Function body {self.func_name} is not implemented."
                f" Please complete the implementation of this function!"
            )
        func_obj.function_code = function_code

        # Validate that the main function is matching the expected signature.
        func_req: FunctionDef | None = self.available_functions.get(self.func_name)
        if not func_req:
            raise AssertionError(f"Function {self.func_name} does not exist on DB")
        try:
            validate_matching_function(func_obj, func_req)
        except Exception as e:
            validation_errors.append(e.__str__())

        return func_obj
# ======= Static Code Validation Helper Functions =======#
async def static_code_analysis(func: ValidationResponse) -> list[str]:
    """
    Run the static analyzers (ruff, then pyright) over `func`'s code.

    The analyzers may auto-fix issues, so `func` is mutated in place.

    Args:
        func (ValidationResponse):
            The function to run static code analysis on. `func` will be mutated.

    Returns:
        list[str]: The list of validation errors
    """
    errors: list[str] = []
    errors.extend(await __execute_ruff(func))
    errors.extend(await __execute_pyright(func))
    return errors
# Marker separating the import block from the function body when round-tripping
# code through external tools.
CODE_SEPARATOR = "#------Code-Start------#"


def __pack_import_and_function_code(func: ValidationResponse) -> str:
    """Join `func`'s import lines and raw code into one source string."""
    parts = [*func.imports, CODE_SEPARATOR, func.rawCode]
    return "\n".join(parts)


def __unpack_import_and_function_code(code: str) -> tuple[list[str], str]:
    """Split a packed source string back into (import lines, function code)."""
    segments = code.split(CODE_SEPARATOR)
    return segments[0].splitlines(), segments[1].strip()
async def __execute_ruff(func: ValidationResponse) -> list[str]:
    """
    Run `ruff check --fix` on the packed function code.

    On success the (possibly auto-fixed) code is written back into
    `func.imports` / `func.rawCode` and an empty list is returned. On failure
    the ruff diagnostics are parsed into per-line error strings, and imports
    for undefined names are auto-added where possible.

    Args:
        func (ValidationResponse): The function to lint. `func` is mutated.

    Returns:
        list[str]: The list of validation errors (empty when ruff passes).
    """
    code = __pack_import_and_function_code(func)
    try:
        # Currently Disabled Rule List
        # E402 module level import not at top of file
        # F841 local variable is assigned to but never used
        code = await exec_external_on_contents(
            command_arguments=[
                "ruff",
                "check",
                "--fix",
                "--ignore",
                "F841",
                "--ignore",
                "E402",
                "--ignore",
                "F811",  # Redefinition of unused '...' from line ...
            ],
            file_contents=code,
            suffix=".py",
            raise_file_contents_on_error=True,
        )
        func.imports, func.rawCode = __unpack_import_and_function_code(code)
        return []
    except ExecError as e:
        if e.content:
            # Ruff failed, but the code is reformatted
            code = e.content
            e = str(e)

        # Keep individual diagnostics; drop blanks and the "Found N errors"
        # summary line ruff appends.
        error_messages = [
            v
            for v in str(e).split("\n")
            if v.strip()
            if re.match(r"Found \d+ errors?\.*", v) is None
        ]
        added_imports, error_messages = await __fix_missing_imports(
            error_messages, func
        )

        # Append problematic line to the error message or add it as TODO line
        validation_errors: list[str] = []
        split_pattern = r"(.+):(\d+):(\d+): (.+)"
        for error_message in error_messages:
            error_split = re.match(split_pattern, error_message)
            if not error_split:
                error = error_message
            else:
                # groups are (path, line, column, message); path/column unused
                _, line, _, error = error_split.groups()
                error = genererate_line_error(error, code, int(line))
            validation_errors.append(error)

        func.imports, func.rawCode = __unpack_import_and_function_code(code)
        func.imports.extend(added_imports)  # Avoid line-code change, do it at the end.
        return validation_errors
async def __execute_pyright(func: ValidationResponse) -> list[str]:
    """
    Type-check the packed function code with pyright inside a per-function
    virtualenv created under PROJECT_TEMP_DIR.

    Args:
        func (ValidationResponse): The function to type-check. `func` is
            mutated: its imports/rawCode are rewritten from the checked file.

    Returns:
        list[str]: The list of validation errors.
    """
    code = __pack_import_and_function_code(func)
    validation_errors: list[str] = []

    # Create temporary directory under the TEMP_DIR with random name
    temp_dir = PROJECT_TEMP_DIR / (func.function_name)
    py_path = await setup_if_required(temp_dir)

    async def __execute_pyright_commands(code: str) -> list[str]:
        try:
            await execute_command(
                ["pip", "install", "-r", "requirements.txt"], temp_dir, py_path
            )
        except Exception as e:
            # Unknown deps should be reported as validation errors
            validation_errors.append(e.__str__())

        # execute pyright
        result = await execute_command(
            ["pyright", "--outputjson"], temp_dir, py_path, raise_on_error=False
        )
        if not result:
            return []

        try:
            json_response = json.loads(result)["generalDiagnostics"]
        except Exception as e:
            logger.error(f"Error parsing pyright output, error: {e} output: {result}")
            raise e

        for e in json_response:
            rule: str = e.get("rule", "")
            severity: str = e.get("severity", "")
            # Redeclarations are expected (stubs are later redefined for real)
            excluded_rules = ["reportRedeclaration"]
            if severity != "error" or any([rule.startswith(r) for r in excluded_rules]):
                continue
            e = genererate_line_error(
                error=f"{e['message']}. {e.get('rule', '')}",
                code=code,
                line_number=e["range"]["start"]["line"] + 1,  # pyright is 0-based
            )
            validation_errors.append(e)

        # read code from code.py. split the code into imports and raw code
        code = open(f"{temp_dir}/code.py").read()
        # NOTE(review): the filtered error list returned by __fix_async_calls
        # is discarded, so async-call errors it removes still remain in
        # `validation_errors` — confirm whether that is intended.
        code, error_messages = await __fix_async_calls(code, validation_errors)
        func.imports, func.rawCode = __unpack_import_and_function_code(code)

        return validation_errors

    packages = "\n".join([str(p) for p in func.packages if p not in DEFAULT_DEPS])
    (temp_dir / "requirements.txt").write_text(packages)
    (temp_dir / "code.py").write_text(code)
    return await __execute_pyright_commands(code)
async def find_module_dist_and_source(
module: str, py_path: pathlib.Path | str
) -> typing.Tuple[pathlib.Path | None, pathlib.Path | None]:
# Find the module in the env
modules_path = pathlib.Path(py_path).parent / "lib" / "python3.11" / "site-packages"
matches = modules_path.glob(f"{module}*")
# resolve the generator to an array
matches = list(matches)
if not matches:
return None, None
# find the dist info path and the module path
dist_info_path: typing.Optional[pathlib.Path] = None
module_path: typing.Optional[pathlib.Path] = None
# find the dist info path
for match in matches:
if re.match(f"{module}-[0-9]+.[0-9]+.[0-9]+.dist-info", match.name):
dist_info_path = match
break
# Get the module path
for match in matches:
if module == match.name:
module_path = match
break
return dist_info_path, module_path
# Known symbols that can be auto-imported when ruff reports an undefined name,
# mapping symbol -> the import statement that provides it. Later entries win:
# names exported by more than one module (e.g. OrderedDict appears in both
# typing and collections) resolve to the last module in the sequence below.
AUTO_IMPORT_TYPES: dict[str, str] = {
    "Enum": "from enum import Enum",
    "array": "from array import array",
}
for _module_name, _exported_names in (
    ("typing", typing.__all__),
    ("datetime", datetime.__all__),
    ("collections", collections.__all__),
):
    for _symbol in _exported_names:
        AUTO_IMPORT_TYPES[_symbol] = f"from {_module_name} import {_symbol}"
async def __fix_async_calls(code: str, errors: list[str]) -> tuple[str, list[str]]:
"""
Fix the async calls in the code
Args:
code (str): The code snippet
errors (list[str]): The list of errors
func (ValidationResponse): The function to fix the async calls
Returns:
tuple[str, list[str]]: The fixed code snippet and the list of errors
"""
async_calls = set()
new_errors = []
for error in errors:
pattern = '"__await__" is not present. reportGeneralTypeIssues -> (.+)'
match = re.search(pattern, error)
if match:
async_calls.add(match.group(1))
else:
new_errors.append(error)
for async_call in async_calls:
func_call = re.search(r"await ([a-zA-Z0-9_]+)", async_call)
if func_call:
func_name = func_call.group(1)
code = code.replace(f"await {func_name}", f"{func_name}")
return code, new_errors
async def __fix_missing_imports(
    errors: list[str], func: ValidationResponse
) -> tuple[set[str], list[str]]:
    """
    Generate missing imports for names ruff reports as undefined.

    Args:
        errors (list[str]): The list of ruff errors.
        func (ValidationResponse): Provides the known functions/objects that
            can be imported from generated service modules.

    Returns:
        tuple[set[str], list[str]]: The set of imports to add, and the errors
            that were not "undefined name" problems.
    """
    undefined_name_pattern = r"Undefined name `(.+?)`"
    new_imports: list[str] = []
    passthrough_errors: list[str] = []

    for err in errors:
        found = re.search(undefined_name_pattern, err)
        if not found:
            passthrough_errors.append(err)
            continue

        name = found.group(1)
        if name in AUTO_IMPORT_TYPES:
            new_imports.append(AUTO_IMPORT_TYPES[name])
        elif name in func.available_functions:
            # TODO FIX THIS!! IMPORT THE CORRECT AUTOGPT SERVICE.
            new_imports.append(f"from project.{name}_service import {name}")
        elif name in func.available_objects:
            # TODO FIX THIS!! IMPORT THE CORRECT AUTOGPT SERVICE.
            new_imports.append(f"from project.{name}_object import {name}")
        else:
            passthrough_errors.append(err)

    return set(new_imports), passthrough_errors

View File

@@ -0,0 +1,195 @@
import asyncio
import enum
import logging
import os
import subprocess
import tempfile
from asyncio.subprocess import Process
from pathlib import Path
logger = logging.getLogger(__name__)
class OutputType(enum.Enum):
STD_OUT = "stdout"
STD_ERR = "stderr"
BOTH = "both"
class ExecError(Exception):
content: str | None
def __init__(self, error: str, content: str | None = None):
super().__init__(error)
self.content = content
async def exec_external_on_contents(
command_arguments: list[str],
file_contents,
suffix: str = ".py",
output_type: OutputType = OutputType.BOTH,
raise_file_contents_on_error: bool = False,
) -> str:
"""
Execute an external tool with the provided command arguments and file contents
:param command_arguments: The command arguments to execute
:param file_contents: The file contents to execute the command on
:param suffix: The suffix of the temporary file. Default is ".py"
:return: The file contents after the command has been executed
Note: The file contents are written to a temporary file and the command is executed
on that file. The command arguments should be a list of strings, where the first
element is the command to execute and the rest of the elements are the arguments to
the command. There is no need to provide the file path as an argument, as it will
be appended to the command arguments.
Example:
exec_external(["ruff", "check"], "print('Hello World')")
will run the command "ruff check <temp_file_path>" with the file contents
"print('Hello World')" and return the file contents after the command
has been executed.
"""
errors = ""
if len(command_arguments) == 0:
raise ExecError("No command arguments provided")
# Run ruff to validate the code
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
temp_file_path = temp_file.name
temp_file.write(file_contents.encode("utf-8"))
temp_file.flush()
command_arguments.append(str(temp_file_path))
# Run Ruff on the temporary file
try:
r: Process = await asyncio.create_subprocess_exec(
*command_arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
result = await r.communicate()
stdout, stderr = result[0].decode("utf-8"), result[1].decode("utf-8")
logger.debug(f"Output: {stdout}")
if temp_file_path in stdout:
stdout = stdout # .replace(temp_file.name, "/generated_file")
logger.debug(f"Errors: {stderr}")
if output_type == OutputType.STD_OUT:
errors = stdout
elif output_type == OutputType.STD_ERR:
errors = stderr
else:
errors = stdout + "\n" + stderr
with open(temp_file_path, "r") as f:
file_contents = f.read()
finally:
# Ensure the temporary file is deleted
os.remove(temp_file_path)
if not errors:
return file_contents
if raise_file_contents_on_error:
raise ExecError(errors, file_contents)
raise ExecError(errors)
# Directory name shared by both static-analysis working locations.
FOLDER_NAME = "agpt-static-code-analysis"
# Repo-local hidden directory holding the master virtualenv that per-function
# environments are cloned from.
PROJECT_PARENT_DIR = Path(__file__).resolve().parent.parent.parent / f".{FOLDER_NAME}"
# System temp directory under which per-function analysis envs are created.
PROJECT_TEMP_DIR = Path(tempfile.gettempdir()) / FOLDER_NAME
# Tools always installed into an analysis env (excluded from requirements.txt).
DEFAULT_DEPS = ["pyright", "pydantic", "virtualenv-clone"]
def is_env_exists(path: Path):
    """Return True when `path` holds a fully provisioned virtualenv: the
    python, pip, virtualenv-clone and pyright binaries must all be present."""
    required_binaries = ("python", "pip", "virtualenv-clone", "pyright")
    return all(
        (path / "venv" / "bin" / binary).exists() for binary in required_binaries
    )
async def setup_if_required(
    cwd: Path = PROJECT_PARENT_DIR, copy_from_parent: bool = True
) -> Path:
    """
    Set-up the virtual environment if it does not exist
    This setup is executed expectedly once per application run

    Args:
        cwd (Path): The current working directory
        copy_from_parent (bool):
            Whether to copy the virtual environment from PROJECT_PARENT_DIR

    Returns:
        Path: The path to the virtual environment's bin directory
    """
    if not cwd.exists():
        cwd.mkdir(parents=True, exist_ok=True)

    path = cwd / "venv/bin"
    if is_env_exists(cwd):
        return path

    # Fast path: clone the already-provisioned parent env instead of building
    # a fresh one. The recursive call ensures the parent env exists first.
    if copy_from_parent and cwd != PROJECT_PARENT_DIR:
        if (cwd / "venv").exists():
            # Remove a partial/stale venv before cloning over it
            await execute_command(["rm", "-rf", str(cwd / "venv")], cwd, None)
        await execute_command(
            ["virtualenv-clone", str(PROJECT_PARENT_DIR / "venv"), str(cwd / "venv")],
            cwd,
            await setup_if_required(PROJECT_PARENT_DIR),
        )
        return path

    # Create a virtual environment
    output = await execute_command(["python", "-m", "venv", "venv"], cwd, None)
    logger.info(f"[Setup] Created virtual environment: {output}")

    # Install dependencies
    output = await execute_command(["pip", "install", "-I"] + DEFAULT_DEPS, cwd, path)
    logger.info(f"[Setup] Installed {DEFAULT_DEPS}: {output}")

    # Warm up pyright (its first run installs its backend; failures tolerated)
    output = await execute_command(["pyright"], cwd, path, raise_on_error=False)
    logger.info(f"[Setup] Set up pyright: {output}")

    return path
async def execute_command(
command: list[str],
cwd: str | Path | None,
python_path: str | Path | None = None,
raise_on_error: bool = True,
) -> str:
"""
Execute a command in the shell
Args:
command (list[str]): The command to execute
cwd (str | Path): The current working directory
python_path (str | Path): The python executable path
raise_on_error (bool): Whether to raise an error if the command fails
Returns:
str: The output of the command
"""
# Set the python path by replacing the env 'PATH' with the provided python path
venv = os.environ.copy()
if python_path:
# PATH prioritize first occurrence of python_path, so we need to prepend.
venv["PATH"] = f"{python_path}:{venv['PATH']}"
r = await asyncio.create_subprocess_exec(
*command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(cwd),
env=venv,
)
stdout, stderr = await r.communicate()
if r.returncode == 0:
return (stdout or stderr).decode("utf-8")
if raise_on_error:
raise Exception((stderr or stdout).decode("utf-8"))
else:
return (stderr or stdout).decode("utf-8")

View File

@@ -0,0 +1,110 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class ObjectType(BaseModel):
    """A class/object definition extracted from, or destined for, generated
    code (pydantic model, enum, or plain class)."""

    name: str = Field(description="The name of the object")
    code: Optional[str] = Field(description="The code of the object", default=None)
    description: Optional[str] = Field(
        description="The description of the object", default=None
    )
    # NOTE(review): capitalized field name is unconventional but appears to be
    # part of the serialized schema — renaming would break existing data.
    Fields: List["ObjectField"] = Field(description="The fields of the object")
    is_pydantic: bool = Field(
        description="Whether the object is a pydantic model", default=True
    )
    is_implemented: bool = Field(
        description="Whether the object is implemented", default=True
    )
    is_enum: bool = Field(description="Whether the object is an enum", default=False)
class ObjectField(BaseModel):
    """A single attribute of an ObjectType."""

    name: str = Field(description="The name of the field")
    description: Optional[str] = Field(
        description="The description of the field", default=None
    )
    # NOTE(review): the description text below is garbled ("an use any of they
    # related types") — safe to reword, but it is a runtime string, so left as-is.
    type: str = Field(
        description="The type of the field. Can be a string like List[str] or an use "
        "any of they related types like list[User]",
    )
    # Default value expression, if any (source text, not an evaluated value)
    value: Optional[str] = Field(description="The value of the field", default=None)
    # pydantic copies mutable defaults per-instance, so default=[] is safe here
    related_types: Optional[List[ObjectType]] = Field(
        description="The related types of the field", default=[]
    )
class FunctionDef(BaseModel):
    """A function signature + implementation, as parsed from generated code
    or as required by the specification."""

    name: str
    # Ordered (arg_name, arg_type) pairs
    arg_types: list[tuple[str, str]]
    # Default value expressions, keyed by arg name
    arg_defaults: dict[str, str] = {}
    # Human-readable descriptions, keyed by arg name
    arg_descs: dict[str, str]
    return_type: str | None = None
    return_desc: str
    function_desc: str
    # False while the body is still a stub (e.g. just `pass`)
    is_implemented: bool = False
    function_code: str = ""
    # Stub rendering of the function (signature + docstring + pass);
    # auto-generated in __init__ when not supplied
    function_template: str | None = None
    is_async: bool = False

    def __generate_function_template(self) -> str:
        """Render a stub: signature line, docstring (Args/Returns) and `pass`."""
        args_str = ", ".join(
            [
                f"{name}: {type}"
                + (
                    f" = {self.arg_defaults.get(name, '')}"
                    if name in self.arg_defaults
                    else ""
                )
                for name, type in self.arg_types
            ]
        )
        arg_desc = f"\n{' '*4}".join(
            [
                f'{name} ({type}): {self.arg_descs.get(name, "-")}'
                for name, type in self.arg_types
            ]
        )
        # Infer async-ness from the body when the flag is not set explicitly.
        _def = "async def" if "await " in self.function_code or self.is_async else "def"
        _return_type = f" -> {self.return_type}" if self.return_type else ""
        func_desc = self.function_desc.replace("\n", "\n    ")
        template = f"""
{_def} {self.name}({args_str}){_return_type}:
    \"\"\"
    {func_desc}

    Args:
        {arg_desc}

    Returns:
        {self.return_type}{': ' + self.return_desc if self.return_desc else ''}
    \"\"\"
    pass
"""
        return "\n".join([line for line in template.split("\n")]).strip()

    def __init__(self, function_template: Optional[str] = None, **data):
        super().__init__(**data)
        # Fall back to an auto-generated stub when no template is provided.
        self.function_template = (
            function_template or self.__generate_function_template()
        )
class ValidationResponse(BaseModel):
    """The full result of validating a generated function, carrying both the
    parsed structure and the (mutable) code under validation."""

    function_name: str
    # Objects/functions already known to the system, keyed by name
    available_objects: dict[str, ObjectType]
    available_functions: dict[str, FunctionDef]
    template: str
    # Code body without the import section
    rawCode: str
    packages: List[str]
    imports: List[str]
    functionCode: str
    # Definitions parsed out of the generated code
    functions: List[FunctionDef]
    objects: List[ObjectType]

    def get_compiled_code(self) -> str:
        # Reassemble a runnable module: import section first, then the body.
        return "\n".join(self.imports) + "\n\n" + self.rawCode.strip()

View File

@@ -0,0 +1,292 @@
from typing import List, Tuple, __all__ as all_types
from forge.utils.function.model import FunctionDef, ObjectType, ValidationResponse
# Opening/closing bracket characters mapped to the composite type they denote.
OPEN_BRACES = {"{": "Dict", "[": "List", "(": "Tuple"}
CLOSE_BRACES = {"}": "Dict", "]": "List", ")": "Tuple"}

# Lowercase builtin generics normalized to their typing-module spellings.
RENAMED_TYPES = {
    "dict": "Dict",
    "list": "List",
    "tuple": "Tuple",
    "set": "Set",
    "frozenset": "FrozenSet",
    "type": "Type",
}

# Every name exported by `typing`; used to reject colliding class names.
PYTHON_TYPES = set(all_types)


def unwrap_object_type(type: str) -> Tuple[str, List[str]]:
    """
    Get the type and children of a composite type.

    Args:
        type (str): The type string to parse (whitespace is ignored).

    Returns:
        str: The normalized outer type name.
        [str]: The children (type-parameter) strings.
    """
    type = type.replace(" ", "")
    if not type:
        return "", []

    def split_outer_level(text: str, separator: str) -> List[str]:
        """Split `text` on `separator`, ignoring separators inside brackets."""
        pieces: List[str] = []
        depth = 0
        start = 0
        for position, char in enumerate(text):
            if char in OPEN_BRACES:
                depth += 1
            elif char in CLOSE_BRACES:
                depth -= 1
            elif char == separator and depth == 0:
                pieces.append(text[start:position])
                start = position + 1
        pieces.append(text[start:])
        return pieces

    # Union syntax "A|B|..."; a two-way union with None becomes Optional.
    union_parts = split_outer_level(type, "|")
    if len(union_parts) > 1:
        if len(union_parts) == 2 and "None" in union_parts:
            return "Optional", [part for part in union_parts if part != "None"]
        return "Union", union_parts

    # Bare literal syntax: "{...}", "[...]", "(...)"
    if type[0] in OPEN_BRACES and type[-1] in CLOSE_BRACES:
        return OPEN_BRACES[type[0]], split_outer_level(type[1:-1], ",")

    # Subscripted composite "Name[...]"; otherwise a plain type name.
    bracket_at = type.find("[")
    if bracket_at != -1 and type[-1] == "]":
        outer_name = type[:bracket_at]
        children = split_outer_level(type[bracket_at + 1 : -1], ",")
    else:
        outer_name = type
        children = []
    return RENAMED_TYPES.get(outer_name, outer_name), children
def is_type_equal(type1: str | None, type2: str | None) -> bool:
    """
    Check if two types are equal.
    This function handles composite types like list, dict, and tuple, and
    groups similar spellings like list[str], List[str], and [str] as equal.
    """
    if type1 is None and type2 is None:
        return True
    if type1 is None or type2 is None:
        return False

    evaluated_type1, children1 = unwrap_object_type(type1)
    evaluated_type2, children2 = unwrap_object_type(type2)

    # Compare the class name of the types (ignoring the module)
    # TODO(majdyz): compare the module name as well.
    # NOTE(review): t_len is the min *character* length of the two names, not
    # a dotted-component count, so the [-t_len:] slice in practice compares
    # all components — confirm this is the intended suffix comparison.
    t_len = min(len(evaluated_type1), len(evaluated_type2))
    if evaluated_type1.split(".")[-t_len:] != evaluated_type2.split(".")[-t_len:]:
        return False

    if len(children1) != len(children2):
        return False
    if len(children1) == len(children2) == 0:
        return True

    # Recursively compare the type parameters pairwise.
    for c1, c2 in zip(children1, children2):
        if not is_type_equal(c1, c2):
            return False

    return True
def validate_matching_function(this: FunctionDef, that: FunctionDef):
    """
    Validate that `this` (the generated function) matches the signature of
    `that` (the expected/required function).

    Side effects: `this` is mutated — its async flag and function code are
    rewritten to match the expectation.

    Raises:
        Exception: If argument names/types or the return type don't match.
    """
    expected_args = that.arg_types
    expected_rets = that.return_type
    func_name = that.name
    errors = []

    # Fix the async flag based on the expectation.
    if this.is_async != that.is_async:
        this.is_async = that.is_async
    if this.is_async and f"async def {this.name}" not in this.function_code:
        this.function_code = this.function_code.replace(
            f"def {this.name}", f"async def {this.name}"
        )
    if not this.is_async and f"async def {this.name}" in this.function_code:
        this.function_code = this.function_code.replace(
            f"async def {this.name}", f"def {this.name}"
        )

    # NOTE(review): by operator precedence this reads as
    # `name_mismatch or (type_mismatch and x[1] != "object")` — confirm the
    # "object" escape hatch is meant to apply only to the type comparison.
    if any(
        [
            x[0] != y[0] or not is_type_equal(x[1], y[1]) and x[1] != "object"
            # TODO: remove sorted and provide a stable order for one-to-many arg-types.
            for x, y in zip(sorted(expected_args), sorted(this.arg_types))
        ]
    ):
        errors.append(
            f"Function {func_name} has different arguments than expected, "
            f"expected {expected_args} but got {this.arg_types}"
        )
    if not is_type_equal(expected_rets, this.return_type) and expected_rets != "object":
        errors.append(
            f"Function {func_name} has different return type than expected, expected "
            f"{expected_rets} but got {this.return_type}"
        )

    if errors:
        raise Exception("Signature validation errors:\n " + "\n ".join(errors))
def normalize_type(type: str, renamed_types: dict[str, str] = {}) -> str:
    """
    Normalize the type to a standard format.
    e.g. list[str] -> List[str], dict[str, int | float] -> Dict[str, Union[int, float]]

    Args:
        type (str): The type to normalize.
        renamed_types (dict[str, str]): Extra name substitutions to apply to
            each unwrapped type name (read-only).

    Returns:
        str: The normalized type.
    """
    base_name, children = unwrap_object_type(type)
    base_name = renamed_types.get(base_name, base_name)
    if not children:
        return base_name
    inner = ", ".join(normalize_type(child, renamed_types) for child in children)
    return f"{base_name}[{inner}]"
def generate_object_code(obj: ObjectType) -> str:
    """Render an ObjectType as Python class source (pydantic model, Enum, or
    plain class) including fields, docstring, and verbatim method bodies."""
    if not obj.name:
        return ""  # Avoid generating an empty object

    # Auto-generate a template for the object, this will not capture any class functions
    fields = f"\n{' ' * 4}".join(
        [
            f"{field.name}: {field.type} "
            f"{('= '+field.value) if field.value else ''} "
            f"{('# '+field.description) if field.description else ''}"
            for field in obj.Fields or []
        ]
    )

    # Choose the base class from the object's flavor flags (enum wins).
    parent_class = ""
    if obj.is_enum:
        parent_class = "Enum"
    elif obj.is_pydantic:
        parent_class = "BaseModel"

    doc_string = (
        f"""\"\"\"
    {obj.description}
    \"\"\""""
        if obj.description
        else ""
    )

    # Re-indent the stored method code so it sits inside the class body.
    method_body = ("\n" + " " * 4).join(obj.code.split("\n")) + "\n" if obj.code else ""

    template = f"""
class {obj.name}({parent_class}):
    {doc_string if doc_string else ""}
    {fields if fields else ""}
    {method_body if method_body else ""}
    {"pass" if not fields and not method_body else ""}
"""
    return "\n".join(line for line in template.split("\n")).strip()
def genererate_line_error(error: str, code: str, line_number: int) -> str:
    """Append the offending source line (1-based `line_number`) to `error`.

    Returns `error` unchanged when the line number is out of range.
    (The typo in the function name is preserved for API compatibility.)
    """
    source_lines = code.split("\n")
    if line_number > len(source_lines):
        return error
    offending_line = source_lines[line_number - 1].strip()
    return f"{error} -> '{offending_line}'"
def generate_compiled_code(
    resp: ValidationResponse, add_code_stubs: bool = True
) -> str:
    """
    Regenerate imports & raw code using the available objects and functions.

    Rebuilds `resp.rawCode` as: forward-declaration stubs, then full object
    definitions, then function templates, then the main function code.
    Mutates `resp` (imports are deduplicated and sorted) and returns the full
    compiled module source.
    """
    resp.imports = sorted(set(resp.imports))

    def __append_comment(code_block: str, comment: str) -> str:
        """
        Append `# noqa` to the first line of the code block.
        This is to suppress flake8 warnings for redefined names.
        """
        lines = code_block.split("\n")
        lines[0] = lines[0] + " # " + comment
        return "\n".join(lines)

    def __generate_stub(name, is_enum):
        # Empty forward declaration so later definitions can reference the name.
        if not name:
            return ""
        elif is_enum:
            return f"class {name}(Enum):\n    pass"
        else:
            return f"class {name}(BaseModel):\n    pass"

    # When stubs are disabled, only the freshly parsed definitions are emitted.
    stub_objects = resp.available_objects if add_code_stubs else {}
    stub_functions = resp.available_functions if add_code_stubs else {}

    # Forward-declaration stubs for every object (known + newly parsed).
    object_stubs_code = "\n\n".join(
        [
            __append_comment(__generate_stub(obj.name, obj.is_enum), "type: ignore")
            for obj in stub_objects.values()
        ]
        + [
            __append_comment(__generate_stub(obj.name, obj.is_enum), "type: ignore")
            for obj in resp.objects
            if obj.name not in stub_objects
        ]
    )
    # Full object definitions (may redefine the stubs above — hence # noqa).
    objects_code = "\n\n".join(
        [
            __append_comment(generate_object_code(obj), "noqa")
            for obj in stub_objects.values()
        ]
        + [
            __append_comment(generate_object_code(obj), "noqa")
            for obj in resp.objects
            if obj.name not in stub_objects
        ]
    )
    # Function templates for everything except the main function itself.
    functions_code = "\n\n".join(
        [
            __append_comment(f.function_template.strip(), "type: ignore")
            for f in stub_functions.values()
            if f.name != resp.function_name and f.function_template
        ]
        + [
            __append_comment(f.function_template.strip(), "type: ignore")
            for f in resp.functions
            if f.name not in stub_functions and f.function_template
        ]
    )

    resp.rawCode = (
        object_stubs_code.strip()
        + "\n\n"
        + objects_code.strip()
        + "\n\n"
        + functions_code.strip()
        + "\n\n"
        + resp.functionCode.strip()
    )

    return resp.get_compiled_code()

View File

@@ -0,0 +1,222 @@
import ast
import re
from forge.utils.function.model import FunctionDef, ObjectType, ObjectField
from forge.utils.function.util import normalize_type, PYTHON_TYPES
class FunctionVisitor(ast.NodeVisitor):
    """
    Visits a Python AST and extracts function definitions and Pydantic class definitions

    To use this class, create an instance and call the visit method with the AST
    as the argument. The extracted function definitions and Pydantic class
    definitions can be accessed from the functions and objects attributes
    respectively.

    Example:
    ```
    visitor = FunctionVisitor()
    visitor.visit(ast.parse("def foo(x: int) -> int: return x"))
    print(visitor.functions)
    ```
    """

    def __init__(self):
        # Parsed definitions, plus the source line number where each was found
        self.functions: list[FunctionDef] = []
        self.functionsIdx: list[int] = []
        self.objects: list[ObjectType] = []
        self.objectsIdx: list[int] = []
        self.globals: list[str] = []
        self.globalsIdx: list[int] = []
        self.imports: list[str] = []
        self.errors: list[str] = []

    def visit_Import(self, node):
        # Record plain `import x [as y]` statements verbatim.
        for alias in node.names:
            import_line = f"import {alias.name}"
            if alias.asname:
                import_line += f" as {alias.asname}"
            self.imports.append(import_line)
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        # Record `from m import x [as y]` statements verbatim.
        for alias in node.names:
            import_line = f"from {node.module} import {alias.name}"
            if alias.asname:
                import_line += f" as {alias.asname}"
            self.imports.append(import_line)
        self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):
        # treat async functions as normal functions
        self.visit_FunctionDef(node)  # type: ignore

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        """Parse a function into a FunctionDef: signature, docstring sections
        (args/returns/raises/examples), stub template, implementation status."""
        args = []
        for arg in node.args.args:
            # Unannotated parameters are treated as `object`
            arg_type = ast.unparse(arg.annotation) if arg.annotation else "object"
            args.append((arg.arg, normalize_type(arg_type)))

        return_type = (
            normalize_type(ast.unparse(node.returns)) if node.returns else None
        )

        # Extract doc_string & function body
        if (
            node.body
            and isinstance(node.body[0], ast.Expr)
            and isinstance(node.body[0].value, ast.Constant)
        ):
            doc_string = node.body[0].value.s.strip()
            template_body = [node.body[0], ast.Pass()]
            # NOTE(review): node.body[1] raises IndexError when the function
            # body is only a docstring — confirm such inputs cannot occur here.
            is_implemented = not isinstance(node.body[1], ast.Pass)
        else:
            doc_string = ""
            template_body = [ast.Pass()]
            is_implemented = not isinstance(node.body[0], ast.Pass)

        # Construct the function template by temporarily swapping in the stub
        # body, unparsing, then restoring the original body.
        original_body = node.body.copy()
        node.body = template_body  # type: ignore
        function_template = ast.unparse(node)
        node.body = original_body
        function_code = ast.unparse(node)
        # Promote to async when the body awaits but the def is not async.
        if "await" in function_code and "async def" not in function_code:
            function_code = function_code.replace("def ", "async def ")
            function_template = function_template.replace("def ", "async def ")

        def split_doc(keywords: list[str], doc: str) -> tuple[str, str]:
            # Split `doc` at the first "<keyword>:" marker found.
            for keyword in keywords:
                if match := re.search(f"{keyword}\\s?:", doc):
                    return doc[: match.start()], doc[match.end() :]
            return doc, ""

        # Decompose doc_pattern into func_doc, args_doc, rets_doc, errs_doc, usage_doc
        # by splitting in reverse order
        func_doc = doc_string
        func_doc, usage_doc = split_doc(
            ["Ex", "Usage", "Usages", "Example", "Examples"], func_doc
        )
        func_doc, errs_doc = split_doc(["Error", "Errors", "Raise", "Raises"], func_doc)
        func_doc, rets_doc = split_doc(["Return", "Returns"], func_doc)
        func_doc, args_doc = split_doc(
            ["Arg", "Args", "Argument", "Arguments"], func_doc
        )

        # Extract Func
        function_desc = func_doc.strip()

        # Extract Args
        args_descs = {}
        split_pattern = r"\n(\s+.+):"
        for match in reversed(list(re.finditer(split_pattern, string=args_doc))):
            arg = match.group(1).strip().split(" ")[0]
            desc = args_doc.rsplit(match.group(1), 1)[1].strip(": ")
            args_descs[arg] = desc.strip()
            args_doc = args_doc[: match.start()]

        # Extract Returns
        return_desc = ""
        if match := re.match(split_pattern, string=rets_doc):
            return_desc = rets_doc[match.end() :].strip()

        self.functions.append(
            FunctionDef(
                name=node.name,
                arg_types=args,
                arg_descs=args_descs,
                return_type=return_type,
                return_desc=return_desc,
                is_implemented=is_implemented,
                function_desc=function_desc,
                function_template=function_template,
                function_code=function_code,
            )
        )
        self.functionsIdx.append(node.lineno)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        """
        Visits a ClassDef node in the AST and checks if it is a Pydantic class.
        If it is a Pydantic class, adds its name to the list of Pydantic classes.
        """
        # A class is "pydantic" if any base is (or is an attribute named) BaseModel.
        is_pydantic = any(
            [
                (isinstance(base, ast.Name) and base.id == "BaseModel")
                or (isinstance(base, ast.Attribute) and base.attr == "BaseModel")
                for base in node.bases
            ]
        )
        # A class is an enum if any base name ends with "Enum" (Enum, IntEnum, ...).
        is_enum = any(
            [
                (isinstance(base, ast.Name) and base.id.endswith("Enum"))
                or (isinstance(base, ast.Attribute) and base.attr.endswith("Enum"))
                for base in node.bases
            ]
        )
        is_implemented = not any(isinstance(v, ast.Pass) for v in node.body)

        doc_string = ""
        if (
            node.body
            and isinstance(node.body[0], ast.Expr)
            and isinstance(node.body[0].value, ast.Constant)
        ):
            doc_string = node.body[0].value.s.strip()

        if node.name in PYTHON_TYPES:
            self.errors.append(
                f"Can't declare class with a Python built-in name "
                f"`{node.name}`. Please use a different name."
            )

        # Partition the class body into annotated/assigned fields vs methods.
        fields = []
        methods = []
        for v in node.body:
            if isinstance(v, ast.AnnAssign):
                field = ObjectField(
                    name=ast.unparse(v.target),
                    type=normalize_type(ast.unparse(v.annotation)),
                    value=ast.unparse(v.value) if v.value else None,
                )
                # Optional fields default to None when no value is given.
                if field.value is None and field.type.startswith("Optional"):
                    field.value = "None"
            elif isinstance(v, ast.Assign):
                if len(v.targets) > 1:
                    self.errors.append(
                        f"Class {node.name} has multiple assignments in a single line."
                    )
                # NOTE(review): `type(...).__name__` here is always "str" since
                # ast.unparse returns a string — confirm whether the actual
                # value type was intended.
                field = ObjectField(
                    name=ast.unparse(v.targets[0]),
                    type=type(ast.unparse(v.value)).__name__,
                    value=ast.unparse(v.value) if v.value else None,
                )
            elif isinstance(v, ast.Expr) and isinstance(v.value, ast.Constant):
                # skip comments and docstrings
                continue
            else:
                methods.append(ast.unparse(v))
                continue
            fields.append(field)

        self.objects.append(
            ObjectType(
                name=node.name,
                code="\n".join(methods),
                description=doc_string,
                Fields=fields,
                is_pydantic=is_pydantic,
                is_enum=is_enum,
                is_implemented=is_implemented,
            )
        )
        self.objectsIdx.append(node.lineno)

    def visit(self, node):
        # Capture top-level (column 0) assignments as module globals before
        # dispatching to the specific visit_* handlers.
        if (
            isinstance(node, ast.Assign)
            or isinstance(node, ast.AnnAssign)
            or isinstance(node, ast.AugAssign)
        ) and node.col_offset == 0:
            self.globals.append(ast.unparse(node))
            self.globalsIdx.append(node.lineno)
        super().visit(node)

View File

@@ -1,6 +1,6 @@
## [AutoGPT Forge Part 1: A Comprehensive Guide to Your First Steps](https://aiedge.medium.com/autogpt-forge-a-comprehensive-guide-to-your-first-steps-a1dfdf46e3b4)
![Header](..%2F..%2F..%2Fdocs/content/imgs/quickstart/000_header_img.png)
![Header](../../../docs/content/imgs/quickstart/000_header_img.png)
**Written by Craig Swift & [Ryan Brandt](https://github.com/paperMoose)**
@@ -15,22 +15,22 @@ The Forge serves as a comprehensive template for building your own AutoGPT agent
To begin, you need to fork the [repository](https://github.com/Significant-Gravitas/AutoGPT) by navigating to the main page of the repository and clicking **Fork** in the top-right corner.
![The Github repository](..%2F..%2F..%2Fdocs/content/imgs/quickstart/001_repo.png)
![The Github repository](../../../docs/content/imgs/quickstart/001_repo.png)
Follow the on-screen instructions to complete the process.
![Create Fork Page](..%2F..%2F..%2Fdocs/content/imgs/quickstart/002_fork.png)
![Create Fork Page](../../../docs/content/imgs/quickstart/002_fork.png)
### Cloning the Repository
Next, clone your newly forked repository to your local system. Ensure you have Git installed to proceed with this step. You can download Git from [here](https://git-scm.com/downloads). Then clone the repo using the following command and the URL for your repo. You can find the correct URL by clicking the green **Code** button on your repo's main page.
![img_1.png](..%2F..%2F..%2Fdocs/content/imgs/quickstart/003A_clone.png)
![img_1.png](../../../docs/content/imgs/quickstart/003A_clone.png)
```bash
# replace the url with the one for your forked repo
git clone https://github.com/<YOUR REPO PATH HERE>
```
![Clone the Repository](..%2F..%2F..%2Fdocs/content/imgs/quickstart/003_clone.png)
![Clone the Repository](../../../docs/content/imgs/quickstart/003_clone.png)
### Setting up the Project
@@ -41,8 +41,8 @@ cd AutoGPT
```
To set up the project, utilize the `./run setup` command in the terminal. Follow the instructions to install necessary dependencies and set up your GitHub access token.
![Setup the Project](..%2F..%2F..%2Fdocs/content/imgs/quickstart/005_setup.png)
![Setup Complete](..%2F..%2F..%2Fdocs/content/imgs/quickstart/006_setup_complete.png)
![Setup the Project](../../../docs/content/imgs/quickstart/005_setup.png)
![Setup Complete](../../../docs/content/imgs/quickstart/006_setup_complete.png)
## Section 3: Creating Your Agent
@@ -55,7 +55,7 @@ Create your agent template using the command:
```
Replacing YOUR_AGENT_NAME with the name you chose in the previous step.
![Create an Agent](..%2F..%2F..%2Fdocs/content/imgs/quickstart/007_create_agent.png)
![Create an Agent](../../../docs/content/imgs/quickstart/007_create_agent.png)
## Section 4: Running Your Agent
@@ -66,13 +66,13 @@ Begin by starting your agent using the command:
```
This will initiate the agent on `http://localhost:8000/`.
![Start the Agent](..%2F..%2F..%2Fdocs/content/imgs/quickstart/009_start_agent.png)
![Start the Agent](../../../docs/content/imgs/quickstart/009_start_agent.png)
### Logging in and Sending Tasks to Your Agent
Access the frontend at `http://localhost:8000/` and log in using a Google or GitHub account. Once you're logged in, you'll see the agent tasking interface! However... the agent won't do anything yet. We'll implement the logic for our agent to run tasks in the upcoming tutorial chapters.
![Login](..%2F..%2F..%2Fdocs/content/imgs/quickstart/010_login.png)
![Home](..%2F..%2F..%2Fdocs/content/imgs/quickstart/011_home.png)
![Login](../../../docs/content/imgs/quickstart/010_login.png)
![Home](../../../docs/content/imgs/quickstart/011_home.png)
### Stopping and Restarting Your Agent
When needed, use Ctrl+C to end the session or use the stop command:

View File

@@ -7,7 +7,7 @@
---
![Header](..%2F..%2Fdocs/content/imgs/quickstart/t2_01.png)
![Header](../../../docs/content/imgs/quickstart/t2_01.png)
@@ -21,14 +21,14 @@ Large Language Models (LLMs) are state-of-the-art machine learning models that h
Traditional autonomous agents operated with limited knowledge, often confined to specific tasks or environments. They were like calculators — efficient but limited to predefined functions. LLM-based agents, on the other hand, don't just compute; they understand, reason, and then act, drawing from a vast reservoir of information.
![AI visualising AI researchers hard at work](..%2F..%2Fdocs/content/imgs/quickstart/t2_02.png)
![AI visualising AI researchers hard at work](../../../docs/content/imgs/quickstart/t2_02.png)
## The Anatomy of an LLM-Based AI Agent
Diving deep into the core of an LLM-based AI agent, we find it's structured much like a human, with distinct components akin to personality, memory, thought process, and abilities. Let's break these down:
![The Github repository](..%2F..%2Fdocs/content/imgs/quickstart/t2_03.png)
![The Github repository](../../../docs/content/imgs/quickstart/t2_03.png)
Anatomy of an Agent from the Agent Landscape Survey
### **Profile**

View File

@@ -1,6 +1,6 @@
# AutoGPT Forge: Crafting Intelligent Agent Logic
![Header](..%2F..%2F..%2Fdocs/content/imgs/quickstart/t3_01.png)
![Header](../../../docs/content/imgs/quickstart/t3_01.png)
**By Craig Swift & [Ryan Brandt](https://github.com/paperMoose)**
Hey there! Ready for part 3 of our AutoGPT Forge tutorial series? If you missed the earlier parts, catch up here:
@@ -17,7 +17,7 @@ Make sure you've set up your project and created an agent as described in our in
In the image below, you'll see my "SmartAgent" and the agent.py file inside the 'forge' folder. That's where we'll be adding our LLM-based logic. If you're unsure about the project structure or agent functions from our last guide, don't worry. We'll cover the basics as we go!
![SmartAgent](..%2F..%2F..%2Fdocs/content/imgs/quickstart/t3_02.png)
![SmartAgent](../../../docs/content/imgs/quickstart/t3_02.png)
---
@@ -125,7 +125,7 @@ Now that we've set this up, let's move to the next exciting part: The PromptEngi
**The Art of Prompting**
![Prompting 101](..%2F..%2F..%2Fdocs/content/imgs/quickstart/t3_03.png)
![Prompting 101](../../../docs/content/imgs/quickstart/t3_03.png)
Prompting is like shaping messages for powerful language models like ChatGPT. Since these models respond to input details, creating the right prompt can be a challenge. That's where the **PromptEngine** comes in.
@@ -479,7 +479,7 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
3. **Navigate to Benchmarking**
- Look to the left, and you'll spot a trophy icon. Click it to enter the benchmarking arena.
![Benchmarking page of the AutoGPT UI](..%2F..%2F..%2Fdocs/content/imgs/quickstart/t3_04.png)
![Benchmarking page of the AutoGPT UI](../../../docs/content/imgs/quickstart/t3_04.png)
4. **Select the 'WriteFile' Test**
- Choose the 'WriteFile' test from the available options.

View File

@@ -1,114 +1,36 @@
# AutoGPT Platform
This is a guide to setting up and running the AutoGPT Server and Builder. This tutorial will cover downloading the necessary files, setting up the server, and testing the system.
Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.
https://github.com/user-attachments/assets/fd0d0f35-3155-4263-b575-ba3efb126cb4
## Getting Started
1. Navigate to the AutoGPT GitHub repository.
2. Click the "Code" button, then select "Download ZIP".
3. Once downloaded, extract the ZIP file to a folder of your choice.
### Prerequisites
- Docker
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
### Running the System
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine.
2. Navigate to the project directory.
3. Run the following command:
```
docker compose up -d
```
This command will start all the necessary services defined in the `docker-compose.yml` file in detached mode.
### Docker Compose Commands
Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose up -d`: Start the services in detached mode.
- `docker compose stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
```
docker compose ps
```
This shows the current status of all services defined in your docker-compose.yml file.
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
1. Open the `docker-compose.yml` file in a text editor.
2. Add volume configurations for PostgreSQL and Redis services:
```yaml
services:
postgres:
# ... other configurations ...
volumes:
- postgres_data:/var/lib/postgresql/data
redis:
# ... other configurations ...
volumes:
- redis_data:/data
volumes:
postgres_data:
redis_data:
```
3. Save the file and run `docker compose up -d` to apply the changes.
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
4. Open the extracted folder and navigate to the "rnd" directory.
5. Enter the "AutoGPT server" folder.
6. Open a terminal window in this directory.
7. Locate and open the README file in the AutoGPT server folder: [doc](./autogpt_server/README.md#setup).
8. Copy and paste each command from the setup section in the README into your terminal.
- Important: Wait for each command to finish before running the next one.
9. If all commands run without errors, enter the final command: `poetry run app`
10. You should now see the server running in your terminal.
11. Navigate back to the "rnd" folder.
12. Open the "AutoGPT builder" folder.
13. Open the README file in this folder: [doc](./autogpt_builder/README.md#getting-started).
14. In your terminal, run the following commands:
```
npm install
```
```
npm run dev
```
15. Once the front-end is running, click the link to navigate to `localhost:3000`.
16. Click on the "Build" option.
17. Add a few blocks to test the functionality.
18. Connect the blocks together.
19. Click "Run".
20. Check your terminal window - you should see that the server has received the request, is processing it, and has executed it.
And there you have it! You've successfully set up and tested AutoGPT.

View File

View File

@@ -1,15 +1 @@
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8000/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8005/api/v1/market
## Supabase credentials
## YOU ONLY NEED THEM IF YOU WANT TO USE SUPABASE USER AUTHENTICATION
## If you're using self-hosted version then you most likely don't need to set this
# NEXT_PUBLIC_SUPABASE_URL=your-project-url
# NEXT_PUBLIC_SUPABASE_ANON_KEY=your-anon-key
## OAuth Callback URL
## This should be {domain}/auth/callback
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL=http://localhost:3000/auth/callback
GA_MEASUREMENT_ID=G-FH2XK2W4GN
AGPT_SERVER_URL=http://localhost:8000/api

View File

@@ -1,4 +0,0 @@
node_modules
.next
build
public

View File

@@ -1,3 +0,0 @@
{
"plugins": ["prettier-plugin-tailwindcss"]
}

View File

@@ -1,6 +1,7 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Next.js: debug server-side",
"type": "node-terminal",
@@ -23,6 +24,7 @@
"uriFormat": "%s",
"action": "debugWithEdge"
}
}
},
]
}

View File

@@ -1,32 +0,0 @@
# Base stage for both dev and prod.
# Installs dependencies from the yarn lockfile so every later stage is reproducible.
FROM node:21-alpine AS base
WORKDIR /app
COPY rnd/autogpt_builder/package.json rnd/autogpt_builder/yarn.lock ./
RUN yarn install --frozen-lockfile

# Dev stage: runs the Next.js dev server with live reload.
FROM base AS dev
ENV NODE_ENV=development
COPY rnd/autogpt_builder/ .
EXPOSE 3000
CMD ["yarn", "run", "dev"]

# Build stage for prod: produces the optimized .next output.
FROM base AS build
COPY rnd/autogpt_builder/ .
# Use yarn consistently with the lockfile-based install above (was: npm run build,
# which ignored the yarn.lock and could resolve different dependency versions).
RUN yarn run build

# Prod stage: fresh image containing only the built app and its dependencies.
FROM node:21-alpine AS prod
ENV NODE_ENV=production
WORKDIR /app
COPY --from=build /app/package.json /app/yarn.lock ./
RUN yarn install --frozen-lockfile
COPY --from=build /app/.next ./.next
COPY --from=build /app/public ./public
COPY --from=build /app/next.config.mjs ./next.config.mjs
EXPOSE 3000
# Match the install tool (was: npm start).
CMD ["yarn", "start"]

View File

@@ -2,19 +2,7 @@ This is the frontend for AutoGPT's next generation
## Getting Started
Run the following installation once.
```bash
npm install
# or
yarn install
# or
pnpm install
# or
bun install
```
Next, run the development server:
First, run the development server:
```bash
npm run dev
@@ -30,12 +18,8 @@ Open [http://localhost:3000](http://localhost:3000) with your browser to see the
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`.
If the project is updated via git, you will need to `npm install` after each update.
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
## Deploy
## Deploy
TODO
TODO

View File

@@ -14,4 +14,4 @@
"components": "@/components",
"utils": "@/lib/utils"
}
}
}

View File

@@ -1,31 +1,22 @@
import dotenv from "dotenv";
import dotenv from 'dotenv';
// Load environment variables
dotenv.config();
/** @type {import('next').NextConfig} */
const nextConfig = {
env: {
NEXT_PUBLIC_AGPT_SERVER_URL: process.env.NEXT_PUBLIC_AGPT_SERVER_URL,
NEXT_PUBLIC_AGPT_MARKETPLACE_URL:
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL,
},
images: {
domains: ["images.unsplash.com"],
},
async redirects() {
return [
{
source: "/monitor", // FIXME: Remove after 2024-09-01
destination: "/",
permanent: false,
},
];
},
// TODO: Re-enable TypeScript checks once current issues are resolved
typescript: {
ignoreBuildErrors: true,
},
env: {
AGPT_SERVER_URL: process.env.AGPT_SERVER_URL,
},
async redirects() {
return [
{
source: '/',
destination: '/build',
permanent: false,
},
];
},
};
export default nextConfig;

View File

@@ -6,35 +6,21 @@
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"format": "prettier --write ."
"lint": "next lint"
},
"dependencies": {
"@hookform/resolvers": "^3.9.0",
"@next/third-parties": "^14.2.5",
"@radix-ui/react-avatar": "^1.1.0",
"@radix-ui/react-checkbox": "^1.1.1",
"@radix-ui/react-collapsible": "^1.1.0",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-popover": "^1.1.1",
"@radix-ui/react-scroll-area": "^1.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-separator": "^1.1.0",
"@radix-ui/react-slot": "^1.1.0",
"@radix-ui/react-switch": "^1.1.0",
"@radix-ui/react-toast": "^1.2.1",
"@radix-ui/react-tooltip": "^1.1.2",
"@supabase/ssr": "^0.4.0",
"@supabase/supabase-js": "^2.45.0",
"@tanstack/react-table": "^8.20.5",
"@xyflow/react": "^12.1.0",
"ajv": "^8.17.1",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "1.0.0",
"date-fns": "^3.6.0",
"dotenv": "^16.4.5",
"lucide-react": "^0.407.0",
@@ -45,14 +31,12 @@
"react-day-picker": "^8.10.1",
"react-dom": "^18",
"react-hook-form": "^7.52.1",
"react-icons": "^5.2.1",
"react-markdown": "^9.0.1",
"react-modal": "^3.16.1",
"react-shepherd": "^6.1.1",
"reactflow": "^11.11.4",
"recharts": "^2.12.7",
"tailwind-merge": "^2.3.0",
"tailwindcss-animate": "^1.0.7",
"uuid": "^10.0.0",
"zod": "^3.23.8"
},
"devDependencies": {
@@ -63,8 +47,6 @@
"eslint": "^8",
"eslint-config-next": "14.2.4",
"postcss": "^8",
"prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.6",
"tailwindcss": "^3.4.1",
"typescript": "^5"
}

View File

@@ -1,18 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import React from "react";
function AdminDashboard() {
return (
<div>
<h1>Admin Dashboard</h1>
{/* Add your admin-only content here */}
</div>
);
}
// Server component entry point for the admin dashboard route.
// Wraps the static dashboard in a role check so only users with the
// "admin" role can render it (withRoleAccess semantics — defined elsewhere).
export default async function AdminDashboardPage() {
  "use server";
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedAdminDashboard = await withAdminAccess(AdminDashboard);
  return <ProtectedAdminDashboard />;
}

View File

@@ -1,100 +0,0 @@
"use client";
import { useState } from "react";
import Link from "next/link";
import { BinaryIcon, XIcon } from "lucide-react";
import { usePathname } from "next/navigation"; // Add this import
// Admin navigation entries. `href` must match each tab's route: it is compared
// against the current pathname below to derive the initially active tab.
const tabs = [
  { name: "Dashboard", href: "/admin/dashboard" },
  { name: "Marketplace", href: "/admin/marketplace" },
  { name: "Users", href: "/admin/users" },
  { name: "Settings", href: "/admin/settings" },
];
// Layout shared by all /admin/* pages: a responsive top nav (tab bar on
// desktop, hamburger menu on mobile) above the routed page content.
export default function AdminLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  const pathname = usePathname(); // Get the current pathname
  const [activeTab, setActiveTab] = useState(() => {
    // Set active tab based on the current route; fall back to the first tab
    // when the pathname matches none of them.
    return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name;
  });
  const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
  return (
    <div className="min-h-screen bg-gray-100">
      <nav className="bg-white shadow-sm">
        <div className="max-w-10xl mx-auto px-4 sm:px-6 lg:px-8">
          <div className="flex h-16 items-center justify-between">
            <div className="flex items-center">
              <div className="flex-shrink-0">
                <h1 className="text-xl font-bold">Admin Panel</h1>
              </div>
              {/* Desktop tab bar (hidden on small screens) */}
              <div className="hidden sm:ml-6 sm:flex sm:space-x-8">
                {tabs.map((tab) => (
                  <Link
                    key={tab.name}
                    href={tab.href}
                    className={`${
                      activeTab === tab.name
                        ? "border-indigo-500 text-indigo-600"
                        : "border-transparent text-gray-500 hover:border-gray-300 hover:text-gray-700"
                    } inline-flex items-center border-b-2 px-1 pt-1 text-sm font-medium`}
                    onClick={() => setActiveTab(tab.name)}
                  >
                    {tab.name}
                  </Link>
                ))}
              </div>
            </div>
            {/* Mobile menu toggle button */}
            <div className="sm:hidden">
              <button
                type="button"
                className="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-100 hover:text-gray-500 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500"
                onClick={() => setMobileMenuOpen(!mobileMenuOpen)}
              >
                <span className="sr-only">Open main menu</span>
                {mobileMenuOpen ? (
                  <XIcon className="block h-6 w-6" aria-hidden="true" />
                ) : (
                  <BinaryIcon className="block h-6 w-6" aria-hidden="true" />
                )}
              </button>
            </div>
          </div>
        </div>
        {/* Mobile dropdown menu; selecting a tab also closes the menu */}
        {mobileMenuOpen && (
          <div className="sm:hidden">
            <div className="space-y-1 pb-3 pt-2">
              {tabs.map((tab) => (
                <Link
                  key={tab.name}
                  href={tab.href}
                  className={`${
                    activeTab === tab.name
                      ? "border-indigo-500 bg-indigo-50 text-indigo-700"
                      : "border-transparent text-gray-600 hover:border-gray-300 hover:bg-gray-50 hover:text-gray-800"
                  } block border-l-4 py-2 pl-3 pr-4 text-base font-medium`}
                  onClick={() => {
                    setActiveTab(tab.name);
                    setMobileMenuOpen(false);
                  }}
                >
                  {tab.name}
                </Link>
              ))}
            </div>
          </div>
        )}
      </nav>
      <main className="py-10">
        <div className="mx-auto max-w-7xl px-4 sm:px-6 lg:px-8">{children}</div>
      </main>
    </div>
  );
}

View File

@@ -1,25 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import React from "react";
import { getReviewableAgents } from "@/components/admin/marketplace/actions";
import AdminMarketplaceAgentList from "@/components/admin/marketplace/AdminMarketplaceAgentList";
import AdminFeaturedAgentsControl from "@/components/admin/marketplace/AdminFeaturedAgentsControl";
import { Separator } from "@/components/ui/separator";
// Admin marketplace view: agents awaiting review, plus the featured-agents
// controls. Server component — fetches review data directly.
async function AdminMarketplace() {
  const reviewableAgents = await getReviewableAgents();
  return (
    <>
      <AdminMarketplaceAgentList agents={reviewableAgents.agents} />
      <Separator className="my-4" />
      <AdminFeaturedAgentsControl className="mt-4" />
    </>
  );
}
// Server component entry point for the admin marketplace route.
// Renamed from the copy-pasted `AdminDashboardPage` to match this route;
// safe because the function is only referenced as the default export.
export default async function AdminMarketplacePage() {
  "use server";
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedAdminMarketplace = await withAdminAccess(AdminMarketplace);
  return <ProtectedAdminMarketplace />;
}

View File

@@ -1,18 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import React from "react";
function AdminSettings() {
return (
<div>
<h1>Admin Settings</h1>
{/* Add your admin-only settings content here */}
</div>
);
}
// Server component entry point for the admin settings route.
// Only users with the "admin" role can render the wrapped view.
export default async function AdminSettingsPage() {
  "use server";
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedAdminSettings = await withAdminAccess(AdminSettings);
  return <ProtectedAdminSettings />;
}

View File

@@ -1,18 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import React from "react";
function AdminUsers() {
return (
<div>
<h1>Users Dashboard</h1>
{/* Add your admin-only content here */}
</div>
);
}
// Server component entry point for the admin users route.
// Only users with the "admin" role can render the wrapped view.
export default async function AdminUsersPage() {
  "use server";
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedAdminUsers = await withAdminAccess(AdminUsers);
  return <ProtectedAdminUsers />;
}

View File

@@ -1,36 +0,0 @@
"use client";
import { useEffect, useState } from "react";
// Displays auth error details passed back in the URL fragment
// (#error=...&error_code=...&error_description=...). Client component
// because the fragment is only visible in the browser.
export default function AuthErrorPage() {
  const [errorType, setErrorType] = useState<string | null>(null);
  const [errorCode, setErrorCode] = useState<string | null>(null);
  const [errorDescription, setErrorDescription] = useState<string | null>(null);

  useEffect(() => {
    // This code only runs on the client side
    if (typeof window !== "undefined") {
      const hash = window.location.hash.substring(1); // Remove the leading '#'
      const params = new URLSearchParams(hash);
      setErrorType(params.get("error"));
      setErrorCode(params.get("error_code"));
      setErrorDescription(
        params.get("error_description")?.replace(/\+/g, " ") ?? null,
      ); // Replace '+' with space
    }
  }, []);

  // Nothing parsed yet: either the first render pass or the fragment
  // carried no error fields at all.
  if (!errorType && !errorCode && !errorDescription) {
    return <div>Loading...</div>;
  }

  return (
    <div>
      <h1>Authentication Error</h1>
      {errorType && <p>Error Type: {errorType}</p>}
      {errorCode && <p>Error Code: {errorCode}</p>}
      {errorDescription && <p>Error Description: {errorDescription}</p>}
    </div>
  );
}

View File

@@ -1,36 +0,0 @@
import { NextResponse } from "next/server";
import { createServerClient } from "@/lib/supabase/server";
// Handle the callback to complete the user session login
// OAuth callback route: exchanges the one-time `code` query param for a
// Supabase session, then redirects to `next` (default /profile).
// Falls through to the auth error page on any failure.
export async function GET(request: Request) {
  const { searchParams, origin } = new URL(request.url);
  const code = searchParams.get("code");
  // if "next" is in param, use it as the redirect URL
  const next = searchParams.get("next") ?? "/profile";

  if (code) {
    const supabase = createServerClient();
    if (!supabase) {
      // Supabase is not configured — bail out to the generic error page.
      return NextResponse.redirect(`${origin}/error`);
    }
    const { data, error } = await supabase.auth.exchangeCodeForSession(code);
    // data.session?.refresh_token is available if you need to store it for later use
    if (!error) {
      const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer
      const isLocalEnv = process.env.NODE_ENV === "development";
      if (isLocalEnv) {
        // we can be sure that there is no load balancer in between, so no need to watch for X-Forwarded-Host
        return NextResponse.redirect(`${origin}${next}`);
      } else if (forwardedHost) {
        // Behind a proxy: redirect to the host the client originally hit.
        return NextResponse.redirect(`https://${forwardedHost}${next}`);
      } else {
        return NextResponse.redirect(`${origin}${next}`);
      }
    }
  }
  // return the user to an error page with instructions
  return NextResponse.redirect(`${origin}/auth/auth-code-error`);
}

View File

@@ -1,33 +0,0 @@
import { type EmailOtpType } from "@supabase/supabase-js";
import { type NextRequest } from "next/server";
import { redirect } from "next/navigation";
import { createServerClient } from "@/lib/supabase/server";
// Email confirmation route
// Verifies an email OTP link (`token_hash` + `type` query params) with
// Supabase, then redirects to `next` (default "/") on success or to
// /error on any failure.
export async function GET(request: NextRequest) {
  const { searchParams } = new URL(request.url);
  const token_hash = searchParams.get("token_hash");
  const type = searchParams.get("type") as EmailOtpType | null;
  const next = searchParams.get("next") ?? "/";

  if (token_hash && type) {
    const supabase = createServerClient();
    if (!supabase) {
      // Supabase is not configured — bail out to the generic error page.
      redirect("/error");
    }
    const { error } = await supabase.auth.verifyOtp({
      type,
      token_hash,
    });
    if (!error) {
      // redirect user to specified redirect URL or root of app
      redirect(next);
    }
  }
  // redirect the user to an error page with some instructions
  redirect("/error");
}

View File

@@ -1,5 +1,5 @@
"use client";
import Image from "next/image";
import { useSearchParams } from "next/navigation";
import FlowEditor from '@/components/Flow';
@@ -7,10 +7,38 @@ export default function Home() {
const query = useSearchParams();
return (
<FlowEditor
className="flow-container w-full min-h-[86vh] border border-gray-300 dark:border-gray-700 rounded-lg"
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
template={!!query.get("templateID")}
/>
<div className="flex flex-col items-center min-h-screen">
<div className="z-10 w-full flex items-center justify-between font-mono text-sm relative">
<p className="border border-gray-600 rounded-xl pb-4 pt-4 p-4">
Get started by adding a&nbsp;
<code className="font-mono font-bold">block</code>
</p>
<div className="absolute top-0 right-0 p-4">
<a
className="pointer-events-auto flex place-items-center gap-2"
href="https://news.agpt.co/"
target="_blank"
rel="noopener noreferrer"
>
By{" "}
<Image
src="/AUTOgpt_Logo_dark.png"
alt="AutoGPT Logo"
width={100}
height={24}
priority
/>
</a>
</div>
</div>
<div className="w-full flex justify-center mt-10">
<FlowEditor
className="flow-container w-full min-h-[75vh] border border-gray-300 dark:border-gray-700 rounded-lg"
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
template={!!query.get("templateID")}
/>
</div>
</div>
);
}

View File

@@ -1,43 +0,0 @@
"use client";
import { useEffect } from "react";
import { IconCircleAlert } from "@/components/ui/icons";
import { Button } from "@/components/ui/button";
import Link from "next/link";
// Error boundary UI (Next.js `error` file convention, per the
// `{ error, reset }` prop shape): shown when rendering below this segment
// throws. `reset` re-attempts rendering the failed segment.
export default function Error({
  error,
  reset,
}: {
  error: Error & { digest?: string };
  reset: () => void;
}) {
  useEffect(() => {
    // Log the error so it is visible in the browser console for debugging.
    console.error(error);
  }, [error]);

  return (
    <div className="fixed inset-0 flex items-center justify-center bg-background">
      <div className="w-full max-w-md px-4 text-center sm:px-6">
        <div className="mx-auto flex size-12 items-center justify-center rounded-full bg-muted">
          <IconCircleAlert className="size-10" />
        </div>
        <h1 className="mt-8 text-2xl font-bold tracking-tight text-foreground">
          Oops, something went wrong!
        </h1>
        <p className="mt-4 text-muted-foreground">
          We&apos;re sorry, but an unexpected error has occurred. Please try
          again later or contact support if the issue persists.
        </p>
        <div className="mt-6 flex flex-row justify-center gap-4">
          <Button onClick={reset} variant="outline">
            Retry
          </Button>
          <Button>
            <Link href="/">Go to Homepage</Link>
          </Button>
        </div>
      </div>
    </div>
  );
}

View File

@@ -7,69 +7,3 @@
text-wrap: balance;
}
}
/* Theme tokens for light (:root) and dark (.dark) modes. Values are bare HSL
   component triples — presumably consumed as hsl(var(--token)) by the Tailwind
   config (e.g. the bg-background/text-foreground utilities below); confirm
   against tailwind.config. */
@layer base {
  :root {
    --background: 0 0% 100%;
    --foreground: 240 10% 3.9%;
    --card: 0 0% 100%;
    --card-foreground: 240 10% 3.9%;
    --popover: 0 0% 100%;
    --popover-foreground: 240 10% 3.9%;
    --primary: 240 5.9% 10%;
    --primary-foreground: 0 0% 98%;
    --secondary: 240 4.8% 95.9%;
    --secondary-foreground: 240 5.9% 10%;
    --muted: 240 4.8% 95.9%;
    --muted-foreground: 240 3.8% 46.1%;
    --accent: 240 4.8% 95.9%;
    --accent-foreground: 240 5.9% 10%;
    --destructive: 0 84.2% 60.2%;
    --destructive-foreground: 0 0% 98%;
    --border: 240 5.9% 90%;
    --input: 240 5.9% 90%;
    --ring: 240 5.9% 10%;
    --radius: 0.5rem;
    --chart-1: 12 76% 61%;
    --chart-2: 173 58% 39%;
    --chart-3: 197 37% 24%;
    --chart-4: 43 74% 66%;
    --chart-5: 27 87% 67%;
  }
  /* Dark-mode overrides, activated by a .dark class on an ancestor. */
  .dark {
    --background: 240 10% 3.9%;
    --foreground: 0 0% 98%;
    --card: 240 10% 3.9%;
    --card-foreground: 0 0% 98%;
    --popover: 240 10% 3.9%;
    --popover-foreground: 0 0% 98%;
    --primary: 0 0% 98%;
    --primary-foreground: 240 5.9% 10%;
    --secondary: 240 3.7% 15.9%;
    --secondary-foreground: 0 0% 98%;
    --muted: 240 3.7% 15.9%;
    --muted-foreground: 240 5% 64.9%;
    --accent: 240 3.7% 15.9%;
    --accent-foreground: 0 0% 98%;
    --destructive: 0 62.8% 30.6%;
    --destructive-foreground: 0 0% 98%;
    --border: 240 3.7% 15.9%;
    --input: 240 3.7% 15.9%;
    --ring: 240 4.9% 83.9%;
    --chart-1: 220 70% 50%;
    --chart-2: 160 60% 45%;
    --chart-3: 30 80% 55%;
    --chart-4: 280 65% 60%;
    --chart-5: 340 75% 55%;
  }
}

/* Apply the token-driven defaults globally. */
@layer base {
  * {
    @apply border-border;
  }
  body {
    @apply bg-background text-foreground;
  }
}

View File

@@ -1,14 +1,18 @@
import React from "react";
import React from 'react';
import type { Metadata } from "next";
import { ThemeProvider as NextThemeProvider } from "next-themes";
import { type ThemeProviderProps } from "next-themes/dist/types";
import { Inter } from "next/font/google";
import { Providers } from "@/app/providers";
import { NavBar } from "@/components/NavBar";
import { cn } from "@/lib/utils";
import Link from "next/link";
import { CubeIcon, Pencil1Icon, ReaderIcon, TimerIcon } from "@radix-ui/react-icons";
import "./globals.css";
import TallyPopupSimple from "@/components/TallyPopup";
import { GoogleAnalytics } from "@next/third-parties/google";
import { Toaster } from "@/components/ui/toaster";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import { Button, buttonVariants } from "@/components/ui/button";
import {
DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger
} from "@/components/ui/dropdown-menu";
const inter = Inter({ subsets: ["latin"] });
@@ -17,6 +21,39 @@ export const metadata: Metadata = {
description: "Your one stop shop to creating AI Agents",
};
function ThemeProvider({ children, ...props }: ThemeProviderProps) {
return <NextThemeProvider {...props}>{children}</NextThemeProvider>
}
const NavBar = () => (
<nav className="bg-white dark:bg-slate-800 p-4 flex justify-between items-center shadow">
<div className="flex space-x-4">
<Link href="/monitor" className={buttonVariants({ variant: "ghost" })}>
<TimerIcon className="mr-1" /> Monitor
</Link>
<Link href="/build" className={buttonVariants({ variant: "ghost" })}>
<Pencil1Icon className="mr-1" /> Build
</Link>
</div>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" className="h-8 w-8 rounded-full">
<Avatar>
<AvatarImage src="https://github.com/shadcn.png" alt="@shadcn" />
<AvatarFallback>CN</AvatarFallback>
</Avatar>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuItem>Profile</DropdownMenuItem>
<DropdownMenuItem>Settings</DropdownMenuItem>
<DropdownMenuItem>Switch Workspace</DropdownMenuItem>
<DropdownMenuItem>Log out</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</nav>
);
export default function RootLayout({
children,
}: Readonly<{
@@ -24,26 +61,20 @@ export default function RootLayout({
}>) {
return (
<html lang="en">
<body className={cn("antialiased transition-colors", inter.className)}>
<Providers
<body className={inter.className}>
<ThemeProvider
attribute="class"
defaultTheme="light"
// Feel free to remove this line if you want to use the system theme by default
// enableSystem
disableTransitionOnChange
>
<div className="flex min-h-screen flex-col">
<div className="min-h-screen bg-gray-200 text-gray-900">
<NavBar />
<main className="flex-1 overflow-hidden p-4">{children}</main>
<TallyPopupSimple />
<main className="mx-auto p-4">
{children}
</main>
</div>
<Toaster />
</Providers>
</ThemeProvider>
</body>
<GoogleAnalytics
gaId={process.env.GA_MEASUREMENT_ID || "G-FH2XK2W4GN"} // This is the measurement Id for the Google Analytics dev project
/>
</html>
);
}

View File

@@ -1,21 +0,0 @@
import AgentFlowListSkeleton from "@/components/monitor/skeletons/AgentFlowListSkeleton";
import React from "react";
import FlowRunsListSkeleton from "@/components/monitor/skeletons/FlowRunsListSkeleton";
import FlowRunsStatusSkeleton from "@/components/monitor/skeletons/FlowRunsStatusSkeleton";
// Loading placeholder for the monitor page: three skeleton panels
// (agents, runs, stats) laid out in the same grid as the real page.
export default function MonitorLoadingSkeleton() {
  return (
    <div className="space-y-4 p-4">
      <div className="grid grid-cols-1 gap-4 md:grid-cols-3">
        {/* Agents Section */}
        <AgentFlowListSkeleton />
        {/* Runs Section */}
        <FlowRunsListSkeleton />
        {/* Stats Section */}
        <FlowRunsStatusSkeleton />
      </div>
    </div>
  );
}

View File

@@ -1,54 +0,0 @@
"use server";
import { revalidatePath } from "next/cache";
import { redirect } from "next/navigation";
import { createServerClient } from "@/lib/supabase/server";
import { z } from "zod";
// Server-side validation schema shared by the login and signup actions below.
// NOTE(review): the client form schema uses the same email/password limits
// (plus an agreeToTerms flag) — keep the two in sync.
const loginFormSchema = z.object({
  email: z.string().email().min(2).max(64),
  password: z.string().min(6).max(64),
});
/**
 * Shared implementation for the login and signup server actions: runs the
 * selected Supabase auth call, persists the resulting session, revalidates
 * the layout cache, and redirects to /profile. On auth failure it returns
 * the provider's error message instead of redirecting.
 */
async function authenticate(
  values: z.infer<typeof loginFormSchema>,
  action: "signInWithPassword" | "signUp",
) {
  const supabase = createServerClient();
  if (!supabase) {
    redirect("/error");
  }

  // We are sure that the values are of the correct type because zod validates the form
  const { data, error } =
    action === "signInWithPassword"
      ? await supabase.auth.signInWithPassword(values)
      : await supabase.auth.signUp(values);

  if (error) {
    return error.message;
  }

  if (data.session) {
    await supabase.auth.setSession(data.session);
  }

  revalidatePath("/", "layout");
  redirect("/profile");
}

/** Server action: sign an existing user in with email + password. */
export async function login(values: z.infer<typeof loginFormSchema>) {
  return authenticate(values, "signInWithPassword");
}

/** Server action: register a new user with email + password. */
export async function signup(values: z.infer<typeof loginFormSchema>) {
  return authenticate(values, "signUp");
}

View File

@@ -1,234 +0,0 @@
"use client";
import useUser from "@/hooks/useUser";
import { login, signup } from "./actions";
import { Button } from "@/components/ui/button";
import {
Form,
FormControl,
FormDescription,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { useForm } from "react-hook-form";
import { Input } from "@/components/ui/input";
import { z } from "zod";
import { zodResolver } from "@hookform/resolvers/zod";
import { PasswordInput } from "@/components/PasswordInput";
import { FaGoogle, FaGithub, FaDiscord, FaSpinner } from "react-icons/fa";
import { useState } from "react";
import { useSupabase } from "@/components/SupabaseProvider";
import { useRouter } from "next/navigation";
import Link from "next/link";
import { Checkbox } from "@/components/ui/checkbox";
// Client-side validation rules for the combined login/signup form.
// Email/password limits mirror the server action's schema; agreeToTerms
// must be checked before the form is considered valid.
const loginFormSchema = z.object({
  email: z.string().email().min(2).max(64),
  password: z.string().min(6).max(64),
  agreeToTerms: z.boolean().refine((value) => value === true, {
    message: "You must agree to the Terms of Service and Privacy Policy",
  }),
});
// Combined login / signup page. Offers OAuth (Google/GitHub/Discord) and
// email+password auth; authenticated users are redirected to /profile.
export default function LoginPage() {
  const { supabase, isLoading: isSupabaseLoading } = useSupabase();
  const { user, isLoading: isUserLoading } = useUser();
  const [feedback, setFeedback] = useState<string | null>(null);
  const router = useRouter();
  const [isLoading, setIsLoading] = useState(false);
  const form = useForm<z.infer<typeof loginFormSchema>>({
    resolver: zodResolver(loginFormSchema),
    defaultValues: {
      email: "",
      password: "",
      agreeToTerms: false,
    },
  });
  // NOTE(review): router.push during render is a side effect; React prefers
  // navigation inside a useEffect. The spinner branch below keeps the UI
  // stable while the redirect happens.
  if (user) {
    console.log("User exists, redirecting to profile");
    router.push("/profile");
  }
  if (isUserLoading || isSupabaseLoading || user) {
    return (
      <div className="flex h-[80vh] items-center justify-center">
        <FaSpinner className="mr-2 h-16 w-16 animate-spin" />
      </div>
    );
  }
  if (!supabase) {
    return (
      <div>
        User accounts are disabled because Supabase client is unavailable
      </div>
    );
  }
  // Starts an OAuth flow; on error the message is surfaced via `feedback`.
  // `supabase!` is safe here: the component returned early above when the
  // client was unavailable.
  async function handleSignInWithProvider(
    provider: "google" | "github" | "discord",
  ) {
    const { data, error } = await supabase!.auth.signInWithOAuth({
      provider: provider,
      options: {
        redirectTo:
          // NOTE(review): this is a client component, so a non-NEXT_PUBLIC_
          // env var is likely undefined in the browser bundle and the
          // localhost fallback will always be used — verify.
          process.env.AUTH_CALLBACK_URL ??
          `http://localhost:3000/auth/callback`,
      },
    });
    if (!error) {
      setFeedback(null);
      return;
    }
    setFeedback(error.message);
  }
  // Submits validated credentials to the `login` server action; a truthy
  // return value is an error message to display.
  const onLogin = async (data: z.infer<typeof loginFormSchema>) => {
    setIsLoading(true);
    const error = await login(data);
    setIsLoading(false);
    if (error) {
      setFeedback(error);
      return;
    }
    setFeedback(null);
  };
  // Sign-up shares the form; re-trigger validation first since the button is
  // type="button" and bypasses the form's native submit validation.
  const onSignup = async (data: z.infer<typeof loginFormSchema>) => {
    if (await form.trigger()) {
      setIsLoading(true);
      const error = await signup(data);
      setIsLoading(false);
      if (error) {
        setFeedback(error);
        return;
      }
      setFeedback(null);
    }
  };
  return (
    <div className="flex h-[80vh] items-center justify-center">
      <div className="w-full max-w-md space-y-6 rounded-lg p-8 shadow-md">
        <div className="mb-6 space-y-2">
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("google")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaGoogle className="mr-2 h-4 w-4" />
            Sign in with Google
          </Button>
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("github")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaGithub className="mr-2 h-4 w-4" />
            Sign in with GitHub
          </Button>
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("discord")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaDiscord className="mr-2 h-4 w-4" />
            Sign in with Discord
          </Button>
        </div>
        <Form {...form}>
          <form onSubmit={form.handleSubmit(onLogin)}>
            <FormField
              control={form.control}
              name="email"
              render={({ field }) => (
                <FormItem className="mb-4">
                  <FormLabel>Email</FormLabel>
                  <FormControl>
                    <Input placeholder="user@email.com" {...field} />
                  </FormControl>
                  <FormMessage />
                </FormItem>
              )}
            />
            <FormField
              control={form.control}
              name="password"
              render={({ field }) => (
                <FormItem>
                  <FormLabel>Password</FormLabel>
                  <FormControl>
                    <PasswordInput placeholder="password" {...field} />
                  </FormControl>
                  <FormDescription>
                    Password needs to be at least 6 characters long
                  </FormDescription>
                  <FormMessage />
                </FormItem>
              )}
            />
            <FormField
              control={form.control}
              name="agreeToTerms"
              render={({ field }) => (
                <FormItem className="mt-4 flex flex-row items-start space-x-3 space-y-0">
                  <FormControl>
                    <Checkbox
                      checked={field.value}
                      onCheckedChange={field.onChange}
                    />
                  </FormControl>
                  <div className="space-y-1 leading-none">
                    <FormLabel>
                      I agree to the{" "}
                      <Link href="/terms-of-service" className="underline">
                        Terms of Service
                      </Link>{" "}
                      and{" "}
                      <Link
                        href="https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984"
                        className="underline"
                      >
                        Privacy Policy
                      </Link>
                    </FormLabel>
                    <FormMessage />
                  </div>
                </FormItem>
              )}
            />
            <div className="mb-6 mt-6 flex w-full space-x-4">
              <Button
                className="flex w-1/2 justify-center"
                type="submit"
                disabled={isLoading}
              >
                Log in
              </Button>
              <Button
                className="flex w-1/2 justify-center"
                variant="outline"
                type="button"
                onClick={form.handleSubmit(onSignup)}
                disabled={isLoading}
              >
                Sign up
              </Button>
            </div>
          </form>
          <p className="text-sm text-red-500">{feedback}</p>
        </Form>
      </div>
    </div>
  );
}

View File

@@ -1,41 +0,0 @@
import { Suspense } from "react";
import { notFound } from "next/navigation";
import MarketplaceAPI from "@/lib/marketplace-api";
import { AgentDetailResponse } from "@/lib/marketplace-api";
import AgentDetailContent from "@/components/marketplace/AgentDetailContent";
/**
 * Fetch the full details of a marketplace agent by ID.
 * Logs progress to the server console and rethrows any fetch error so the
 * page component can decide how to handle it.
 */
async function getAgentDetails(id: string): Promise<AgentDetailResponse> {
  const baseUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
    "http://localhost:8001/api/v1/market";
  const client = new MarketplaceAPI(baseUrl);
  try {
    console.log(`Fetching agent details for id: ${id}`);
    const details = await client.getAgentDetails(id);
    console.log(`Agent details fetched:`, details);
    return details;
  } catch (err) {
    console.error(`Error fetching agent details:`, err);
    throw err;
  }
}
export default async function AgentDetailPage({
params,
}: {
params: { id: string };
}) {
let agent: AgentDetailResponse;
try {
agent = await getAgentDetails(params.id);
} catch (error) {
return notFound();
}
return (
<Suspense fallback={<div>Loading...</div>}>
<AgentDetailContent agent={agent} />
</Suspense>
);
}

View File

@@ -1,317 +0,0 @@
"use client";
import React, { useEffect, useMemo, useState, useCallback } from "react";
import { useRouter } from "next/navigation";
import Image from "next/image";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import MarketplaceAPI, {
AgentResponse,
AgentListResponse,
AgentWithRank,
} from "@/lib/marketplace-api";
import {
ChevronLeft,
ChevronRight,
PlusCircle,
Search,
Star,
} from "lucide-react";
// Utility Functions

/**
 * Wrap `func` so that rapid successive calls collapse into one: every call
 * resets a timer, and `func` only runs after `wait` ms of silence with the
 * arguments of the most recent call. Return values of `func` are discarded.
 */
function debounce<T extends (...args: any[]) => any>(
  func: T,
  wait: number,
): (...args: Parameters<T>) => void {
  let pending: ReturnType<typeof setTimeout> | null = null;
  return (...callArgs: Parameters<T>) => {
    if (pending !== null) {
      clearTimeout(pending);
    }
    pending = setTimeout(() => func(...callArgs), wait);
  };
}
// Types
// An agent as listed by the marketplace; search results come back as
// AgentWithRank (same shape plus a relevance `rank`).
type Agent = AgentResponse | AgentWithRank;
// Components
// Marketplace hero banner: title, tagline, background image, and a
// "Submit Agent" button that navigates to the submission form.
const HeroSection: React.FC = () => {
  const router = useRouter();
  return (
    <div className="relative bg-indigo-600 py-6">
      <div className="absolute inset-0 z-0">
        <Image
          src="https://images.unsplash.com/photo-1562408590-e32931084e23?auto=format&fit=crop&w=2070&q=80"
          alt="Marketplace background"
          layout="fill"
          objectFit="cover"
          quality={75}
          priority
          className="opacity-20"
        />
        <div
          className="absolute inset-0 bg-indigo-600 mix-blend-multiply"
          aria-hidden="true"
        ></div>
      </div>
      <div className="relative mx-auto flex max-w-7xl items-center justify-between px-4 py-4 sm:px-6 lg:px-8">
        <div>
          <h1 className="text-2xl font-extrabold tracking-tight text-white sm:text-3xl lg:text-4xl">
            AutoGPT Marketplace
          </h1>
          <p className="mt-2 max-w-3xl text-sm text-indigo-100 sm:text-base">
            Discover and share proven AI Agents to supercharge your business.
          </p>
        </div>
        <Button
          onClick={() => router.push("/marketplace/submit")}
          className="flex items-center bg-white text-indigo-600 hover:bg-indigo-50"
        >
          <PlusCircle className="mr-2 h-4 w-4" />
          Submit Agent
        </Button>
      </div>
    </div>
  );
};
// Controlled search box with a magnifier icon; the parent owns the value
// and change handling (debouncing happens in Marketplace).
const SearchInput: React.FC<{
  value: string;
  onChange: (e: React.ChangeEvent<HTMLInputElement>) => void;
}> = ({ value, onChange }) => (
  <div className="relative mb-8">
    <Input
      placeholder="Search agents..."
      type="text"
      className="w-full rounded-full border-gray-300 py-2 pl-10 pr-4 focus:border-indigo-500 focus:ring-indigo-500"
      value={value}
      onChange={onChange}
    />
    <Search
      className="absolute left-3 top-1/2 -translate-y-1/2 transform text-gray-400"
      size={20}
    />
  </div>
);
// Clickable card summarizing one agent; navigates to the agent's detail
// page. `featured` adds highlight styling and a star icon. Search results
// (AgentWithRank) additionally display their rank.
const AgentCard: React.FC<{ agent: Agent; featured?: boolean }> = ({
  agent,
  featured = false,
}) => {
  const router = useRouter();
  const handleClick = () => {
    router.push(`/marketplace/${agent.id}`);
  };
  return (
    <div
      className={`flex cursor-pointer flex-col justify-between rounded-lg border p-6 transition-colors duration-200 hover:bg-gray-50 ${featured ? "border-indigo-500 shadow-md" : "border-gray-200"}`}
      onClick={handleClick}
    >
      <div>
        <div className="mb-2 flex items-center justify-between">
          <h3 className="truncate text-lg font-semibold text-gray-900">
            {agent.name}
          </h3>
          {featured && <Star className="text-indigo-500" size={20} />}
        </div>
        <p className="mb-4 line-clamp-2 text-sm text-gray-500">
          {agent.description}
        </p>
        <div className="mb-2 text-xs text-gray-400">
          Categories: {agent.categories.join(", ")}
        </div>
      </div>
      <div className="flex items-end justify-between">
        <div className="text-xs text-gray-400">
          Updated {new Date(agent.updatedAt).toLocaleDateString()}
        </div>
        <div className="text-xs text-gray-400">Downloads {agent.downloads}</div>
        {/* "rank" only exists on AgentWithRank (search results). */}
        {"rank" in agent && (
          <div className="text-xs text-indigo-600">
            Rank: {agent.rank.toFixed(2)}
          </div>
        )}
      </div>
    </div>
  );
};
// Titled responsive grid of AgentCards; `featured` is forwarded to each card.
const AgentGrid: React.FC<{
  agents: Agent[];
  title: string;
  featured?: boolean;
}> = ({ agents, title, featured = false }) => (
  <div className="mb-12">
    <h2 className="mb-4 text-2xl font-bold text-gray-900">{title}</h2>
    <div className="grid grid-cols-1 gap-6 md:grid-cols-2 lg:grid-cols-3">
      {agents.map((agent) => (
        <AgentCard agent={agent} key={agent.id} featured={featured} />
      ))}
    </div>
  </div>
);
// Prev/next pager; buttons disable at the first and last page respectively.
const Pagination: React.FC<{
  page: number;
  totalPages: number;
  onPrevPage: () => void;
  onNextPage: () => void;
}> = ({ page, totalPages, onPrevPage, onNextPage }) => (
  <div className="mt-8 flex items-center justify-between">
    <Button
      onClick={onPrevPage}
      disabled={page === 1}
      className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
    >
      <ChevronLeft size={16} />
      <span>Previous</span>
    </Button>
    <span className="text-sm text-gray-700">
      Page {page} of {totalPages}
    </span>
    <Button
      onClick={onNextPage}
      disabled={page === totalPages}
      className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
    >
      <span>Next</span>
      <ChevronRight size={16} />
    </Button>
  </div>
);
// Main Component
// Marketplace landing page: shows featured + top-downloaded agents with
// paging, and switches to debounced search results when a query is typed.
const Marketplace: React.FC = () => {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
    "http://localhost:8001/api/v1/market";
  const api = useMemo(() => new MarketplaceAPI(apiUrl), [apiUrl]);
  const [searchValue, setSearchValue] = useState("");
  const [searchResults, setSearchResults] = useState<Agent[]>([]);
  const [featuredAgents, setFeaturedAgents] = useState<Agent[]>([]);
  const [topAgents, setTopAgents] = useState<Agent[]>([]);
  const [page, setPage] = useState(1);
  const [totalPages, setTotalPages] = useState(1);
  const [isLoading, setIsLoading] = useState(false);
  // Loads one page (9 items) of the top-downloaded list.
  const fetchTopAgents = useCallback(
    async (currentPage: number) => {
      setIsLoading(true);
      try {
        const response = await api.getTopDownloadedAgents(currentPage, 9);
        setTopAgents(response.agents);
        setTotalPages(response.total_pages);
      } catch (error) {
        console.error("Error fetching top agents:", error);
      } finally {
        setIsLoading(false);
      }
    },
    [api],
  );
  const fetchFeaturedAgents = useCallback(async () => {
    try {
      const featured = await api.getFeaturedAgents();
      setFeaturedAgents(featured.agents);
    } catch (error) {
      console.error("Error fetching featured agents:", error);
    }
  }, [api]);
  // Searches the first 30 results and keeps only positively-ranked hits.
  const searchAgents = useCallback(
    async (searchTerm: string) => {
      setIsLoading(true);
      try {
        const response = await api.searchAgents(searchTerm, 1, 30);
        const filteredAgents = response.filter((agent) => agent.rank > 0);
        setSearchResults(filteredAgents);
      } catch (error) {
        console.error("Error searching agents:", error);
      } finally {
        setIsLoading(false);
      }
    },
    [api],
  );
  // NOTE(review): the debounced async search has no cancellation, so a slow
  // earlier response can overwrite a newer one — verify this is acceptable.
  const debouncedSearch = useMemo(
    () => debounce(searchAgents, 300),
    [searchAgents],
  );
  useEffect(() => {
    if (searchValue) {
      debouncedSearch(searchValue);
    } else {
      fetchTopAgents(page);
    }
  }, [searchValue, page, debouncedSearch, fetchTopAgents]);
  useEffect(() => {
    fetchFeaturedAgents();
  }, [fetchFeaturedAgents]);
  const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    setSearchValue(e.target.value);
    setPage(1);
  };
  const handleNextPage = () => {
    if (page < totalPages) {
      setPage(page + 1);
    }
  };
  const handlePrevPage = () => {
    if (page > 1) {
      setPage(page - 1);
    }
  };
  return (
    <div className="min-h-screen bg-gray-50">
      <HeroSection />
      <div className="mx-auto max-w-7xl px-4 py-12 sm:px-6 lg:px-8">
        <SearchInput value={searchValue} onChange={handleInputChange} />
        {isLoading ? (
          <div className="py-12 text-center">
            <div className="inline-block h-8 w-8 animate-spin rounded-full border-b-2 border-gray-900"></div>
            <p className="mt-2 text-gray-600">Loading agents...</p>
          </div>
        ) : searchValue ? (
          searchResults.length > 0 ? (
            <AgentGrid agents={searchResults} title="Search Results" />
          ) : (
            <div className="py-12 text-center">
              <p className="text-gray-600">
                No agents found matching your search criteria.
              </p>
            </div>
          )
        ) : (
          <>
            {featuredAgents.length > 0 && (
              <AgentGrid
                agents={featuredAgents}
                title="Featured Agents"
                featured={true}
              />
            )}
            <AgentGrid agents={topAgents} title="Top Downloaded Agents" />
            <Pagination
              page={page}
              totalPages={totalPages}
              onPrevPage={handlePrevPage}
              onNextPage={handleNextPage}
            />
          </>
        )}
      </div>
    </div>
  );
};
export default Marketplace;

View File

@@ -1,408 +0,0 @@
"use client";
import React, { useState, useEffect, useMemo } from "react";
import { useRouter } from "next/navigation";
import { useForm, Controller } from "react-hook-form";
import MarketplaceAPI from "@/lib/marketplace-api";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
import { Card } from "@/components/ui/card";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import { Textarea } from "@/components/ui/textarea";
import { Alert, AlertTitle, AlertDescription } from "@/components/ui/alert";
import { Checkbox } from "@/components/ui/checkbox";
import {
MultiSelector,
MultiSelectorContent,
MultiSelectorInput,
MultiSelectorItem,
MultiSelectorList,
MultiSelectorTrigger,
} from "@/components/ui/multiselect";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
// Shape of the agent-submission form: which of the user's graphs to submit
// (selectedAgentId) plus the marketplace metadata attached to it.
type FormData = {
  name: string;
  description: string;
  author: string;
  keywords: string[];
  categories: string[];
  agreeToTerms: boolean;
  selectedAgentId: string;
};
// Marketplace submission page: the user picks one of their own agents,
// edits its metadata (name, description, author, keywords, categories),
// and submits it to the marketplace.
const SubmitPage: React.FC = () => {
  const router = useRouter();
  const {
    control,
    handleSubmit,
    watch,
    setValue,
    formState: { errors },
  } = useForm<FormData>({
    defaultValues: {
      selectedAgentId: "", // Initialize with an empty string
      name: "",
      description: "",
      author: "",
      keywords: [],
      categories: [],
      agreeToTerms: false,
    },
  });
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [submitError, setSubmitError] = useState<string | null>(null);
  const [userAgents, setUserAgents] = useState<
    Array<{ id: string; name: string; version: number }>
  >([]);
  const [selectedAgentGraph, setSelectedAgentGraph] = useState<any>(null);
  const selectedAgentId = watch("selectedAgentId");
  // Load the user's own agents once, to populate the selector.
  useEffect(() => {
    const fetchUserAgents = async () => {
      const api = new AutoGPTServerAPI();
      const agents = await api.listGraphs();
      console.log(agents);
      setUserAgents(
        agents.map((agent) => ({
          id: agent.id,
          name: agent.name || `Agent (${agent.id})`,
          version: agent.version,
        })),
      );
    };
    fetchUserAgents();
  }, []);
  // When an agent is selected, fetch its full graph and prefill name/description.
  useEffect(() => {
    const fetchAgentGraph = async () => {
      if (selectedAgentId) {
        const api = new AutoGPTServerAPI();
        const graph = await api.getGraph(selectedAgentId);
        setSelectedAgentGraph(graph);
        setValue("name", graph.name);
        setValue("description", graph.description);
      }
    };
    fetchAgentGraph();
  }, [selectedAgentId, setValue]);
  // Submit the selected graph with the edited metadata. All validation
  // failures must be raised inside the try block so `catch` surfaces the
  // message and `finally` re-enables the submit button.
  const onSubmit = async (data: FormData) => {
    setIsSubmitting(true);
    setSubmitError(null);
    try {
      // Moved inside try/catch: previously this threw before the try block,
      // which left isSubmitting stuck at true (button disabled forever) and
      // produced an unhandled promise rejection.
      if (!data.agreeToTerms) {
        throw new Error("You must agree to the terms of service");
      }
      if (!selectedAgentGraph) {
        throw new Error("Please select an agent");
      }
      const api = new MarketplaceAPI();
      await api.submitAgent(
        {
          ...selectedAgentGraph,
          name: data.name,
          description: data.description,
        },
        data.author,
        data.keywords,
        data.categories,
      );
      router.push("/marketplace?submission=success");
    } catch (error) {
      console.error("Submission error:", error);
      setSubmitError(
        error instanceof Error ? error.message : "An unknown error occurred",
      );
    } finally {
      setIsSubmitting(false);
    }
  };
  return (
    <div className="container mx-auto px-4 py-8">
      <h1 className="mb-6 text-3xl font-bold">Submit Your Agent</h1>
      <Card className="p-6">
        <form onSubmit={handleSubmit(onSubmit)}>
          <div className="space-y-4">
            <Controller
              name="selectedAgentId"
              control={control}
              rules={{ required: "Please select an agent" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Select Agent
                  </label>
                  <Select
                    onValueChange={field.onChange}
                    value={field.value || ""}
                  >
                    <SelectTrigger className="w-full">
                      <SelectValue placeholder="Select an agent" />
                    </SelectTrigger>
                    <SelectContent>
                      {userAgents.map((agent) => (
                        <SelectItem key={agent.id} value={agent.id}>
                          {agent.name} (v{agent.version})
                        </SelectItem>
                      ))}
                    </SelectContent>
                  </Select>
                  {errors.selectedAgentId && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.selectedAgentId.message}
                    </p>
                  )}
                </div>
              )}
            />
            {/* {selectedAgentGraph && (
              <div className="mt-4" style={{ height: "600px" }}>
                <ReactFlow
                  nodes={nodes}
                  edges={edges}
                  fitView
                  attributionPosition="bottom-left"
                  nodesConnectable={false}
                  nodesDraggable={false}
                  zoomOnScroll={false}
                  panOnScroll={false}
                  elementsSelectable={false}
                >
                  <Controls showInteractive={false} />
                  <Background />
                </ReactFlow>
              </div>
            )} */}
            <Controller
              name="name"
              control={control}
              rules={{ required: "Name is required" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Agent Name
                  </label>
                  <Input
                    id={field.name}
                    placeholder="Enter your agent's name"
                    {...field}
                  />
                  {errors.name && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.name.message}
                    </p>
                  )}
                </div>
              )}
            />
            <Controller
              name="description"
              control={control}
              rules={{ required: "Description is required" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Description
                  </label>
                  <Textarea
                    id={field.name}
                    placeholder="Describe your agent"
                    {...field}
                  />
                  {errors.description && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.description.message}
                    </p>
                  )}
                </div>
              )}
            />
            <Controller
              name="author"
              control={control}
              rules={{ required: "Author is required" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Author
                  </label>
                  <Input
                    id={field.name}
                    placeholder="Your name or username"
                    {...field}
                  />
                  {errors.author && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.author.message}
                    </p>
                  )}
                </div>
              )}
            />
            <Controller
              name="keywords"
              control={control}
              rules={{ required: "At least one keyword is required" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Keywords
                  </label>
                  <MultiSelector
                    values={field.value || []}
                    onValuesChange={field.onChange}
                  >
                    <MultiSelectorTrigger>
                      <MultiSelectorInput placeholder="Add keywords" />
                    </MultiSelectorTrigger>
                    <MultiSelectorContent>
                      <MultiSelectorList>
                        <MultiSelectorItem value="keyword1">
                          Keyword 1
                        </MultiSelectorItem>
                        <MultiSelectorItem value="keyword2">
                          Keyword 2
                        </MultiSelectorItem>
                        {/* Add more predefined keywords as needed */}
                      </MultiSelectorList>
                    </MultiSelectorContent>
                  </MultiSelector>
                  {errors.keywords && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.keywords.message}
                    </p>
                  )}
                </div>
              )}
            />
            <Controller
              name="categories"
              control={control}
              rules={{ required: "At least one category is required" }}
              render={({ field }) => (
                <div>
                  <label
                    htmlFor={field.name}
                    className="block text-sm font-medium text-gray-700"
                  >
                    Categories
                  </label>
                  <MultiSelector
                    values={field.value || []}
                    onValuesChange={field.onChange}
                  >
                    <MultiSelectorTrigger>
                      <MultiSelectorInput placeholder="Select categories" />
                    </MultiSelectorTrigger>
                    <MultiSelectorContent>
                      <MultiSelectorList>
                        <MultiSelectorItem value="productivity">
                          Productivity
                        </MultiSelectorItem>
                        <MultiSelectorItem value="entertainment">
                          Entertainment
                        </MultiSelectorItem>
                        <MultiSelectorItem value="education">
                          Education
                        </MultiSelectorItem>
                        <MultiSelectorItem value="business">
                          Business
                        </MultiSelectorItem>
                        <MultiSelectorItem value="other">
                          Other
                        </MultiSelectorItem>
                      </MultiSelectorList>
                    </MultiSelectorContent>
                  </MultiSelector>
                  {errors.categories && (
                    <p className="mt-1 text-sm text-red-600">
                      {errors.categories.message}
                    </p>
                  )}
                </div>
              )}
            />
            <Controller
              name="agreeToTerms"
              control={control}
              rules={{ required: "You must agree to the terms of service" }}
              render={({ field }) => (
                <div className="flex items-center space-x-2">
                  <Checkbox
                    id="agreeToTerms"
                    checked={field.value}
                    onCheckedChange={field.onChange}
                  />
                  <label
                    htmlFor="agreeToTerms"
                    className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
                  >
                    I agree to the{" "}
                    <a href="/terms" className="text-blue-500 hover:underline">
                      terms of service
                    </a>
                  </label>
                </div>
              )}
            />
            {errors.agreeToTerms && (
              <p className="mt-1 text-sm text-red-600">
                {errors.agreeToTerms.message}
              </p>
            )}
            {submitError && (
              <Alert variant="destructive">
                <AlertTitle>Submission Failed</AlertTitle>
                <AlertDescription>{submitError}</AlertDescription>
              </Alert>
            )}
            <Button type="submit" className="w-full" disabled={isSubmitting}>
              {isSubmitting ? "Submitting..." : "Submit Agent"}
            </Button>
          </div>
        </form>
      </Card>
    </div>
  );
};
export default SubmitPage;

View File

@@ -0,0 +1,712 @@
"use client";
import React, { useEffect, useState } from 'react';
import Link from 'next/link';
import moment from 'moment';
import {
ComposedChart,
DefaultLegendContentProps,
Legend,
Line,
ResponsiveContainer,
Scatter,
Tooltip,
XAxis,
YAxis,
} from 'recharts';
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuLabel,
DropdownMenuRadioGroup,
DropdownMenuRadioItem,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu"
import AutoGPTServerAPI, {
Graph,
GraphMeta,
NodeExecutionResult,
safeCopyGraph,
} from '@/lib/autogpt-server-api';
import { ChevronDownIcon, ClockIcon, EnterIcon, ExitIcon, Pencil2Icon } from '@radix-ui/react-icons';
import { cn, exportAsJSONFile, hashString } from '@/lib/utils';
import { Badge } from "@/components/ui/badge";
import { Button, buttonVariants } from "@/components/ui/button";
import { Calendar } from "@/components/ui/calendar";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { Dialog, DialogContent, DialogHeader, DialogTrigger } from '@/components/ui/dialog';
import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table";
import { AgentImportForm } from '@/components/agent-import-form';
// Top-level monitor view: lists agents and their runs, polls the backend
// every 5s for run updates, and shows details/stats for the current
// selection in the third column.
const Monitor = () => {
  const [flows, setFlows] = useState<GraphMeta[]>([]);
  const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
  const [selectedFlow, setSelectedFlow] = useState<GraphMeta | null>(null);
  const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
  const api = new AutoGPTServerAPI();
  useEffect(() => {
    // Braces keep the effect from returning fetchFlowsAndRuns()'s value,
    // which React would otherwise treat as a cleanup function.
    fetchFlowsAndRuns();
  }, []);
  useEffect(() => {
    // Depend on `flows`: with an empty dependency list the interval closure
    // captures the initial (empty) flow list and never refreshes any runs.
    const intervalId = setInterval(() => flows.map(f => refreshFlowRuns(f.id)), 5000);
    return () => clearInterval(intervalId);
  }, [flows]);
  function fetchFlowsAndRuns() {
    api.listGraphs()
      .then(flows => {
        setFlows(flows);
        flows.map(flow => refreshFlowRuns(flow.id));
      });
  }
  function refreshFlowRuns(flowID: string) {
    // Fetch flow run IDs
    api.listGraphRunIDs(flowID)
      .then(runIDs => runIDs.map(runID => {
        // Skip runs that are already known to be finished.
        // NOTE(review): `flowRuns` here is the render-time snapshot, so this
        // check can act on stale data and refetch finished runs; the updater
        // form of setFlowRuns below keeps the actual writes consistent.
        let run;
        if (
          (run = flowRuns.find(fr => fr.id == runID))
          && !["waiting", "running"].includes(run.status)
        ) {
          return
        }
        // Fetch flow run
        api.getGraphExecutionInfo(flowID, runID)
          .then(execInfo => setFlowRuns(flowRuns => {
            if (execInfo.length == 0) return flowRuns;
            const flowRunIndex = flowRuns.findIndex(fr => fr.id == runID);
            const flowRun = flowRunFromNodeExecutionResults(execInfo);
            if (flowRunIndex > -1) {
              flowRuns.splice(flowRunIndex, 1, flowRun)
            }
            else {
              flowRuns.push(flowRun)
            }
            return [...flowRuns]
          }));
      }));
  }
  const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
  const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
  const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
  return (
    <div className="grid grid-cols-1 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10 gap-4">
      <AgentFlowList
        className={column1}
        flows={flows}
        flowRuns={flowRuns}
        selectedFlow={selectedFlow}
        onSelectFlow={f => {
          setSelectedRun(null);
          setSelectedFlow(f.id == selectedFlow?.id ? null : f);
        }}
      />
      <FlowRunsList
        className={column2}
        flows={flows}
        runs={
          (
            selectedFlow
              ? flowRuns.filter(v => v.graphID == selectedFlow.id)
              : flowRuns
          )
          .toSorted((a, b) => Number(a.startTime) - Number(b.startTime))
        }
        selectedRun={selectedRun}
        onSelectRun={r => setSelectedRun(r.id == selectedRun?.id ? null : r)}
      />
      {/* Third column: run details > flow details > overall stats. */}
      {selectedRun && (
        <FlowRunInfo
          flow={selectedFlow || flows.find(f => f.id == selectedRun.graphID)!}
          flowRun={selectedRun}
          className={column3}
        />
      ) || selectedFlow && (
        <FlowInfo
          flow={selectedFlow}
          flowRuns={flowRuns.filter(r => r.graphID == selectedFlow.id)}
          className={column3}
        />
      ) || (
        <Card className={`p-6 ${column3}`}>
          <FlowRunsStats flows={flows} flowRuns={flowRuns} />
        </Card>
      )}
    </div>
  );
};
// Aggregate summary of one graph execution, computed from its node
// execution results by flowRunFromNodeExecutionResults().
type FlowRun = {
  id: string
  graphID: string
  graphVersion: number
  status: 'running' | 'waiting' | 'success' | 'failed'
  startTime: number // unix timestamp (ms)
  endTime: number // unix timestamp (ms)
  duration: number // seconds
  totalRunTime: number // seconds
  nodeExecutionResults: NodeExecutionResult[]
};
/**
 * Aggregate a run's node execution results into a single FlowRun summary.
 *
 * Status precedence: any FAILED node => "failed"; else any QUEUED/RUNNING
 * node => "running"; else any INCOMPLETE node => "waiting"; else "success".
 * startTime/endTime are unix ms; duration and totalRunTime are in seconds.
 */
function flowRunFromNodeExecutionResults(
  nodeExecutionResults: NodeExecutionResult[]
): FlowRun {
  // Guard: an empty result list has no graph/run IDs to aggregate from.
  // Callers filter this case already; fail loudly instead of with a TypeError.
  if (nodeExecutionResults.length == 0) {
    throw new Error("Cannot build FlowRun from empty node execution results");
  }
  // Determine overall status
  let status: 'running' | 'waiting' | 'success' | 'failed' = 'success';
  for (const execution of nodeExecutionResults) {
    if (execution.status === 'FAILED') {
      status = 'failed';
      break;
    } else if (['QUEUED', 'RUNNING'].includes(execution.status)) {
      status = 'running';
      break;
    } else if (execution.status === 'INCOMPLETE') {
      status = 'waiting';
    }
  }
  // Determine aggregate startTime, endTime, and totalRunTime
  const now = Date.now();
  const startTime = Math.min(
    ...nodeExecutionResults.map(ner => ner.add_time.getTime()), now
  );
  // Unfinished runs use "now" as a provisional end time.
  const endTime = (
    ['success', 'failed'].includes(status)
      ? Math.max(
        ...nodeExecutionResults.map(ner => ner.end_time?.getTime() || 0), startTime
      )
      : now
  );
  const duration = (endTime - startTime) / 1000; // Convert to seconds
  // Sum of per-node busy time; nodes missing start/end times contribute
  // based on "now" so still-running nodes count up to the present.
  const totalRunTime = nodeExecutionResults.reduce((cum, node) => (
    cum + ((node.end_time?.getTime() ?? now) - (node.start_time?.getTime() ?? now))
  ), 0) / 1000;
  return {
    id: nodeExecutionResults[0].graph_exec_id,
    graphID: nodeExecutionResults[0].graph_id,
    graphVersion: nodeExecutionResults[0].graph_version,
    status,
    startTime,
    endTime,
    duration,
    totalRunTime,
    nodeExecutionResults: nodeExecutionResults,
  };
}
// Agents panel: table of the user's flows (sorted by most recent run) plus a
// split "Create" button offering a blank build, a file import dialog, or a
// template to start from.
const AgentFlowList = (
  { flows, flowRuns, selectedFlow, onSelectFlow, className }: {
    flows: GraphMeta[],
    flowRuns?: FlowRun[],
    selectedFlow: GraphMeta | null,
    onSelectFlow: (f: GraphMeta) => void,
    className?: string,
  }
) => {
  const [templates, setTemplates] = useState<GraphMeta[]>([]);
  // NOTE(review): a fresh API client is constructed on every render and the
  // effect below omits it from its deps — confirm the client is stateless.
  const api = new AutoGPTServerAPI();
  useEffect(() => {
    api.listTemplates().then(templates => setTemplates(templates))
  }, []);
  return <Card className={className}>
    <CardHeader className="flex-row justify-between items-center space-x-3 space-y-0">
      <CardTitle>Agents</CardTitle>
      <div className="flex items-center">{/* Split "Create" button */}
        <Button variant="outline" className="rounded-r-none" asChild>
          <Link href="/build">Create</Link>
        </Button>
        <Dialog>{/* https://ui.shadcn.com/docs/components/dialog#notes */}
          <DropdownMenu>
            <DropdownMenuTrigger asChild>
              <Button variant="outline" className={"rounded-l-none border-l-0 px-2"}>
                <ChevronDownIcon />
              </Button>
            </DropdownMenuTrigger>
            <DropdownMenuContent>
              <DialogTrigger asChild>
                <DropdownMenuItem>
                  <EnterIcon className="mr-2" /> Import from file
                </DropdownMenuItem>
              </DialogTrigger>
              {templates.length > 0 && <>{/* List of templates */}
                <DropdownMenuSeparator />
                <DropdownMenuLabel>Use a template</DropdownMenuLabel>
                {templates.map(template => (
                  <DropdownMenuItem
                    key={template.id}
                    onClick={() => {
                      // Instantiate the template, then jump into the builder.
                      api.createGraph(template.id, template.version)
                        .then(newGraph => {
                          window.location.href = `/build?flowID=${newGraph.id}`;
                        });
                    }}
                  >
                    {template.name}
                  </DropdownMenuItem>
                ))}
              </>}
            </DropdownMenuContent>
          </DropdownMenu>
          <DialogContent>
            <DialogHeader className="text-lg">
              Import an Agent (template) from a file
            </DialogHeader>
            <AgentImportForm />
          </DialogContent>
        </Dialog>
      </div>
    </CardHeader>
    <CardContent>
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Name</TableHead>
            {/* <TableHead>Status</TableHead> */}
            {/* <TableHead>Last updated</TableHead> */}
            {flowRuns && <TableHead className="md:hidden lg:table-cell"># of runs</TableHead>}
            {flowRuns && <TableHead>Last run</TableHead>}
          </TableRow>
        </TableHeader>
        <TableBody>
          {flows
            // Annotate each flow with its run count and most recent run,
            // then sort flows with recent activity to the top.
            .map((flow) => {
              let runCount = 0, lastRun: FlowRun | null = null;
              if (flowRuns) {
                const _flowRuns = flowRuns.filter(r => r.graphID == flow.id);
                runCount = _flowRuns.length;
                lastRun = runCount == 0 ? null : _flowRuns.reduce(
                  (a, c) => a.startTime > c.startTime ? a : c
                );
              }
              return { flow, runCount, lastRun };
            })
            .sort((a, b) => {
              if (!a.lastRun && !b.lastRun) return 0;
              if (!a.lastRun) return 1;
              if (!b.lastRun) return -1;
              return b.lastRun.startTime - a.lastRun.startTime;
            })
            .map(({ flow, runCount, lastRun }) => (
              <TableRow
                key={flow.id}
                className="cursor-pointer"
                onClick={() => onSelectFlow(flow)}
                data-state={selectedFlow?.id == flow.id ? "selected" : null}
              >
                <TableCell>{flow.name}</TableCell>
                {/* <TableCell><FlowStatusBadge status={flow.status ?? "active"} /></TableCell> */}
                {/* <TableCell>
                  {flow.updatedAt ?? "???"}
                </TableCell> */}
                {flowRuns && <TableCell className="md:hidden lg:table-cell">{runCount}</TableCell>}
                {flowRuns && (!lastRun ? <TableCell /> :
                  <TableCell title={moment(lastRun.startTime).toString()}>
                    {moment(lastRun.startTime).fromNow()}
                  </TableCell>)}
              </TableRow>
            ))
          }
        </TableBody>
      </Table>
    </CardContent>
  </Card>
};
const FlowStatusBadge = ({ status }: { status: "active" | "disabled" | "failing" }) => (
<Badge
variant="default"
className={
status === 'active' ? 'bg-green-500 dark:bg-green-600' :
status === 'failing' ? 'bg-red-500 dark:bg-red-700' :
'bg-gray-500 dark:bg-gray-600'
}
>
{status}
</Badge>
);
// Table of flow runs; clicking a row toggles its selection via onSelectRun.
const FlowRunsList: React.FC<{
  flows: GraphMeta[];
  runs: FlowRun[];
  className?: string;
  selectedRun?: FlowRun | null;
  onSelectRun: (r: FlowRun) => void;
}> = ({ flows, runs, selectedRun, onSelectRun, className }) => (
  <Card className={className}>
    <CardHeader>
      <CardTitle>Runs</CardTitle>
    </CardHeader>
    <CardContent>
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Agent</TableHead>
            <TableHead>Started</TableHead>
            <TableHead>Status</TableHead>
            <TableHead>Duration</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {runs.map((run) => (
            <TableRow
              key={run.id}
              className="cursor-pointer"
              onClick={() => onSelectRun(run)}
              data-state={selectedRun?.id == run.id ? "selected" : null}
            >
              {/* Fall back to the graph ID instead of crashing when the
                  run's graph is not (yet) present in `flows`. */}
              <TableCell>{flows.find(f => f.id == run.graphID)?.name ?? run.graphID}</TableCell>
              <TableCell>{moment(run.startTime).format("HH:mm")}</TableCell>
              <TableCell><FlowRunStatusBadge status={run.status} /></TableCell>
              <TableCell>{formatDuration(run.duration)}</TableCell>
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </CardContent>
  </Card>
);
const FlowRunStatusBadge: React.FC<{
status: FlowRun['status'];
className?: string;
}> = ({ status, className }) => (
<Badge
variant="default"
className={cn(
status === 'running' ? 'bg-blue-500 dark:bg-blue-700' :
status === 'waiting' ? 'bg-yellow-500 dark:bg-yellow-600' :
status === 'success' ? 'bg-green-500 dark:bg-green-600' :
'bg-red-500 dark:bg-red-700',
className,
)}
>
{status}
</Badge>
);
// Detail card for one agent flow: name/version/ID header, a version picker
// (only shown when more than one version exists), Edit and export-to-JSON
// actions, and run statistics scoped to the selected version.
const FlowInfo: React.FC<React.HTMLAttributes<HTMLDivElement> & {
  flow: GraphMeta;
  flowRuns: FlowRun[];
  flowVersion?: number | "all";  // initial picker state; defaults to "all"
}> = ({ flow, flowRuns, flowVersion, ...props }) => {
  // NOTE(review): a new API client is constructed on every render; the
  // Monitor page memoizes its client with useMemo — confirm whether the
  // same is wanted here.
  const api = new AutoGPTServerAPI();
  const [flowVersions, setFlowVersions] = useState<Graph[] | null>(null);
  const [selectedVersion, setSelectedFlowVersion] = useState(flowVersion ?? "all");
  // Graph matching the picker; with "all" selected, the flow's current
  // version is used (e.g. for the export button).
  const selectedFlowVersion: Graph | undefined = flowVersions?.find(v => (
    v.version == (selectedVersion == "all" ? flow.version : selectedVersion)
  ));
  // Load every stored version of this flow whenever the flow ID changes.
  useEffect(() => {
    api.getGraphAllVersions(flow.id).then(result => setFlowVersions(result));
  }, [flow.id]);
  return <Card {...props}>
    <CardHeader className="flex-row justify-between space-y-0 space-x-3">
      <div>
        <CardTitle>
          {flow.name} <span className="font-light">v{flow.version}</span>
        </CardTitle>
        <p className="mt-2">Agent ID: <code>{flow.id}</code></p>
      </div>
      <div className="flex items-start space-x-2">
        {/* Version picker — hidden unless multiple versions exist */}
        {(flowVersions?.length ?? 0) > 1 &&
        <DropdownMenu>
          <DropdownMenuTrigger asChild>
            <Button variant="outline">
              <ClockIcon className="mr-2" />
              {selectedVersion == "all" ? "All versions" : `Version ${selectedVersion}`}
            </Button>
          </DropdownMenuTrigger>
          <DropdownMenuContent className="w-56">
            <DropdownMenuLabel>Choose a version</DropdownMenuLabel>
            <DropdownMenuSeparator />
            <DropdownMenuRadioGroup
              value={String(selectedVersion)}
              onValueChange={choice => setSelectedFlowVersion(
                choice == "all" ? choice : Number(choice)
              )}
            >
              <DropdownMenuRadioItem value="all">All versions</DropdownMenuRadioItem>
              {flowVersions?.map(v =>
                <DropdownMenuRadioItem key={v.version} value={v.version.toString()}>
                  Version {v.version}{v.is_active ? " (active)" : ""}
                </DropdownMenuRadioItem>
              )}
            </DropdownMenuRadioGroup>
          </DropdownMenuContent>
        </DropdownMenu>}
        <Link className={buttonVariants({ variant: "outline" })} href={`/build?flowID=${flow.id}`}>
          <Pencil2Icon className="mr-2" /> Edit
        </Link>
        {/* Export the selected (or current) version after stripping it
            through safeCopyGraph. */}
        <Button
          variant="outline"
          className="px-2.5"
          title="Export to a JSON-file"
          onClick={async () => exportAsJSONFile(
            safeCopyGraph(
              // NOTE(review): this find() re-derives selectedFlowVersion;
              // presumably equivalent to using it directly — verify.
              flowVersions!.find(v => v.version == selectedFlowVersion!.version)!,
              await api.getBlocks(),
            ),
            `${flow.name}_v${selectedFlowVersion!.version}.json`
          )}
        >
          <ExitIcon />
        </Button>
      </div>
    </CardHeader>
    <CardContent>
      {/* Stats restricted to this flow, filtered to the selected version. */}
      <FlowRunsStats
        flows={[selectedFlowVersion ?? flow]}
        flowRuns={flowRuns.filter(r =>
          r.graphID == flow.id
          && (selectedVersion == "all" || r.graphVersion == selectedVersion)
        )}
      />
    </CardContent>
  </Card>;
};
const FlowRunInfo: React.FC<React.HTMLAttributes<HTMLDivElement> & {
flow: GraphMeta;
flowRun: FlowRun;
}> = ({ flow, flowRun, ...props }) => {
if (flowRun.graphID != flow.id) {
throw new Error(`FlowRunInfo can't be used with non-matching flowRun.flowID and flow.id`)
}
return <Card {...props}>
<CardHeader className="flex-row items-center justify-between space-y-0 space-x-3">
<div>
<CardTitle>
{flow.name} <span className="font-light">v{flow.version}</span>
</CardTitle>
<p className="mt-2">Agent ID: <code>{flow.id}</code></p>
<p className="mt-1">Run ID: <code>{flowRun.id}</code></p>
</div>
<Link className={buttonVariants({ variant: "outline" })} href={`/build?flowID=${flow.id}`}>
<Pencil2Icon className="mr-2" /> Edit Agent
</Link>
</CardHeader>
<CardContent>
<p><strong>Status:</strong> <FlowRunStatusBadge status={flowRun.status} /></p>
<p><strong>Started:</strong> {moment(flowRun.startTime).format('YYYY-MM-DD HH:mm:ss')}</p>
<p><strong>Finished:</strong> {moment(flowRun.endTime).format('YYYY-MM-DD HH:mm:ss')}</p>
<p><strong>Duration (run time):</strong> {flowRun.duration} ({flowRun.totalRunTime}) seconds</p>
{/* <p><strong>Total cost:</strong> €1,23</p> */}
</CardContent>
</Card>;
};
/**
 * Aggregate statistics over a set of flow runs, with quick time-window
 * presets (2h/8h/24h/7d), a custom calendar cutoff, and an "All" option.
 */
const FlowRunsStats: React.FC<{
  flows: GraphMeta[],
  flowRuns: FlowRun[],
  title?: string,
  className?: string,
}> = ({ flows, flowRuns, title, className }) => {
  /* Window selector semantics:
   * "dataMin"   – since the earliest run in the dataset
   * number > 0  – absolute cutoff (unix timestamp)
   * number < 0  – offset relative to Date.now(), in seconds */
  const [statsSince, setStatsSince] = useState<number | "dataMin">(-24 * 3600);
  // Resolve the selector to an absolute unix timestamp, or null for "dataMin".
  const statsSinceTimestamp: number | null =
    typeof statsSince == "string"
      ? null
      : statsSince < 0
        ? Date.now() + statsSince * 1000
        : statsSince;
  const filteredFlowRuns =
    statsSinceTimestamp != null
      ? flowRuns.filter(fr => fr.startTime > statsSinceTimestamp)
      : flowRuns;
  // Summed per-node run time across the filtered runs, in seconds.
  const totalRunTime = filteredFlowRuns.reduce(
    (total, run) => total + run.totalRunTime,
    0,
  );
  return (
    <div className={className}>
      <div className="flex flex-row items-center justify-between">
        <CardTitle>{title || "Stats"}</CardTitle>
        <div className="flex space-x-2">
          <Button variant="outline" size="sm" onClick={() => setStatsSince(-2 * 3600)}>2h</Button>
          <Button variant="outline" size="sm" onClick={() => setStatsSince(-8 * 3600)}>8h</Button>
          <Button variant="outline" size="sm" onClick={() => setStatsSince(-24 * 3600)}>24h</Button>
          <Button variant="outline" size="sm" onClick={() => setStatsSince(-7 * 24 * 3600)}>7d</Button>
          <Popover>
            <PopoverTrigger asChild>
              <Button variant={"outline"} size="sm">Custom</Button>
            </PopoverTrigger>
            <PopoverContent className="w-auto p-0" align="start">
              <Calendar
                mode="single"
                onSelect={(_, selectedDay) => setStatsSince(selectedDay.getTime())}
                initialFocus
              />
            </PopoverContent>
          </Popover>
          <Button variant="outline" size="sm" onClick={() => setStatsSince("dataMin")}>All</Button>
        </div>
      </div>
      {/* The chart receives the unfiltered runs; its x-axis domain clips. */}
      <FlowRunsTimeline flows={flows} flowRuns={flowRuns} dataMin={statsSince} className="mt-3" />
      <hr className="my-4" />
      <div>
        <p><strong>Total runs:</strong> {filteredFlowRuns.length}</p>
        <p><strong>Total run time:</strong> {totalRunTime} seconds</p>
        {/* <p><strong>Total cost:</strong> €1,23</p> */}
      </div>
    </div>
  );
};
// Scatter+line chart of flow runs over time: each run is drawn as a line
// from (startTime, 0) to (endTime, totalRunTime) with a scatter point at the
// end; series colors are derived deterministically from the graph-ID hash.
const FlowRunsTimeline = (
  { flows, flowRuns, dataMin, className }: {
    flows: GraphMeta[],
    flowRuns: FlowRun[],
    // "dataMin" = fit to data; number < 0 = seconds before now; else unix ms
    dataMin: "dataMin" | number,
    className?: string,
  }
) => (
  /* TODO: make logarithmic? */
  <ResponsiveContainer width="100%" height={120} className={className}>
    <ComposedChart>
      <XAxis
        dataKey="time"
        type="number"
        domain={[
          typeof(dataMin) == "string"
            ? dataMin
            : dataMin < 0
              ? Date.now() + (dataMin*1000)
              : dataMin,
          Date.now()
        ]}
        allowDataOverflow={true}
        tickFormatter={(unixTime) => {
          // Short HH:mm ticks inside the last 24h, full date otherwise.
          const now = moment();
          const time = moment(unixTime);
          return now.diff(time, 'hours') < 24
            ? time.format('HH:mm')
            : time.format('YYYY-MM-DD HH:mm');
        }}
        name="Time"
        scale="time"
      />
      <YAxis
        dataKey="_duration"
        name="Duration (s)"
        tickFormatter={s => s > 90 ? `${Math.round(s / 60)}m` : `${s}s`}
      />
      <Tooltip
        content={({ payload, label }) => {
          if (payload && payload.length) {
            // Each datum is a FlowRun augmented with chart coordinates.
            const data: FlowRun & { time: number, _duration: number } = payload[0].payload;
            const flow = flows.find(f => f.id === data.graphID);
            return (
              <Card className="p-2 text-xs leading-normal">
                <p><strong>Agent:</strong> {flow ? flow.name : 'Unknown'}</p>
                <p>
                  <strong>Status:</strong>&nbsp;
                  <FlowRunStatusBadge status={data.status} className="px-1.5 py-0" />
                </p>
                <p><strong>Started:</strong> {moment(data.startTime).format('YYYY-MM-DD HH:mm:ss')}</p>
                <p><strong>Duration / run time:</strong> {
                  formatDuration(data.duration)} / {formatDuration(data.totalRunTime)
                }</p>
              </Card>
            );
          }
          return null;
        }}
      />
      {/* One scatter series per flow: a point at each of its runs' end. */}
      {flows.map((flow) => (
        <Scatter
          key={flow.id}
          data={flowRuns.filter(fr => fr.graphID == flow.id).map(fr => ({
            ...fr,
            time: fr.startTime + (fr.totalRunTime * 1000),
            _duration: fr.totalRunTime,
          }))}
          name={flow.name}
          fill={`hsl(${hashString(flow.id) * 137.5 % 360}, 70%, 50%)`}
        />
      ))}
      {/* One line per run; hidden from the legend via legendType="none". */}
      {flowRuns.map((run) => (
        <Line
          key={run.id}
          type="linear"
          dataKey="_duration"
          data={[
            { ...run, time: run.startTime, _duration: 0 },
            { ...run, time: run.endTime, _duration: run.totalRunTime }
          ]}
          stroke={`hsl(${hashString(run.graphID) * 137.5 % 360}, 70%, 50%)`}
          strokeWidth={2}
          dot={false}
          legendType="none"
        />
      ))}
      <Legend
        content={<ScrollableLegend />}
        wrapperStyle={{
          bottom: 0,
          left: 0,
          right: 0,
          width: "100%",
          display: "flex",
          justifyContent: "center",
        }}
      />
    </ComposedChart>
  </ResponsiveContainer>
);
/**
 * Horizontally scrollable chart legend (scrollbar hidden via CSS).
 * Entries with type "none" (e.g. the per-run Line series) are skipped.
 */
const ScrollableLegend: React.FC<DefaultLegendContentProps & { className?: string }> = (
  { payload, className }
) => {
  // `payload` is optional in DefaultLegendContentProps; Recharts may render
  // legend content before any series exist — guard instead of crashing.
  if (!payload) return null;
  return (
    <div
      className={cn(
        "whitespace-nowrap px-4 text-sm overflow-x-auto space-x-3",
        className,
      )}
      style={{ scrollbarWidth: "none" }}
    >
      {payload.map((entry, index) => {
        if (entry.type == "none") return null;
        return (
          <span key={`item-${index}`} className="inline-flex items-center">
            <span
              className="size-2.5 inline-block mr-1 rounded-full"
              style={{ backgroundColor: entry.color }}
            />
            <span>{entry.value}</span>
          </span>
        );
      })}
    </div>
  );
};
/**
 * Format a duration in seconds for display, e.g. "5.7s" or "123s".
 * Below 100 s the value is shown to 2 significant digits; at or above,
 * it is rounded to the nearest whole second.
 */
function formatDuration(seconds: number): string {
  const rendered =
    seconds < 100 ? seconds.toPrecision(2) : String(Math.round(seconds));
  return `${rendered}s`;
}
export default Monitor;

View File

@@ -1,178 +0,0 @@
"use client";
import React, { useCallback, useEffect, useMemo, useState } from "react";
import AutoGPTServerAPI, {
GraphMeta,
NodeExecutionResult,
} from "@/lib/autogpt-server-api";
import { Card } from "@/components/ui/card";
import { FlowRun } from "@/lib/types";
import {
AgentFlowList,
FlowInfo,
FlowRunInfo,
FlowRunsList,
FlowRunsStats,
} from "@/components/monitor";
// Top-level monitor page: lists agent flows and their runs, keeps run data
// fresh by polling, and shows a detail pane (run > flow > aggregate stats).
const Monitor = () => {
  const [flows, setFlows] = useState<GraphMeta[]>([]);
  const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
  const [selectedFlow, setSelectedFlow] = useState<GraphMeta | null>(null);
  const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
  // Single API client for the lifetime of the component.
  const api = useMemo(() => new AutoGPTServerAPI(), []);
  // Re-fetch execution info for all runs of one flow. Runs already in state
  // with a terminal status are skipped; others are refreshed in place.
  const refreshFlowRuns = useCallback(
    (flowID: string) => {
      // Fetch flow run IDs
      api.listGraphRunIDs(flowID).then((runIDs) =>
        runIDs.map((runID) => {
          let run;
          if (
            (run = flowRuns.find((fr) => fr.id == runID)) &&
            !["waiting", "running"].includes(run.status)
          ) {
            // Terminal status — nothing left to refresh for this run.
            return;
          }
          // Fetch flow run
          api.getGraphExecutionInfo(flowID, runID).then((execInfo) =>
            setFlowRuns((flowRuns) => {
              if (execInfo.length == 0) return flowRuns;
              const flowRunIndex = flowRuns.findIndex((fr) => fr.id == runID);
              const flowRun = flowRunFromNodeExecutionResults(execInfo);
              // Replace the stale entry or append a new one.
              // NOTE(review): splice/push mutate the previous state array
              // before the spread-copy below — consider copying first.
              if (flowRunIndex > -1) {
                flowRuns.splice(flowRunIndex, 1, flowRun);
              } else {
                flowRuns.push(flowRun);
              }
              return [...flowRuns];
            }),
          );
        }),
      );
    },
    [api, flowRuns],
  );
  // Load the flow list, then fan out a run refresh per flow.
  const fetchFlowsAndRuns = useCallback(() => {
    api.listGraphs().then((flows) => {
      setFlows(flows);
      flows.map((flow) => refreshFlowRuns(flow.id));
    });
  }, [api, refreshFlowRuns]);
  // Initial load, then poll all flows' runs every 5 seconds.
  useEffect(() => fetchFlowsAndRuns(), [fetchFlowsAndRuns]);
  useEffect(() => {
    const intervalId = setInterval(
      () => flows.map((f) => refreshFlowRuns(f.id)),
      5000,
    );
    return () => clearInterval(intervalId);
  }, [flows, refreshFlowRuns]);
  // Responsive grid spans for the three panes.
  const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
  const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
  const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
  return (
    <div className="grid grid-cols-1 gap-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10">
      <AgentFlowList
        className={column1}
        flows={flows}
        flowRuns={flowRuns}
        selectedFlow={selectedFlow}
        onSelectFlow={(f) => {
          // Clicking the already-selected flow deselects it.
          setSelectedRun(null);
          setSelectedFlow(f.id == selectedFlow?.id ? null : f);
        }}
      />
      <FlowRunsList
        className={column2}
        flows={flows}
        runs={[
          ...(selectedFlow
            ? flowRuns.filter((v) => v.graphID == selectedFlow.id)
            : flowRuns),
        ].sort((a, b) => Number(a.startTime) - Number(b.startTime))}
        selectedRun={selectedRun}
        onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
      />
      {/* Detail pane precedence: selected run > selected flow > overall stats */}
      {(selectedRun && (
        <FlowRunInfo
          flow={selectedFlow || flows.find((f) => f.id == selectedRun.graphID)!}
          flowRun={selectedRun}
          className={column3}
        />
      )) ||
        (selectedFlow && (
          <FlowInfo
            flow={selectedFlow}
            flowRuns={flowRuns.filter((r) => r.graphID == selectedFlow.id)}
            className={column3}
          />
        )) || (
          <Card className={`p-6 ${column3}`}>
            <FlowRunsStats flows={flows} flowRuns={flowRuns} />
          </Card>
        )}
    </div>
  );
};
/**
 * Aggregate a run's node execution results into a single FlowRun record:
 * overall status, wall-clock start/end/duration, and summed per-node run time.
 */
function flowRunFromNodeExecutionResults(
  nodeExecutionResults: NodeExecutionResult[],
): FlowRun {
  // Overall status precedence: FAILED > QUEUED/RUNNING > INCOMPLETE > success.
  let status: "running" | "waiting" | "success" | "failed" = "success";
  for (const node of nodeExecutionResults) {
    if (node.status === "FAILED") {
      status = "failed";
      break;
    }
    if (node.status === "QUEUED" || node.status === "RUNNING") {
      status = "running";
      break;
    }
    if (node.status === "INCOMPLETE") {
      status = "waiting";
    }
  }
  // Aggregate timing; `now` caps open-ended values for unfinished runs.
  const now = Date.now();
  const startTime = Math.min(
    now,
    ...nodeExecutionResults.map((node) => node.add_time.getTime()),
  );
  const isFinished = status === "success" || status === "failed";
  const endTime = isFinished
    ? Math.max(
        startTime,
        ...nodeExecutionResults.map((node) => node.end_time?.getTime() || 0),
      )
    : now;
  // Wall-clock duration vs. summed per-node execution time, both in seconds.
  const duration = (endTime - startTime) / 1000;
  const totalRunTime =
    nodeExecutionResults.reduce(
      (sum, node) =>
        sum +
        ((node.end_time?.getTime() ?? now) -
          (node.start_time?.getTime() ?? now)),
      0,
    ) / 1000;
  return {
    id: nodeExecutionResults[0].graph_exec_id,
    graphID: nodeExecutionResults[0].graph_id,
    graphVersion: nodeExecutionResults[0].graph_version,
    status,
    startTime,
    endTime,
    duration,
    totalRunTime,
    nodeExecutionResults: nodeExecutionResults,
  };
}
export default Monitor;

View File

@@ -1,33 +0,0 @@
"use client";
import { useSupabase } from "@/components/SupabaseProvider";
import { Button } from "@/components/ui/button";
import useUser from "@/hooks/useUser";
import { useRouter } from "next/navigation";
import { FaSpinner } from "react-icons/fa";
// Simple authenticated page: greets the signed-in user and offers logout.
export default function PrivatePage() {
  const { user, isLoading, error } = useUser();
  const { supabase } = useSupabase();
  const router = useRouter();
  // Block rendering until the session check resolves.
  if (isLoading) {
    return (
      <div className="flex h-[80vh] items-center justify-center">
        <FaSpinner className="mr-2 h-16 w-16 animate-spin" />
      </div>
    );
  }
  // Not signed in (or Supabase unavailable) → bounce to the login page.
  // NOTE(review): router.push during render is a side effect; React
  // guidance is to perform redirects in an effect — confirm intended.
  if (error || !user || !supabase) {
    router.push("/login");
    return null;
  }
  return (
    <div>
      <p>Hello {user.email}</p>
      <Button onClick={() => supabase.auth.signOut()}>Log out</Button>
    </div>
  );
}

View File

@@ -1,17 +0,0 @@
"use client";
import * as React from "react";
import { ThemeProvider as NextThemesProvider } from "next-themes";
import { ThemeProviderProps } from "next-themes/dist/types";
import { TooltipProvider } from "@/components/ui/tooltip";
import SupabaseProvider from "@/components/SupabaseProvider";
/**
 * Client-side provider stack: theming (next-themes) wraps the Supabase
 * session provider, which wraps the Radix tooltip context. All extra props
 * are forwarded to the theme provider.
 */
export function Providers({ children, ...props }: ThemeProviderProps) {
  const providerTree = (
    <SupabaseProvider>
      <TooltipProvider>{children}</TooltipProvider>
    </SupabaseProvider>
  );
  return <NextThemesProvider {...props}>{providerTree}</NextThemesProvider>;
}

View File

@@ -1,9 +0,0 @@
// app/unauthorized/page.tsx
/** Static error page shown when the user lacks permission for a route. */
export default function Unauthorized() {
  const heading = "Unauthorized Access";
  const message = "You do not have permission to view this page.";
  return (
    <div>
      <h1>{heading}</h1>
      <p>{message}</p>
    </div>
  );
}

View File

@@ -1,23 +1,9 @@
import {
BaseEdge,
ConnectionLineComponentProps,
getBezierPath,
Position,
} from "@xyflow/react";
import { BaseEdge, ConnectionLineComponentProps, getBezierPath, Position } from "reactflow";
const ConnectionLine: React.FC<ConnectionLineComponentProps> = ({
fromPosition,
fromHandle,
fromX,
fromY,
toPosition,
toX,
toY,
}) => {
const sourceX =
fromPosition === Position.Right
? fromX + (fromHandle?.width! / 2 - 5)
: fromX - (fromHandle?.width! / 2 - 5);
const ConnectionLine: React.FC<ConnectionLineComponentProps> = ({ fromPosition, fromHandle, fromX, fromY, toPosition, toX, toY }) => {
const sourceX = fromPosition === Position.Right ?
fromX + (fromHandle?.width! / 2 - 5) : fromX - (fromHandle?.width! / 2 - 5);
const [path] = getBezierPath({
sourceX: sourceX,
@@ -28,7 +14,9 @@ const ConnectionLine: React.FC<ConnectionLineComponentProps> = ({
targetPosition: toPosition,
});
return <BaseEdge path={path} style={{ strokeWidth: 2, stroke: "#555" }} />;
return (
<BaseEdge path={path} style={{ strokeWidth: 2, stroke: '#555' }} />
);
};
export default ConnectionLine;

View File

@@ -1,235 +1,37 @@
import React, { useCallback, useContext, useEffect, useState } from "react";
import {
BaseEdge,
EdgeLabelRenderer,
EdgeProps,
useReactFlow,
XYPosition,
Edge,
Node,
} from "@xyflow/react";
import "./customedge.css";
import { X } from "lucide-react";
import { useBezierPath } from "@/hooks/useBezierPath";
import { FlowContext } from "./Flow";
import { FC, memo, useMemo } from "react";
import { BaseEdge, EdgeProps, getBezierPath, XYPosition } from "reactflow";
export type CustomEdgeData = {
edgeColor: string;
sourcePos?: XYPosition;
isStatic?: boolean;
beadUp?: number;
beadDown?: number;
beadData?: any[];
};
edgeColor: string
sourcePos: XYPosition
}
type Bead = {
t: number;
targetT: number;
startTime: number;
};
const CustomEdgeFC: FC<EdgeProps<CustomEdgeData>> = ({ data, selected, source, sourcePosition, sourceX, sourceY, target, targetPosition, targetX, targetY, markerEnd }) => {
export type CustomEdge = Edge<CustomEdgeData, "custom">;
export function CustomEdge({
id,
data,
selected,
sourceX,
sourceY,
targetX,
targetY,
markerEnd,
}: EdgeProps<CustomEdge>) {
const [isHovered, setIsHovered] = useState(false);
const [beads, setBeads] = useState<{
beads: Bead[];
created: number;
destroyed: number;
}>({ beads: [], created: 0, destroyed: 0 });
const { svgPath, length, getPointForT, getTForDistance } = useBezierPath(
sourceX - 5,
const [path] = getBezierPath({
sourceX: sourceX - 5,
sourceY,
targetX + 3,
sourcePosition,
targetX: targetX + 4,
targetY,
);
const { deleteElements } = useReactFlow<Node, CustomEdge>();
const { visualizeBeads } = useContext(FlowContext) ?? {
visualizeBeads: "no",
};
targetPosition,
});
const onEdgeRemoveClick = () => {
deleteElements({ edges: [{ id }] });
};
// Calculate y difference between source and source node, to adjust self-loop edge
const yDifference = useMemo(() => sourceY - data!.sourcePos.y, [data!.sourcePos.y]);
const animationDuration = 500; // Duration in milliseconds for bead to travel the curve
const beadDiameter = 12;
const deltaTime = 16;
const setTargetPositions = useCallback(
(beads: Bead[]) => {
const distanceBetween = Math.min(
(length - beadDiameter) / (beads.length + 1),
beadDiameter,
);
return beads.map((bead, index) => {
const distanceFromEnd = beadDiameter * 1.35;
const targetPosition = distanceBetween * index + distanceFromEnd;
const t = getTForDistance(-targetPosition);
return {
...bead,
t: visualizeBeads === "animate" ? bead.t : t,
targetT: t,
} as Bead;
});
},
[getTForDistance, length, visualizeBeads],
);
useEffect(() => {
if (data?.beadUp === 0 && data?.beadDown === 0) {
setBeads({ beads: [], created: 0, destroyed: 0 });
return;
}
const beadUp = data?.beadUp!;
// Add beads
setBeads(({ beads, created, destroyed }) => {
const newBeads = [];
for (let i = 0; i < beadUp - created; i++) {
newBeads.push({ t: 0, targetT: 0, startTime: Date.now() });
}
const b = setTargetPositions([...beads, ...newBeads]);
return { beads: b, created: beadUp, destroyed };
});
// Remove beads if not animating
if (visualizeBeads !== "animate") {
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => ({ ...bead }))
.filter((bead, index) => {
const beadDown = data?.beadDown!;
// Remove always one less bead in case of static edge, so it stays at the connection point
const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0);
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
return;
}
// Animate and remove beads
const interval = setInterval(() => {
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => {
const progressIncrement = deltaTime / animationDuration;
const t = Math.min(
bead.t + bead.targetT * progressIncrement,
bead.targetT,
);
return {
...bead,
t,
};
})
.filter((bead, index) => {
const beadDown = data?.beadDown!;
// Remove always one less bead in case of static edge, so it stays at the connection point
const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0);
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
}, deltaTime);
return () => clearInterval(interval);
}, [data, setTargetPositions, visualizeBeads]);
const middle = getPointForT(0.5);
// Define special edge path for self-loop
const edgePath = source === target ?
`M ${sourceX - 5} ${sourceY} C ${sourceX + 128} ${sourceY - yDifference - 128} ${targetX - 128} ${sourceY - yDifference - 128} ${targetX + 3}, ${targetY}` :
path;
return (
<>
<BaseEdge
path={svgPath}
markerEnd={markerEnd}
style={{
strokeWidth: (isHovered ? 3 : 2) + (data?.isStatic ? 0.5 : 0),
stroke:
(data?.edgeColor ?? "#555555") +
(selected || isHovered ? "" : "80"),
strokeDasharray: data?.isStatic ? "5 3" : "0",
}}
/>
<path
d={svgPath}
fill="none"
strokeOpacity={0}
strokeWidth={20}
className="react-flow__edge-interaction"
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
/>
<EdgeLabelRenderer>
<div
style={{
position: "absolute",
transform: `translate(-50%, -50%) translate(${middle.x}px,${middle.y}px)`,
pointerEvents: "all",
}}
className="edge-label-renderer"
>
<button
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
className={`edge-label-button ${isHovered ? "visible" : ""}`}
onClick={onEdgeRemoveClick}
>
<X className="size-4" />
</button>
</div>
</EdgeLabelRenderer>
{beads.beads.map((bead, index) => {
const pos = getPointForT(bead.t);
return (
<circle
key={index}
cx={pos.x}
cy={pos.y}
r={beadDiameter / 2} // Bead radius
fill={data?.edgeColor ?? "#555555"}
/>
);
})}
</>
);
}
<BaseEdge
style={{ strokeWidth: 2, stroke: (data?.edgeColor ?? '#555555') + (selected ? '' : '80') }}
path={edgePath}
markerEnd={markerEnd}
/>
)
};
export const CustomEdge = memo(CustomEdgeFC);

File diff suppressed because it is too large Load Diff

View File

@@ -1,92 +0,0 @@
import { beautifyString } from "@/lib/utils";
import { Button } from "./ui/button";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "./ui/table";
import { Clipboard } from "lucide-react";
import { useToast } from "./ui/use-toast";
type DataTableProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
/**
 * Read-only key/value table ("Pin" → "Data") with a per-row
 * copy-to-clipboard button. Long values are truncated to `maxChars`
 * characters when `truncateLongData` is set.
 */
export default function DataTable({
  title,
  truncateLongData,
  data,
}: DataTableProps) {
  const { toast } = useToast();
  const maxChars = 100;

  // Single serialization rule shared by display and clipboard output
  // (previously duplicated inline in both places).
  const stringifyItem = (item: unknown): string =>
    typeof item === "object" ? JSON.stringify(item) : String(item);

  // Copy a pin's full (untruncated) data and confirm via toast.
  const copyData = (pin: string, data: string) => {
    navigator.clipboard.writeText(data).then(() => {
      toast({
        title: `"${pin}" data copied to clipboard!`,
        duration: 2000,
      });
    });
  };

  return (
    <>
      {title && <strong className="mt-2 flex justify-center">{title}</strong>}
      <Table className="cursor-default select-text">
        <TableHeader>
          <TableRow>
            <TableHead>Pin</TableHead>
            <TableHead>Data</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {Object.entries(data).map(([key, value]) => (
            <TableRow className="group" key={key}>
              <TableCell className="cursor-text">
                {beautifyString(key)}
              </TableCell>
              <TableCell className="cursor-text">
                <div className="flex min-h-9 items-center">
                  {/* Copy button appears on row hover */}
                  <Button
                    className="absolute right-1 top-auto m-1 hidden p-2 group-hover:block"
                    variant="outline"
                    size="icon"
                    onClick={() =>
                      copyData(
                        beautifyString(key),
                        value.map(stringifyItem).join(", "),
                      )
                    }
                    title="Copy Data"
                  >
                    <Clipboard size={18} />
                  </Button>
                  {value
                    .map((item) => {
                      const text = stringifyItem(item);
                      return truncateLongData && text.length > maxChars
                        ? text.slice(0, maxChars) + "..."
                        : text;
                    })
                    .join(", ")}
                </div>
              </TableCell>
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </>
  );
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,107 +0,0 @@
import React, { FC, useEffect, useState } from "react";
import { Button } from "./ui/button";
import { Textarea } from "./ui/textarea";
import { Maximize2, Minimize2, Clipboard } from "lucide-react";
import { createPortal } from "react-dom";
import { toast } from "./ui/use-toast";
interface ModalProps {
isOpen: boolean;
onClose: () => void;
onSave: (value: string) => void;
title?: string;
defaultValue: string;
}
// Modal for editing a long text value: maximizable textarea with
// copy-to-clipboard and Save/Cancel. Edits are staged locally and only
// committed to the caller through onSave.
const InputModalComponent: FC<ModalProps> = ({
  isOpen,
  onClose,
  onSave,
  title,
  defaultValue,
}) => {
  // Staged edit buffer; reset from defaultValue each time the modal opens.
  const [tempValue, setTempValue] = useState(defaultValue);
  const [isMaximized, setIsMaximized] = useState(false);
  useEffect(() => {
    if (isOpen) {
      setTempValue(defaultValue);
      setIsMaximized(false);
    }
  }, [isOpen, defaultValue]);
  // Commit the staged value and dismiss.
  const handleSave = () => {
    onSave(tempValue);
    onClose();
  };
  const toggleSize = () => {
    setIsMaximized(!isMaximized);
  };
  // Copy the staged value to the clipboard and confirm via toast.
  const copyValue = () => {
    navigator.clipboard.writeText(tempValue).then(() => {
      toast({
        title: "Input value copied to clipboard!",
        duration: 2000,
      });
    });
  };
  if (!isOpen) {
    return null;
  }
  const modalContent = (
    <div
      id="modal-content"
      className={`fixed rounded-lg border-[1.5px] bg-white p-5 ${
        isMaximized ? "inset-[128px] flex flex-col" : `w-[90%] max-w-[800px]`
      }`}
    >
      <h2 className="mb-4 text-center text-lg font-semibold">
        {title || "Enter input text"}
      </h2>
      <div className="nowheel relative flex-grow">
        <Textarea
          className="h-full min-h-[200px] w-full resize-none"
          value={tempValue}
          onChange={(e) => setTempValue(e.target.value)}
        />
        <div className="absolute bottom-2 right-2 flex space-x-2">
          <Button onClick={copyValue} size="icon" variant="outline">
            <Clipboard size={18} />
          </Button>
          <Button onClick={toggleSize} size="icon" variant="outline">
            {isMaximized ? <Minimize2 size={18} /> : <Maximize2 size={18} />}
          </Button>
        </div>
      </div>
      <div className="mt-4 flex justify-end space-x-2">
        <Button onClick={onClose} variant="outline">
          Cancel
        </Button>
        <Button onClick={handleSave}>Save</Button>
      </div>
    </div>
  );
  // Maximized mode portals to <body> so the overlay escapes any transformed
  // ancestor (e.g. a React Flow node); inline mode stays in place.
  return (
    <>
      {isMaximized ? (
        createPortal(
          <div className="fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
            {modalContent}
          </div>,
          document.body,
        )
      ) : (
        <div className="nodrag fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
          {modalContent}
        </div>
      )}
    </>
  );
};
export default InputModalComponent;

View File

@@ -0,0 +1,53 @@
import React, { FC, useEffect, useRef } from 'react';
import { Button } from './ui/button';
import { Textarea } from './ui/textarea';
interface ModalProps {
isOpen: boolean;
onClose: () => void;
onSave: (value: string) => void;
value: string;
}
// Minimal text-input modal: stages edits locally, selects the existing text
// on open, and commits through onSave only when Save is pressed.
const ModalComponent: FC<ModalProps> = ({ isOpen, onClose, onSave, value }) => {
  // Staged edit buffer; reset from `value` each time the modal opens.
  const [tempValue, setTempValue] = React.useState(value);
  const textAreaRef = useRef<HTMLTextAreaElement>(null);
  useEffect(() => {
    if (isOpen) {
      setTempValue(value);
      // Pre-select the current text so typing replaces it immediately.
      if (textAreaRef.current) {
        textAreaRef.current.select();
      }
    }
  }, [isOpen, value]);
  // Commit the staged value and dismiss.
  const handleSave = () => {
    onSave(tempValue);
    onClose();
  };
  if (!isOpen) {
    return null;
  }
  return (
    <div className="nodrag fixed inset-0 bg-white bg-opacity-60 flex justify-center items-center">
      <div className="bg-white p-5 rounded-lg w-[500px] max-w-[90%]">
        <center><h1>Enter input text</h1></center>
        <Textarea
          ref={textAreaRef}
          className="w-full h-[200px] p-2.5 rounded border border-[#dfdfdf] text-black bg-[#dfdfdf]"
          value={tempValue}
          onChange={(e) => setTempValue(e.target.value)}
        />
        <div className="flex justify-end gap-2.5 mt-2.5">
          <Button onClick={onClose}>Cancel</Button>
          <Button onClick={handleSave}>Save</Button>
        </div>
      </div>
    </div>
  );
};
export default ModalComponent;

View File

@@ -1,112 +0,0 @@
import Link from "next/link";
import { Button } from "@/components/ui/button";
import React from "react";
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet";
import Image from "next/image";
import getServerUser from "@/hooks/getServerUser";
import ProfileDropdown from "./ProfileDropdown";
import {
IconCircleUser,
IconMenu,
IconPackage2,
IconSquareActivity,
IconWorkFlow,
} from "@/components/ui/icons";
// Sticky top navigation bar. Server component: reads the session via
// getServerUser(). Layout: hamburger sheet on mobile / inline links on
// desktop (left), AutoGPT logo (center), login link or profile dropdown
// (right).
export async function NavBar() {
  // Supabase is optional; the auth UI is hidden when env vars are missing.
  const isAvailable = Boolean(
    process.env.NEXT_PUBLIC_SUPABASE_URL &&
      process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY,
  );
  const { user } = await getServerUser();
  return (
    <header className="sticky top-0 z-50 flex h-16 items-center gap-4 border-b bg-background px-4 md:px-6">
      <div className="flex flex-1 items-center gap-4">
        {/* Mobile: slide-out navigation sheet */}
        <Sheet>
          <SheetTrigger asChild>
            <Button
              variant="outline"
              size="icon"
              className="shrink-0 md:hidden"
            >
              <IconMenu />
              <span className="sr-only">Toggle navigation menu</span>
            </Button>
          </SheetTrigger>
          <SheetContent side="left">
            <nav className="grid gap-6 text-lg font-medium">
              <Link
                href="/"
                className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
              >
                <IconSquareActivity /> Monitor
              </Link>
              <Link
                href="/build"
                className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
              >
                <IconWorkFlow /> Build
              </Link>
              <Link
                href="/marketplace"
                className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
              >
                <IconPackage2 /> Marketplace
              </Link>
            </nav>
          </SheetContent>
        </Sheet>
        {/* Desktop: inline navigation links */}
        <nav className="hidden md:flex md:flex-row md:items-center md:gap-5 lg:gap-6">
          <Link
            href="/"
            className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
          >
            <IconSquareActivity /> Monitor
          </Link>
          <Link
            href="/build"
            className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
          >
            <IconWorkFlow /> Build
          </Link>
          <Link
            href="/marketplace"
            className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
          >
            <IconPackage2 /> Marketplace
          </Link>
        </nav>
      </div>
      {/* Center: AutoGPT logo linking to the news site */}
      <div className="relative flex flex-1 justify-center">
        <a
          className="pointer-events-auto flex place-items-center gap-2"
          href="https://news.agpt.co/"
          target="_blank"
          rel="noopener noreferrer"
        >
          By{" "}
          <Image
            src="/AUTOgpt_Logo_dark.png"
            alt="AutoGPT Logo"
            width={100}
            height={20}
            priority
          />
        </a>
      </div>
      {/* Right: login link (signed out) or profile dropdown (signed in) */}
      <div className="flex flex-1 items-center justify-end gap-4">
        {isAvailable && !user && (
          <Link
            href="/login"
            className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
          >
            Log In
            <IconCircleUser />
          </Link>
        )}
        {isAvailable && user && <ProfileDropdown />}
      </div>
    </header>
  );
}

View File

@@ -1,68 +1,57 @@
import { BlockIOSubSchema } from "@/lib/autogpt-server-api/types";
import { BlockSchema } from "@/lib/types";
import { beautifyString, getTypeBgColor, getTypeTextColor } from "@/lib/utils";
import { FC } from "react";
import { Handle, Position } from "@xyflow/react";
import { Handle, Position } from "reactflow";
import SchemaTooltip from "./SchemaTooltip";
type HandleProps = {
keyName: string;
schema: BlockIOSubSchema;
isConnected: boolean;
isRequired?: boolean;
side: "left" | "right";
};
keyName: string,
schema: BlockSchema,
isConnected: boolean,
side: 'left' | 'right'
}
const NodeHandle: FC<HandleProps> = ({ keyName, isConnected, schema, side }) => {
const NodeHandle: FC<HandleProps> = ({
keyName,
schema,
isConnected,
isRequired,
side,
}) => {
const typeName: Record<string, string> = {
string: "text",
number: "number",
boolean: "true/false",
object: "object",
array: "list",
null: "null",
string: 'text',
number: 'number',
boolean: 'true/false',
object: 'complex',
array: 'list',
null: 'null',
};
const typeClass = `text-sm ${getTypeTextColor(schema.type || "any")} ${side === "left" ? "text-left" : "text-right"}`;
const typeClass = `text-sm ${getTypeTextColor(schema.type)} ${side === 'left' ? 'text-left' : 'text-right'}`;
const label = (
<div className="flex flex-grow flex-col">
<span className="text-m green -mb-1 text-gray-900">
{schema.title || beautifyString(keyName)}
{isRequired ? "*" : ""}
</span>
<span className={typeClass}>{typeName[schema.type] || "any"}</span>
<div className="flex flex-col flex-grow">
<span className="text-m text-gray-900 -mb-1 green">{schema.title || beautifyString(keyName)}</span>
<span className={typeClass}>{typeName[schema.type]}</span>
</div>
);
const dot = (
<div
className={`m-1 h-4 w-4 border-2 bg-white ${isConnected ? getTypeBgColor(schema.type || "any") : "border-gray-300"} rounded-full transition-colors duration-100 group-hover:bg-gray-300`}
/>
<div className={`w-4 h-4 m-1 ${isConnected ? getTypeBgColor(schema.type) : 'bg-gray-600'} rounded-full transition-colors duration-100 group-hover:bg-gray-300`} />
);
if (side === "left") {
if (side === 'left') {
return (
<div key={keyName} className="handle-container">
<Handle
type="target"
position={Position.Left}
id={keyName}
className="background-color: white; border: 2px solid black; width: 15px; height: 15px; border-radius: 50%; bottom: -7px; left: 20%; group -ml-[26px]"
className='group -ml-[29px]'
>
<div className="pointer-events-none flex items-center">
{dot}
{label}
</div>
</Handle>
<SchemaTooltip description={schema.description} />
<SchemaTooltip schema={schema} />
</div>
);
)
} else {
return (
<div key={keyName} className="handle-container justify-end">
@@ -70,16 +59,16 @@ const NodeHandle: FC<HandleProps> = ({
type="source"
position={Position.Right}
id={keyName}
className="group -mr-[26px]"
className='group -mr-[29px]'
>
<div className="pointer-events-none flex items-center">
{label}
{dot}
</div>
</Handle>
</div>
);
</div >
)
}
};
}
export default NodeHandle;

View File

@@ -1,45 +0,0 @@
import React, { FC } from "react";
import { Button } from "./ui/button";
import { NodeExecutionResult } from "@/lib/autogpt-server-api/types";
import DataTable from "./DataTable";
import { Separator } from "@/components/ui/separator";
/** Props for {@link OutputModalComponent}. */
interface OutputModalProps {
  /** Whether the modal is shown; when false the component renders nothing. */
  isOpen: boolean;
  /** Invoked when the user clicks the "Close" button. */
  onClose: () => void;
  /** One entry per node execution: the execution ID and its output data. */
  executionResults: {
    execId: string;
    data: NodeExecutionResult["output_data"];
  }[];
}
/**
 * Modal that lists the output data of past node executions.
 *
 * Renders nothing while `isOpen` is false. Each execution is shown as a
 * titled DataTable followed by a separator; the "Close" button invokes
 * `onClose`.
 */
const OutputModalComponent: FC<OutputModalProps> = ({
  isOpen,
  onClose,
  executionResults,
}) => {
  if (!isOpen) {
    return null;
  }

  return (
    <div className="nodrag nowheel fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
      <div className="w-[500px] max-w-[90%] rounded-lg border-[1.5px] bg-white p-5">
        <strong>Output Data History</strong>
        <div className="my-2 max-h-[384px] flex-grow overflow-y-auto rounded-md border-[1.5px] p-2">
          {executionResults.map((data, i) => (
            // The key must live on the outermost element of each list item.
            // A shorthand fragment (<>) cannot carry a key, so use
            // React.Fragment; previously the key sat on the inner DataTable,
            // leaving the list child itself keyless (React warning + unstable
            // reconciliation identity).
            <React.Fragment key={i}>
              <DataTable title={data.execId} data={data.data} />
              <Separator />
            </React.Fragment>
          ))}
        </div>
        <div className="mt-2.5 flex justify-end gap-2.5">
          <Button onClick={onClose}>Close</Button>
        </div>
      </div>
    </div>
  );
};

export default OutputModalComponent;

View File

@@ -1,54 +0,0 @@
import { forwardRef, useState } from "react";
import { EyeIcon, EyeOffIcon } from "lucide-react";
import { Button } from "@/components/ui/button";
import { Input, InputProps } from "@/components/ui/input";
import { cn } from "@/lib/utils";
/**
 * Password field with an eye-icon button that toggles plain-text visibility.
 *
 * Forwards its ref to the underlying {@link Input}. The toggle button is
 * disabled while the field is empty (or the input itself is disabled), and
 * the native browser reveal widgets (Edge/IE) are suppressed via CSS so only
 * our toggle is shown.
 */
const PasswordInput = forwardRef<HTMLInputElement, InputProps>(
  ({ className, ...props }, ref) => {
    const [revealed, setRevealed] = useState(false);

    // No point toggling visibility of an empty or disabled field.
    const toggleDisabled =
      props.value === "" || props.value === undefined || props.disabled;

    // Open eye only when the password is actually being shown.
    const ToggleIcon = revealed && !toggleDisabled ? EyeIcon : EyeOffIcon;

    return (
      <div className="relative">
        <Input
          ref={ref}
          type={revealed ? "text" : "password"}
          className={cn("hide-password-toggle pr-10", className)}
          // Spread last so callers may still override type/className/ref-less props.
          {...props}
        />
        <Button
          type="button"
          variant="ghost"
          size="sm"
          className="absolute right-0 top-0 h-full px-3 py-2 hover:bg-transparent"
          onClick={() => setRevealed((wasRevealed) => !wasRevealed)}
          disabled={toggleDisabled}
        >
          <ToggleIcon className="h-4 w-4" aria-hidden="true" />
          <span className="sr-only">
            {revealed ? "Hide password" : "Show password"}
          </span>
        </Button>
        {/* hides browsers password toggles */}
        <style>{`
          .hide-password-toggle::-ms-reveal,
          .hide-password-toggle::-ms-clear {
            visibility: hidden;
            pointer-events: none;
            display: none;
          }
        `}</style>
      </div>
    );
  },
);
PasswordInput.displayName = "PasswordInput";

export { PasswordInput };

Some files were not shown because too many files have changed in this diff Show More