diff --git a/agenthub/__init__.py b/agenthub/__init__.py
index 4678f96538..a976927218 100644
--- a/agenthub/__init__.py
+++ b/agenthub/__init__.py
@@ -1,4 +1,8 @@
-from . import langchains_agent
-from . import codeact_agent
+from dotenv import load_dotenv
+load_dotenv()
+
+# Import agents after environment variables are loaded
+from . import langchains_agent  # noqa: E402
+from . import codeact_agent  # noqa: E402
 
 __all__ = ['langchains_agent', 'codeact_agent']
diff --git a/agenthub/codeact_agent/__init__.py b/agenthub/codeact_agent/__init__.py
index aa5a2e4d0e..a0c393b188 100644
--- a/agenthub/codeact_agent/__init__.py
+++ b/agenthub/codeact_agent/__init__.py
@@ -1,8 +1,9 @@
 import os
 import re
-from termcolor import colored
 from typing import List, Mapping
 
+from termcolor import colored
+
 from opendevin.agent import Agent
 from opendevin.state import State
 from opendevin.action import (
diff --git a/opendevin/server/README.md b/opendevin/server/README.md
index 21e7fe9b74..134f7ac3c9 100644
--- a/opendevin/server/README.md
+++ b/opendevin/server/README.md
@@ -1,18 +1,42 @@
-# OpenDevin server
-This is currently just a POC that starts an echo websocket inside docker, and
-forwards messages between the client and the docker container.
+# OpenDevin Server
+
+This is a WebSocket server that executes tasks using an agent.
+
+## Install
+
+Create a `.env` file with the contents
+
+```sh
+OPENAI_API_KEY=
+```
+
+Install requirements:
+
+```sh
+python3.12 -m venv venv
+source venv/bin/activate
+python -m pip install -r requirements.txt
+```
 
 ## Start the Server
-```
-python -m pip install -r requirements.txt
+
+```sh
 uvicorn opendevin.server.listen:app --reload --port 3000
 ```
 
 ## Test the Server
+
 You can use `websocat` to test the server:
 https://github.com/vi/websocat
 
-```
+```sh
 websocat ws://127.0.0.1:3000/ws
 {"action": "start", "args": {"task": "write a bash script that prints hello"}}
 ```
+
+## Supported Environment Variables
+
+```sh
+OPENAI_API_KEY=sk-...                  # Your OpenAI API Key
+MODEL_NAME=gpt-4-0125-preview          # Default model for the agent to use
+WORKSPACE_DIR=/path/to/your/workspace  # Default path to model's workspace
+```
diff --git a/opendevin/server/session.py b/opendevin/server/session.py
index 49465b4ec0..bbe44b8a5a 100644
--- a/opendevin/server/session.py
+++ b/opendevin/server/session.py
@@ -4,10 +4,6 @@ from typing import Optional, Dict, Type
 
 from fastapi import WebSocketDisconnect
 
-from opendevin.agent import Agent
-from opendevin.controller import AgentController
-from opendevin.llm.llm import LLM
-
 from opendevin.action import (
     Action,
     CmdRunAction,
@@ -19,6 +15,9 @@ from opendevin.action import (
     AgentThinkAction,
     AgentFinishAction,
 )
+from opendevin.agent import Agent
+from opendevin.controller import AgentController
+from opendevin.llm.llm import LLM
 from opendevin.observation import (
     Observation,
     UserMessageObservation
 )
@@ -38,6 +37,7 @@ ACTION_TYPE_TO_CLASS: Dict[str, Type[Action]] = {
 
 DEFAULT_WORKSPACE_DIR = os.getenv("WORKSPACE_DIR", os.path.join(os.getcwd(), "workspace"))
 
+MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4-0125-preview")
 
 def parse_event(data):
     if "action" not in data:
@@ -119,7 +119,7 @@ class Session:
         agent_cls = "LangchainsAgent"
         if start_event and "agent_cls" in start_event.args:
             agent_cls = start_event.args["agent_cls"]
-        model = "gpt-4-0125-preview"
+        model = MODEL_NAME
        if start_event and "model" in start_event.args:
             model = start_event.args["model"]
         if not os.path.exists(directory):
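
For reference, the `start` protocol documented in the README can also be exercised from Python instead of `websocat`. The snippet below is a minimal sketch, not part of this PR: it assumes the server is running on port 3000 as described above, uses the third-party `websockets` package (an assumption, not a project dependency), and passes an optional `"model"` argument that `session.py` otherwise defaults to `MODEL_NAME`.

```python
# Hypothetical client sketch for the OpenDevin WebSocket server (not in this PR).
# Assumes: server started with `uvicorn opendevin.server.listen:app --port 3000`
# and `pip install websockets` for the client library.
import asyncio
import json

import websockets


async def main():
    uri = "ws://127.0.0.1:3000/ws"
    async with websockets.connect(uri) as ws:
        # Mirrors the websocat example in the README: start a task.
        # "model" is optional; the server falls back to MODEL_NAME if omitted.
        await ws.send(json.dumps({
            "action": "start",
            "args": {
                "task": "write a bash script that prints hello",
                "model": "gpt-4-0125-preview",
            },
        }))
        # Print server events until the connection closes.
        while True:
            print(await ws.recv())


if __name__ == "__main__":
    asyncio.run(main())
```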