diff --git a/core/agents/code_monkey.py b/core/agents/code_monkey.py
index 0a267436..710da8dc 100644
--- a/core/agents/code_monkey.py
+++ b/core/agents/code_monkey.py
@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field
 
 from core.agents.base import BaseAgent
 from core.agents.convo import AgentConvo
 from core.agents.response import AgentResponse, ResponseType
-from core.config import DESCRIBE_FILES_AGENT_NAME
+from core.config import CODE_MONKEY_AGENT_NAME, DESCRIBE_FILES_AGENT_NAME
 from core.llm.parser import JSONParser, OptionalCodeBlockParser
 from core.log import get_logger
@@ -56,7 +56,7 @@ class CodeMonkey(BaseAgent):
         iterations = self.current_state.iterations
         user_feedback = None
         user_feedback_qa = None
-        llm = self.get_llm()
+        llm = self.get_llm(CODE_MONKEY_AGENT_NAME)
 
         if "task_review_feedback" in task and task["task_review_feedback"]:
             instructions = task.get("task_review_feedback")
diff --git a/core/agents/troubleshooter.py b/core/agents/troubleshooter.py
index f39199e0..6a3ebccc 100644
--- a/core/agents/troubleshooter.py
+++ b/core/agents/troubleshooter.py
@@ -7,7 +7,6 @@ from core.agents.base import BaseAgent
 from core.agents.convo import AgentConvo
 from core.agents.mixins import IterationPromptMixin
 from core.agents.response import AgentResponse
-from core.config import ROUTE_FILES_AGENT_NAME
 from core.db.models.file import File
 from core.db.models.project_state import IterationStatus, TaskStatus
 from core.llm.parser import JSONParser, OptionalCodeBlockParser
@@ -47,7 +46,9 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
         user_feedback_qa = self.current_state.current_iteration.get("user_feedback_qa")
         bug_hunting_cycles = self.current_state.current_iteration.get("bug_hunting_cycles")
 
-        llm_solution = await self.find_solution(user_feedback, user_feedback_qa=user_feedback_qa, bug_hunting_cycles=bug_hunting_cycles)
+        llm_solution = await self.find_solution(
+            user_feedback, user_feedback_qa=user_feedback_qa, bug_hunting_cycles=bug_hunting_cycles
+        )
 
         self.next_state.current_iteration["description"] = llm_solution
         self.next_state.current_iteration["status"] = IterationStatus.IMPLEMENT_SOLUTION
@@ -103,7 +104,6 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
 
             # this might be caused if we show the input field instead of buttons
             iteration_status = IterationStatus.FIND_SOLUTION
-
         self.next_state.iterations = self.current_state.iterations + [
             {
                 "id": uuid4().hex,
@@ -189,7 +189,7 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
 
     async def _get_route_files(self) -> list[File]:
         """Returns the list of file paths that have routes defined in them."""
-        llm = self.get_llm(ROUTE_FILES_AGENT_NAME)
+        llm = self.get_llm()
         convo = AgentConvo(self).template("get_route_files").require_schema(RouteFilePaths)
         file_list = await llm(convo, parser=JSONParser(RouteFilePaths))
         route_files: set[str] = set(file_list.files)
@@ -230,15 +230,13 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
         if run_command:
             await self.ui.send_run_command(run_command)
 
-        buttons = {
-            "continue": "Everything works",
-            "change": "I want to make a change",
-            "bug": "There is an issue"
-        }
+        buttons = {"continue": "Everything works", "change": "I want to make a change", "bug": "There is an issue"}
         if last_iteration:
             buttons["loop"] = "I'm stuck in a loop"
 
-        user_response = await self.ask_question(test_message, buttons=buttons, default="continue", buttons_only=True, hint=hint)
+        user_response = await self.ask_question(
+            test_message, buttons=buttons, default="continue", buttons_only=True, hint=hint
+        )
 
         if user_response.button == "continue" or user_response.cancelled:
             should_iterate = False
@@ -261,7 +259,9 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
             is_loop = True
 
         elif user_response.button == "change":
-            user_description = await self.ask_question("Please describe the change you want to make (one at the time please)")
+            user_description = await self.ask_question(
+                "Please describe the change you want to make (one at the time please)"
+            )
             change_description = user_description.text
 
         elif user_response.button == "bug":
diff --git a/core/config/__init__.py b/core/config/__init__.py
index 17f753dd..71ae6f35 100644
--- a/core/config/__init__.py
+++ b/core/config/__init__.py
@@ -34,8 +34,8 @@ IGNORE_SIZE_THRESHOLD = 50000  # 50K+ files are ignored by default
 
 # Agents with sane setup in the default configuration
 DEFAULT_AGENT_NAME = "default"
+CODE_MONKEY_AGENT_NAME = "CodeMonkey"
 DESCRIBE_FILES_AGENT_NAME = "CodeMonkey.describe_files"
-ROUTE_FILES_AGENT_NAME = "Troubleshooter.get_route_files"
 CHECK_LOGS_AGENT_NAME = "BugHunter.check_logs"
 
 # Endpoint for the external documentation
@@ -112,7 +112,7 @@ class AgentLLMConfig(_StrictModel):
     """
 
     provider: LLMProvider = LLMProvider.OPENAI
-    model: str = Field(description="Model to use", default="gpt-4-0125-preview")
+    model: str = Field(description="Model to use", default="gpt-4o-2024-05-13")
     temperature: float = Field(
         default=0.5,
         description="Temperature to use for sampling",
@@ -310,9 +310,9 @@ class Config(_StrictModel):
     agent: dict[str, AgentLLMConfig] = Field(
         default={
             DEFAULT_AGENT_NAME: AgentLLMConfig(),
+            CODE_MONKEY_AGENT_NAME: AgentLLMConfig(model="gpt-4-0125-preview", temperature=0.0),
             DESCRIBE_FILES_AGENT_NAME: AgentLLMConfig(model="gpt-3.5-turbo", temperature=0.0),
             CHECK_LOGS_AGENT_NAME: AgentLLMConfig(model="claude-3-5-sonnet-20240620", temperature=0.0),
-            ROUTE_FILES_AGENT_NAME: AgentLLMConfig(model="gpt-4o", temperature=0.0),
         }
     )
     prompt: PromptConfig = PromptConfig()
diff --git a/core/prompts/problem-solver/get_alternative_solutions.prompt b/core/prompts/problem-solver/get_alternative_solutions.prompt
index a0cbfc28..dbe41f2e 100644
--- a/core/prompts/problem-solver/get_alternative_solutions.prompt
+++ b/core/prompts/problem-solver/get_alternative_solutions.prompt
@@ -44,7 +44,7 @@ Then, upon implementing these changes, your colleague came back with the followi
 {% endif %}
 
 {% if user_input != '' %}
-Your colleague who is testing the app "{{ name }}" sent you this report now:
+Your colleague who is testing the app "{{ state.branch.project.name }}" sent you this report now:
 ```
 {{ user_input }}
 ```
diff --git a/core/prompts/troubleshooter/iteration.prompt b/core/prompts/troubleshooter/iteration.prompt
index 902409d8..8825992e 100644
--- a/core/prompts/troubleshooter/iteration.prompt
+++ b/core/prompts/troubleshooter/iteration.prompt
@@ -4,7 +4,8 @@ You are working on an app called "{{ state.branch.project.name }}" and you need
 
 {% if state.tasks and state.current_task %}
 Development process of this app was split into smaller tasks. Here is the list of all tasks:
-```{% for task in state.tasks %}
+```
+{% for task in state.tasks %}
 {{ loop.index }}. {{ task.description }}
 {% endfor %}
 ```
diff --git a/core/templates/registry.py b/core/templates/registry.py
index 1e8f432f..f52f89b8 100644
--- a/core/templates/registry.py
+++ b/core/templates/registry.py
@@ -2,7 +2,6 @@ from enum import Enum
 
 from core.log import get_logger
 
-from .javascript_react import JavascriptReactProjectTemplate
 from .node_express_mongoose import NodeExpressMongooseProjectTemplate
 from .react_express import ReactExpressProjectTemplate
 
@@ -12,13 +11,13 @@ log = get_logger(__name__)
 class ProjectTemplateEnum(str, Enum):
     """Choices of available project templates."""
 
-    JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
+    # JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
     NODE_EXPRESS_MONGOOSE = NodeExpressMongooseProjectTemplate.name
     REACT_EXPRESS = ReactExpressProjectTemplate.name
 
 
 PROJECT_TEMPLATES = {
-    JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
+    # JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
     NodeExpressMongooseProjectTemplate.name: NodeExpressMongooseProjectTemplate,
     ReactExpressProjectTemplate.name: ReactExpressProjectTemplate,
 }
diff --git a/example-config.json b/example-config.json
index 800845c7..5afefdcf 100644
--- a/example-config.json
+++ b/example-config.json
@@ -28,7 +28,7 @@
   "agent": {
     "default": {
       "provider": "openai",
-      "model": "gpt-4o",
+      "model": "gpt-4o-2024-05-13",
       "temperature": 0.5
     },
     "CodeMonkey": {
diff --git a/pyproject.toml b/pyproject.toml
index 6b6d1794..5b4c4f52 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gpt-pilot"
-version = "0.2.7"
+version = "0.2.10"
 description = "Build complete apps using AI agents"
 authors = ["Senko Rasic "]
 license = "FSL-1.1-MIT"
diff --git a/tests/agents/test_architect.py b/tests/agents/test_architect.py
index 10d6730e..96ba3a2e 100644
--- a/tests/agents/test_architect.py
+++ b/tests/agents/test_architect.py
@@ -19,7 +19,7 @@ async def test_run(agentcontext):
         side_effect=[
             TemplateSelection(
                 architecture="dummy arch",
-                template="javascript_react",
+                template="node_express_mongoose",
             ),
             Architecture(
                 system_dependencies=[
@@ -52,4 +52,4 @@
     assert sm.current_state.specification.architecture == "dummy arch"
     assert sm.current_state.specification.system_dependencies[0]["name"] == "docker"
     assert sm.current_state.specification.package_dependencies[0]["name"] == "express"
-    assert "javascript_react" in sm.current_state.specification.templates
+    assert "node_express_mongoose" in sm.current_state.specification.templates
diff --git a/tests/agents/test_tech_lead.py b/tests/agents/test_tech_lead.py
index 41343c04..892a9979 100644
--- a/tests/agents/test_tech_lead.py
+++ b/tests/agents/test_tech_lead.py
@@ -31,7 +31,7 @@ async def test_create_initial_epic(agentcontext):
 async def test_apply_project_template(agentcontext):
     sm, _, ui, _ = agentcontext
 
-    sm.current_state.specification.templates = {"javascript_react": {}}
+    sm.current_state.specification.templates = {"node_express_mongoose": {}}
     sm.current_state.epics = [{"name": "Initial Project"}]
     await sm.commit()
 
diff --git a/tests/config/test_config.py b/tests/config/test_config.py
index ce97e820..06b8692b 100644
--- a/tests/config/test_config.py
+++ b/tests/config/test_config.py
@@ -65,7 +65,7 @@ def test_builtin_defaults():
     config = ConfigLoader.from_json("{}")
 
     assert config.llm_for_agent().provider == LLMProvider.OPENAI
-    assert config.llm_for_agent().model == "gpt-4-0125-preview"
+    assert config.llm_for_agent().model == "gpt-4o-2024-05-13"
     assert config.llm_for_agent().base_url is None
     assert config.llm_for_agent().api_key is None
 
diff --git a/tests/templates/test_templates.py b/tests/templates/test_templates.py
index 42efa5cf..25d61741 100644
--- a/tests/templates/test_templates.py
+++ b/tests/templates/test_templates.py
@@ -55,6 +55,7 @@ async def test_render_react_express_nosql(mock_get_config, testmanager):
     assert "prisma/schema.prisma" not in files
 
 
+@pytest.mark.skip
 @pytest.mark.asyncio
 @patch("core.state.state_manager.get_config")
 async def test_render_javascript_react(mock_get_config, testmanager):