Mirror of https://github.com/Pythagora-io/gpt-pilot.git (synced 2026-01-09 21:27:53 -05:00)
merge main
@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field
 from core.agents.base import BaseAgent
 from core.agents.convo import AgentConvo
 from core.agents.response import AgentResponse, ResponseType
-from core.config import DESCRIBE_FILES_AGENT_NAME
+from core.config import CODE_MONKEY_AGENT_NAME, DESCRIBE_FILES_AGENT_NAME
 from core.llm.parser import JSONParser, OptionalCodeBlockParser
 from core.log import get_logger

@@ -56,7 +56,7 @@ class CodeMonkey(BaseAgent):
         iterations = self.current_state.iterations
         user_feedback = None
         user_feedback_qa = None
-        llm = self.get_llm()
+        llm = self.get_llm(CODE_MONKEY_AGENT_NAME)

         if "task_review_feedback" in task and task["task_review_feedback"]:
             instructions = task.get("task_review_feedback")
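The two hunks above teach CodeMonkey to request its own LLM configuration by name instead of the global default. A minimal sketch of how an optional agent name can select a model/temperature pair with a fallback to the default entry; the registry and helper below are illustrative, not gpt-pilot's actual internals (the model values are copied from the config hunks further down):

```python
import asyncio

# Registry shaped like the Config.agent defaults added later in this diff.
AGENT_LLMS = {
    "default": ("gpt-4o-2024-05-13", 0.5),
    "CodeMonkey": ("gpt-4-0125-preview", 0.0),
}


async def _fake_completion(model: str, temperature: float, prompt: str) -> str:
    # Stand-in for a real provider client.
    await asyncio.sleep(0)
    return f"[{model} @ T={temperature}] {prompt[:40]}"


class BaseAgent:
    """Sketch: get_llm() grows an optional agent-name parameter."""

    def get_llm(self, name: str = "default"):
        model, temperature = AGENT_LLMS.get(name, AGENT_LLMS["default"])

        async def llm(prompt: str) -> str:
            return await _fake_completion(model, temperature, prompt)

        return llm


agent = BaseAgent()
llm = agent.get_llm("CodeMonkey")  # as in the hunk above
print(asyncio.run(llm("Implement the requested change.")))
```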
@@ -7,7 +7,6 @@ from core.agents.base import BaseAgent
 from core.agents.convo import AgentConvo
 from core.agents.mixins import IterationPromptMixin
 from core.agents.response import AgentResponse
-from core.config import ROUTE_FILES_AGENT_NAME
 from core.db.models.file import File
 from core.db.models.project_state import IterationStatus, TaskStatus
 from core.llm.parser import JSONParser, OptionalCodeBlockParser
@@ -47,7 +46,9 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
         user_feedback_qa = self.current_state.current_iteration.get("user_feedback_qa")
         bug_hunting_cycles = self.current_state.current_iteration.get("bug_hunting_cycles")

-        llm_solution = await self.find_solution(user_feedback, user_feedback_qa=user_feedback_qa, bug_hunting_cycles=bug_hunting_cycles)
+        llm_solution = await self.find_solution(
+            user_feedback, user_feedback_qa=user_feedback_qa, bug_hunting_cycles=bug_hunting_cycles
+        )

         self.next_state.current_iteration["description"] = llm_solution
         self.next_state.current_iteration["status"] = IterationStatus.IMPLEMENT_SOLUTION
@@ -103,7 +104,6 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
             # this might be caused if we show the input field instead of buttons
             iteration_status = IterationStatus.FIND_SOLUTION

-
         self.next_state.iterations = self.current_state.iterations + [
             {
                 "id": uuid4().hex,
@@ -189,7 +189,7 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
     async def _get_route_files(self) -> list[File]:
         """Returns the list of file paths that have routes defined in them."""

-        llm = self.get_llm(ROUTE_FILES_AGENT_NAME)
+        llm = self.get_llm()
         convo = AgentConvo(self).template("get_route_files").require_schema(RouteFilePaths)
         file_list = await llm(convo, parser=JSONParser(RouteFilePaths))
         route_files: set[str] = set(file_list.files)
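In `_get_route_files`, the conversation requires a JSON schema (`require_schema(RouteFilePaths)`) and the reply is validated with `JSONParser`. The same validate-or-fail step, sketched with pydantic v2 directly; only the `files` field is grounded in the diff (it is read as `file_list.files`), the rest is an assumption:

```python
from pydantic import BaseModel, ValidationError


class RouteFilePaths(BaseModel):
    # Assumed shape: the diff only shows `file_list.files` being read.
    files: list[str]


def parse_llm_reply(raw: str) -> RouteFilePaths:
    """Validate the LLM's JSON reply against the schema, as JSONParser would."""
    try:
        return RouteFilePaths.model_validate_json(raw)  # pydantic v2 API
    except ValidationError as err:
        # A real parser could feed this error back to the LLM for a retry.
        raise ValueError(f"malformed route file list: {err}") from err


reply = '{"files": ["routes/index.js", "routes/users.js"]}'
route_files: set[str] = set(parse_llm_reply(reply).files)
print(sorted(route_files))
```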
@@ -230,15 +230,13 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
         if run_command:
             await self.ui.send_run_command(run_command)

-        buttons = {
-            "continue": "Everything works",
-            "change": "I want to make a change",
-            "bug": "There is an issue"
-        }
+        buttons = {"continue": "Everything works", "change": "I want to make a change", "bug": "There is an issue"}
         if last_iteration:
             buttons["loop"] = "I'm stuck in a loop"

-        user_response = await self.ask_question(test_message, buttons=buttons, default="continue", buttons_only=True, hint=hint)
+        user_response = await self.ask_question(
+            test_message, buttons=buttons, default="continue", buttons_only=True, hint=hint
+        )
         if user_response.button == "continue" or user_response.cancelled:
             should_iterate = False
@@ -261,7 +259,9 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
             is_loop = True

         elif user_response.button == "change":
-            user_description = await self.ask_question("Please describe the change you want to make (one at the time please)")
+            user_description = await self.ask_question(
+                "Please describe the change you want to make (one at the time please)"
+            )
             change_description = user_description.text

         elif user_response.button == "bug":
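The two hunks above implement the dispatch on the tester's button press. A self-contained restatement of that control flow as a pure function, using the button keys from the `buttons` dict; the returned action labels are illustrative:

```python
# Button keys taken from the buttons dict in the hunk above.
BUTTONS = {
    "continue": "Everything works",
    "change": "I want to make a change",
    "bug": "There is an issue",
    "loop": "I'm stuck in a loop",
}


def next_action(button: str, cancelled: bool = False) -> str:
    """Map the tester's button press to the troubleshooter's next step."""
    if button == "continue" or cancelled:
        return "done"             # should_iterate = False
    if button == "loop":
        return "escalate"         # is_loop = True
    if button == "change":
        return "describe_change"  # ask for a one-at-a-time change description
    if button == "bug":
        return "hunt_bug"         # start a bug-hunting iteration
    raise ValueError(f"unexpected button: {button!r}")


assert next_action("continue") == "done"
assert next_action("bug") == "hunt_bug"
```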
@@ -34,8 +34,8 @@ IGNORE_SIZE_THRESHOLD = 50000  # 50K+ files are ignored by default

 # Agents with sane setup in the default configuration
 DEFAULT_AGENT_NAME = "default"
+CODE_MONKEY_AGENT_NAME = "CodeMonkey"
 DESCRIBE_FILES_AGENT_NAME = "CodeMonkey.describe_files"
-ROUTE_FILES_AGENT_NAME = "Troubleshooter.get_route_files"
 CHECK_LOGS_AGENT_NAME = "BugHunter.check_logs"

 # Endpoint for the external documentation
@@ -112,7 +112,7 @@ class AgentLLMConfig(_StrictModel):
     """

     provider: LLMProvider = LLMProvider.OPENAI
-    model: str = Field(description="Model to use", default="gpt-4-0125-preview")
+    model: str = Field(description="Model to use", default="gpt-4o-2024-05-13")
     temperature: float = Field(
         default=0.5,
         description="Temperature to use for sampling",
@@ -310,9 +310,9 @@ class Config(_StrictModel):
     agent: dict[str, AgentLLMConfig] = Field(
         default={
             DEFAULT_AGENT_NAME: AgentLLMConfig(),
+            CODE_MONKEY_AGENT_NAME: AgentLLMConfig(model="gpt-4-0125-preview", temperature=0.0),
             DESCRIBE_FILES_AGENT_NAME: AgentLLMConfig(model="gpt-3.5-turbo", temperature=0.0),
             CHECK_LOGS_AGENT_NAME: AgentLLMConfig(model="claude-3-5-sonnet-20240620", temperature=0.0),
-            ROUTE_FILES_AGENT_NAME: AgentLLMConfig(model="gpt-4o", temperature=0.0),
         }
     )
     prompt: PromptConfig = PromptConfig()
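The `Config.agent` default above maps agent names to `AgentLLMConfig` overrides. A stripped-down, runnable model of that structure; `llm_for_agent` falling back to the `"default"` entry is an assumption, consistent with the no-argument calls in the tests at the end of this diff:

```python
from pydantic import BaseModel, Field


class AgentLLMConfig(BaseModel):
    provider: str = "openai"
    model: str = "gpt-4o-2024-05-13"
    temperature: float = 0.5


class Config(BaseModel):
    # Per-agent overrides; pydantic copies field defaults per instance,
    # so sharing a mutable dict default is safe here.
    agent: dict[str, AgentLLMConfig] = Field(
        default={
            "default": AgentLLMConfig(),
            "CodeMonkey": AgentLLMConfig(model="gpt-4-0125-preview", temperature=0.0),
        }
    )

    def llm_for_agent(self, name: str = "default") -> AgentLLMConfig:
        # Assumed fallback behaviour: unknown agents get the default entry.
        return self.agent.get(name, self.agent["default"])


config = Config()
assert config.llm_for_agent().model == "gpt-4o-2024-05-13"
assert config.llm_for_agent("CodeMonkey").temperature == 0.0
```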
@@ -44,7 +44,7 @@ Then, upon implementing these changes, your colleague came back with the following
 {% endif %}

 {% if user_input != '' %}
-Your colleague who is testing the app "{{ name }}" sent you this report now:
+Your colleague who is testing the app "{{ state.branch.project.name }}" sent you this report now:
 ```
 {{ user_input }}
 ```
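The prompt fix above swaps `{{ name }}`, which is not in the render context, for `{{ state.branch.project.name }}`, which is. A small jinja2 demonstration of the difference; the context shape is reconstructed from the template text, not taken from gpt-pilot's renderer:

```python
from types import SimpleNamespace

from jinja2 import Environment, StrictUndefined, UndefinedError

state = SimpleNamespace(branch=SimpleNamespace(project=SimpleNamespace(name="my-app")))
env = Environment(undefined=StrictUndefined)  # fail loudly on missing variables

fixed = env.from_string('Testing the app "{{ state.branch.project.name }}"')
print(fixed.render(state=state))  # Testing the app "my-app"

broken = env.from_string('Testing the app "{{ name }}"')
try:
    broken.render(state=state)
except UndefinedError as err:
    print("old template:", err)  # 'name' is undefined
```

With jinja2's default, non-strict `Undefined`, the old `{{ name }}` would have rendered as an empty string, silently producing a prompt with a blank app name.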
@@ -4,7 +4,8 @@ You are working on an app called "{{ state.branch.project.name }}" and you need

 {% if state.tasks and state.current_task %}
 Development process of this app was split into smaller tasks. Here is the list of all tasks:
-```{% for task in state.tasks %}
+```
+{% for task in state.tasks %}
 {{ loop.index }}. {{ task.description }}
 {% endfor %}
 ```
@@ -2,7 +2,6 @@ from enum import Enum

 from core.log import get_logger
-
 from .javascript_react import JavascriptReactProjectTemplate
 from .node_express_mongoose import NodeExpressMongooseProjectTemplate
 from .react_express import ReactExpressProjectTemplate

@@ -12,13 +11,13 @@ log = get_logger(__name__)
 class ProjectTemplateEnum(str, Enum):
     """Choices of available project templates."""

-    JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
+    # JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
     NODE_EXPRESS_MONGOOSE = NodeExpressMongooseProjectTemplate.name
     REACT_EXPRESS = ReactExpressProjectTemplate.name


 PROJECT_TEMPLATES = {
-    JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
+    # JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
     NodeExpressMongooseProjectTemplate.name: NodeExpressMongooseProjectTemplate,
     ReactExpressProjectTemplate.name: ReactExpressProjectTemplate,
 }
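These hunks disable the javascript_react template in both the user-facing enum and the class registry while keeping the two in sync. A self-contained miniature of the enum-plus-registry pattern, with the template classes reduced to their `name` attributes:

```python
from enum import Enum


class NodeExpressMongooseProjectTemplate:
    name = "node_express_mongoose"


class ReactExpressProjectTemplate:
    name = "react_express"


class ProjectTemplateEnum(str, Enum):
    """Choices of available project templates."""

    # JAVASCRIPT_REACT stays commented out, as in the hunk above.
    NODE_EXPRESS_MONGOOSE = NodeExpressMongooseProjectTemplate.name
    REACT_EXPRESS = ReactExpressProjectTemplate.name


PROJECT_TEMPLATES = {
    NodeExpressMongooseProjectTemplate.name: NodeExpressMongooseProjectTemplate,
    ReactExpressProjectTemplate.name: ReactExpressProjectTemplate,
}

# A str-valued enum member maps straight back to its template class.
choice = ProjectTemplateEnum.NODE_EXPRESS_MONGOOSE
template_cls = PROJECT_TEMPLATES[choice.value]
print(template_cls.__name__)
```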
@@ -28,7 +28,7 @@
   "agent": {
     "default": {
       "provider": "openai",
-      "model": "gpt-4o",
+      "model": "gpt-4o-2024-05-13",
       "temperature": 0.5
     },
     "CodeMonkey": {
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gpt-pilot"
-version = "0.2.7"
+version = "0.2.10"
 description = "Build complete apps using AI agents"
 authors = ["Senko Rasic <senko@pythagora.ai>"]
 license = "FSL-1.1-MIT"
@@ -19,7 +19,7 @@ async def test_run(agentcontext):
         side_effect=[
             TemplateSelection(
                 architecture="dummy arch",
-                template="javascript_react",
+                template="node_express_mongoose",
             ),
             Architecture(
                 system_dependencies=[

@@ -52,4 +52,4 @@ async def test_run(agentcontext):
     assert sm.current_state.specification.architecture == "dummy arch"
     assert sm.current_state.specification.system_dependencies[0]["name"] == "docker"
     assert sm.current_state.specification.package_dependencies[0]["name"] == "express"
-    assert "javascript_react" in sm.current_state.specification.templates
+    assert "node_express_mongoose" in sm.current_state.specification.templates
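The test drives the agent by queueing canned LLM replies through `side_effect`, so each awaited call pops the next item in order. The same stubbing technique in isolation with `unittest.mock.AsyncMock`; the payload types here are stand-ins for gpt-pilot's real ones:

```python
import asyncio
from dataclasses import dataclass
from unittest.mock import AsyncMock


@dataclass
class TemplateSelection:
    architecture: str
    template: str


llm = AsyncMock(
    side_effect=[
        TemplateSelection(architecture="dummy arch", template="node_express_mongoose"),
        {"system_dependencies": [{"name": "docker"}]},
    ]
)


async def main() -> None:
    first = await llm("pick a template")
    second = await llm("list dependencies")
    assert first.template == "node_express_mongoose"
    assert second["system_dependencies"][0]["name"] == "docker"


asyncio.run(main())
print("both stubbed calls returned in order")
```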
@@ -31,7 +31,7 @@ async def test_create_initial_epic(agentcontext):
 async def test_apply_project_template(agentcontext):
     sm, _, ui, _ = agentcontext

-    sm.current_state.specification.templates = {"javascript_react": {}}
+    sm.current_state.specification.templates = {"node_express_mongoose": {}}
     sm.current_state.epics = [{"name": "Initial Project"}]

     await sm.commit()
@@ -65,7 +65,7 @@ def test_builtin_defaults():
     config = ConfigLoader.from_json("{}")

     assert config.llm_for_agent().provider == LLMProvider.OPENAI
-    assert config.llm_for_agent().model == "gpt-4-0125-preview"
+    assert config.llm_for_agent().model == "gpt-4o-2024-05-13"
     assert config.llm_for_agent().base_url is None
     assert config.llm_for_agent().api_key is None
@@ -55,6 +55,7 @@ async def test_render_react_express_nosql(mock_get_config, testmanager):
     assert "prisma/schema.prisma" not in files


+@pytest.mark.skip
 @pytest.mark.asyncio
 @patch("core.state.state_manager.get_config")
 async def test_render_javascript_react(mock_get_config, testmanager):
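The last hunk parks the javascript_react rendering test with a bare `@pytest.mark.skip`, matching the template's removal from the registry. If the skip should be self-documenting in test reports, pytest accepts a reason; the reason string below is illustrative:

```python
import pytest


@pytest.mark.skip(reason="javascript_react template is disabled in PROJECT_TEMPLATES")
@pytest.mark.asyncio
async def test_render_javascript_react():
    ...
```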