merge main

This commit is contained in:
LeonOstrez
2024-07-29 11:19:01 +02:00
20 changed files with 194 additions and 39 deletions

View File

@@ -8,6 +8,7 @@ from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.mixins import RelevantFilesMixin
from core.agents.response import AgentResponse, ResponseType
from core.config import TASK_BREAKDOWN_AGENT_NAME
from core.db.models.project_state import IterationStatus, TaskStatus
from core.db.models.specification import Complexity
from core.llm.parser import JSONParser
@@ -216,7 +217,7 @@ class Developer(RelevantFilesMixin, BaseAgent):
current_task_index = self.current_state.tasks.index(current_task)
llm = self.get_llm()
llm = self.get_llm(TASK_BREAKDOWN_AGENT_NAME)
convo = AgentConvo(self).template(
"breakdown",
task=current_task,
@@ -234,6 +235,7 @@ class Developer(RelevantFilesMixin, BaseAgent):
}
self.next_state.flag_tasks_as_modified()
llm = self.get_llm()
await self.send_message("Breaking down the task into steps ...")
convo.assistant(response).template("parse_task").require_schema(TaskSteps)
response: TaskSteps = await llm(convo, parser=JSONParser(TaskSteps), temperature=0)

View File

@@ -111,7 +111,7 @@ class ErrorHandler(BaseAgent):
"description": llm_response,
"alternative_solutions": [],
"attempts": 1,
"status": IterationStatus.HUNTING_FOR_BUG,
"status": IterationStatus.IMPLEMENT_SOLUTION,
"bug_hunting_cycles": [],
}
]

View File

@@ -182,6 +182,8 @@ class Orchestrator(BaseAgent):
return Importer(self.state_manager, self.ui, prev_response=prev_response)
if prev_response.type == ResponseType.EXTERNAL_DOCS_REQUIRED:
return ExternalDocumentation(self.state_manager, self.ui, prev_response=prev_response)
if prev_response.type == ResponseType.UPDATE_SPECIFICATION:
return SpecWriter(self.state_manager, self.ui, prev_response=prev_response)
if not state.specification.description:
if state.files:
@@ -252,6 +254,9 @@ class Orchestrator(BaseAgent):
elif current_iteration_status == IterationStatus.PROBLEM_SOLVER:
# Call Problem Solver if the user said "I'm stuck in a loop"
return ProblemSolver(self.state_manager, self.ui)
elif current_iteration_status == IterationStatus.NEW_FEATURE_REQUESTED:
# Call Spec Writer to add the "change" requested by the user to project specification
return SpecWriter(self.state_manager, self.ui)
# We have just finished the task, call Troubleshooter to ask the user to review
return Troubleshooter(self.state_manager, self.ui)

View File

@@ -45,6 +45,9 @@ class ResponseType(str, Enum):
EXTERNAL_DOCS_REQUIRED = "external-docs-required"
"""We need to fetch external docs for a task."""
UPDATE_SPECIFICATION = "update-specification"
"""We need to update the project specification."""
class AgentResponse:
type: ResponseType = ResponseType.DONE
@@ -144,3 +147,13 @@ class AgentResponse:
@staticmethod
def external_docs_required(agent: "BaseAgent") -> "AgentResponse":
    """Signal that external documentation must be fetched before continuing the task."""
    return AgentResponse(type=ResponseType.EXTERNAL_DOCS_REQUIRED, agent=agent)
@staticmethod
def update_specification(agent: "BaseAgent", description: str) -> "AgentResponse":
    """Request that the project specification be updated with *description*."""
    payload = {
        "description": description,
    }
    return AgentResponse(type=ResponseType.UPDATE_SPECIFICATION, agent=agent, data=payload)

View File

@@ -1,7 +1,8 @@
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.agents.response import AgentResponse, ResponseType
from core.db.models import Complexity
from core.db.models.project_state import IterationStatus
from core.llm.parser import StringParser
from core.log import get_logger
from core.telemetry import telemetry
@@ -26,6 +27,15 @@ class SpecWriter(BaseAgent):
display_name = "Spec Writer"
async def run(self) -> AgentResponse:
    """Dispatch to the appropriate spec-writing flow.

    Order of precedence: a NEW_FEATURE_REQUESTED iteration (change requested
    during troubleshooting), then an UPDATE_SPECIFICATION response from a
    previous agent, then the initial-spec flow for a fresh project.
    """
    iteration = self.current_state.current_iteration
    if iteration is not None and iteration.get("status") == IterationStatus.NEW_FEATURE_REQUESTED:
        # Fold the user's requested change into the existing specification.
        return await self.update_spec(iteration_mode=True)
    if self.prev_response and self.prev_response.type == ResponseType.UPDATE_SPECIFICATION:
        # A previous agent asked us to merge a new feature description into the spec.
        return await self.update_spec(iteration_mode=False)
    # No pending update: build the initial specification from scratch.
    return await self.initialize_spec()
async def initialize_spec(self) -> AgentResponse:
response = await self.ask_question(
"Describe your app in as much detail as possible",
allow_empty=False,
@@ -67,6 +77,7 @@ class SpecWriter(BaseAgent):
spec = await self.review_spec(spec)
self.next_state.specification = self.current_state.specification.clone()
self.next_state.specification.original_description = spec
self.next_state.specification.description = spec
self.next_state.specification.complexity = complexity
telemetry.set("initial_prompt", spec)
@@ -75,6 +86,42 @@ class SpecWriter(BaseAgent):
self.next_state.action = SPEC_STEP_NAME
return AgentResponse.done(self)
async def update_spec(self, iteration_mode: bool) -> AgentResponse:
    """Merge a requested change or new feature into the project specification.

    :param iteration_mode: True when triggered from a troubleshooting
        iteration (user requested a change); False when triggered by a
        previous agent's UPDATE_SPECIFICATION response (new feature epic).
    :return: AgentResponse.done once the (possibly unchanged) spec is settled.
    """
    if iteration_mode:
        feature_description = self.current_state.current_iteration["user_feedback"]
    else:
        feature_description = self.prev_response.data["description"]

    await self.send_message(
        f"Making the following changes to project specification:\n\n{feature_description}\n\nUpdated project specification:"
    )
    llm = self.get_llm()
    convo = AgentConvo(self).template("add_new_feature", feature_description=feature_description)
    llm_response: str = await llm(convo, temperature=0, parser=StringParser())
    updated_spec = llm_response.strip()

    # Show the user a diff of the old vs. new spec and ask for approval.
    await self.ui.generate_diff(self.current_state.specification.description, updated_spec)
    user_response = await self.ask_question(
        "Do you accept these changes to the project specification?",
        buttons={"yes": "Yes", "no": "No"},
        default="yes",
        buttons_only=True,
    )
    await self.ui.close_diff()

    if user_response.button == "yes":
        self.next_state.specification = self.current_state.specification.clone()
        self.next_state.specification.description = updated_spec
        telemetry.set("updated_prompt", updated_spec)

    if iteration_mode:
        # Resume the iteration: next step is finding a solution for the change.
        self.next_state.current_iteration["status"] = IterationStatus.FIND_SOLUTION
        self.next_state.flag_iterations_as_modified()
    else:
        # FIX: the approval question above is buttons_only, so user_response.text
        # carries no free text; judge complexity from the feature description
        # itself (previously passed user_response.text). TODO confirm intent.
        complexity = await self.check_prompt_complexity(feature_description)
        self.next_state.current_epic["complexity"] = complexity

    return AgentResponse.done(self)
async def check_prompt_complexity(self, prompt: str) -> str:
await self.send_message("Checking the complexity of the prompt ...")
llm = self.get_llm()
@@ -160,6 +207,6 @@ class SpecWriter(BaseAgent):
llm = self.get_llm()
llm_response: str = await llm(convo, temperature=0)
additional_info = llm_response.strip()
if additional_info:
if additional_info and len(additional_info) > 6:
spec += "\nAdditional info/examples:\n" + additional_info
return spec

View File

@@ -47,7 +47,7 @@ class TaskReviewer(BaseAgent):
)
llm_response: str = await llm(convo, temperature=0.7)
if "done" in llm_response.strip().lower()[-7:]:
if "done" in llm_response.strip().lower()[-20:]:
return AgentResponse.done(self)
else:
return AgentResponse.task_review_feedback(self, llm_response)

View File

@@ -5,7 +5,6 @@ from pydantic import BaseModel, Field
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.db.models import Complexity
from core.db.models.project_state import TaskStatus
from core.llm.parser import JSONParser
from core.log import get_logger
@@ -107,7 +106,8 @@ class TechLead(BaseAgent):
if summaries:
spec = self.current_state.specification.clone()
spec.description += "\n\n" + "\n\n".join(summaries)
spec.template_summary = "\n\n".join(summaries)
self.next_state.specification = spec
async def ask_for_new_feature(self) -> AgentResponse:
@@ -135,12 +135,12 @@ class TechLead(BaseAgent):
"description": response.text,
"summary": None,
"completed": False,
"complexity": Complexity.HARD,
"complexity": None, # Determined and defined in SpecWriter
}
]
# Orchestrator will rerun us to break down the new feature epic
self.next_state.action = f"Start of feature #{len(self.current_state.epics)}"
return AgentResponse.done(self)
return AgentResponse.update_specification(self, response.text)
async def plan_epic(self, epic) -> AgentResponse:
log.debug(f"Planning tasks for the epic: {epic['name']}")

View File

@@ -87,7 +87,7 @@ class Troubleshooter(IterationPromptMixin, RelevantFilesMixin, BaseAgent):
return await self.complete_task()
user_feedback = bug_report or change_description
user_feedback_qa = await self.generate_bug_report(run_command, user_instructions, user_feedback)
user_feedback_qa = None # await self.generate_bug_report(run_command, user_instructions, user_feedback)
if is_loop:
if last_iteration["alternative_solutions"]:
@@ -103,14 +103,14 @@ class Troubleshooter(IterationPromptMixin, RelevantFilesMixin, BaseAgent):
# should be - elif change_description is not None: - but to prevent bugs with the extension
# this might be caused if we show the input field instead of buttons
await self.get_relevant_files(user_feedback)
iteration_status = IterationStatus.FIND_SOLUTION
iteration_status = IterationStatus.NEW_FEATURE_REQUESTED
self.next_state.iterations = self.current_state.iterations + [
{
"id": uuid4().hex,
"user_feedback": user_feedback,
"user_feedback_qa": user_feedback_qa,
"description": change_description,
"description": None,
"alternative_solutions": [],
# FIXME - this is incorrect if this is a new problem; otherwise we could
# just count the iterations
@@ -224,7 +224,7 @@ class Troubleshooter(IterationPromptMixin, RelevantFilesMixin, BaseAgent):
is_loop = False
should_iterate = True
test_message = "Can you check if the app works please?"
test_message = "Please check if the app is working"
if user_instructions:
hint = " Here is a description of what should be working:\n\n" + user_instructions
@@ -260,13 +260,11 @@ class Troubleshooter(IterationPromptMixin, RelevantFilesMixin, BaseAgent):
is_loop = True
elif user_response.button == "change":
user_description = await self.ask_question(
"Please describe the change you want to make (one at the time please)"
)
user_description = await self.ask_question("Please describe the change you want to make (one at a time)")
change_description = user_description.text
elif user_response.button == "bug":
user_description = await self.ask_question("Please describe the issue you found (one at the time please)")
user_description = await self.ask_question("Please describe the issue you found (one at a time)")
bug_report = user_description.text
return should_iterate, is_loop, bug_report, change_description

View File

@@ -37,6 +37,7 @@ DEFAULT_AGENT_NAME = "default"
CODE_MONKEY_AGENT_NAME = "CodeMonkey"
DESCRIBE_FILES_AGENT_NAME = "CodeMonkey.describe_files"
CHECK_LOGS_AGENT_NAME = "BugHunter.check_logs"
TASK_BREAKDOWN_AGENT_NAME = "Developer.breakdown_current_task"
# Endpoint for the external documentation
EXTERNAL_DOCUMENTATION_API = "http://docs-pythagora-io-439719575.us-east-1.elb.amazonaws.com"
@@ -317,7 +318,8 @@ class Config(_StrictModel):
DEFAULT_AGENT_NAME: AgentLLMConfig(),
CODE_MONKEY_AGENT_NAME: AgentLLMConfig(model="gpt-4-0125-preview", temperature=0.0),
DESCRIBE_FILES_AGENT_NAME: AgentLLMConfig(model="gpt-3.5-turbo", temperature=0.0),
CHECK_LOGS_AGENT_NAME: AgentLLMConfig(model="claude-3-5-sonnet-20240620", temperature=0.0),
CHECK_LOGS_AGENT_NAME: AgentLLMConfig(model="claude-3-5-sonnet-20240620", temperature=0.5),
TASK_BREAKDOWN_AGENT_NAME: AgentLLMConfig(model="claude-3-5-sonnet-20240620", temperature=0.5),
}
)
prompt: PromptConfig = PromptConfig()

View File

@@ -0,0 +1,36 @@
"""Add original description and template summary fields to specifications
Revision ID: c8905d4ce784
Revises: 08d71952ec2f
Create Date: 2024-07-25 19:24:23.808237
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c8905d4ce784"
down_revision: Union[str, None] = "08d71952ec2f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable text columns ``original_description`` and ``template_summary``
    to the ``specifications`` table (batch mode for SQLite compatibility)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("specifications", schema=None) as batch_op:
        batch_op.add_column(sa.Column("original_description", sa.String(), nullable=True))
        batch_op.add_column(sa.Column("template_summary", sa.String(), nullable=True))

    # ### end Alembic commands ###


def downgrade() -> None:
    """Revert :func:`upgrade` by dropping the two added columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("specifications", schema=None) as batch_op:
        batch_op.drop_column("template_summary")
        batch_op.drop_column("original_description")

    # ### end Alembic commands ###

View File

@@ -41,6 +41,7 @@ class IterationStatus:
IMPLEMENT_SOLUTION = "implement_solution"
FIND_SOLUTION = "find_solution"
PROBLEM_SOLVER = "problem_solver"
NEW_FEATURE_REQUESTED = "new_feature_requested"
DONE = "done"

View File

@@ -26,7 +26,9 @@ class Specification(Base):
id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
# Attributes
original_description: Mapped[Optional[str]] = mapped_column()
description: Mapped[str] = mapped_column(default="")
template_summary: Mapped[Optional[str]] = mapped_column()
architecture: Mapped[str] = mapped_column(default="")
system_dependencies: Mapped[list[dict]] = mapped_column(default=list)
package_dependencies: Mapped[list[dict]] = mapped_column(default=list)
@@ -43,7 +45,9 @@ class Specification(Base):
Clone the specification.
"""
clone = Specification(
original_description=self.original_description,
description=self.description,
template_summary=self.template_summary,
architecture=self.architecture,
system_dependencies=self.system_dependencies,
package_dependencies=self.package_dependencies,

View File

@@ -15,6 +15,7 @@ log = get_logger(__name__)
# Maximum number of tokens supported by Anthropic Claude 3
MAX_TOKENS = 4096
MAX_TOKENS_SONNET = 8192
class AnthropicClient(BaseLLMClient):
@@ -72,6 +73,11 @@ class AnthropicClient(BaseLLMClient):
"messages": messages,
"temperature": self.config.temperature if temperature is None else temperature,
}
if "sonnet" in self.config.model:
completion_kwargs["extra_headers"] = {"anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15"}
completion_kwargs["max_tokens"] = MAX_TOKENS_SONNET
if json_mode:
completion_kwargs["response_format"] = {"type": "json_object"}

View File

@@ -4,21 +4,6 @@ You are working on an app called "{{ state.branch.project.name }}" and you need
{% include "partials/features_list.prompt" %}
{% include "partials/files_list.prompt" %}
We've broken the development of this {% if state.epics|length > 1 %}feature{% else %}app{% endif %} down to these tasks:
```
{% for task in state.tasks %}
{{ loop.index }}. {{ task.description }}{% if task.get("status") == "done" %} (completed){% endif %}
{% endfor %}
```
You are currently working on task #{{ current_task_index + 1 }} with the following description:
```
{{ task.description }}
```
{% if current_task_index != 0 %}All previous tasks are finished and you don't have to work on them.{% endif %}
Now, tell me all the code that needs to be written to implement ONLY this task and have it fully working and all commands that need to be run to implement this task.
{% include "partials/doc_snippets.prompt" %}
{%- if state.epics|length == 1 %}
@@ -35,3 +20,19 @@ DO NOT specify commands to create any folders or files, they will be created aut
{% include "partials/file_size_limit.prompt" %}
Never use the port 5000 to run the app, it's reserved.
--IMPLEMENTATION INSTRUCTIONS--
We've broken the development of this {% if state.epics|length > 1 %}feature{% else %}app{% endif %} down to these tasks:
```
{% for task in state.tasks %}
{{ loop.index }}. {{ task.description }}{% if task.get("status") == "done" %} (completed){% endif %}
{% endfor %}
```
You are currently working on task #{{ current_task_index + 1 }} with the following description:
```
{{ task.description }}
```
{% if current_task_index != 0 %}All previous tasks are finished and you don't have to work on them.{% endif %}
Now, tell me all the code that needs to be written to implement ONLY this task and have it fully working and all commands that need to be run to implement this task.

View File

@@ -0,0 +1,19 @@
Your team has taken the client brief and turned it into a project specification.
Afterwards the client added a description for a new feature to be added to the project specification.
Your job is to update the project specification so that it contains the new feature information but does not lack any of the information from the original project specification.
This might include:
* details on how the app should work
* information which 3rd party packages or APIs to use or avoid
* concrete examples of API requests/responses, library usage, or other external documentation
Here is the original project specification:
{{ state.specification.description }}
Here is the new feature description:
---FEATURE-DESCRIPTION-START---
{{ feature_description }}
---FEATURE-DESCRIPTION-END---
In your response, output only the new updated project specification, without any additional messages to the user.
If there is no feature description, output only the original project specification.

View File

@@ -82,6 +82,8 @@ class Telemetry:
"model": config.agent["default"].model,
# Initial prompt
"initial_prompt": None,
# Updated prompt
"updated_prompt": None,
# App complexity
"is_complex_app": None,
# Optional template used for the project

View File

@@ -2,8 +2,10 @@ from enum import Enum
from core.log import get_logger
from .javascript_react import JavascriptReactProjectTemplate
from .node_express_mongoose import NodeExpressMongooseProjectTemplate
from .react_express import ReactExpressProjectTemplate
# from .react_express import ReactExpressProjectTemplate
log = get_logger(__name__)
@@ -11,13 +13,13 @@ log = get_logger(__name__)
class ProjectTemplateEnum(str, Enum):
"""Choices of available project templates."""
# JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
JAVASCRIPT_REACT = JavascriptReactProjectTemplate.name
NODE_EXPRESS_MONGOOSE = NodeExpressMongooseProjectTemplate.name
REACT_EXPRESS = ReactExpressProjectTemplate.name
# REACT_EXPRESS = ReactExpressProjectTemplate.name
PROJECT_TEMPLATES = {
# JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
JavascriptReactProjectTemplate.name: JavascriptReactProjectTemplate,
NodeExpressMongooseProjectTemplate.name: NodeExpressMongooseProjectTemplate,
ReactExpressProjectTemplate.name: ReactExpressProjectTemplate,
# ReactExpressProjectTemplate.name: ReactExpressProjectTemplate,
}

View File

@@ -42,6 +42,8 @@ class MessageType(str, Enum):
IMPORT_PROJECT = "importProject"
APP_FINISHED = "appFinished"
FEATURE_FINISHED = "featureFinished"
GENERATE_DIFF = "generateDiff"
CLOSE_DIFF = "closeDiff"
class Message(BaseModel):
@@ -356,6 +358,19 @@ class IPCClientUI(UIBase):
content=stats,
)
async def generate_diff(self, file_old: str, file_new: str):
    """Ask the client UI to display a diff between two versions of a file."""
    payload = {
        "file_old": file_old,
        "file_new": file_new,
    }
    await self._send(MessageType.GENERATE_DIFF, content=payload)
async def close_diff(self):
    """Tell the client UI to close the currently displayed diff view."""
    log.debug("Sending signal to close the generated diff file")
    await self._send(MessageType.CLOSE_DIFF)
async def loading_finished(self):
log.debug("Sending project loading finished signal to the extension")
await self._send(MessageType.LOADING_FINISHED)

View File

@@ -56,7 +56,7 @@ async def test_ask_for_feature(agentcontext):
tl = TechLead(sm, ui)
response = await tl.run()
assert response.type == ResponseType.DONE
assert response.type == ResponseType.UPDATE_SPECIFICATION
await sm.commit()

View File

@@ -6,6 +6,7 @@ from core.state.state_manager import StateManager
from core.templates.registry import PROJECT_TEMPLATES
@pytest.mark.skip
@pytest.mark.asyncio
@patch("core.state.state_manager.get_config")
async def test_render_react_express_sql(mock_get_config, testmanager):
@@ -30,6 +31,7 @@ async def test_render_react_express_sql(mock_get_config, testmanager):
assert "api/models/user.js" not in files
@pytest.mark.skip
@pytest.mark.asyncio
@patch("core.state.state_manager.get_config")
async def test_render_react_express_nosql(mock_get_config, testmanager):