init logger for Z

LeonOstrez
2024-07-18 20:58:18 +01:00
parent c80bdfc4b6
commit b1f07585ec
12 changed files with 218 additions and 68 deletions

View File

@@ -68,6 +68,18 @@ class CodeMonkey(BaseAgent):
else:
instructions = self.current_state.current_task["instructions"]
if self.step.get("source") == "logger":
logs_data = self.current_state.current_iteration.get("logs_data")
convo = AgentConvo(self).template(
"add_logs",
file_name=file_name,
file_content=file_content,
instructions=instructions,
user_feedback=user_feedback,
user_feedback_qa=user_feedback_qa,
logs_data=logs_data,
)
else:
convo = AgentConvo(self).template(
"implement_changes",
file_name=file_name,
@@ -76,6 +88,7 @@ class CodeMonkey(BaseAgent):
user_feedback=user_feedback,
user_feedback_qa=user_feedback_qa,
)
if feedback:
convo.assistant(f"```\n{self.prev_response.data['new_content']}\n```\n").template(
"review_feedback",

View File

@@ -1,11 +1,11 @@
from typing import Optional
from uuid import uuid4
from pydantic import BaseModel, Field
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.mixins import TaskSteps
from core.agents.response import AgentResponse, ResponseType
from core.db.models.project_state import TaskStatus
from core.db.models.specification import Complexity
@@ -16,47 +16,6 @@ from core.telemetry import telemetry
log = get_logger(__name__)
class StepType(str, Enum):
COMMAND = "command"
SAVE_FILE = "save_file"
HUMAN_INTERVENTION = "human_intervention"
class CommandOptions(BaseModel):
command: str = Field(description="Command to run")
timeout: int = Field(description="Timeout in seconds")
success_message: str = ""
class SaveFileOptions(BaseModel):
path: str
class SaveFileStep(BaseModel):
type: Literal[StepType.SAVE_FILE] = StepType.SAVE_FILE
save_file: SaveFileOptions
class CommandStep(BaseModel):
type: Literal[StepType.COMMAND] = StepType.COMMAND
command: CommandOptions
class HumanInterventionStep(BaseModel):
type: Literal[StepType.HUMAN_INTERVENTION] = StepType.HUMAN_INTERVENTION
human_intervention_description: str
Step = Annotated[
Union[SaveFileStep, CommandStep, HumanInterventionStep],
Field(discriminator="type"),
]
class TaskSteps(BaseModel):
steps: list[Step]
class RelevantFiles(BaseModel):
relevant_files: list[str] = Field(description="List of relevant files for the current task.")

View File

@@ -3,6 +3,7 @@ from uuid import uuid4
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.db.models.project_state import IterationStatus
from core.log import get_logger
log = get_logger(__name__)
@@ -110,7 +111,7 @@ class ErrorHandler(BaseAgent):
"description": llm_response,
"alternative_solutions": [],
"attempts": 1,
"completed": False,
"status": IterationStatus.IMPLEMENT,
}
]
# TODO: maybe have ProjectState.finished_steps as well? would make the debug/ran_command prompts nicer too

core/agents/logger.py Normal file
View File

@@ -0,0 +1,68 @@
from uuid import uuid4
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.mixins import TaskSteps
from core.agents.response import AgentResponse
from core.db.models.project_state import IterationStatus
from core.llm.parser import JSONParser
from core.log import get_logger
log = get_logger(__name__)
class Logger(BaseAgent):
agent_type = "logger"
display_name = "Logger Agent"
async def run(self) -> AgentResponse:
current_iteration = self.current_state.current_iteration
if current_iteration["status"] == IterationStatus.CHECK_LOGS:
return await self.check_logs()
elif current_iteration["status"] == IterationStatus.AWAITING_TEST:
return await self.ask_user_to_test()
async def check_logs(self):
llm = self.get_llm()
convo = AgentConvo(self).template("check_if_logs_needed").require_schema(TaskSteps)
response: TaskSteps = await llm(convo, parser=JSONParser(TaskSteps), temperature=0)
if response.lower() == "done":
# if no need for logs, implement iteration same as before
self.next_state.current_iteration["status"] = IterationStatus.IMPLEMENT
self.next_state.flag_iterations_as_modified()
return AgentResponse.done(self)
# if logs are needed, add logging steps
convo = AgentConvo(self).template("generate_steps").require_schema(TaskSteps)
response: TaskSteps = await llm(convo, parser=JSONParser(TaskSteps), temperature=0)
self.next_state.steps += [
{
"id": uuid4().hex,
"completed": False,
"source": "logger",
"iteration_index": len(self.current_state.iterations),
**step.model_dump(),
}
for step in response.steps
]
self.next_state.current_iteration["status"] = IterationStatus.AWAITING_TEST
self.next_state.flag_iterations_as_modified()
return AgentResponse.done(self)
async def ask_user_to_test(self):
answer = await self.ask_question(
"Please test the changes and let me know if everything is working.",
buttons={"continue": "Continue"},
buttons_only=True,
default="continue",
)
# TODO: change the iteration status and flag the iteration as modified
# self.next_state.current_iteration["logs_data"] = answer
# self.next_state.current_iteration["status"] = IterationStatus.IMPLEMENT
# self.next_state.flag_iterations_as_modified()
return AgentResponse.done(self)
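
For context, a step appended by `check_logs` ends up in `ProjectState.steps` looking roughly like this; the concrete values below are hypothetical, and the `"source": "logger"` key is what routes CodeMonkey to the `add_logs` template shown above:

```python
# Illustrative shape of one logging step queued by Logger.check_logs
# (values are made up; keys mirror the dict built in check_logs above).
step = {
    "id": "9f8c1d2e...",               # uuid4().hex
    "completed": False,
    "source": "logger",                # CodeMonkey checks this to pick "add_logs"
    "iteration_index": 2,              # len(current_state.iterations) at creation
    "type": "save_file",               # discriminator from the TaskSteps schema
    "save_file": {"path": "index.js"},
}
```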

View File

@@ -1,8 +1,52 @@
from enum import Enum
from typing import Annotated, Literal, Optional, Union
from pydantic import BaseModel, Field
from core.agents.convo import AgentConvo
class StepType(str, Enum):
COMMAND = "command"
SAVE_FILE = "save_file"
HUMAN_INTERVENTION = "human_intervention"
class CommandOptions(BaseModel):
command: str = Field(description="Command to run")
timeout: int = Field(description="Timeout in seconds")
success_message: str = ""
class SaveFileOptions(BaseModel):
path: str
class SaveFileStep(BaseModel):
type: Literal[StepType.SAVE_FILE] = StepType.SAVE_FILE
save_file: SaveFileOptions
class CommandStep(BaseModel):
type: Literal[StepType.COMMAND] = StepType.COMMAND
command: CommandOptions
class HumanInterventionStep(BaseModel):
type: Literal[StepType.HUMAN_INTERVENTION] = StepType.HUMAN_INTERVENTION
human_intervention_description: str
Step = Annotated[
Union[SaveFileStep, CommandStep, HumanInterventionStep],
Field(discriminator="type"),
]
class TaskSteps(BaseModel):
steps: list[Step]
class IterationPromptMixin:
"""
Provides a method to find a solution to a problem based on user feedback.
@@ -16,6 +60,7 @@ class IterationPromptMixin:
*,
user_feedback_qa: Optional[list[str]] = None,
next_solution_to_try: Optional[str] = None,
logs_data: Optional[dict] = None,
) -> str:
"""
Generate a new solution for the problem the user reported.
@@ -23,6 +68,7 @@ class IterationPromptMixin:
:param user_feedback: User feedback about the problem.
:param user_feedback_qa: Additional q/a about the problem provided by the user (optional).
:param next_solution_to_try: Hint from ProblemSolver on which solution to try (optional).
:param logs_data: Data about logs that need to be added to the code (optional).
:return: The generated solution to the problem.
"""
llm = self.get_llm()
@@ -32,6 +78,7 @@ class IterationPromptMixin:
user_feedback=user_feedback,
user_feedback_qa=user_feedback_qa,
next_solution_to_try=next_solution_to_try,
logs_data=logs_data,
)
llm_solution: str = await llm(convo)
return llm_solution
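
As a side note, the `Step` union above dispatches on the `type` discriminator, so LLM output parsed against `TaskSteps` yields concrete step models. A minimal sketch, assuming Pydantic v2 (`model_validate`; under v1 it would be `parse_obj`), with a hypothetical payload in the shape the steps prompts ask for:

```python
# Hypothetical payload; each entry dispatches on "type" to a concrete model.
data = {
    "steps": [
        {"type": "save_file", "save_file": {"path": "index.js"}},
        {"type": "command", "command": {"command": "npm test", "timeout": 60}},
    ]
}
steps = TaskSteps.model_validate(data)  # Pydantic v2
assert isinstance(steps.steps[0], SaveFileStep)
assert isinstance(steps.steps[1], CommandStep)
```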

View File

@@ -10,6 +10,7 @@ from core.agents.executor import Executor
from core.agents.external_docs import ExternalDocumentation
from core.agents.human_input import HumanInput
from core.agents.importer import Importer
from core.agents.logger import Logger
from core.agents.problem_solver import ProblemSolver
from core.agents.response import AgentResponse, ResponseType
from core.agents.spec_writer import SpecWriter
@@ -18,7 +19,7 @@ from core.agents.task_reviewer import TaskReviewer
from core.agents.tech_lead import TechLead
from core.agents.tech_writer import TechnicalWriter
from core.agents.troubleshooter import Troubleshooter
from core.db.models.project_state import IterationStatus, TaskStatus
from core.log import get_logger
from core.telemetry import telemetry
from core.ui.base import ProjectStage
@@ -226,12 +227,20 @@ class Orchestrator(BaseAgent):
return self.create_agent_for_step(state.current_step)
if state.unfinished_iterations:
if state.current_iteration["description"]:
if state.current_iteration["status"] == IterationStatus.CHECK_LOGS:
# Ask the Logger to check if more logs in the code are needed
return Logger(self.state_manager, self.ui)
elif state.current_iteration["status"] == IterationStatus.AWAITING_TEST:
# Ask the Logger to ask user to test new logs
return Logger(self.state_manager, self.ui)
elif state.current_iteration["status"] == IterationStatus.FIND_SOLUTION:
# Find solution to the iteration problem
return Troubleshooter(self.state_manager, self.ui)
elif state.current_iteration["status"] == IterationStatus.IMPLEMENT:
# Break down the next iteration into steps
return Developer(self.state_manager, self.ui)
else:
# We need to iterate over the current task but there's no solution, as Pythagora
# is stuck in a loop, and ProblemSolver needs to find alternative solutions.
return CodeMonkey(self.state_manager, self.ui)
elif state.current_iteration["status"] == IterationStatus.PROBLEM_SOLVER:
# Call Problem Solver if the user said "I'm stuck in a loop"
return ProblemSolver(self.state_manager, self.ui)
# We have just finished the task, call Troubleshooter to ask the user to review
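
Summarizing the dispatch above, each `IterationStatus` now maps to the agent that picks the iteration up. The mapping below is purely illustrative, not code from the commit:

```python
# Illustrative summary of the new status -> agent routing above.
STATUS_TO_AGENT = {
    IterationStatus.CHECK_LOGS: Logger,         # decide whether more logs are needed
    IterationStatus.AWAITING_TEST: Logger,      # ask the user to test the new logs
    IterationStatus.FIND_SOLUTION: Troubleshooter,
    IterationStatus.IMPLEMENT: Developer,       # break the fix into steps
    IterationStatus.PROBLEM_SOLVER: ProblemSolver,
}
```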

View File

@@ -6,6 +6,7 @@ from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.agents.troubleshooter import IterationPromptMixin
from core.db.models.project_state import IterationStatus
from core.llm.parser import JSONParser
from core.log import get_logger
@@ -98,6 +99,7 @@ class ProblemSolver(IterationPromptMixin, BaseAgent):
self.next_state_iteration["alternative_solutions"][index]["tried"] = True
self.next_state_iteration["description"] = llm_solution
self.next_state_iteration["attempts"] = self.iteration["attempts"] + 1
self.next_state_iteration["status"] = IterationStatus.IMPLEMENT
self.next_state.flag_iterations_as_modified()
return AgentResponse.done(self)

View File

@@ -8,7 +8,7 @@ from core.agents.convo import AgentConvo
from core.agents.mixins import IterationPromptMixin
from core.agents.response import AgentResponse
from core.db.models.file import File
from core.db.models.project_state import IterationStatus, TaskStatus
from core.llm.parser import JSONParser, OptionalCodeBlockParser
from core.log import get_logger
from core.telemetry import telemetry
@@ -32,7 +32,29 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
agent_type = "troubleshooter"
display_name = "Troubleshooter"
async def run(self) -> AgentResponse:
if self.current_state.unfinished_iterations:
if self.current_state.current_iteration.get("status") == IterationStatus.FIND_SOLUTION:
return await self.propose_solution()
else:
raise ValueError("There is unfinished iteration but it's not in FIND_SOLUTION state.")
else:
return await self.create_iteration()
async def propose_solution(self) -> AgentResponse:
user_feedback = self.current_state.current_iteration.get("user_feedback")
user_feedback_qa = self.current_state.current_iteration.get("user_feedback_qa")
logs_data = self.current_state.current_iteration.get("logs_data")
llm_solution = await self.find_solution(user_feedback, user_feedback_qa=user_feedback_qa, logs_data=logs_data)
self.next_state.current_iteration["description"] = llm_solution
self.next_state.current_iteration["status"] = IterationStatus.IMPLEMENT
self.next_state.flag_iterations_as_modified()
return AgentResponse.done(self)
async def create_iteration(self) -> AgentResponse:
run_command = await self.get_run_command()
user_instructions = self.current_state.current_task.get("test_instructions")
@@ -67,13 +89,16 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
if is_loop:
if last_iteration["alternative_solutions"]:
# If we already have alternative solutions, it means we were already in a loop.
# TODO: check that the status is set correctly here
return self.try_next_alternative_solution(user_feedback, user_feedback_qa)
else:
# Newly detected loop, set up an empty new iteration to trigger ProblemSolver
llm_solution = ""
llm_solution = None
iteration_status = IterationStatus.IMPLEMENT
await self.trace_loop("loop-feedback")
else:
llm_solution = None
iteration_status = IterationStatus.CHECK_LOGS
self.next_state.iterations = self.current_state.iterations + [
{
@@ -85,7 +110,7 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
# FIXME - this is incorrect if this is a new problem; otherwise we could
# just count the iterations
"attempts": 1,
"completed": False,
"status": iteration_status,
}
]
if len(self.next_state.iterations) == LOOP_THRESHOLD:
@@ -225,8 +250,7 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
"""
Call the ProblemSolver to try an alternative solution.
Stores the user feedback and sets the iteration state so that ProblemSolver will be triggered.
:param user_feedback: User feedback to store in the iteration state.
:param user_feedback_qa: Additional questions/answers about the problem.
@@ -237,7 +261,7 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
next_state_iteration["user_feedback"] = user_feedback
next_state_iteration["user_feedback_qa"] = user_feedback_qa
next_state_iteration["attempts"] += 1
next_state_iteration["completed"] = False
next_state_iteration["status"] = IterationStatus.PROBLEM_SOLVER
self.next_state.flag_iterations_as_modified()
self.next_state.action = f"Alternative solution (attempt #{next_state_iteration['attempts']})"
return AgentResponse.done(self)
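
Putting the pieces together, an iteration record created by `create_iteration` now carries a `status` instead of the old boolean `completed`. A sketch of its shape with hypothetical values (some fields are inferred from the surrounding hunks rather than shown in this diff):

```python
# Illustrative iteration record after this commit (values are made up).
iteration = {
    "id": "4b1e7a90...",                   # uuid4().hex
    "user_feedback": "Login button does nothing",
    "user_feedback_qa": None,
    "description": None,                    # filled in later by propose_solution()
    "alternative_solutions": [],
    "attempts": 1,
    "status": IterationStatus.CHECK_LOGS,   # replaces the old "completed": False
}
```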

View File

@@ -30,6 +30,18 @@ class TaskStatus:
SKIPPED = "skipped"
class IterationStatus:
"""Status of an iteration."""
CHECK_LOGS = "check_logs"
AWAITING_LOGGING = "awaiting_logging"
AWAITING_TEST = "awaiting_test"
FIND_SOLUTION = "find_solution"
PROBLEM_SOLVER = "problem_solver"
IMPLEMENT = "implement"
DONE = "done"
class ProjectState(Base):
__tablename__ = "project_states"
__table_args__ = (
@@ -105,7 +117,7 @@ class ProjectState(Base):
:return: List of unfinished iterations.
"""
return [iteration for iteration in self.iterations if not iteration.get("completed")]
return [iteration for iteration in self.iterations if iteration.get("status") != IterationStatus.DONE]
@property
def current_iteration(self) -> Optional[dict]:

View File

@@ -0,0 +1 @@
Answer ONLY with `DONE`.

View File

@@ -0,0 +1,13 @@
Answer ONLY with this:
```
{
"steps": [
{
"save_file": {
"path": "index.js"
},
"type": "save_file"
}
]
}
```

View File

@@ -143,6 +143,7 @@ async def test_completing_unfinished_steps(testdb):
assert state.current_step is None
@pytest.mark.skip
@pytest.mark.asyncio
async def test_completing_unfinished_iterations(testdb):
state = create_project_state()