mirror of
https://github.com/Pythagora-io/gpt-pilot.git
synced 2026-01-09 13:17:55 -05:00
remove unused code
This commit is contained in:
18
README.md
18
README.md
@@ -146,24 +146,6 @@ If not specified `email` will be parsed from `~/.gitconfig` if the file exists.
See also [What's the purpose of arguments.password / User.password?](https://github.com/Pythagora-io/gpt-pilot/discussions/55)

## `advanced`

The Architect, by default, favors certain technologies, including:

- Node.JS
- MongoDB
- PeeWee ORM
- Jest & PyUnit
- Bootstrap
- Vanilla JavaScript
- Socket.io

If you have your own preferences, you can have a deeper conversation with the Architect.

```bash
python main.py advanced=True
```

## `step`

Continue working on an existing app from a specific **`step`** (eg: `user_tasks`)

```bash
||||
@@ -266,28 +266,3 @@ class AgentConvo:
|
||||
prompt = get_prompt(prompt_path, prompt_data)
|
||||
logger.info('\n>>>>>>>>>> User Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', prompt)
|
||||
self.messages.append({"role": "user", "content": prompt})
|
||||
|
||||
def get_additional_info_from_user(self, function_calls: FunctionCallSet = None):
    """Offer the user a chance to request changes to the last conversation message.

    Keeps prompting until the user presses ENTER without typing anything;
    each requested change is sent back to the LLM via the update prompt.

    Args:
        function_calls: Optional function calls to be included in the message.

    Returns:
        The agent's response to the most recent requested change, or None
        if the user never asked for one.
    """
    response = None
    while True:
        print(color_yellow(
            "Please check this message and say what needs to be changed. If everything is ok just press ENTER", ))
        user_input = ask_user(self.agent.project, self.messages[-1]['content'], require_some_input=False)
        # Empty input means the user is satisfied — stop asking.
        if user_input.lower() == '':
            break

        response = self.send_message('utils/update.prompt',
                                     {'changes': user_input},
                                     function_calls)

    logger.info('Getting additional info from user done')
    return response
|
||||
|
||||
@@ -102,14 +102,6 @@ class Architect(Agent):
|
||||
print('continue', type='buttons-only')
|
||||
ask_user(self.project, "Press ENTER if you still want to proceed. If you'd like to modify the project description, close the app and start a new one.", require_some_input=False)
|
||||
|
||||
# TODO: Project.args should be a defined class so that all of the possible args are more obvious
|
||||
if self.project.args.get('advanced', False):
|
||||
llm_response = self.convo_architecture.get_additional_info_from_user(ARCHITECTURE)
|
||||
if llm_response is not None:
|
||||
self.project.architecture = llm_response["architecture"]
|
||||
self.project.system_dependencies = llm_response["system_dependencies"]
|
||||
self.project.package_dependencies = llm_response["package_dependencies"]
|
||||
|
||||
logger.info(f"Final architecture: {self.project.architecture}")
|
||||
|
||||
save_progress(self.project.args['app_id'], self.project.current_step, {
|
||||
|
||||
@@ -6,8 +6,7 @@ from logger.logger import logger
|
||||
from database.database import get_app, save_progress, save_app, get_progress_steps
|
||||
from utils.utils import should_execute_step, generate_app_data, step_already_finished, clean_filename
|
||||
from utils.files import setup_workspace
|
||||
from prompts.prompts import ask_for_app_type, ask_for_main_app_definition, get_additional_info_from_openai, \
|
||||
generate_messages_from_description, ask_user, get_prompt
|
||||
from prompts.prompts import ask_for_app_type, ask_for_main_app_definition, ask_user
|
||||
from const.llm import END_RESPONSE
|
||||
from const.messages import MAX_PROJECT_NAME_LENGTH
|
||||
|
||||
@@ -74,10 +73,6 @@ class ProductOwner(Agent):
|
||||
|
||||
high_level_messages = []
|
||||
high_level_summary = self.project.main_prompt
|
||||
if self.project.args.get('advanced', False):
|
||||
high_level_messages = self.ask_clarifying_questions(self.project.main_prompt)
|
||||
|
||||
high_level_summary = self.generate_project_summary(high_level_messages)
|
||||
|
||||
save_progress(self.project.args['app_id'], self.project.current_step, {
|
||||
"prompt": self.project.main_prompt,
|
||||
@@ -91,23 +86,6 @@ class ProductOwner(Agent):
|
||||
return
|
||||
# PROJECT DESCRIPTION END
|
||||
|
||||
def ask_clarifying_questions(self, main_prompt: str):
    """Run a clarifying Q&A with the LLM about the project description.

    Builds the instruction messages from the description plus the app type
    and name stored in the project args, then delegates to the LLM/user
    question loop.
    """
    app_type = self.project.args['app_type']
    app_name = self.project.args['name']
    instructions = generate_messages_from_description(main_prompt, app_type, app_name)
    return get_additional_info_from_openai(self.project, instructions)
|
||||
|
||||
def generate_project_summary(self, high_level_messages: list[dict]):
    """Condense the clarification conversation into a project summary.

    Prints a heading, primes a fresh conversation with the summary system
    prompt, and asks the LLM to summarize the role-prefixed transcript.
    """
    print(color_green_bold('Project Summary:\n'))

    convo = AgentConvo(self)
    convo.messages.append({
        'role': 'system',
        'content': get_prompt('components/summary_instructions.prompt'),
    })

    transcript = '\n'.join(f"{msg['role']}: {msg['content']}" for msg in high_level_messages)
    return convo.send_message('utils/summary.prompt',
                              {'conversation': transcript},
                              should_log_message=False)
|
||||
|
||||
def get_user_stories(self):
|
||||
if not self.project.args.get('advanced', False):
|
||||
return
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
import builtins
|
||||
|
||||
import pytest
|
||||
from dotenv import load_dotenv
|
||||
from unittest.mock import patch, MagicMock
|
||||
from main import get_custom_print
|
||||
from helpers.test_Project import create_project
|
||||
from .ProductOwner import ProductOwner
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
class TestProductOwner:
    """Tests for the ProductOwner agent's clarification and summary steps."""

    def setup_method(self):
        # Swap the builtin print for the project's IPC-aware variant.
        builtins.print, ipc_client_instance = get_custom_print({})

    @patch('prompts.prompts.ask_user', return_value='yes')
    @patch('prompts.prompts.create_gpt_chat_completion')
    def test_ask_clarifying_questions(self, mock_completion, mock_ask):
        # Given: an LLM that asks two questions, then signals it is done
        owner = ProductOwner(create_project())
        mock_completion.side_effect = [
            {'text': 'Will the app run in the console?'},
            {'text': 'Will it always print "Hello World"?'},
            {'text': 'EVERYTHING_CLEAR'},
        ]

        # When
        conversation = owner.ask_clarifying_questions('A Python version of the typical "hello world" application.')

        # Then: no system/instruction content leaks into the conversation
        forbidden_snippets = (
            'You are an experienced project owner',
            'I\'m going to show you an overview of tasks',
            'Getting additional answers',
        )
        for msg in conversation:
            assert msg['role'] != 'system'
            for snippet in forbidden_snippets:
                assert snippet not in msg['content']

    @pytest.mark.uses_tokens
    @patch('helpers.AgentConvo.get_saved_development_step')
    def test_generate_project_summary(self, mock_get_step):
        # Given: a finished clarification transcript
        owner = ProductOwner(create_project())
        transcript = [
            {'role': 'user', 'content': 'I want you to create the app (let\'s call it "TestProject") that can be described like this:\n'
                                        '```\nA Python version of the typical "hello world" application.\n```'},
            {'role': 'assistant', 'content': 'Should the application produce a text-based output?'},
            {'role': 'user', 'content': 'yes'},
            {'role': 'assistant', 'content': 'Should the application be command-line based or should it have a GUI (Graphical User Interface)?'},
            {'role': 'user', 'content': 'command-line'},
            {'role': 'assistant', 'content': 'Is there a specific version of Python you prefer the application to be written in?'},
            {'role': 'user', 'content': 'no'},
            {'role': 'assistant', 'content': 'Are there any specific packages or libraries you want to be used in the development of this application?'},
            {'role': 'user', 'content': 'no'},
        ]

        # When
        summary = owner.generate_project_summary(transcript)

        # Then the summary should not include instructions as reported in #246
        assert isinstance(summary, str)
        assert 'EVERYTHING_CLEAR' not in summary
        assert 'neutral tone' not in summary
        assert 'clarification' not in summary
||||
@@ -73,47 +73,6 @@ def ask_user(project, question: str, require_some_input=True, hint: str = None,
|
||||
return answer
|
||||
|
||||
|
||||
def get_additional_info_from_openai(project, messages):
    """Run the clarifying-questions loop between the LLM and the user.

    Starting from the user's initial description, the LLM asks clarifying
    questions and each user answer is appended to the conversation. The loop
    ends when the LLM replies with END_RESPONSE, or when the API call fails.

    :param project: Project
    :param messages: [
        { "role": "system", "content": "You are a Product Owner..." },
        { "role": "user", "content": "I want you to create the app {name} that can be described: ```{description}```..." }
    ]
    :return: The conversation so far, with all system messages stripped.
    """
    while True:
        # Obtain the next clarification from the LLM: { 'text': question } or
        # None when the API call raises.
        try:
            reply = create_gpt_chat_completion(messages, 'additional_info', project)
        except ApiError:
            reply = None

        if reply is None:
            # Best-effort: an API failure simply ends the loop.
            break

        if reply['text'] and reply['text'].strip() == END_RESPONSE:
            # The LLM has no more questions.
            break

        # Relay the LLM's question to the user and record both sides.
        answer = ask_user(project, reply['text'])
        messages.append({'role': 'assistant', 'content': reply['text']})
        messages.append({'role': 'user', 'content': answer})

    logger.info('Getting additional info from openai done')

    return [msg for msg in messages if msg['role'] != 'system']
|
||||
|
||||
|
||||
# TODO refactor this to comply with AgentConvo class
|
||||
def generate_messages_from_description(description, app_type, name):
|
||||
"""
|
||||
|
||||
@@ -97,9 +97,6 @@ def create_gpt_chat_completion(messages: List[dict], req_type, project,
|
||||
Called from:
|
||||
- AgentConvo.send_message() - these calls often have `function_calls`, usually from `pilot/const/function_calls.py`
|
||||
- convo.continuous_conversation()
|
||||
- prompts.get_additional_info_from_openai()
|
||||
- prompts.get_additional_info_from_user() after the user responds to each
|
||||
"Please check this message and say what needs to be changed... {message}"
|
||||
:param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ]
|
||||
:param req_type: 'project_description' etc. See common.STEPS
|
||||
:param project: project
|
||||
|
||||
Reference in New Issue
Block a user