remove gpt-pilot 0.1 code

Senko Rasic
2024-05-22 21:39:26 +02:00
parent 79e6e18236
commit 391998ab67
211 changed files with 0 additions and 15426 deletions


@@ -1,63 +0,0 @@
name: Test & QA
on:
push:
branches:
- main
- development
pull_request:
branches:
- main
- development
jobs:
# Docker:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Build the Docker image
# run: docker compose build
# - name: Run the Docker image
# run: docker compose up gpt-pilot -d
# - name: Wait for the Docker image to start
# run: docker ps
# - name: Stop the Docker image
# run: docker compose down
Test:
runs-on: ${{ matrix.os }}
strategy:
matrix:
# Test latest and oldest supported Python releases
# See https://devguide.python.org/versions/
python-version: ['3.9', '3.12']
os: [ubuntu-latest, macos-12, windows-latest]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Lint
run: |
pip install flake8 ruff
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# stop the build if there are Python syntax errors or undefined names
ruff --output-format=github --select=E9,F63,F7,F82 --target-version=py37 ./pilot
# default set of ruff rules with GitHub Annotations
ruff --output-format=github --target-version=py37 --ignore=F401,E402,E501 ./pilot
- name: Run tests
run: |
pip install pytest
cd pilot
pytest -m "not slow and not uses_tokens and not ux_test"

.gitignore

@@ -1,174 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.DS_Store
# Logger
/pilot/logger/debug.log
#sqlite
/pilot/gpt-pilot
# workspace
workspace
pilot-env/
# Other
brija.py


@@ -1,29 +0,0 @@
FROM python:3.11
# Download precompiled ttyd binary from GitHub releases
RUN apt-get update && \
apt-get install -y wget && \
wget https://github.com/tsl0922/ttyd/releases/download/1.6.3/ttyd.x86_64 -O /usr/bin/ttyd && \
chmod +x /usr/bin/ttyd && \
apt-get remove -y wget && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
ENV NVM_DIR /root/.nvm
RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \
&& . "$NVM_DIR/nvm.sh" \
&& nvm install node \
&& nvm use node
WORKDIR /usr/src/app
COPY . .
RUN pip install --no-cache-dir -r requirements.txt
RUN python -m venv pilot-env
RUN /bin/bash -c "source pilot-env/bin/activate"
RUN pip install -r requirements.txt
WORKDIR /usr/src/app/pilot
EXPOSE 7681
CMD ["ttyd", "bash"]

LICENSE

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2023 Pythagora-io
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md

@@ -1,228 +0,0 @@
<div align="center">
# 🧑‍✈️ GPT PILOT 🧑‍✈️
</div>
---
<div align="center">
[![Discord Follow](https://dcbadge.vercel.app/api/server/HaqXugmxr9?style=flat)](https://discord.gg/HaqXugmxr9)
[![GitHub Repo stars](https://img.shields.io/github/stars/Pythagora-io/gpt-pilot?style=social)](https://github.com/Pythagora-io/gpt-pilot)
[![Twitter Follow](https://img.shields.io/twitter/follow/HiPythagora?style=social)](https://twitter.com/HiPythagora)
</div>
---
<div align="center">
<a href="https://www.ycombinator.com/" target="_blank"><img src="https://s3.amazonaws.com/assets.pythagora.ai/yc/PNG/Black.png" alt="Pythagora-io%2Fgpt-pilot | Trendshift" style="width: 250px; height: 93px;"/></a>
</div>
<br>
<div align="center">
<a href="https://trendshift.io/repositories/466" target="_blank"><img src="https://trendshift.io/api/badge/repositories/466" alt="Pythagora-io%2Fgpt-pilot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>
<br>
<br>
<div align="center">
### GPT Pilot doesn't just generate code, it builds apps!
</div>
---
<div align="center">
[![See it in action](https://i3.ytimg.com/vi/4g-1cPGK0GA/maxresdefault.jpg)](https://youtu.be/4g-1cPGK0GA)
(click to open the video on YouTube; 1:40 min)
</div>
---
<div align="center">
<a href="vscode:extension/PythagoraTechnologies.gpt-pilot-vs-code" target="_blank"><img src="https://github.com/Pythagora-io/gpt-pilot/assets/10895136/5792143e-77c7-47dd-ad96-6902be1501cd" alt="Pythagora-io%2Fgpt-pilot | Trendshift" style="width: 185px; height: 55px;" width="185" height="55"/></a>
</div>
GPT Pilot is the core technology for the [Pythagora VS Code extension](https://bit.ly/3IeZxp6) that aims to provide **the first real AI developer companion**. Not just an autocomplete or a helper for PR messages but rather a real AI developer that can write full features, debug them, talk to you about issues, ask for review, etc.
---
📫 If you would like to get updates on future releases or just get in touch, join our [Discord server](https://discord.gg/HaqXugmxr9) or you [can add your email here](http://eepurl.com/iD6Mpo). 📬
---
<!-- TOC -->
* [🔌 Requirements](#-requirements)
* [🚦How to start using gpt-pilot?](#how-to-start-using-gpt-pilot)
* [🔎 Examples](#-examples)
* [🐳 How to start gpt-pilot in docker?](#-how-to-start-gpt-pilot-in-docker)
* [🧑‍💻️ CLI arguments](#-cli-arguments)
* [🏗 How does GPT Pilot work?](#-how-does-gpt-pilot-work)
* [🕴How's GPT Pilot different from _Smol developer_ and _GPT engineer_?](#hows-gpt-pilot-different-from-smol-developer-and-gpt-engineer)
* [🍻 Contributing](#-contributing)
* [🔗 Connect with us](#-connect-with-us)
* [🌟 Star history](#-star-history)
<!-- TOC -->
---
GPT Pilot aims to research how much LLMs can be utilized to generate fully working, production-ready apps while the developer oversees the implementation.
**The main idea is that AI can write most of the code for an app (maybe 95%), but the remaining 5% requires, and will continue to require, a developer until we get full AGI**.
If you are interested in our learnings during this project, you can check [our latest blog posts](https://blog.pythagora.ai/2024/02/19/gpt-pilot-what-did-we-learn-in-6-months-of-working-on-a-codegen-pair-programmer/).
---
<br>
<div align="center">
### **[👉 Examples of apps written by GPT Pilot 👈](https://github.com/Pythagora-io/gpt-pilot/wiki/Apps-created-with-GPT-Pilot)**
</div>
<br>
---
# 🔌 Requirements
- **Python 3.9+**
# 🚦How to start using gpt-pilot?
👉 If you are using VS Code as your IDE, the easiest way to start is by downloading [GPT Pilot VS Code extension](https://bit.ly/3IeZxp6). 👈
Otherwise, you can use the CLI tool.
After you have Python and (optionally) PostgreSQL installed, follow these steps:
1. `git clone https://github.com/Pythagora-io/gpt-pilot.git` (clone the repo)
2. `cd gpt-pilot`
3. `python -m venv pilot-env` (create a virtual environment)
4. `source pilot-env/bin/activate` (or on Windows `pilot-env\Scripts\activate`) (activate the virtual environment)
5. `pip install -r requirements.txt` (install the dependencies)
6. `cd pilot`
7. `mv .env.example .env` (or on Windows `copy .env.example .env`) (create the .env file)
8. Add your environment to the `.env` file:
- LLM provider (OpenAI/Azure/OpenRouter)
- Your API key
- Database settings: SQLite/PostgreSQL (to change from SQLite to PostgreSQL, just set `DATABASE_TYPE=postgres`)
- Optionally, set IGNORE_PATHS for folders that shouldn't be tracked by GPT Pilot in the workspace; useful for ignoring folders created by compilers (e.g. `IGNORE_PATHS=folder1,folder2,folder3`)
9. `python main.py` (start GPT Pilot)
After this, just follow the instructions in the terminal.
All generated code will be stored in the `workspace` folder, inside a subfolder named after the app name you enter when starting the pilot.
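For illustration, a minimal `.env` for the default OpenAI setup might look like this (the key below is a placeholder; see `.env.example` for the full list of options):
```bash
# LLM provider: OPENAI, AZURE or OPENROUTER
ENDPOINT=OPENAI
OPENAI_API_KEY=your-api-key-here
MODEL_NAME=gpt-4-turbo-preview
# Uncomment to switch from the default SQLite to PostgreSQL
# DATABASE_TYPE=postgres
# Folders GPT Pilot shouldn't track in the workspace
# IGNORE_PATHS=dist,build,node_modules
```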
# 🔎 [Examples](https://github.com/Pythagora-io/gpt-pilot/wiki/Apps-created-with-GPT-Pilot)
[Click here](https://github.com/Pythagora-io/gpt-pilot/wiki/Apps-created-with-GPT-Pilot) to see all example apps created with GPT Pilot.
## 🐳 How to start gpt-pilot in docker?
1. `git clone https://github.com/Pythagora-io/gpt-pilot.git` (clone the repo)
2. Update the `docker-compose.yml` environment variables, which can be done via `docker compose config`. If you wish to use a local model, please go to [https://localai.io/basics/getting_started/](https://localai.io/basics/getting_started/).
3. By default, GPT Pilot will read & write to `~/gpt-pilot-workspace` on your machine; you can also edit this in `docker-compose.yml`.
4. Run `docker compose build`. This will build a gpt-pilot container for you.
5. Run `docker compose up`.
6. Access the web terminal on port `7681`.
7. `python main.py` (start GPT Pilot)
This will start two containers: one running the image built from the `Dockerfile`, and a Postgres database. The new image also has [ttyd](https://github.com/tsl0922/ttyd) installed so that you can easily interact with gpt-pilot. Node is also installed on the image, and port 3000 is exposed.
# 🧑‍💻️ CLI arguments
## `--get-created-apps-with-steps`
Lists all existing apps.
```bash
python main.py --get-created-apps-with-steps
```
<br>
## `app_id`
Continue working on an existing app using **`app_id`**
```bash
python main.py app_id=<ID_OF_THE_APP>
```
<br>
## `step`
Continue working on an existing app from a specific **`step`** (eg: `development_planning`)
```bash
python main.py app_id=<ID_OF_THE_APP> step=<STEP_FROM_CONST_COMMON>
```
<br>
## `skip_until_dev_step`
Continue working on an existing app from a specific **development step**
```bash
python main.py app_id=<ID_OF_THE_APP> skip_until_dev_step=<DEV_STEP>
```
If you want to play around with GPT Pilot, this is likely the flag you will use most often.
<br>
```bash
python main.py app_id=<ID_OF_THE_APP> skip_until_dev_step=0
```
Erase all development steps previously done and continue working on an existing app from the start of development.
## `theme`
```bash
python main.py theme=light
```
```bash
python main.py theme=dark
```
<br>
# 🏗 How does GPT Pilot work?
Here are the steps GPT Pilot takes to create an app:
1. You enter the app name and the description.
2. **Product Owner agent**, like in real life, does nothing. :)
3. **Specification Writer agent** asks a couple of questions to better understand the requirements if the project description is not detailed enough.
4. **Architect agent** writes up the technologies that will be used for the app, checks whether they are installed on the machine, and installs any that are missing.
5. **Tech Lead agent** writes up development tasks that the Developer must implement.
6. **Developer agent** takes each task and writes up what needs to be done to implement it. The description is in human-readable form.
7. **Code Monkey agent** takes the Developer's description and the existing file and implements the changes.
8. **Reviewer agent** reviews every step of the task and, if something is done incorrectly, sends it back to Code Monkey.
9. **Troubleshooter agent** helps you give good feedback to GPT Pilot when something is wrong.
10. **Debugger agent**: you hate to see him, but he is your best friend when things go south.
11. **Technical Writer agent** writes documentation for the project.
<br>
# 🕴How's GPT Pilot different from _Smol developer_ and _GPT engineer_?
- **GPT Pilot works with the developer to create a fully working production-ready app** - I don't think AI can (at least in the near future) create apps without a developer being involved. So, **GPT Pilot codes the app step by step** just like a developer would in real life. This way, it can debug issues as they arise throughout the development process. If it gets stuck, you, the developer in charge, can review the code and fix the issue. Other similar tools give you the entire codebase at once - this way, bugs are much harder to fix for AI and for you as a developer.
<br><br>
- **Works at scale** - GPT Pilot isn't meant just for creating simple apps; it is designed to work at any scale. It has mechanisms that filter the code so that in each LLM conversation it doesn't need to keep the entire codebase in context; it shows the LLM only the code relevant to the task it is currently working on. Once an app is finished, you can continue working on it by writing instructions for the feature you want to add.
# 🍻 Contributing
If you are interested in contributing to GPT Pilot, join [our Discord server](https://discord.gg/HaqXugmxr9), check out open [GitHub issues](https://github.com/Pythagora-io/gpt-pilot/issues), and see if anything interests you. We would be happy to get help in resolving any of those. The best place to start is by reviewing blog posts mentioned above to understand how the architecture works before diving into the codebase.
## 🖥 Development
Other than the research, GPT Pilot needs to be debugged to work in different scenarios. For example, we realized that the quality of the code generated is very sensitive to the size of the development task. When the task is too broad, the code has too many bugs that are hard to fix, but when the development task is too narrow, GPT also seems to struggle in getting the task implemented into the existing code.
## 📊 Telemetry
To improve GPT Pilot, we are tracking some events from which you can opt out at any time. You can read more about it [here](./docs/TELEMETRY.md).
# 🔗 Connect with us
🌟 As an open-source tool, it would mean the world to us if you starred the GPT Pilot repo 🌟
💬 Join [the Discord server](https://discord.gg/HaqXugmxr9) to get in touch.


@@ -1,47 +0,0 @@
version: '3'
services:
gpt-pilot:
environment:
# OPENAI/AZURE/OPENROUTER
- ENDPOINT=OPENAI
- OPENAI_API_KEY=
- OPENAI_ENDPOINT=
# - AZURE_API_KEY=
# - AZURE_ENDPOINT=
# - OPENROUTER_API_KEY=
# In case of Azure endpoint, change this to your deployed model name
- MODEL_NAME=gpt-4
- MAX_TOKENS=8192
- DATABASE_TYPE=postgres
- DB_NAME=pilot
- DB_HOST=postgres
- DB_PORT=5432
- DB_USER=pilot
- DB_PASSWORD=pilot
# Folders which shouldn't be tracked in workspace (useful to ignore folders created by compiler)
# IGNORE_PATHS=folder1,folder2
volumes:
- ~/gpt-pilot-workspace:/usr/src/app/workspace
build:
context: .
dockerfile: Dockerfile
ports:
- "7681:7681"
- "3000:3000"
depends_on:
postgres:
condition: service_healthy
postgres:
image: postgres
restart: always
environment:
POSTGRES_USER: pilot
POSTGRES_PASSWORD: pilot
POSTGRES_DB: pilot
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U pilot"]
interval: 30s
timeout: 10s
retries: 3


@@ -1,47 +0,0 @@
# OPENAI or AZURE or OPENROUTER (ignored for Anthropic)
ENDPOINT=OPENAI
# OPENAI_ENDPOINT=https://api.openai.com/v1/chat/completions
OPENAI_ENDPOINT=
OPENAI_API_KEY=
AZURE_API_KEY=
AZURE_ENDPOINT=
OPENROUTER_API_KEY=
# Set this to use Anthropic API directly
# If using via OpenRouter, OPENROUTER_API_KEY should be set instead
ANTHROPIC_API_KEY=
# You only need to set this if not using Anthropic API directly (eg. via proxy or AWS Bedrock)
# ANTHROPIC_ENDPOINT=
# In case of Azure/OpenRouter endpoint, change this to your deployed model name
MODEL_NAME=gpt-4-turbo-preview
# In case of Anthropic, use "anthropic/" + the model name, example for Claude 3 Opus
# MODEL_NAME=anthropic/claude-3-opus-20240229
MAX_TOKENS=8192
# Folders which shouldn't be tracked in workspace (useful to ignore folders created by compiler)
# IGNORE_PATHS=folder1,folder2
# Database
# DATABASE_TYPE=postgres
DB_NAME=gpt-pilot
DB_HOST=
DB_PORT=
DB_USER=
DB_PASSWORD=
# USE_GPTPILOT_FOLDER=true
# Load database imported from another location/system - EXPERIMENTAL
# AUTOFIX_FILE_PATHS=false
# Set an extra buffer to wait on top of the detected retry time when a rate limit is hit. Defaults to 6.
# RATE_LIMIT_EXTRA_BUFFER=
# Only send task-relevant files to the LLM. Enabled by default; uncomment and set this to "false" to disable.
# FILTER_RELEVANT_FILES=true


@@ -1,6 +0,0 @@
MAX_COMMAND_DEBUG_TRIES = 3
MAX_RECURSION_LAYER = 3
MIN_COMMAND_RUN_TIME = 2000 # 2sec
MAX_COMMAND_RUN_TIME = 60000 # 1min
MAX_COMMAND_OUTPUT_LENGTH = 50000
MAX_QUESTIONS_FOR_BUG_REPORT = 5


@@ -1,128 +0,0 @@
import os
APP_TYPES = ['Web App', 'Script', 'Mobile App', 'Chrome Extension']
ROLES = {
'product_owner': ['project_description', 'user_stories', 'user_tasks'],
'architect': ['architecture'],
'tech_lead': ['development_planning'],
'full_stack_developer': ['coding'],
'dev_ops': ['environment_setup'],
'code_monkey': ['coding']
}
STEPS = [
'project_description',
'user_stories',
'user_tasks',
'architecture',
'environment_setup',
'development_planning',
'coding',
'finished'
]
DEFAULT_IGNORE_PATHS = [
'.git',
'.gpt-pilot',
'.idea',
'.vscode',
'.next',
'.DS_Store',
'__pycache__',
"site-packages",
'node_modules',
'package-lock.json',
'venv',
'dist',
'build',
'target',
"*.min.js",
"*.min.css",
"*.svg",
"*.csv",
"*.log",
"go.sum",
]
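# Merge the defaults with any user-supplied IGNORE_PATHS entries
# (a comma-separated environment variable), skipping empty values.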
IGNORE_PATHS = DEFAULT_IGNORE_PATHS + [
folder for folder
in os.environ.get('IGNORE_PATHS', '').split(',')
if folder
]
IGNORE_SIZE_THRESHOLD = 50000 # files larger than 50 KB are ignored by default
PROMPT_DATA_TO_IGNORE = {'directory_tree', 'name'}
EXAMPLE_PROJECT_DESCRIPTION = """
The application is a simple ToDo app built using React. Its primary function is to allow users to manage a list of tasks (todos). Each task has a description and a state (open or completed, with the default state being open). The application is frontend-only, with no user sign-up or authentication process. The goal is to provide a straightforward and user-friendly interface for task management.
Features:
1. Display of Todos: A list that displays all todo items. Each item shows its description and a checkbox to indicate its state (open or completed).
2. Add New Todo: A button to add a new todo item. Clicking this button will prompt the user to enter a description for the new todo.
3. Toggle State: Each todo item includes a checkbox. Checking/unchecking this box toggles the todo's state between open and completed.
4. Local Storage: The application will use the browser's local storage to persist todos between sessions, ensuring that users do not lose their data upon reloading the application.
Functional Specification:
- Upon loading the application, it fetches existing todos from the local storage and displays them in a list.
- Each todo item in the list displays a checkbox and a description. The checkbox reflects the todo's current state (checked for completed, unchecked for open).
- When the user checks or unchecks a checkbox, the application updates the state of the corresponding todo item and saves the updated list to local storage.
- Clicking the "Add New Todo" button prompts the user to enter a description for the new todo. Upon confirmation, the application adds the new todo (with the default state of open) to the list and updates local storage.
- The application does not support deleting or editing todo items to keep the interface and interactions simple.
- Todos persist between sessions using the browser's local storage. The application saves any changes to the todo list (additions or state changes) in local storage and retrieves this data when the application is reloaded.
Technical Specification:
- Platform/Technologies: The application is a web application developed using React. No backend technologies are required.
- Styling: Use Bootstrap 5 for a simple and functional interface. Load Bootstrap from the CDN (don't install it locally):
- https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css
- https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js
- State Management: Directly in the React component
- make sure to initialize the state from the local storage as default (... = useState(JSON.parse(localStorage.getItem('todos')) || [])) to avoid race conditions
- Data Persistence: The application uses the browser's local storage to persist todos between sessions. It stores the array of todos as a JSON string and parses this data on application load.
"""
EXAMPLE_PROJECT_ARCHITECTURE = {
"architecture": (
"The application is a client-side React web application that uses local storage for data persistence. "
"It consists of a single page with components for listing todos, adding new todos, and toggling their completion status. "
"State management is handled directly within React components, leveraging useState and useEffect hooks for state manipulation and side effects, respectively. "
"Bootstrap 5 is used for styling to provide a responsive and accessible UI."
),
"system_dependencies": [
{
"name": "Node.js",
"description": "JavaScript runtime needed to run the React development tools and build the project.",
"test": "node --version",
"required_locally": True
}
],
"package_dependencies": [
{
"name": "react",
"description": "A JavaScript library for building user interfaces."
},
{
"name": "react-dom",
"description": "Serves as the entry point to the DOM and server renderers for React."
},
{
"name": "bootstrap",
"description": "Frontend framework for developing responsive and mobile-first websites."
}
],
"template": "javascript_react"
}
EXAMPLE_PROJECT_PLAN = [
{
"description": (
"Create a new component TodoList: This component will display the list of todo items. "
"Use localStorage directly to access the current state of todos and map over them, rendering each todo item as a list item. "
"Each item should display the todo's description and a checkbox that reflects the todo's state (checked for completed, unchecked for open). "
"When the checkbox is clicked, dispatch an action to toggle the state of the todo. "
"Also create AddTodo: This component will include a button that, when clicked, displays a prompt asking the user for a description of the new todo. "
"Upon confirmation, dispatch an action to add the new todo to the state with a default state of open. "
"Ensure the component also updates the local storage with the new list of todos. "
"Finally, use TodoList and AddTodo components in App component to implement the required functionality. "
"Integrate Boostrap 5 for styling - add CSS/JS to index.html, style App.jsx and other files as appropriate."
)
}
]


@@ -1,47 +0,0 @@
let messages = {{messages}}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async function fill_playground(messages) {
let system_messages = messages.filter(msg => msg.role === 'system');
if (system_messages.length > 0) {
let system_message_textarea = document.querySelector('.chat-pg-instructions').querySelector('textarea');
system_message_textarea.focus();
system_message_textarea.value = '';
document.execCommand("insertText", false, system_messages[0].content);
await sleep(100);
}
// Remove all previous messages
let remove_buttons = document.querySelectorAll('.chat-message-remove-button');
for (let j = 0; j < 10; j++) {
for (let i = 0; i < remove_buttons.length; i++) {
let clickEvent = new Event('click', {
'bubbles': true,
'cancelable': true
});
remove_buttons[i].dispatchEvent(clickEvent);
}
}
let other_messages = messages.filter(msg => msg.role !== 'system');
for (let i = 0; i < other_messages.length; i++) {
document.querySelector('.add-message').click()
await sleep(100);
}
for (let i = 0; i < other_messages.length; i++) {
let all_elements = document.querySelectorAll('.text-input-with-focus');
let last_user_document = all_elements[i];
let textarea_to_fill = last_user_document.querySelector('textarea');
textarea_to_fill.focus();
document.execCommand("insertText", false, other_messages[i].content);
await sleep(100);
}
}
fill_playground(messages)


@@ -1,620 +0,0 @@
def process_user_stories(stories):
return stories
def process_user_tasks(tasks):
return tasks
def process_os_technologies(technologies):
return technologies
def run_commands(commands):
return commands
def return_files(files):
# TODO get file
return files
def return_array_from_prompt(name_plural, name_singular, return_var_name):
return {
'name': f'process_{name_plural.replace(" ", "_")}',
'description': f"Print the list of {name_plural} that are created.",
'parameters': {
'type': 'object',
"properties": {
f"{return_var_name}": {
"type": "array",
"description": f"List of {name_plural}.",
"items": {
"type": "string",
"description": f"{name_singular}"
},
},
},
"required": [return_var_name],
},
}
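# For example, return_array_from_prompt('user stories', 'user story', 'stories')
# produces a function definition named 'process_user_stories' whose single required
# parameter 'stories' is an array of strings.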
def dev_step_type_description():
return "Type of the development step that needs to be done to complete the entire task."
def step_command_definition(extended=False):
# Base properties and required fields
properties = {
"type": {
"const": "command",
"description": dev_step_type_description()
},
"command": command_definition(),
}
required = ["type", "command"]
# Extended properties
if extended:
properties.update({
"need_to_see_output": {
"type": "boolean",
"description": "Set to `true` if the definition of subsequent steps may need to change after you see the output of a successful execution of this step. For example, if the purpose of a command is to check the status of a service or contents of a file before deciding how to proceed then this flag should be set to `true`. If subsequent steps can be executed as long as this step is successful, then this flag does not need to be set."
},
"check_if_fixed": {
"type": "boolean",
"description": "Flag that indicates if the original command that triggered the error that's being debugged should be tried after this step to check if the error is fixed. If you think that the original command `delete node_modules/ && delete package-lock.json` will pass after this step, then this flag should be set to TRUE and if you think that the original command will still fail after this step, then this flag should be set to `false`."
}
})
# Update required fields when extended
required.extend(["need_to_see_output", "check_if_fixed"])
return {
"type": "object",
"properties": properties,
"required": required
}
def step_save_file_definition():
return {
"type": "object",
"properties": {
"type": {
"const": "save_file",
"description": dev_step_type_description()
},
"save_file": {
"type": "object",
"description": "A file that should be created or updated.",
"properties": {
"path": {
"type": "string",
"description": "Full path of the file with the file name."
},
"code_change_description": {
"type": "string",
"description": "Empty string"
}
},
"required": ["path", "code_change_description"]
}
},
"required": ["type", "save_file"]
}
def step_human_intervention_definition():
return {
"type": "object",
"properties": {
"type": {
"const": "human_intervention",
"description": 'Development step that will be executed by a human. You should avoid using this step if possible, task does NOT need to have "human_intervention" step.'
},
"human_intervention_description": {
"type": "string",
"description": "Very clear description of step where human intervention is needed."
}
},
"required": ["type", "human_intervention_description"]
}
def command_definition(description_command='A single command that needs to be executed.',
description_timeout=
'Timeout in milliseconds that represents the approximate time this command takes to finish. '
'If you need to run a command that doesn\'t finish by itself (eg. a command to run an app), '
'set the timeout to a value long enough to determine that it has started successfully and provide a command_id. '
'If you need to create a directory that doesn\'t exist and is not the root project directory, '
'always create it by running a command `mkdir`'):
return {
'type': 'object',
'description': 'Command that needs to be run to complete the current task. This should be used only if the task is of a type "command".',
'properties': {
'command': {
'type': 'string',
'description': description_command,
},
'timeout': {
'type': 'number',
'description': description_timeout,
},
'success_message': {
'type': 'string',
'description': 'A message to look for in the output of the command to determine if successful or not.',
},
'command_id': {
'type': 'string',
'description': 'If the process needs to continue running after the command is executed provide '
'a unique command identifier which you can use to kill the process later.',
}
},
'required': ['command', 'timeout'],
}
USER_STORIES = {
'definitions': [
return_array_from_prompt('user stories', 'user story', 'stories')
],
'functions': {
'process_user_stories': process_user_stories
},
}
USER_TASKS = {
'definitions': [
return_array_from_prompt('user tasks', 'user task', 'tasks')
],
'functions': {
'process_user_tasks': process_user_tasks
},
}
ARCHITECTURE = {
'definitions': [
{
'name': 'process_architecture',
'description': "Get architecture and the list of system dependencies required for the project.",
'parameters': {
'type': 'object',
"properties": {
"architecture": {
"type": "string",
"description": "General description of the app architecture.",
},
"system_dependencies": {
"type": "array",
"description": "List of system dependencies required to build and run the app.",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the system dependency, for example Node.js or Python."
},
"description": {
"type": "string",
"description": "One-line description of the dependency.",
},
"test": {
"type": "string",
"description": "Command line to test whether the dependency is available on the system.",
},
"required_locally": {
"type": "boolean",
"description": "Whether this dependency must be installed locally (as opposed to connecting to cloud or other server)",
}
},
"required": ["name", "description", "test", "required_locally"],
},
},
"package_dependencies": {
"type": "array",
"description": "List of framework/language-specific packages used by the app.",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the package dependency, for example Express or React."
},
"description": {
"type": "string",
"description": "One-line description of the dependency.",
}
},
"required": ["name", "description"],
},
},
'template': {
'type': ['string', 'null'],
'description': 'One of the available project templates.',
},
},
"required": ["architecture", "system_dependencies", "package_dependencies"],
},
},
],
'functions': {
'process_technologies': lambda technologies: technologies
},
}
COMMAND_TO_RUN = {
'definitions': [
{
'name': 'command_to_run',
'description': 'Command that starts the app.',
'parameters': command_definition("Command that starts the app. If app can't be started for some reason, return command as empty string ''."),
},
],
'functions': {
'process_commands': run_commands
},
}
IMPLEMENT_TASK = {
'definitions': [
{
'name': 'parse_development_task',
'description': 'Breaks down the development task into smaller steps that need to be done to implement the entire task.',
'parameters': {
'type': 'object',
"properties": {
"tasks": {
'type': 'array',
'description': 'List of smaller development steps.',
'items': {
"oneOf": [
step_command_definition(),
step_save_file_definition(),
step_human_intervention_definition(),
]
}
}
},
"required": ['tasks'],
},
},
],
'functions': {
'parse_development_task': lambda tasks: tasks
},
}
ALTERNATIVE_SOLUTIONS = {
'definitions': [
{
'name': 'get_alternative_solutions_to_issue',
'description': 'Gets alternative solutions to the recurring issue that was labeled as loop by the user.',
'parameters': {
'type': 'object',
"properties": {
"description_of_tried_solutions": {
'type': 'string',
'description': 'A description of the solutions that were tried to solve the recurring issue that was labeled as loop by the user.',
},
"alternative_solutions": {
'type': 'array',
'description': 'List of all alternative solutions to the recurring issue that was labeled as loop by the user.',
'items': {
'type': 'string',
'description': 'Development step that needs to be done to complete the entire task.',
}
}
},
"required": ['description_of_tried_solutions', 'alternative_solutions'],
},
}
]
}
DEVELOPMENT_PLAN = {
'definitions': [{
'name': 'implement_development_plan',
'description': 'Implements the development plan.',
'parameters': {
'type': 'object',
"properties": {
"plan": {
"type": "array",
"description": 'List of development tasks that need to be done to implement the entire plan.',
"items": {
"type": "object",
'description': 'Development task that needs to be done to implement the entire plan. It contains all the details that a developer who is not familiar with the project needs to know to implement the task.',
'properties': {
'description': {
'type': 'string',
'description': 'Very detailed description of the development task that needs to be done to implement the entire plan.',
}
},
'required': ['description'],
},
},
},
"required": ['plan'],
},
}],
'functions': {
'implement_development_plan': lambda plan: plan
},
}
UPDATE_DEVELOPMENT_PLAN = {
'definitions': [{
'name': 'update_development_plan',
'description': 'Updates the development plan.',
'parameters': {
'type': 'object',
"properties": {
"updated_current_task": {
"type": "object",
'description': 'Updated current task.',
'properties': {
'description': {
'type': 'string',
'description': 'Updated detailed description of what was implemented while working on the current development task.',
}
},
'required': ['description'],
},
"plan": {
"type": "array",
"description": 'List of unfinished tasks.',
"items": {
"type": "object",
'description': 'List of unfinished tasks.',
'properties': {
'description': {
'type': 'string',
'description': 'Detailed description of the unfinished development task.',
}
},
'required': ['description'],
},
},
},
"required": ['updated_current_task', 'plan'],
},
}],
}
EXECUTE_COMMANDS = {
'definitions': [{
'name': 'execute_commands',
'description': 'Executes a list of commands. ',
'parameters': {
'type': 'object',
'properties': {
'commands': {
'type': 'array',
'description': 'List of commands that need to be executed. Remember, if you need to run a command that doesn\'t finish by itself (eg. a command to run an app), set the timeout to 3000 milliseconds. If you need to create a directory that doesn\'t exist and is not the root project directory, always create it by running a command `mkdir`',
'items': command_definition('A single command that needs to be executed.',
'Timeout in milliseconds that represents the approximate time this command takes to finish. If you need to run a command that doesn\'t finish by itself (eg. a command to run an app), set the timeout to 3000 milliseconds.')
}
},
'required': ['commands'],
},
}],
'functions': {
'execute_commands': lambda commands: commands
}
}
GET_FILE_TO_MODIFY = {
'definitions': [{
'name': 'get_file_to_modify',
'description': 'File that needs to be modified.',
'parameters': {
'type': 'object',
'properties': {
'file': {
'type': 'string',
'description': 'Path to the file that needs to be modified, relative to the project root.',
}
}
}
}],
'functions': {
'get_file_to_modify': lambda file: file
}
}
GET_TEST_TYPE = {
'definitions': [{
'name': 'test_changes',
'description': 'Tests the changes based on the test type.',
'parameters': {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'description': 'Type of a test that needs to be run. If this is just an intermediate step in getting a task done, put `no_test` as the type and we\'ll just go onto the next task without testing.',
'enum': ['command_test', 'manual_test', 'no_test']
},
'command': command_definition('Command that needs to be run to test the changes.', 'Timeout in milliseconds that represents the approximate time this command takes to finish. If you need to run a command that doesn\'t finish by itself (eg. a command to run an app), set the timeout to 3000 milliseconds. If you need to create a directory that doesn\'t exist and is not the root project directory, always create it by running a command `mkdir`'),
# 'automated_test_description': {
# 'type': 'string',
# 'description': 'Description of an automated test that needs to be run to test the changes. This should be used only if the test type is "automated_test" and it should thoroughly describe what needs to be done to implement the automated test so that when someone looks at this test can know exactly what needs to be done to implement this automated test.',
# },
'manual_test_description': {
'type': 'string',
'description': 'Description of a manual test that needs to be run to test the changes. This should be used only if the test type is "manual_test".',
}
},
'required': ['type'],
},
}],
'functions': {
'test_changes': lambda type, command=None, automated_test_description=None, manual_test_description=None: (
type, command, automated_test_description, manual_test_description)
}
}
DEBUG_STEPS_BREAKDOWN = {
'definitions': [
{
'name': 'start_debugging',
'description': 'Starts the debugging process based on the list of steps that need to be done to debug the problem.',
'parameters': {
'type': 'object',
'properties': {
'steps': {
'type': 'array',
'description': 'List of steps that need to be done to debug the problem.',
'items': {
"oneOf": [
step_command_definition(True),
step_save_file_definition(),
step_human_intervention_definition(),
]
}
}
},
"required": ['steps'],
},
},
],
'functions': {
'start_debugging': lambda steps: steps
},
}
GET_DOCUMENTATION_FILE = {
'definitions': [{
'name': 'get_documentation_file',
'description': 'Gets the full content of requested documentation file.',
'parameters': {
'type': 'object',
'properties': {
'path': {
'type': 'string',
'description': 'Relative path of the documentation file with the file name that needs to be saved.',
},
'content': {
'type': 'string',
'description': 'Full content of the documentation file that needs to be saved on the disk.',
},
},
'required': ['path', 'content'],
},
}],
}
REVIEW_CHANGES = {
'definitions': [{
'name': 'review_diff',
'description': 'Review a unified diff and select hunks to apply or rework.',
'parameters': {
"type": "object",
"properties": {
"hunks": {
"type": "array",
"items": {
"type": "object",
"properties": {
"number": {
"type": "integer",
"description": "Index of the hunk in the diff. Starts from 1."
},
"reason": {
"type": "string",
"description": "Reason for applying or ignoring this hunk, or for asking for it to be reworked."
},
"decision": {
"type": "string",
"enum": ["apply", "ignore", "rework"],
"description": "Whether to apply this hunk, rework, or ignore it."
}
},
"required": ["number", "reason", "decision"],
"additionalProperties": False
},
},
"review_notes": {
"type": "string"
}
},
"required": ["hunks", "review_notes"],
"additionalProperties": False
}
}],
}
GET_BUG_REPORT_MISSING_DATA = {
'definitions': [{
'name': 'bug_report_missing_data',
'description': 'Review the bug report and identify missing data. List questions that need to be answered to proceed with the bug fix. If no additional questions are needed, missing_data should be an empty array.',
'parameters': {
"type": "object",
"properties": {
"missing_data": {
"type": "array",
"items": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "Very clear question that needs to be answered to have good bug report.",
},
},
"required": ["question"],
"additionalProperties": False
},
}
},
"required": ["missing_data"],
"additionalProperties": False
}
}],
}
LIST_RELEVANT_FILES = {
'definitions': [{
'name': 'list_relevant_files',
'description': 'List of relevant files for the current task.',
'parameters': {
"type": "object",
"properties": {
"relevant_files": {
"type": "array",
"items": {
"type": "string",
"description": "Path to the file that is relevant for the current task, relative to the project root."
},
}
},
"required": ["relevant_files"],
"additionalProperties": False
}
}],
}
DESCRIBE_FILE = {
'definitions': [{
'name': 'describe_file',
'description': 'Describe the content of the file.',
'parameters': {
"type": "object",
"properties": {
"summary": {
"type": "string",
"description": "Describe in detail the functionality being defined or implemented in this file. Be as detailed as possible."
},
"references": {
"type": "array",
"items": {
"type": "string",
"description": "Path to a file that is referenced in the current file, relative to the project root.",
},
"description": "List of file references."
}
},
"required": ["summary", "references"],
"additionalProperties": False,
}
}]
}


@@ -1,42 +0,0 @@
MESSAGE_TYPE = {
'verbose': 'verbose',
'stream': 'stream',
'user_input_request': 'user_input_request', # Displayed above the
'hint': 'hint', # Hint text, eg "Do you want to add anything else? If not, just press ENTER."
'info': 'info', # JSON data can be sent to progress `progress_stage`
'local': 'local',
'run_command': 'run_command', # Command to run server needed for extension only
'project_folder_name': 'project_folder_name', # Project folder name for extension only
'button': 'button', # Button text for extension only
'buttons-only': 'buttons-only', # Button text for extension only but input field is disabled
'exit': 'exit', # Exit message to let extension know we are done
'ipc': 'ipc', # Regular print message that is for extension only
'openFile': 'openFile', # Open a file in extension
'loadingFinished': 'loadingFinished', # Marks end of loading project
'loopTrigger': 'loopTrigger', # Trigger loop feedback popup in extension
'progress': 'progress', # Progress bar for extension only
'projectStats': 'projectStats', # Project stats for extension only
'keyExpired': 'keyExpired', # (Free trial) key expired message - for extension only
'inputPrefill': 'inputPrefill', # Prefill input field with text in extension
'projectDescription': 'projectDescription', # Project description for extension only
'featuresList': 'featuresList', # Features list for extension only
}
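# Message types that plain CLI mode ignores; they are meaningful only to the
# VS Code extension (see the per-type comments above).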
LOCAL_IGNORE_MESSAGE_TYPES = [
'info',
'project_folder_name',
'run_command',
'button',
'buttons-only',
'exit',
'ipc',
'openFile',
'loadingFinished',
'loopTrigger',
'progress',
'projectStats',
'keyExpired',
'inputPrefill',
'projectDescription',
'featuresList',
]


@@ -1,7 +0,0 @@
import os
MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS', 8192))
MIN_TOKENS_FOR_GPT_RESPONSE = 600
MAX_QUESTIONS = 5
END_RESPONSE = "EVERYTHING_CLEAR"
API_CONNECT_TIMEOUT = 30 # timeout for connecting to the API and sending the request (seconds)
API_READ_TIMEOUT = 300 # timeout for receiving the response (seconds)


@@ -1,7 +0,0 @@
CHECK_AND_CONTINUE = 'Is everything working? Let me know if something needs to be changed for this task or type "continue" to proceed.'
WHEN_USER_DONE = 'Once you have completed, enter "continue"'
AFFIRMATIVE_ANSWERS = ['', 'y', 'yes', 'ok', 'okay', 'sure', 'absolutely', 'indeed', 'correct', 'affirmative']
NEGATIVE_ANSWERS = ['n', 'no', 'skip', 'negative', 'not now', 'cancel', 'decline', 'stop']
STUCK_IN_LOOP = 'I\'m stuck in a loop'
NONE_OF_THESE = 'none of these'
MAX_PROJECT_NAME_LENGTH = 50


@@ -1,3 +0,0 @@
LARGE_REQUEST_THRESHOLD = 50000 # tokens
SLOW_REQUEST_THRESHOLD = 300 # seconds
LOOP_THRESHOLD = 3 # number of iterations in task to be considered a loop


@@ -1 +0,0 @@
from .database import database_exists, create_database, save_app


@@ -1,8 +0,0 @@
import os
DATABASE_TYPE = os.getenv("DATABASE_TYPE", "sqlite")
DB_NAME = os.getenv("DB_NAME")
DB_HOST = os.getenv("DB_HOST")
DB_PORT = os.getenv("DB_PORT")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")


@@ -1,23 +0,0 @@
from peewee import PostgresqlDatabase
from database.config import DB_NAME, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DATABASE_TYPE
if DATABASE_TYPE == "postgres":
import psycopg2
from psycopg2.extensions import quote_ident
def get_postgres_database():
return PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=DB_PORT)
def create_postgres_database():
conn = psycopg2.connect(
dbname='postgres',
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
conn.autocommit = True
cursor = conn.cursor()
safe_db_name = quote_ident(DB_NAME, conn)
cursor.execute(f"CREATE DATABASE {safe_db_name}")
cursor.close()
conn.close()


@@ -1,5 +0,0 @@
from peewee import SqliteDatabase
from database.config import DB_NAME
def get_sqlite_database():
return SqliteDatabase(DB_NAME)


@@ -1,604 +0,0 @@
from playhouse.shortcuts import model_to_dict
from utils.style import color_yellow, color_red
from peewee import DoesNotExist, IntegrityError
from functools import reduce
import operator
from database.config import DB_NAME, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DATABASE_TYPE
if DATABASE_TYPE == "postgres":
import psycopg2
from psycopg2.extensions import quote_ident
import os
from const.common import PROMPT_DATA_TO_IGNORE, STEPS
from logger.logger import logger
from database.models.components.base_models import database
from database.models.user import User
from database.models.app import App
from database.models.project_description import ProjectDescription
from database.models.user_stories import UserStories
from database.models.user_tasks import UserTasks
from database.models.architecture import Architecture
from database.models.development_planning import DevelopmentPlanning
from database.models.development_steps import DevelopmentSteps
from database.models.environment_setup import EnvironmentSetup
from database.models.development import Development
from database.models.file_snapshot import FileSnapshot
from database.models.command_runs import CommandRuns
from database.models.user_apps import UserApps
from database.models.user_inputs import UserInputs
from database.models.files import File
from database.models.feature import Feature
TABLES = [
User,
App,
ProjectDescription,
UserStories,
UserTasks,
Architecture,
DevelopmentPlanning,
DevelopmentSteps,
EnvironmentSetup,
Development,
FileSnapshot,
CommandRuns,
UserApps,
UserInputs,
File,
Feature,
]
def get_created_apps():
return [model_to_dict(app) for app in App.select().where((App.name.is_null(False)) & (App.status.is_null(False)))]
def get_created_apps_with_steps():
apps = get_created_apps()
for app in apps:
app['id'] = str(app['id'])
app['steps'] = STEPS[:STEPS.index(app['status']) + 1] if app['status'] is not None else []
app['development_steps'] = get_all_app_development_steps(app['id'], loading_steps_only=True)
task_counter = 1
troubleshooting_counter = 1
feature_counter = 1
feature_end_counter = 1
new_development_steps = []
for dev_step in app['development_steps']:
# Filter out unwanted keys first
filtered_step = {k: v for k, v in dev_step.items() if k in {'id', 'prompt_path'}}
if 'breakdown' in filtered_step['prompt_path']:
filtered_step['name'] = f"Task {task_counter}"
task_counter += 1
# Reset troubleshooting counter on finding 'breakdown'
troubleshooting_counter = 1
elif 'iteration' in filtered_step['prompt_path']:
filtered_step['name'] = f"Troubleshooting {troubleshooting_counter}"
troubleshooting_counter += 1
elif 'feature_plan' in filtered_step['prompt_path']:
filtered_step['name'] = f"Feature {feature_counter}"
feature_counter += 1
# Reset task and troubleshooting counters on finding 'feature_plan'
task_counter = 1
troubleshooting_counter = 1
elif 'feature_summary' in filtered_step['prompt_path']:
filtered_step['name'] = f"Feature {feature_end_counter} end"
feature_end_counter += 1
# Update the dev_step in the list
new_development_steps.append(filtered_step)
last_step = get_last_development_step(app['id'])
if last_step:
new_development_steps.append({
'id': last_step['id'],
'prompt_path': last_step['prompt_path'],
'name': 'Latest Step',
})
app['development_steps'] = new_development_steps
return apps
def get_all_app_development_steps(app_id, last_step=None, loading_steps_only=False):
query = DevelopmentSteps.select().where(DevelopmentSteps.app == app_id)
if last_step is not None:
query = query.where(DevelopmentSteps.id <= last_step)
if loading_steps_only:
query = query.where((DevelopmentSteps.prompt_path.contains('breakdown')) |
# (DevelopmentSteps.prompt_path.contains('parse_task')) | Not needed for extension users but we can load these steps if needed
(DevelopmentSteps.prompt_path.contains('iteration')) |
# (DevelopmentSteps.prompt_path.contains('create_readme')) | Not needed for extension users but we can load these steps if needed
(DevelopmentSteps.prompt_path.contains('feature_plan')) |
(DevelopmentSteps.prompt_path.contains('feature_summary')))
return [model_to_dict(dev_step, recurse=False) for dev_step in query]
def get_last_development_step(app_id, last_step=None):
last_dev_step_query = DevelopmentSteps.select().where(DevelopmentSteps.app == app_id)
if last_step is not None:
last_dev_step_query = last_dev_step_query.where(DevelopmentSteps.id <= last_step)
# Order by ID in descending order to get the last step and fetch the first result
last_dev_step = last_dev_step_query.order_by(DevelopmentSteps.id.desc()).first()
# If a step is found, convert it to a dictionary, otherwise return None
return model_to_dict(last_dev_step, recurse=False) if last_dev_step else None
def save_user(user_id, email, password):
try:
user = User.get(User.id == user_id)
return user
except DoesNotExist:
try:
existing_user = User.get(User.email == email)
return existing_user
except DoesNotExist:
return User.create(id=user_id, email=email, password=password)
def update_app_status(app_id, new_status):
try:
app = App.get(App.id == app_id)
app.status = new_status
app.save()
return True
except DoesNotExist:
return False
def get_user(user_id=None, email=None):
if not user_id and not email:
raise ValueError("Either user_id or email must be provided")
query = []
if user_id:
query.append(User.id == user_id)
if email:
query.append(User.email == email)
try:
user = User.get(reduce(operator.or_, query))
return user
except DoesNotExist:
raise ValueError("No user found with provided id or email")
def save_app(project):
args = project.args
app_status = getattr(project, "current_step", None)
try:
app = project.app
if app is None:
app = App.get(App.id == args['app_id'])
for key, value in args.items():
if key != 'app_id' and value is not None:
setattr(app, key, value)
app.status = app_status
app.save()
except DoesNotExist:
if args.get('user_id') is not None:
try:
user = get_user(user_id=args['user_id'])
except ValueError:
user = save_user(args['user_id'], args['email'], args['password'])
args['user_id'] = user.id
args['email'] = user.email
else:
user = None
app = App.create(
id=args['app_id'],
user=user,
app_type=args.get('app_type'),
name=args.get('name'),
status=app_status
)
return app
def save_user_app(user_id, app_id, workspace):
try:
user_app = UserApps.get((UserApps.user == user_id) & (UserApps.app == app_id))
user_app.workspace = workspace
user_app.save()
except DoesNotExist:
user_app = UserApps.create(user=user_id, app=app_id, workspace=workspace)
return user_app
def save_progress(app_id, step, data):
progress_table_map = {
'project_description': ProjectDescription,
'user_stories': UserStories,
'user_tasks': UserTasks,
'architecture': Architecture,
'development_planning': DevelopmentPlanning,
'environment_setup': EnvironmentSetup,
'development': Development,
}
data['step'] = step
ProgressTable = progress_table_map.get(step)
if not ProgressTable:
raise ValueError(f"Invalid step: {step}")
app = get_app(app_id)
# Use the get_or_create method, which attempts to retrieve a record
# or creates a new one if it does not exist.
progress, created = ProgressTable.get_or_create(app=app, defaults=data)
# If the record was not created, it already existed and we should update it
if not created:
for key, value in data.items():
setattr(progress, key, value)
progress.save()
update_app_status(app_id, step)
return progress
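# save_progress keeps one row per (app, step): get_or_create inserts on first save,
# otherwise the existing row is updated field by field, and the app status is advanced.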
def edit_development_plan(app_id, update_data):
try:
dev_plan = DevelopmentPlanning.get(app=app_id)
except DevelopmentPlanning.DoesNotExist:
print(color_red(f"No development plan found for app {app_id}"), category='error')
return None
for key, value in update_data.items():
setattr(dev_plan, key, value)
dev_plan.save()
return dev_plan
def edit_feature_plan(app_id, update_data):
try:
dev_plan = (DevelopmentSteps.select()
.where((DevelopmentSteps.app == app_id) & (DevelopmentSteps.prompt_path.contains('feature_plan')))
.order_by(DevelopmentSteps.created_at.desc())
.get())
except DevelopmentSteps.DoesNotExist:
print(color_red(f"No feature plan found for app {app_id}"), category='error')
return None
for key, value in update_data.items():
setattr(dev_plan, key, value)
dev_plan.save()
return dev_plan
def get_app(app_id, error_if_not_found=True):
try:
app = App.get(App.id == app_id)
return app
except DoesNotExist:
if error_if_not_found:
raise ValueError(f"No app with id: {app_id}; use python main.py --get-created-apps-with-steps to see created apps")
return None
def get_app_by_user_workspace(user_id, workspace):
try:
user_app = UserApps.get((UserApps.user == user_id) & (UserApps.workspace == workspace))
return user_app.app
except DoesNotExist:
return None
def get_progress_steps(app_id, step=None):
progress_table_map = {
'project_description': ProjectDescription,
'user_stories': UserStories,
'user_tasks': UserTasks,
'architecture': Architecture,
'development_planning': DevelopmentPlanning,
'environment_setup': EnvironmentSetup,
'development': Development,
}
if step:
ProgressTable = progress_table_map.get(step)
if not ProgressTable:
raise ValueError(f"Invalid step: {step}")
try:
progress = ProgressTable.get(ProgressTable.app_id == app_id)
return model_to_dict(progress)
except DoesNotExist:
return None
else:
steps = {}
for step, ProgressTable in progress_table_map.items():
try:
progress = ProgressTable.get(ProgressTable.app_id == app_id)
steps[step] = model_to_dict(progress)
except DoesNotExist:
steps[step] = None
return steps
def get_db_model_from_hash_id(model, app_id, previous_step, high_level_step):
try:
db_row = model.get(
(model.app == app_id) & (model.previous_step == previous_step) & (model.high_level_step == high_level_step))
except DoesNotExist:
return None
return db_row
def hash_and_save_step(Model, app_id, unique_data_fields, data_fields, message):
for field, value in data_fields.items():
unique_data_fields[field] = value
try:
inserted_id = (Model
.insert(**unique_data_fields)
.execute())
record = Model.get_by_id(inserted_id)
logger.debug(color_yellow(f"{message} with id {record.id}"))
except IntegrityError:
logger.warn(f"A record with data {unique_data_fields} already exists for {Model.__name__}.")
return None
return record
def save_development_step(project, prompt_path, prompt_data, messages, llm_response, exception=None):
data_fields = {
'messages': messages,
'llm_response': llm_response,
'prompt_path': prompt_path,
'prompt_data': {} if prompt_data is None else {k: v for k, v in prompt_data.items() if
k not in PROMPT_DATA_TO_IGNORE and not callable(v)},
'llm_req_num': project.llm_req_num,
'token_limit_exception_raised': exception
}
unique_data = {
'app': project.args['app_id'],
'previous_step': project.checkpoints['last_development_step']['id'] if project.checkpoints['last_development_step'] else None,
'high_level_step': project.current_step,
}
development_step = hash_and_save_step(DevelopmentSteps, project.args['app_id'], unique_data, data_fields,
"Saved Development Step")
project.checkpoints['last_development_step'] = model_to_dict(development_step)
project.save_files_snapshot(development_step.id)
def save_command_run(project, command, cli_response, done_or_error_response, exit_code):
if project.current_step != 'coding':
return
unique_data = {
'app': project.args['app_id'],
'previous_step': project.checkpoints['last_command_run'],
'high_level_step': str(project.checkpoints['last_development_step']['id']) if project.checkpoints['last_development_step'] else None,
}
data_fields = {
'command': command,
'cli_response': cli_response,
'done_or_error_response': done_or_error_response,
'exit_code': exit_code,
}
command_run = hash_and_save_step(CommandRuns, project.args['app_id'], unique_data, data_fields, "Saved Command Run")
project.checkpoints['last_command_run'] = command_run
return command_run
def save_user_input(project, query, user_input, hint):
if project.current_step != 'coding':
return
unique_data = {
'app': project.args['app_id'],
'previous_step': project.checkpoints['last_user_input'],
'high_level_step': str(project.checkpoints['last_development_step']['id']) if project.checkpoints['last_development_step'] else None,
}
data_fields = {
'query': query,
'user_input': user_input,
'hint': hint,
}
user_input = hash_and_save_step(UserInputs, project.args['app_id'], unique_data, data_fields, "Saved User Input")
project.checkpoints['last_user_input'] = user_input
return user_input
def delete_all_subsequent_steps(project):
app = get_app(project.args['app_id'])
delete_subsequent_steps(DevelopmentSteps, app, project.checkpoints['last_development_step'])
# after implementing backwards compatibility, we no longer need to delete subsequent steps for CommandRuns and UserInputs
# delete_subsequent_steps(CommandRuns, app, project.checkpoints['last_command_run'])
# delete_subsequent_steps(UserInputs, app, project.checkpoints['last_user_input'])
def delete_subsequent_steps(Model, app, step):
if isinstance(step, dict):
step_id = step.get('id')
elif hasattr(step, 'id'):
step_id = step.id
else:
step_id = None
logger.info(color_red(f"Deleting subsequent {Model.__name__} steps after {step_id}"))
subsequent_steps = Model.select().where(
(Model.app == app) & (Model.previous_step == step_id))
for subsequent_step in subsequent_steps:
if subsequent_step:
delete_subsequent_steps(Model, app, subsequent_step)
subsequent_step.delete_instance()
if Model == DevelopmentSteps:
FileSnapshot.delete().where(FileSnapshot.development_step == subsequent_step).execute()
Feature.delete().where(Feature.previous_step == subsequent_step).execute()
def get_all_connected_steps(step, previous_step_field_name):
"""Recursively get all steps connected to the given step."""
connected_steps = [step]
prev_step = getattr(step, previous_step_field_name)
while prev_step is not None:
connected_steps.append(prev_step)
prev_step = getattr(prev_step, previous_step_field_name)
return connected_steps
def delete_all_app_development_data(app):
models = [DevelopmentSteps, CommandRuns, UserInputs, UserApps, File, FileSnapshot]
for model in models:
model.delete().where(model.app == app).execute()
def delete_app(app_id):
app = get_app(app_id, False)
if not app:
return
delete_all_app_development_data(app)
App.delete().where(App.id == app.id).execute()
print(color_yellow(f"Deleted app {app_id} from GPT Pilot database. Project files were NOT deleted."))
def delete_unconnected_steps_from(step, previous_step_field_name):
if step is None:
return
connected_steps = get_all_connected_steps(step, previous_step_field_name)
connected_step_ids = [s.id for s in connected_steps]
unconnected_steps = DevelopmentSteps.select().where(
(DevelopmentSteps.app == step.app) &
(DevelopmentSteps.id.not_in(connected_step_ids))
).order_by(DevelopmentSteps.id.desc())
for unconnected_step in unconnected_steps:
print(color_red(f"Deleting unconnected {step.__class__.__name__} step {unconnected_step.id}"))
unconnected_step.delete_instance()
def save_file_description(project, path, name, description):
(File.insert(app=project.app, path=path, name=name, description=description)
.on_conflict(
conflict_target=[File.app, File.name, File.path],
preserve=[],
update={'description': description})
.execute())
def save_feature(app_id, summary, messages, previous_step):
try:
app = get_app(app_id)
feature = Feature.create(app=app, summary=summary, messages=messages, previous_step=previous_step)
return feature
except DoesNotExist:
raise ValueError(f"No app with id: {app_id}")
def get_features_by_app_id(app_id):
try:
app = get_app(app_id)
features = DevelopmentSteps.select().where(
(DevelopmentSteps.app_id == app) &
(DevelopmentSteps.prompt_path.contains("feature_plan"))
).order_by(DevelopmentSteps.created_at)
features_dict = [model_to_dict(feature) for feature in features]
# return only 'summary' and 'id', because we store all of prompt_data in the DB
return [{'summary': feature['prompt_data']['feature_description'], 'id': feature['id']} for feature in features_dict]
except DoesNotExist:
raise ValueError(f"No app with id: {app_id}")
def create_tables():
with database:
database.create_tables(TABLES)
def drop_tables():
with database.atomic():
for table in TABLES:
if DATABASE_TYPE == "postgres":
sql = f'DROP TABLE IF EXISTS "{table._meta.table_name}" CASCADE'
elif DATABASE_TYPE == "sqlite":
sql = f'DROP TABLE IF EXISTS "{table._meta.table_name}"'
else:
raise ValueError(f"Unsupported DATABASE_TYPE: {DATABASE_TYPE}")
database.execute_sql(sql)
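# Example of the SQL built above, for a model whose table is 'development_steps':
#   postgres: DROP TABLE IF EXISTS "development_steps" CASCADE
#   sqlite:   DROP TABLE IF EXISTS "development_steps"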
def database_exists():
try:
database.connect()
database.close()
return True
except Exception:
return False
def create_database():
if DATABASE_TYPE == "postgres":
# Connect to the default 'postgres' database to create a new database
conn = psycopg2.connect(
dbname='postgres',
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
conn.autocommit = True
cursor = conn.cursor()
# Safely quote the database name
safe_db_name = quote_ident(DB_NAME, conn)
# Use the safely quoted database name in the SQL query
cursor.execute(f"CREATE DATABASE {safe_db_name}")
cursor.close()
conn.close()
else:
# SQLite creates the database file automatically on first connect, nothing to do here
pass
def tables_exist():
existing_tables = database.get_tables()
for table in TABLES:
if table._meta.table_name not in existing_tables:
return False
return True
if __name__ == "__main__":
drop_tables()
create_tables()

View File

@@ -1,11 +0,0 @@
from peewee import ForeignKeyField, CharField
from database.models.components.base_models import BaseModel
from database.models.user import User
class App(BaseModel):
user = ForeignKeyField(User, backref='apps')
app_type = CharField(null=True)
name = CharField(null=True)
status = CharField(null=True)

View File

@@ -1,15 +0,0 @@
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class Architecture(ProgressStep):
if DATABASE_TYPE == 'postgres':
architecture = BinaryJSONField()
else:
architecture = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'architecture'

View File

@@ -1,21 +0,0 @@
from peewee import AutoField, ForeignKeyField, TextField, CharField, IntegerField
from database.models.components.base_models import BaseModel
from database.models.app import App
class CommandRuns(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
command = TextField(null=True)
cli_response = TextField(null=True)
done_or_error_response = TextField(null=True)
exit_code = IntegerField(null=True)
previous_step = ForeignKeyField('self', null=True, column_name='previous_step')
high_level_step = CharField(null=True)
class Meta:
table_name = 'command_runs'
indexes = (
(('app', 'previous_step', 'high_level_step'), True),
)
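# Note: each entry in peewee's Meta.indexes is ((field, ...), unique_flag); the
# trailing True creates a UNIQUE index, which is what lets hash_and_save_step()
# above detect duplicate steps via IntegrityError.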

View File

@@ -1,23 +0,0 @@
from peewee import Model, UUIDField, DateTimeField
from datetime import datetime
from uuid import uuid4
from database.config import DATABASE_TYPE
from database.connection.postgres import get_postgres_database
from database.connection.sqlite import get_sqlite_database
# Establish connection to the database
if DATABASE_TYPE == "postgres":
database = get_postgres_database()
else:
database = get_sqlite_database()
class BaseModel(Model):
id = UUIDField(primary_key=True, default=uuid4)
created_at = DateTimeField(default=datetime.now)
updated_at = DateTimeField(default=datetime.now)
class Meta:
database = database

View File

@@ -1,23 +0,0 @@
from peewee import ForeignKeyField, CharField, BooleanField, DateTimeField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class ProgressStep(BaseModel):
app = ForeignKeyField(App, primary_key=True, on_delete='CASCADE')
step = CharField()
if DATABASE_TYPE == 'postgres':
app_data = BinaryJSONField()
data = BinaryJSONField(null=True)
messages = BinaryJSONField(null=True)
else:
app_data = JSONField()
data = JSONField(null=True)
messages = JSONField(null=True)
completed = BooleanField(default=False)
completed_at = DateTimeField(null=True)

View File

@@ -1,14 +0,0 @@
import json
from peewee import TextField
class JSONField(TextField):
def python_value(self, value):
if value is not None:
return json.loads(value)
return value
def db_value(self, value):
if value is not None:
return json.dumps(value)
return value
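# A minimal sketch of the round-trip this field provides (hypothetical model):
#
#   class Example(BaseModel):
#       payload = JSONField(null=True)
#
#   row = Example.create(payload={'key': [1, 2, 3]})   # stored as a JSON string
#   Example.get_by_id(row.id).payload                  # -> {'key': [1, 2, 3]}
#
# Peewee calls db_value() on write and python_value() on read, so callers only
# ever deal with plain Python objects.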

View File

@@ -1,6 +0,0 @@
from database.models.components.progress_step import ProgressStep
class Development(ProgressStep):
class Meta:
table_name = 'development'

View File

@@ -1,14 +0,0 @@
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class DevelopmentPlanning(ProgressStep):
if DATABASE_TYPE == 'postgres':
development_plan = BinaryJSONField()
else:
development_plan = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'development_planning'

View File

@@ -1,32 +0,0 @@
from peewee import ForeignKeyField, AutoField, TextField, IntegerField, CharField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class DevelopmentSteps(BaseModel):
id = AutoField() # This will serve as the primary key
app = ForeignKeyField(App, on_delete='CASCADE')
prompt_path = TextField(null=True)
llm_req_num = IntegerField(null=True)
token_limit_exception_raised = TextField(null=True)
if DATABASE_TYPE == 'postgres':
messages = BinaryJSONField(null=True)
llm_response = BinaryJSONField(null=False)
prompt_data = BinaryJSONField(null=True)
else:
messages = JSONField(null=True) # Custom JSON field for SQLite
llm_response = JSONField(null=False) # Custom JSON field for SQLite
prompt_data = JSONField(null=True)
previous_step = ForeignKeyField('self', null=True, column_name='previous_step')
high_level_step = CharField(null=True)
class Meta:
table_name = 'development_steps'
indexes = (
(('app', 'previous_step', 'high_level_step'), True),
)

View File

@@ -1,6 +0,0 @@
from database.models.components.progress_step import ProgressStep
class EnvironmentSetup(ProgressStep):
class Meta:
table_name = 'environment_setup'

View File

@@ -1,21 +0,0 @@
from peewee import ForeignKeyField, CharField, BooleanField, DateTimeField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.development_steps import DevelopmentSteps
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class Feature(BaseModel):
app = ForeignKeyField(App, backref='feature', on_delete='CASCADE')
summary = CharField()
if DATABASE_TYPE == 'postgres':
messages = BinaryJSONField(null=True)
else:
messages = JSONField(null=True)
previous_step = ForeignKeyField(DevelopmentSteps, column_name='previous_step')
completed = BooleanField(default=False)
completed_at = DateTimeField(null=True)

View File

@@ -1,46 +0,0 @@
import logging
from peewee import ForeignKeyField, BlobField
from database.models.components.base_models import BaseModel
from database.models.development_steps import DevelopmentSteps
from database.models.app import App
from database.models.files import File
log = logging.getLogger(__name__)
class SmartBlobField(BlobField):
"""
A binary blob field that can also accept/return utf-8 strings.
This is a temporary workaround for the fact that we're passing either binary
or string contents to the database. Once this is cleaned up, we should only
accept binary content and explicitly convert from/to strings as needed.
"""
def db_value(self, value):
if isinstance(value, str):
log.warning("FileSnapshot content is a string, expected bytes, working around it.")
value = value.encode("utf-8")
return super().db_value(value)
def python_value(self, value):
val = bytes(super().python_value(value))
try:
return val.decode("utf-8")
except UnicodeDecodeError:
return val
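# Illustrative round-trip (values are hypothetical):
#   snapshot.content = "print('hi')"  ->  stored as b"print('hi')", read back as str
#   snapshot.content = b"\x89PNG..."  ->  stored as-is, read back as bytes
#     (the utf-8 decode fails, so python_value() returns the raw bytes)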
class FileSnapshot(BaseModel):
app = ForeignKeyField(App, on_delete='CASCADE')
development_step = ForeignKeyField(DevelopmentSteps, backref='files', on_delete='CASCADE')
file = ForeignKeyField(File, on_delete='CASCADE', null=True)
content = SmartBlobField()
class Meta:
table_name = 'file_snapshot'
indexes = (
(('development_step', 'file'), True),
)

View File

@@ -1,53 +0,0 @@
from pathlib import Path
from os.path import commonprefix, join, sep
from peewee import AutoField, CharField, TextField, ForeignKeyField
from database.models.components.base_models import BaseModel
from database.models.app import App
class File(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
name = CharField()
path = CharField()
full_path = CharField()
description = TextField(null=True)
class Meta:
indexes = (
(('app', 'name', 'path'), True),
)
@staticmethod
def update_paths():
workspace_dir = Path(__file__).parent.parent.parent.parent / "workspace"
if not workspace_dir.exists():
# This should only happen on first run
return
paths = [file.full_path for file in File.select(File.full_path).distinct()]
if not paths:
# No paths in the database, so nothing to fix
return
common_prefix = commonprefix(paths)
if commonprefix([common_prefix, str(workspace_dir)]) == str(workspace_dir):
# Paths are up to date, nothing to fix
return
common_sep = "\\" if ":\\" in common_prefix else "/"
common_parts = common_prefix.split(common_sep)
try:
workspace_index = common_parts.index("workspace")
except ValueError:
# There's something strange going on, better not touch anything
return
old_prefix = common_sep.join(common_parts[:workspace_index + 1])
print(f"Updating file paths from {old_prefix} to {workspace_dir}")
for file in File.select().where(File.full_path.startswith(old_prefix)):
parts = file.full_path.split(common_sep)
new_path = str(workspace_dir) + sep + sep.join(parts[workspace_index + 1:])
file.full_path = new_path
file.save()

View File

@@ -1,10 +0,0 @@
from peewee import TextField
from database.models.components.progress_step import ProgressStep
class ProjectDescription(ProgressStep):
prompt = TextField()
summary = TextField()
class Meta:
table_name = 'project_description'

View File

@@ -1,8 +0,0 @@
from peewee import CharField
from database.models.components.base_models import BaseModel
class User(BaseModel):
email = CharField(unique=True)
password = CharField()

View File

@@ -1,18 +0,0 @@
from peewee import AutoField, CharField, ForeignKeyField
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.user import User
class UserApps(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
user = ForeignKeyField(User, on_delete='CASCADE')
workspace = CharField(null=True)
class Meta:
table_name = 'user_apps'
indexes = (
(('app', 'user'), True),
)

View File

@@ -1,20 +0,0 @@
from peewee import AutoField, ForeignKeyField, TextField, CharField
from database.models.components.base_models import BaseModel
from database.models.app import App
class UserInputs(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
query = TextField(null=True)
user_input = TextField(null=True)
hint = TextField(null=True)
previous_step = ForeignKeyField('self', null=True, column_name='previous_step')
high_level_step = CharField(null=True)
class Meta:
table_name = 'user_inputs'
indexes = (
(('app', 'previous_step', 'high_level_step'), True),
)

View File

@@ -1,13 +0,0 @@
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class UserStories(ProgressStep):
if DATABASE_TYPE == 'postgres':
user_stories = BinaryJSONField()
else:
user_stories = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'user_stories'

View File

@@ -1,14 +0,0 @@
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class UserTasks(ProgressStep):
if DATABASE_TYPE == 'postgres':
user_tasks = BinaryJSONField()
else:
user_tasks = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'user_tasks'

View File

@@ -1,6 +0,0 @@
from dotenv import load_dotenv
load_dotenv(override=True)
from database.database import create_tables, drop_tables
drop_tables()
create_tables()

View File

@@ -1,4 +0,0 @@
class Agent:
def __init__(self, role, project):
self.role = role
self.project = project

View File

@@ -1,281 +0,0 @@
import json
import re
import subprocess
import uuid
from os.path import sep
from utils.style import color_yellow, color_yellow_bold, color_red_bold
from database.database import save_development_step
from helpers.exceptions import TokenLimitError, ApiError
from utils.function_calling import parse_agent_response, FunctionCallSet
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import get_prompt, get_sys_message, capitalize_first_word_with_underscores
from logger.logger import logger
from prompts.prompts import ask_user
from const.llm import END_RESPONSE
from helpers.cli import running_processes
from utils.telemetry import telemetry
class AgentConvo:
"""
Represents a conversation with an agent.
Args:
agent: An instance of the agent participating in the conversation.
"""
def __init__(self, agent, temperature: float = 0.7):
# [{'role': 'system'|'user'|'assistant', 'content': ''}, ...]
self.messages: list[dict] = []
self.branches = {}
self.log_to_user = True
self.agent = agent
self.high_level_step = self.agent.project.current_step
self.temperature = temperature
# add system message
system_message = get_sys_message(self.agent.role, self.agent.project.args)
logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
system_message['content'])
self.messages.append(system_message)
def send_message(self, prompt_path=None, prompt_data=None, function_calls: FunctionCallSet = None, should_log_message=True):
"""
Sends a message in the conversation.
Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the message.
should_log_message: Flag if final response should be logged.
Returns:
The response from the agent.
"""
# craft message
self.construct_and_add_message_from_prompt(prompt_path, prompt_data)
# TODO: move this if block (and the other below) to Developer agent - https://github.com/Pythagora-io/gpt-pilot/issues/91#issuecomment-1751964079
# check if we already have the LLM response saved
if hasattr(self.agent, 'save_dev_steps') and self.agent.save_dev_steps:
self.agent.project.llm_req_num += 1
self.agent.project.finish_loading()
try:
self.replace_files()
response = create_gpt_chat_completion(self.messages, self.high_level_step, self.agent.project,
function_calls=function_calls, prompt_data=prompt_data,
temperature=self.temperature)
except TokenLimitError as e:
save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, {"text": ""}, str(e))
raise e
# TODO: move this code to Developer agent - https://github.com/Pythagora-io/gpt-pilot/issues/91#issuecomment-1751964079
if hasattr(self.agent, 'save_dev_steps') and self.agent.save_dev_steps:
save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, response)
# TODO handle errors from OpenAI
# It's complicated because the calling functions expect different types of responses - string or tuple
# https://github.com/Pythagora-io/gpt-pilot/issues/165 & #91
if response == {} or response is None:
# This should never happen since we're raising ApiError in create_gpt_chat_completion
# Leaving this in place in case there's a case where this can still happen
logger.error('Aborting with "OpenAI API error happened"')
print(color_red_bold('There was an error talking to OpenAI API. Please try again later.'))
payload_size_kb = len(json.dumps(self.messages)) // 1000
raise ApiError(f"Unknown API error (prompt: {prompt_path}, request size: {payload_size_kb}KB)")
try:
response = parse_agent_response(response, function_calls)
except (KeyError, json.JSONDecodeError) as err:
logger.error("Error while parsing LLM response: {err.__class__.__name__}: {err}")
print(color_red_bold(f'There was an error parsing LLM response: \"{err.__class__.__name__}: {err}\". Please try again later.'))
raise ApiError(f"Error parsing LLM response: {err.__class__.__name__}: {err}: Response text: {response}") from err
message_content = self.format_message_content(response, function_calls)
# TODO we need to specify the response when there is a function called
# TODO maybe we can have a specific function that creates the GPT response from the function call
logger.info('\n>>>>>>>>>> Assistant Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
message_content)
self.messages.append({"role": "assistant", "content": message_content})
if should_log_message:
self.log_message(message_content)
if self.agent.project.check_ipc():
telemetry.output_project_stats()
return response
def format_message_content(self, response, function_calls):
# TODO remove this once the database is set up properly
if isinstance(response, str):
return response
else:
return json.dumps(response)
# TODO END
def continuous_conversation(self, prompt_path, prompt_data, function_calls=None):
"""
Conducts a continuous conversation with the agent.
Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the conversation.
Returns:
List of accepted messages in the conversation.
"""
self.log_to_user = False
accepted_messages = []
response = self.send_message(prompt_path, prompt_data, function_calls)
# Continue conversation until GPT response equals END_RESPONSE
while response != END_RESPONSE:
user_message = ask_user(self.agent.project,
'Do you want to add anything else? If not, just press ENTER.',
hint=response,
require_some_input=False)
if user_message == "":
accepted_messages.append(response)
logger.info('\n>>>>>>>>>> User Message >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', user_message)
self.messages.append({"role": "user", "content": user_message})
response = self.send_message(None, None, function_calls)
self.log_to_user = True
return accepted_messages
def save_branch(self, branch_name=None):
if branch_name is None:
branch_name = str(uuid.uuid4())
self.branches[branch_name] = self.messages.copy()
return branch_name
def load_branch(self, branch_name, reload_files=True):
self.messages = self.branches[branch_name].copy()
if reload_files:
# TODO make this more flexible - save metadata with every message so that, whenever we load a branch, all messages can be reconstructed from scratch
self.replace_files()
def replace_files(self):
relevant_files = getattr(self.agent, 'relevant_files', None)
files = self.agent.project.get_all_coded_files(relevant_files=relevant_files)
for msg in self.messages:
if msg['role'] == 'user':
new_content = self.replace_files_in_one_message(files, msg["content"])
if new_content != msg["content"]:
msg["content"] = new_content
def replace_files_in_one_message(self, files, message):
# This needs to EXACTLY match the formatting in `files_list.prompt`
replacement_lines = ["\n---START_OF_FILES---"]
for file in files:
path = f"{file['path']}{sep}{file['name']}"
content = file['content']
replacement_lines.append(f"**{path}** ({ file['lines_of_code'] } lines of code):\n```\n{content}\n```\n")
replacement_lines.append("---END_OF_FILES---\n")
replacement = "\n".join(replacement_lines)
def replace_cb(_m):
return replacement
pattern = r"\n---START_OF_FILES---\n(.*?)\n---END_OF_FILES---\n"
return re.sub(pattern, replace_cb, message, flags=re.MULTILINE|re.DOTALL)
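# Note: together with replace_files() above, this keeps earlier user messages
# current by swapping the whole ---START_OF_FILES--- block for a freshly
# rendered listing, instead of appending the files to the conversation again.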
@staticmethod
def escape_specials(s):
s = s.replace("\\", "\\\\")
# List of sequences to preserve
sequences_to_preserve = [
# todo check if needed "\\\\", # Backslash - probably not needed, e.g. it would break paths on Windows
"\\'", # Single quote
'\\"', # Double quote
# todo check if needed '\\a', # ASCII Bell (BEL)
# todo check if needed '\\b', # ASCII Backspace (BS) - note: different from regex \b
# todo check if needed '\\f', # ASCII Formfeed (FF)
'\\n', # ASCII Linefeed (LF)
# todo check if needed '\\r', # ASCII Carriage Return (CR)
'\\t', # ASCII Horizontal Tab (TAB)
# todo check if needed '\\v' # ASCII Vertical Tab (VT)
]
for seq in sequences_to_preserve:
s = s.replace('\\\\' + seq[-1], seq)
return s
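# Illustrative behavior (shown as raw strings): all backslashes are doubled
# first, then the whitelisted escape sequences are restored:
#   escape_specials(r"foo\bar")   ->  r"foo\\bar"   (\b is not preserved)
#   escape_specials(r"line\nend") ->  r"line\nend"  (\n is restored)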
def convo_length(self):
return len([msg for msg in self.messages if msg['role'] != 'system'])
def log_message(self, content):
"""
Logs a message in the conversation.
Args:
content: The content of the message to be logged.
"""
print_msg = capitalize_first_word_with_underscores(self.high_level_step)
if self.log_to_user:
if self.agent.project.checkpoints['last_development_step'] is not None:
dev_step_msg = f'\nDev step {str(self.agent.project.checkpoints["last_development_step"]["id"])}\n'
if not self.agent.project.check_ipc():
print(color_yellow_bold(dev_step_msg), end='')
logger.info(dev_step_msg)
try:
print(f"\n{content}\n", type='local')
except Exception: # noqa
# Workaround for Windows encoding crash: https://github.com/Pythagora-io/gpt-pilot/issues/509
safe_content = content.encode('ascii', 'ignore').decode('ascii')
print(f"\n{safe_content}\n", type='local')
logger.info(f"{print_msg}: {content}\n")
def to_context_prompt(self):
logger.info(f'to_context_prompt({self.agent.project.current_step})')
# TODO: get dependencies & versions from the project (package.json, requirements.txt, pom.xml, etc.)
# Ideally, the LLM could do this, and we update it on load & whenever the file changes
# ...or LLM generates a script for `.gpt-pilot/get_dependencies` that we run
# https://github.com/Pythagora-io/gpt-pilot/issues/189
return get_prompt('development/context.prompt', {
'directory_tree': self.agent.project.get_directory_tree(),
'running_processes': running_processes,
})
def to_playground(self):
# Internal function to help debugging in OpenAI Playground, not to be used in production
with open('const/convert_to_playground_convo.js', 'r', encoding='utf-8') as file:
content = file.read()
process = subprocess.Popen('pbcopy', stdin=subprocess.PIPE)
process.communicate(content.replace('{{messages}}', str(self.messages)).encode('utf-8'))
def remove_last_x_messages(self, x):
logger.info('removing last %d messages: %s', x, self.messages[-x:])
self.messages = self.messages[:-x]
def construct_and_add_message_from_prompt(self, prompt_path, prompt_data):
if prompt_path is not None and prompt_data is not None:
prompt = get_prompt(prompt_path, prompt_data)
logger.info('\n>>>>>>>>>> User Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', prompt)
self.messages.append({"role": "user", "content": prompt})

View File

@@ -1,148 +0,0 @@
import platform
import uuid
import re
import traceback
from const.code_execution import MAX_COMMAND_DEBUG_TRIES, MAX_RECURSION_LAYER
from const.function_calls import DEBUG_STEPS_BREAKDOWN
from const.messages import AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS
from helpers.AgentConvo import AgentConvo
from helpers.exceptions import TokenLimitError
from helpers.exceptions import TooDeepRecursionError
from logger.logger import logger
from prompts.prompts import ask_user
from utils.exit import trace_code_event
from utils.print import print_task_progress
class Debugger:
def __init__(self, agent):
self.agent = agent
self.recursion_layer = 0
def debug(self, convo, command=None, user_input=None, issue_description=None, is_root_task=False,
ask_before_debug=False, task_steps=None, step_index=None):
"""
Debug a conversation.
Args:
convo (AgentConvo): The conversation object.
command (dict, optional): The command to debug. Default is None.
user_input (str, optional): User input for debugging. Default is None.
Should provide `command` or `user_input`.
issue_description (str, optional): Description of the issue to debug. Default is None.
is_root_task (bool, optional): True if this is the root task being debugged. Default is False.
ask_before_debug (bool, optional): True if we have to ask the user for permission to start debugging.
task_steps (list, optional): The steps of the task to debug. Default is None.
step_index (int, optional): The index of the step to debug. Default is None.
Returns:
bool: True if debugging was successful, False otherwise.
"""
logger.info('Debugging %s', command)
self.recursion_layer += 1
self.agent.project.current_task.add_debugging_task(self.recursion_layer, command, user_input, issue_description)
if self.recursion_layer > MAX_RECURSION_LAYER:
self.recursion_layer = 0
# TooDeepRecursionError kills all debugging loops and goes back to the point where first debug was called
# it does not retry initial step but instead calls dev_help_needed()
raise TooDeepRecursionError()
function_uuid = str(uuid.uuid4())
convo.save_branch(function_uuid)
success = False
for i in range(MAX_COMMAND_DEBUG_TRIES):
if success:
break
if ask_before_debug or i > 0:
print('yes/no', type='button')
answer = ask_user(self.agent.project, 'Can I start debugging this issue [Y/n/error details]?', require_some_input=False)
if answer.lower() in NEGATIVE_ANSWERS:
self.recursion_layer -= 1
convo.load_branch(function_uuid)
return True
if answer and answer.lower() not in AFFIRMATIVE_ANSWERS:
user_input = answer
self.agent.project.current_task.add_user_input_to_debugging_task(user_input)
print('', type='verbose', category='agent:debugger')
llm_response = convo.send_message('dev_ops/debug.prompt',
{
'command': command['command'] if command is not None else None,
'user_input': user_input,
'issue_description': issue_description,
'task_steps': task_steps,
'step_index': step_index,
'os': platform.system()
},
DEBUG_STEPS_BREAKDOWN)
completed_steps = []
print_task_progress(i+1, i+1, user_input, 'debugger', 'in_progress')
try:
while True:
steps = completed_steps + llm_response['steps']
# TODO refactor to nicely get the developer agent
result = self.agent.project.developer.execute_task(
convo,
steps,
test_command=command,
test_after_code_changes=True,
continue_development=False,
is_root_task=is_root_task,
continue_from_step=len(completed_steps),
task_source='debugger',
)
# in case one step failed or llm wants to see the output to determine the next steps
if 'step_index' in result:
result['os'] = platform.system()
step_index = result['step_index']
completed_steps = steps[:step_index+1]
result['completed_steps'] = completed_steps
result['current_step'] = steps[step_index]
result['next_steps'] = steps[step_index + 1:]
result['current_step_index'] = step_index
# Remove the previous debug plan and build a new one
convo.remove_last_x_messages(2)
# todo before updating task first check if update is needed
llm_response = convo.send_message('development/task/update_task.prompt', result,
DEBUG_STEPS_BREAKDOWN)
else:
success = result['success']
if not success:
convo.load_branch(function_uuid)
if 'cli_response' in result:
user_input = result['cli_response']
convo.messages[-2]['content'] = re.sub(
r'(?<=The output was:\n\n).*?(?=\n\nThink about this output)',
AgentConvo.escape_specials(result['cli_response']),
convo.messages[-2]['content'],
flags=re.DOTALL
)
break
except TokenLimitError as e:
# initial TokenLimitError is triggered by OpenAI API
# TokenLimitError kills recursion loops 1 by 1 and reloads convo, so it can retry the same initial step
if self.recursion_layer > 0:
convo.load_branch(function_uuid)
self.recursion_layer -= 1
raise e
else:
trace_code_event('token-limit-error', {'error': traceback.format_exc()})
if not success:
convo.load_branch(function_uuid)
continue
except TooDeepRecursionError as e:
convo.load_branch(function_uuid)
raise e
convo.load_branch(function_uuid)
self.recursion_layer -= 1
return success

View File

@@ -1,746 +0,0 @@
import json
import os
from pathlib import Path
from typing import Tuple, Optional, Union
import peewee
from playhouse.shortcuts import model_to_dict
from const.messages import CHECK_AND_CONTINUE, AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS, STUCK_IN_LOOP
from utils.style import color_yellow_bold, color_cyan, color_white_bold, color_red_bold
from const.common import STEPS
from database.database import delete_unconnected_steps_from, delete_all_app_development_data, \
get_all_app_development_steps, delete_all_subsequent_steps, get_features_by_app_id
from const.ipc import MESSAGE_TYPE
from prompts.prompts import ask_user
from helpers.exceptions import TokenLimitError, GracefulExit
from utils.questionary import styled_text
from helpers.files import get_directory_contents, get_file_contents, clear_directory, update_file
from helpers.cli import build_directory_tree
from helpers.agents.TechLead import TechLead
from helpers.agents.Developer import Developer
from helpers.agents.Architect import Architect
from helpers.agents.ProductOwner import ProductOwner
from helpers.agents.TechnicalWriter import TechnicalWriter
from helpers.agents.SpecWriter import SpecWriter
from database.models.development_steps import DevelopmentSteps
from database.models.file_snapshot import FileSnapshot
from database.models.files import File
from logger.logger import logger
from utils.dot_gpt_pilot import DotGptPilot
from utils.llm_connection import test_api_access
from utils.ignore import IgnoreMatcher
from utils.telemetry import telemetry
from utils.task import Task
from utils.utils import remove_lines_with_string
from utils.describe import describe_file
from os.path import abspath, relpath
class Project:
def __init__(
self,
args,
*,
ipc_client_instance=None,
):
"""
Initialize a project.
Args:
args (dict): Project arguments - app_id, (app_type, name), user_id, email, password, step
name (str, optional): Project name. Default is None.
description (str, optional): Project description. Default is None.
user_stories (list, optional): List of user stories. Default is None.
user_tasks (list, optional): List of user tasks. Default is None.
architecture (str, optional): Project architecture. Default is None.
development_plan (str, optional): Development plan. Default is None.
current_step (str, optional): Current step in the project. Default is None.
"""
self.args = args
self.llm_req_num = 0
self.command_runs_count = 0
self.user_inputs_count = 0
self.current_task = Task()
self.checkpoints = {
'last_user_input': None,
'last_command_run': None,
'last_development_step': None,
}
# TODO make flexible
self.root_path = ''
self.skip_until_dev_step = self.args.get('skip_until_dev_step')
self.skip_steps = False
self.main_prompt = None
self.files = []
self.continuing_project = args.get('continuing_project', False)
self.ipc_client_instance = ipc_client_instance
self.finished = False
self.current_step = None
self.name = None
self.project_description = None
self.user_stories = None
self.user_tasks = None
self.architecture = ""
self.system_dependencies = []
self.package_dependencies = []
self.project_template = None
self.development_plan = None
self.previous_features = None
self.current_feature = None
self.is_complex_app = True
self.dot_pilot_gpt = DotGptPilot(log_chat_completions=True)
if os.getenv("AUTOFIX_FILE_PATHS", "").lower() in ["true", "1", "yes"]:
File.update_paths()
# start loading of project (since backwards compatibility)
self.should_overwrite_files = None
self.last_detailed_user_review_goal = None
self.last_iteration = None
self.tasks_to_load = []
self.features_to_load = []
self.dev_steps_to_load = []
self.run_command = None
# end loading of project
def set_root_path(self, root_path: str):
self.root_path = root_path
self.dot_pilot_gpt.with_root_path(root_path)
def setup_loading(self):
if self.skip_until_dev_step == '0':
clear_directory(self.root_path)
delete_all_app_development_data(self.args['app_id'])
self.finish_loading(False)
return
self.skip_steps = True
while self.should_overwrite_files is None:
changes_made_question = f'Did you make any changes to "{self.args["name"]}" project files since last time you used Pythagora?'
print(changes_made_question, type='ipc', category='pythagora')
print('yes/no', type='buttons-only')
# must use styled_text() instead of ask_user() here to avoid finish_loading() call
changes_made = styled_text(
self,
changes_made_question,
ignore_user_input_count=True,
)
# if there were no changes just load files from db
if changes_made.lower() in NEGATIVE_ANSWERS:
self.should_overwrite_files = True
break
# otherwise ask user if they want to use those changes
elif changes_made.lower() in AFFIRMATIVE_ANSWERS:
use_changes_question = 'Do you want to use those changes you made?'
use_changes_msg = 'yes'
dont_use_changes_msg = 'no, restore last pythagora state'
print(use_changes_question, type='ipc', category='pythagora')
print(f'{use_changes_msg}/{dont_use_changes_msg}', type='buttons-only')
print(f'"{dont_use_changes_msg}" means Pythagora will restore (overwrite) all files to last stored state.\n'
f'"{use_changes_msg}" means Pythagora will continue working on project using current state of files.', type='hint')
use_changes = styled_text(
self,
use_changes_question,
ignore_user_input_count=True
)
logger.info('Use changes: %s', use_changes)
if use_changes.lower() in NEGATIVE_ANSWERS + [dont_use_changes_msg]:
self.should_overwrite_files = True
elif use_changes.lower() in AFFIRMATIVE_ANSWERS + [use_changes_msg]:
self.should_overwrite_files = False
load_step_before_coding = ('step' in self.args and
self.args['step'] is not None and
STEPS.index(self.args['step']) < STEPS.index('coding'))
if load_step_before_coding:
if not self.should_overwrite_files:
print(color_red_bold('Cannot load step before "coding" without overwriting files. You have to reload '
'the app and select "Use GPT Pilot\'s code" but you will lose all coding progress'
' on this project.'))
raise GracefulExit()
clear_directory(self.root_path)
delete_all_app_development_data(self.args['app_id'])
return
self.dev_steps_to_load = get_all_app_development_steps(self.args['app_id'], last_step=self.skip_until_dev_step)
if self.dev_steps_to_load:
self.checkpoints['last_development_step'] = self.dev_steps_to_load[-1]
self.tasks_to_load = [el for el in self.dev_steps_to_load if 'breakdown.prompt' in el.get('prompt_path', '')]
self.features_to_load = [el for el in self.dev_steps_to_load if 'feature_plan.prompt' in el.get('prompt_path', '')]
self.run_command = next((el for el in reversed(self.dev_steps_to_load) if 'get_run_command.prompt' in el.get('prompt_path', '')), None)
if self.run_command is not None:
self.run_command = json.loads(self.run_command['llm_response']['text'])['command']
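# At this point the loader knows which dev steps to replay (dev_steps_to_load),
# which of those are task breakdowns vs. feature plans, and the last generated
# run command, so start()/finish() can fast-forward instead of re-asking the LLM.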
def start(self):
"""
Start the project.
"""
telemetry.start()
telemetry.set("app_id", self.args["app_id"])
if not test_api_access(self):
return False
if self.continuing_project:
self.setup_loading()
self.project_manager = ProductOwner(self)
self.spec_writer = SpecWriter(self)
print('', type='verbose', category='agent:product-owner')
self.project_manager.get_project_description(self.spec_writer)
telemetry.set("initial_prompt", self.project_description)
print({ "project_description": self.project_description }, type='projectDescription')
self.project_manager.get_user_stories()
# self.user_tasks = self.project_manager.get_user_tasks()
print('', type='verbose', category='agent:architect')
self.architect = Architect(self)
self.architect.get_architecture()
self.developer = Developer(self)
self.developer.set_up_environment()
self.technical_writer = TechnicalWriter(self)
print('', type='verbose', category='agent:tech-lead')
self.tech_lead = TechLead(self)
self.tech_lead.create_development_plan()
telemetry.set("architecture", {
"description": self.architecture,
"system_dependencies": self.system_dependencies,
"package_dependencies": self.package_dependencies,
})
self.dot_pilot_gpt.write_project(self)
print(json.dumps({
"project_stage": "coding"
}), type='info')
self.developer.start_coding('app')
return True
def finish(self):
"""
Finish the project.
"""
while True:
feature_description = ''
if not self.features_to_load:
self.finish_loading()
self.previous_features = get_features_by_app_id(self.args['app_id'])
print({"featuresList": self.previous_features}, type='featuresList')
if not self.skip_steps:
print('', type='verbose', category='pythagora')
if self.run_command and self.check_ipc():
print(self.run_command, type='run_command')
print('continue', type='button')
feature_description = ask_user(self, "Project is finished! Do you want to add any features or changes? "
"If yes, describe it here and if no, just press ENTER",
require_some_input=False)
if feature_description == '' or feature_description == 'continue':
return
print('', type='verbose', category='agent:tech-lead')
self.tech_lead.create_feature_plan(feature_description)
# loading of features
else:
num_of_features = len(self.features_to_load)
# last feature is always the one we want to load
current_feature = self.features_to_load[-1]
self.tech_lead.convo_feature_plan.messages = current_feature['messages'] + [{"role": "assistant", "content": current_feature['llm_response']['text']}]
target_id = current_feature['id']
self.cleanup_list('tasks_to_load', target_id)
self.cleanup_list('dev_steps_to_load', target_id)
# if there is a feature_summary.prompt among the remaining dev steps, the feature is fully done;
# finish loading and ask the user to add another feature or finish the project
feature_summary_dev_step = next((el for el in reversed(self.dev_steps_to_load) if 'feature_summary.prompt' in el.get('prompt_path', '')), None)
if feature_summary_dev_step is not None:
self.cleanup_list('dev_steps_to_load', feature_summary_dev_step['id'])
self.finish_loading()
print(f'loaded {num_of_features} features')
continue
print(f'Loaded {num_of_features - 1} features!')
print(f'Continuing feature #{num_of_features}...')
self.development_plan = json.loads(current_feature['llm_response']['text'])['plan']
feature_description = current_feature['prompt_data']['feature_description']
self.features_to_load = []
self.current_feature = feature_description
self.developer.start_coding('feature')
print('', type='verbose', category='agent:tech-lead')
self.tech_lead.create_feature_summary(feature_description)
def get_directory_tree(self, with_descriptions=False):
"""
Get the directory tree of the project.
Args:
with_descriptions (bool, optional): Whether to include descriptions. Default is False.
Returns:
dict: The directory tree.
"""
return build_directory_tree(self.root_path)
def get_test_directory_tree(self):
"""
Get the directory tree of the tests.
Returns:
dict: The directory tree of tests.
"""
# TODO remove hardcoded path
return build_directory_tree(self.root_path + '/tests')
def get_files_from_db_by_step_id(self, step_id):
"""
Get all coded files associated with a specific step_id.
Args:
step_id (int): The ID of the step.
Returns:
list: A list of coded files associated with the step_id.
"""
if step_id is None:
return []
file_snapshots = FileSnapshot.select().where(FileSnapshot.development_step_id == step_id)
return [{
"name": item['file']['name'],
"path": item['file']['path'],
"full_path": item['file']['full_path'],
'content': item['content'],
"lines_of_code": len(item['content'].splitlines()),
} for item in [model_to_dict(file) for file in file_snapshots]]
@staticmethod
def relpath(file: Union[File, str]) -> str:
"""
Return relative file path (including the name) within a project
:param file: File object or file path
:return: Relative file path
"""
if isinstance(file, File):
fpath = f"{file.path}/{file.name}"
else:
fpath = file
if fpath.startswith("/"):
fpath = fpath[1:]
elif fpath.startswith("./"):
fpath = fpath[2:]
return fpath
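# Illustrative results:
#   Project.relpath('./pilot/server.js')                    -> 'pilot/server.js'
#   Project.relpath(File(path='pilot', name='server.js'))   -> 'pilot/server.js'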
def get_all_database_files(self) -> list[File]:
"""
Get all project files from the database.
"""
return (
File
.select()
.where(
(File.app_id == self.args['app_id']) &
peewee.fn.EXISTS(FileSnapshot.select().where(FileSnapshot.file_id == File.id))
)
)
def get_all_coded_files(self, relevant_files=None):
"""
Get all coded files in the project.
Returns:
list: A list of coded files.
"""
files = self.get_all_database_files()
if relevant_files:
n_files0 = len(files)
files = [file for file in files if self.relpath(file) in relevant_files]
n_files1 = len(files)
rel_txt = ",".join(relevant_files) if relevant_files else "(none)"
logger.debug(f"[get_all_coded_files] reduced context from {n_files0} to {n_files1} files, using: {rel_txt}")
return self.get_files([file.path + '/' + file.name for file in files])
def get_file_summaries(self) -> Optional[dict[str, str]]:
"""
Get summaries of all coded files in the project.
:returns: A dictionary of file summaries, or None if file filtering is not enabled.
"""
if os.getenv('FILTER_RELEVANT_FILES', '').lower().strip() in ['false', '0', 'no', 'off']:
return None
files = self.get_all_database_files()
return {self.relpath(file): file.description or "(unknown)" for file in files if os.path.exists(file.full_path)}
def get_files(self, files):
"""
Get file contents.
Args:
files (list): List of file paths.
Returns:
list: A list of files with content.
"""
matcher = IgnoreMatcher(root_path=self.root_path)
files_with_content = []
for file_path in files:
try:
# TODO path is sometimes relative and sometimes absolute - fix at some point
_, full_path = self.get_full_file_path(file_path, file_path)
file_data = get_file_contents(full_path, self.root_path)
except ValueError:
full_path = None
file_data = {"path": file_path, "name": os.path.basename(file_path), "content": ''}
if full_path and file_data["content"] != "" and not matcher.ignore(full_path):
files_with_content.append(file_data)
return files_with_content
def find_input_required_lines(self, file_content):
"""
Parses the provided string (representing file content) and returns a list of tuples containing
the line number and line content for lines that contain the text 'INPUT_REQUIRED'.
:param file_content: The string content of the file.
:return: A list of tuples (line number, line content).
"""
lines_with_input_required = []
lines = file_content.split('\n')
for line_number, line in enumerate(lines, start=1):
if 'INPUT_REQUIRED' in line:
lines_with_input_required.append((line_number, line.strip()))
return lines_with_input_required
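# Illustrative call:
#   find_input_required_lines('API_KEY = ""  # INPUT_REQUIRED\nprint("ok")')
#     ->  [(1, 'API_KEY = ""  # INPUT_REQUIRED')]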
def save_file(self, data):
"""
Save a file.
Args:
data: { name: 'hello.py', path: 'path/to/hello.py', content: 'print("Hello!")' }
"""
name = data['name'] if 'name' in data and data['name'] != '' else os.path.basename(data['path'])
path = data['path'] if 'path' in data else name
path, full_path = self.get_full_file_path(path, name)
update_file(full_path, data['content'], project=self)
if full_path not in self.files:
self.files.append(full_path)
if path and path[0] == '/':
path = path.lstrip('/')
description = describe_file(self, relpath(abspath(full_path), abspath(self.root_path)), data['content'])
(File.insert(app=self.app, path=path, name=name, full_path=full_path, description=description)
.on_conflict(
conflict_target=[File.app, File.name, File.path],
preserve=[],
update={'name': name, 'path': path, 'full_path': full_path, 'description': description})
.execute())
if not self.skip_steps:
inputs_required = self.find_input_required_lines(data['content'])
for line_number, line_content in inputs_required:
user_input = None
print('', type='verbose', category='human-intervention')
print(color_yellow_bold(f'Input required on line {line_number}:\n{line_content}') + '\n')
while user_input is None or user_input.lower() not in AFFIRMATIVE_ANSWERS + ['continue']:
print({'path': full_path, 'line': line_number}, type='openFile')
print('continue', type='buttons-only')
user_input = ask_user(
self,
f'Please open the file {data["path"]} on the line {line_number} and add the required input. Please, also remove "// INPUT_REQUIRED" comment and once you\'re done, press "continue".',
require_some_input=False,
ignore_user_input_count=True
)
def get_full_file_path(self, file_path: str, file_name: str) -> Tuple[str, str]:
"""
Combine file path and name into a full file path.
:param file_path: File path.
:param file_name: File name.
:return: (file_path, absolute_path) pair.
Tries to combine the two in a way that makes the most sense, even if the given
paths have some shared components.
"""
def normalize_path(path: str) -> Tuple[str, str]:
"""
Normalizes a path (see rules in comments) and returns (directory, basename) pair.
:param path: Path to normalize.
:return: (directory, basename) pair.
Directory component may be empty if the path is considered to be a
file name. Basename component may be empty if the path is considered
to be a directory name.
"""
# Normalize path to use os-specific separator (as GPT may output paths
# with / even if we're on Windows)
path = str(Path(path))
# If a path references user's home directory (~), we only care about
# the relative part within it (assume ~ is meant to be the project path).
# Examples:
# - /Users/zvonimirsabljic/Development/~/pilot/server.js -> /pilot/server.js
# - ~/pilot/server.js -> /pilot/server.js
if "~" in path:
path = path.split("~")[-1]
# If the path explicitly references the current directory, remove it so we
# can nicely use it for joins later.
if path == "." or path.startswith(f".{os.path.sep}"):
path = path[1:]
# If the path is absolute, we only care about the relative part within
# the project directory (assume the project directory is the root).
# Examples:
# - /Users/zvonimirsabljic/Development/copilot/pilot/server.js -> /pilot/server.js
# - /pilot/server.js -> /pilot/server.js
# - C:\Users\zvonimirsabljic\Development\copilot\pilot\server.js -> \pilot\server.js
path = path.replace(self.root_path, '')
# If the final component of the path doesn't have a file extension,
# assume it's a directory and add a final (back)slash.
# Examples:
# - /pilot/server.js -> /pilot/server.js
# - /pilot -> /pilot/
# - \pilot\server.js -> \pilot\server.js
# - \pilot -> \pilot\
KNOWN_FILES = ["makefile", "dockerfile", "procfile", "readme", "license", "podfile", "gemfile"] # known exceptions that break the heuristic
KNOWN_DIRS = [] # known exceptions that break the heuristic
base = os.path.basename(path)
if (
base
and ("." not in base or base.lower() in KNOWN_DIRS)
and base.lower() not in KNOWN_FILES
):
path += os.path.sep
# In case we're in Windows and dealing with full paths, remove the drive letter.
_, path = os.path.splitdrive(path)
# We want all paths to start with / (or \\ in Windows)
if not path.startswith(os.path.sep):
path = os.path.sep + path
return os.path.split(path)
head_path, tail_path = normalize_path(file_path)
head_name, tail_name = normalize_path(file_name)
# Prefer directory path from the first argument (file_path), and
# prefer the file name from the second argument (file_name).
final_file_path = head_path if head_path != '' else head_name
final_file_name = tail_name if tail_name != '' else tail_path
# If the directory is contained in the second argument (file_name),
# use that (as it might include additional subdirectories).
if head_path in head_name:
final_file_path = head_name
# Try to combine the directory and file name from the two arguments
# in the way that makes the most sensible output.
if final_file_path != head_name and head_name not in head_path:
if '.' in tail_path:
final_file_path = head_name + head_path
else:
final_file_path = head_path + head_name
if final_file_path == '':
final_file_path = os.path.sep
final_absolute_path = os.path.join(self.root_path, final_file_path[1:], final_file_name)
return final_file_path, final_absolute_path
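# Illustrative result on POSIX, assuming self.root_path == '/project':
#   get_full_file_path('pilot', 'server.js')
#     ->  ('/pilot', '/project/pilot/server.js')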
def save_files_snapshot(self, development_step_id, summaries=None):
if summaries is None:
summaries = {}
files = get_directory_contents(self.root_path)
development_step, created = DevelopmentSteps.get_or_create(id=development_step_id)
total_files = 0
total_lines = 0
for file in files:
if not self.check_ipc():
print(color_cyan(f'Saving file {file["full_path"]}'))
# TODO this can be optimized so we don't go to the db each time
if file['path'] and file['path'][0] == '/':
file['path'] = file['path'].lstrip('/')
file_in_db, created = File.get_or_create(
app=self.app,
name=file['name'],
path=file['path'],
defaults={
'full_path': file['full_path'],
'description': summaries.get(os.path.relpath(file['full_path'], self.root_path), ''),
},
)
file_snapshot, _ = FileSnapshot.get_or_create(
app=self.app,
development_step=development_step,
file=file_in_db,
defaults={'content': file.get('content', '')}
)
file_snapshot.content = file['content']
# For a non-empty file that doesn't have a stored description yet, create one
if file['content'] and not file_in_db.description:
file_in_db.description = describe_file(self, relpath(abspath(file['full_path']), abspath(self.root_path)), file['content'])
file_in_db.save()
file_snapshot.save()
total_files += 1
if isinstance(file['content'], str):
total_lines += file['content'].count('\n') + 1
telemetry.set("num_files", total_files)
telemetry.set("num_lines", total_lines)
def restore_files(self, development_step_id):
development_step = DevelopmentSteps.get(DevelopmentSteps.id == development_step_id)
file_snapshots = FileSnapshot.select().where(FileSnapshot.development_step == development_step)
clear_directory(self.root_path, ignore=self.files)
for file_snapshot in file_snapshots:
try:
update_file(file_snapshot.file.full_path, file_snapshot.content, project=self)
except (PermissionError, NotADirectoryError) as err: # noqa
print(f"Error restoring file {file_snapshot.file.full_path}: {err}")
if file_snapshot.file.full_path not in self.files:
self.files.append(file_snapshot.file.full_path)
def delete_all_steps_except_current_branch(self):
delete_unconnected_steps_from(self.checkpoints['last_development_step'], 'previous_step')
delete_unconnected_steps_from(self.checkpoints['last_command_run'], 'previous_step')
delete_unconnected_steps_from(self.checkpoints['last_user_input'], 'previous_step')
def ask_for_human_intervention(self, message, description=None, cbs=None, convo=None, is_root_task=False,
add_loop_button=False, category='human-intervention'):
if cbs is None:  # avoid the mutable default argument pitfall
cbs = {}
print('', type='verbose', category=category)
answer = ''
question = color_yellow_bold(message)
if description is not None:
question += '\n' + '-' * 100 + '\n' + color_white_bold(description) + '\n' + '-' * 100 + '\n'
reset_branch_id = None if convo is None else convo.save_branch()
while answer.lower() != 'continue':
print('continue' + (f'/{STUCK_IN_LOOP}' if add_loop_button else ''), type='button')
answer = ask_user(self, CHECK_AND_CONTINUE,
require_some_input=False,
hint=question)
try:
if answer.lower() in cbs:
return cbs[answer.lower()](convo)
elif answer != '':
return {'user_input': answer}
except TokenLimitError as e:
if is_root_task and answer.lower() not in cbs and answer != '':
convo.load_branch(reset_branch_id)
return {'user_input': answer}
else:
raise e
def log(self, text, message_type):
if self.check_ipc():
self.ipc_client_instance.send({
'type': MESSAGE_TYPE[message_type],
'content': str(text),
})
if message_type == MESSAGE_TYPE['user_input_request']:
return self.ipc_client_instance.listen()
else:
print(text)
def check_ipc(self):
"""
Checks if there is an open Inter-Process Communication (IPC) connection.
Returns:
bool: True if there is an open IPC connection, False otherwise.
"""
return self.ipc_client_instance is not None and self.ipc_client_instance.client is not None
def finish_loading(self, do_cleanup=True):
# if already done, don't do it again
if not self.skip_steps:
return
print('', type='loadingFinished')
if do_cleanup and self.checkpoints['last_development_step']:
if self.should_overwrite_files:
self.restore_files(self.checkpoints['last_development_step']['id'])
else:
# Peewee query conditions must be combined with `&`; Python's `and` would
# evaluate to just the second condition here.
FileSnapshot.delete().where(
(FileSnapshot.app == self.app) & (FileSnapshot.development_step == int(self.checkpoints['last_development_step']['id']))).execute()
self.save_files_snapshot(int(self.checkpoints['last_development_step']['id']))
delete_all_subsequent_steps(self)
self.tasks_to_load = []
self.features_to_load = []
self.dev_steps_to_load = []
self.last_detailed_user_review_goal = None
self.last_iteration = None
self.skip_steps = False
def cleanup_list(self, list_name, target_id):
if target_id is None or list_name is None:
return
temp_list = getattr(self, list_name, [])
# Find the index of the first el with 'id' greater than target_id
index = next((i for i, el in enumerate(temp_list) if el['id'] >= target_id), len(temp_list))
new_list = temp_list[index:]
if list_name == 'dev_steps_to_load' and len(new_list) == 0:
# needed for finish_loading() because then we restore files, and we need last dev step
self.checkpoints['last_development_step'] = temp_list[index - 1]
# Keep only the elements from that index onwards
setattr(self, list_name, new_list)
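# Illustrative example (hypothetical data): with a list holding elements
# whose ids are [10, 20, 30] and target_id == 20, the list is trimmed to
# the elements with ids [20, 30]; the last_development_step checkpoint is
# only updated when the trimmed dev_steps_to_load would be empty.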
def remove_debugging_logs_from_all_files(self):
project_files = self.get_all_coded_files()
for file in project_files:
if 'gpt_pilot_debugging_log' in file['content'].lower():
# remove all lines that contain 'debugging_log'
file['content'] = remove_lines_with_string(file['content'], 'gpt_pilot_debugging_log')
self.save_file(file)

View File

@@ -1,63 +0,0 @@
Roles are defined in `const.common.ROLES`.
Each agent's role is described to the LLM by a prompt in `pilot/prompts/system_messages/{role}.prompt`
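A minimal sketch (the helper name is hypothetical, not part of the codebase) of how a role's system prompt could be resolved from that layout:

```python
from pathlib import Path

def load_system_message(role: str) -> str:
    # e.g. role = "architect" -> pilot/prompts/system_messages/architect.prompt
    return Path("pilot/prompts/system_messages", f"{role}.prompt").read_text()
```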
## Product Owner
`project_description`, `user_stories`, `user_tasks`
- Talk to the client and ask detailed questions about what they want
- Give specifications to the dev team
## Architect
`architecture`
- Scripts: Node.js, MongoDB, PeeWee ORM
- Testing: Node.js -> Jest, Python -> pytest, E2E -> Cypress **(TODO - BDD?)**
- Frontend: Bootstrap, vanilla Javascript **(TODO - TypeScript, Material/Styled, React/Vue/other?)**
- Other: cronjob, Socket.io
TODO:
- README.md
- .gitignore
- .editorconfig
- LICENSE
- CI/CD
- IaC, Dockerfile
## Tech Lead
`development_planning`
- Break down the project into smaller tasks for devs.
- Specify each task as clearly as possible (see the sketch below):
  - Description
  - "Programmatic goal", which determines if the task can be marked as done,
    eg: "the server needs to start on port 3000 and accept an API request
    to the URL `http://localhost:3000/ping`, returning status code 200"
  - "User-review goal",
    eg: "run `npm run start`, open `http://localhost:3000/ping`, and see "Hello World" on the screen"
## Dev Ops
`environment_setup`
**TODO: no prompt**
`debug` functions: `run_command`, `implement_changes`
## Developer (full_stack_developer)
`create_scripts`, `coding`
- Implement tasks assigned by tech lead
- Modular code, TDD
- Tasks provided as "programmatic goals" **(TODO: consider BDD)**
## Code Monkey
`create_scripts`, `coding`, `implement_changes`
`implement_changes` functions: `save_files`
- Implement tasks assigned by tech lead
- Modular code, TDD

View File

@@ -1,116 +0,0 @@
from utils.utils import step_already_finished
from helpers.Agent import Agent
import json
from utils.style import color_green_bold, color_yellow_bold
from const.function_calls import ARCHITECTURE
from const.common import EXAMPLE_PROJECT_ARCHITECTURE
import platform
from utils.utils import should_execute_step, generate_app_data
from database.database import save_progress, get_progress_steps
from logger.logger import logger
from helpers.AgentConvo import AgentConvo
from prompts.prompts import ask_user
from templates import PROJECT_TEMPLATES
ARCHITECTURE_STEP = 'architecture'
WARN_SYSTEM_DEPS = ["docker", "kubernetes", "microservices"]
WARN_FRAMEWORKS = ["next.js", "vue", "vue.js", "svelte", "angular"]
WARN_FRAMEWORKS_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/Using-GPT-Pilot-with-frontend-frameworks"
class Architect(Agent):
def __init__(self, project):
super().__init__('architect', project)
self.convo_architecture = None
def get_architecture(self):
print(json.dumps({
"project_stage": "architecture"
}), type='info')
self.project.current_step = ARCHITECTURE_STEP
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], ARCHITECTURE_STEP)
if step and not should_execute_step(self.project.args['step'], ARCHITECTURE_STEP):
step_already_finished(self.project.args, step)
self.project.architecture = None
self.project.system_dependencies = None
self.project.package_dependencies = None
self.project.project_template = None
db_data = step["architecture"]
if db_data:
if isinstance(db_data, dict):
self.project.architecture = db_data["architecture"]
self.project.system_dependencies = db_data["system_dependencies"]
self.project.package_dependencies = db_data["package_dependencies"]
self.project.project_template = db_data.get("project_template")
elif isinstance(db_data, list):
self.project.architecture = ""
self.project.system_dependencies = [
{
"name": dep,
"description": "",
"test": "",
"required_locally": False
} for dep in db_data
]
self.project.package_dependencies = []
self.project.project_template = None
return
print(color_green_bold("Planning project architecture...\n"))
logger.info("Planning project architecture...")
self.convo_architecture = AgentConvo(self)
if self.project.project_manager.is_example_project:
llm_response = EXAMPLE_PROJECT_ARCHITECTURE
else:
llm_response = self.convo_architecture.send_message('architecture/technologies.prompt',
{'name': self.project.args['name'],
'app_summary': self.project.project_description,
'user_stories': self.project.user_stories,
'user_tasks': self.project.user_tasks,
"os": platform.system(),
'app_type': self.project.args['app_type'],
"templates": PROJECT_TEMPLATES,
},
ARCHITECTURE
)
self.project.architecture = llm_response["architecture"]
self.project.system_dependencies = llm_response["system_dependencies"]
self.project.package_dependencies = llm_response["package_dependencies"]
self.project.project_template = llm_response["template"]
warn_system_deps = [dep["name"] for dep in self.project.system_dependencies if dep["name"].lower() in WARN_SYSTEM_DEPS]
warn_package_deps = [dep["name"] for dep in self.project.package_dependencies if dep["name"].lower() in WARN_FRAMEWORKS]
if warn_system_deps:
print(color_yellow_bold(
f"Warning: GPT Pilot doesn't officially support {', '.join(warn_system_deps)}. "
f"You can try to use {'it' if len(warn_system_deps) == 1 else 'them'}, but you may run into problems."
))
print('continue', type='buttons-only')
ask_user(self.project, "Press ENTER if you still want to proceed. If you'd like to modify the project description, close the app and start a new one.", require_some_input=False)
if warn_package_deps:
print(color_yellow_bold(
f"Warning: GPT Pilot works best with vanilla JavaScript. "
f"You can try try to use {', '.join(warn_package_deps)}, but you may run into problems. "
f"Visit {WARN_FRAMEWORKS_URL} for more information."
))
print('continue', type='buttons-only')
ask_user(self.project, "Press ENTER if you still want to proceed. If you'd like to modify the project description, close the app and start a new one.", require_some_input=False)
logger.info(f"Final architecture: {self.project.architecture}")
save_progress(self.project.args['app_id'], self.project.current_step, {
"messages": self.convo_architecture.messages,
"architecture": llm_response,
"app_data": generate_app_data(self.project.args)
})
return

View File

@@ -1,443 +0,0 @@
import os.path
import re
from typing import Optional
from traceback import format_exc
from difflib import unified_diff
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from helpers.files import get_file_contents
from const.function_calls import GET_FILE_TO_MODIFY, REVIEW_CHANGES
from logger.logger import logger
from utils.exit import trace_code_event
from utils.telemetry import telemetry
# Constant for indicating missing new line at the end of a file in a unified diff
NO_EOL = "\\ No newline at end of file"
# Regular expression pattern for matching hunk headers
PATCH_HEADER_PATTERN = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@")
MAX_REVIEW_RETRIES = 2
class CodeMonkey(Agent):
save_dev_steps = True
def __init__(self, project):
super().__init__('code_monkey', project)
def get_original_file(
self,
code_changes_description: str,
step: dict[str, str],
files: list[dict],
) -> tuple[str, str]:
"""
Get the original file content and name.
:param code_changes_description: description of the code changes
:param step: information about the step being implemented
:param files: list of files to send to the LLM
:return: tuple of (file_name, file_content)
"""
# If we're called as a result of debugging, we don't have the name/path of the file
# to modify so we need to figure that out first.
if 'path' not in step or 'name' not in step:
file_to_change = self.identify_file_to_change(code_changes_description, files)
step['path'] = os.path.dirname(file_to_change)
step['name'] = os.path.basename(file_to_change)
rel_path, abs_path = self.project.get_full_file_path(step['path'], step['name'])
for f in files:
# Take into account that step path might start with "/"
if (f['path'] == step['path'] or (os.path.sep + f['path'] == step['path'])) and f['name'] == step['name'] and f['content']:
file_content = f['content']
break
else:
# If we didn't find a match (because of incorrect or doubled path separators or similar), fall back to directly loading the file
try:
file_content = get_file_contents(abs_path, self.project.root_path)['content']
if isinstance(file_content, bytes):
# We should never want to change a binary file, but if we do end up here, let's not crash
file_content = "... <binary file, content omitted> ..."
except ValueError:
# File doesn't exist, we probably need to create a new one
file_content = ""
file_name = os.path.join(rel_path, step['name'])
return file_name, file_content
def implement_code_changes(
self,
convo: Optional[AgentConvo],
step: dict[str, str],
) -> AgentConvo:
"""
Implement code changes described in `code_changes_description`.
:param convo: conversation to continue (must contain file coding/modification instructions)
:param step: information about the step being implemented
"""
previous_temperature = convo.temperature
convo.temperature = 0.0
code_change_description = step.get('code_change_description')
files = self.project.get_all_coded_files()
file_name, file_content = self.get_original_file(code_change_description, step, files)
print('', type='verbose', category='agent:code-monkey')
if file_content:
print(f'Updating existing file {file_name}:')
else:
print(f'Creating new file {file_name}:')
# Get the new version of the file
content = self.replace_complete_file(
convo,
file_content,
file_name,
files,
)
for i in range(MAX_REVIEW_RETRIES):
if not content or content == file_content:
# There are no changes, or there was a problem talking to the LLM; we're done here
break
print('Sending code for review...', type='verbose', category='agent:code-monkey')
print('', type='verbose', category='agent:reviewer')
content, rework_feedback = self.review_change(convo, code_change_description, file_name, file_content, content)
print('Review finished. Continuing...', type='verbose', category='agent:code-monkey')
if not rework_feedback:
# No rework needed, we're done here
break
print('', type='verbose', category='agent:code-monkey')
content = convo.send_message('development/review_feedback.prompt', {
"content": content,
"original_content": file_content,
"rework_feedback": rework_feedback,
})
if content:
content = self.remove_backticks(content)
convo.remove_last_x_messages(2)
# If we have changes, update the file
if content and content != file_content:
if not self.project.skip_steps:
delta_lines = len(content.splitlines()) - len(file_content.splitlines())
telemetry.inc("created_lines", delta_lines)
self.project.save_file({
'path': step['path'],
'name': step['name'],
'content': content,
})
convo.temperature = previous_temperature
return convo
def replace_complete_file(
self,
convo: AgentConvo,
file_content: str,
file_name: str,
files: list[dict]
) -> str:
"""
As a fallback, replace the complete file content.
This should only be used if we've failed to replace individual code blocks.
:param convo: AgentConvo instance
:param file_content: content of the file being updated
:param file_name: name of the file being updated
:param files: list of files to send to the LLM
:return: updated file content
Note: if even this fails for any reason, the original content is returned instead.
"""
prev_message = convo.messages[-1]['content']
prev_message_prefix = " ".join(prev_message.split()[:5])
prev_message_postfix = " ".join(prev_message.split()[-5:])
llm_response = convo.send_message('development/implement_changes.prompt', {
"file_content": file_content,
"file_name": file_name,
"files": files,
"prev_message_prefix": prev_message_prefix,
"prev_message_postfix": prev_message_postfix,
})
convo.remove_last_x_messages(2)
return self.remove_backticks(llm_response)
@staticmethod
def remove_backticks(content: str) -> str:
"""
Remove optional backticks from the beginning and end of the content.
:param content: content to remove backticks from
:return: content without backticks
"""
start_pattern = re.compile(r"^\s*```([a-z0-9]+)?\n")
end_pattern = re.compile(r"\n```\s*$")
content = start_pattern.sub("", content)
content = end_pattern.sub("", content)
return content
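# Illustrative example: remove_backticks('```python\nprint("hi")\n```')
# returns 'print("hi")'; content without a fence is returned unchanged.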
def identify_file_to_change(self, code_changes_description: str, files: list[dict]) -> str:
"""
Identify file to change based on the code changes description
:param code_changes_description: description of the code changes
:param files: list of files to send to the LLM
:return: file to change
"""
convo = AgentConvo(self)
llm_response = convo.send_message('development/identify_files_to_change.prompt', {
"code_changes_description": code_changes_description,
"files": files,
}, GET_FILE_TO_MODIFY)
return llm_response["file"]
def review_change(
self,
convo: AgentConvo,
instructions: str,
file_name: str,
old_content: str,
new_content: str
) -> tuple[str, str]:
"""
Review changes that were applied to the file.
This asks the LLM to act as a PR reviewer and for each part (hunk) of the
diff, decide if it should be applied (kept) or ignored (removed from the PR).
:param convo: AgentConvo instance
:param instructions: instructions for the reviewer
:param file_name: name of the file being modified
:param old_content: old file content
:param new_content: new file content (with proposed changes)
:return: tuple with file content update with approved changes, and review feedback
Diff hunk explanation: https://www.gnu.org/software/diffutils/manual/html_node/Hunks.html
"""
hunks = self.get_diff_hunks(file_name, old_content, new_content)
llm_response = convo.send_message('development/review_changes.prompt', {
"instructions": instructions,
"file_name": file_name,
"old_content": old_content,
"hunks": hunks,
}, REVIEW_CHANGES)
messages_to_remove = 2
for i in range(MAX_REVIEW_RETRIES):
reasons = {}
ids_to_apply = set()
ids_to_ignore = set()
ids_to_rework = set()
for hunk in llm_response.get("hunks", []):
reasons[hunk["number"] - 1] = hunk["reason"]
if hunk.get("decision", "").lower() == "apply":
ids_to_apply.add(hunk["number"] - 1)
elif hunk.get("decision", "").lower() == "ignore":
ids_to_ignore.add(hunk["number"] - 1)
elif hunk.get("decision", "").lower() == "rework":
ids_to_rework.add(hunk["number"] - 1)
n_hunks = len(hunks)
n_review_hunks = len(reasons)
if n_review_hunks == n_hunks:
break
elif n_review_hunks < n_hunks:
error = "Not all hunks have been reviewed. Please review all hunks and add 'apply', 'ignore' or 'rework' decision for each."
elif n_review_hunks > n_hunks:
error = f"Your review contains more hunks ({n_review_hunks}) than in the original diff ({n_hunks}). Note that one hunk may have multiple changed lines."
# Max two retries; if the reviewer still hasn't reviewed all hunks, we'll just use the entire new content
llm_response = convo.send_message(
'utils/llm_response_error.prompt', {
"error": error
},
REVIEW_CHANGES,
)
messages_to_remove += 2
else:
# The reviewer failed to review all the hunks in 3 attempts, let's just use all the new content
convo.remove_last_x_messages(messages_to_remove)
return new_content, None
convo.remove_last_x_messages(messages_to_remove)
hunks_to_apply = [ h for i, h in enumerate(hunks) if i in ids_to_apply ]
diff_log = f"--- {file_name}\n+++ {file_name}\n" + "\n".join(hunks_to_apply)
hunks_to_rework = [ (i, h) for i, h in enumerate(hunks) if i in ids_to_rework ]
review_log = "\n\n".join([
f"## Change\n```{hunk}```\nReviewer feedback:\n{reasons[i]}" for (i, hunk) in hunks_to_rework
]) + "\n\nReview notes:\n" + llm_response["review_notes"]
if len(hunks_to_apply) == len(hunks):
print("Applying entire change")
logger.info(f"Applying entire change to {file_name}")
return new_content, None
elif len(hunks_to_apply) == 0:
if hunks_to_rework:
print(f"Requesting rework for {len(hunks_to_rework)} changes with reason: {llm_response['review_notes']}")
logger.info(f"Requesting rework for {len(hunks_to_rework)} changes to {file_name} (0 hunks to apply)")
return old_content, review_log
else:
# If everything can be safely ignored, it's probably because the files already implement the changes
# from previous tasks (which can happen often). Insisting on a change here is likely to cause problems.
print(f"Rejecting entire change with reason: {llm_response['review_notes']}")
logger.info(f"Rejecting entire change to {file_name} with reason: {llm_response['review_notes']}")
return old_content, None
print("Applying code change:\n" + diff_log)
logger.info(f"Applying code change to {file_name}:\n{diff_log}")
new_content = self.apply_diff(file_name, old_content, hunks_to_apply, new_content)
if hunks_to_rework:
print(f"Requesting rework for {len(hunks_to_rework)} changes with reason: {llm_response['review_notes']}")
logger.info(f"Requesting further rework for {len(hunks_to_rework)} changes to {file_name}")
return new_content, review_log
else:
return new_content, None
@staticmethod
def get_diff_hunks(file_name: str, old_content: str, new_content: str) -> list[str]:
"""
Get the diff between two files.
This uses Python difflib to produce a unified diff, then splits
it into hunks that will be separately reviewed by the reviewer.
:param file_name: name of the file being modified
:param old_content: old file content
:param new_content: new file content
:return: change hunks from the unified diff
"""
from_name = "old_" + file_name
to_name = "to_" + file_name
from_lines = old_content.splitlines(keepends=True)
to_lines = new_content.splitlines(keepends=True)
diff_gen = unified_diff(from_lines, to_lines, fromfile=from_name, tofile=to_name)
diff_txt = "".join(diff_gen)
hunks = re.split(r'\n@@', diff_txt)  # re.split's third positional argument is maxsplit, not flags
result = []
for i, h in enumerate(hunks):
# Skip the prologue (file names)
if i == 0:
continue
txt = h.splitlines()
txt[0] = "@@" + txt[0]
result.append("\n".join(txt))
return result
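# Illustrative example: for a single-line change, the returned list holds one
# hunk such as:
#   @@ -1,3 +1,3 @@
#    unchanged line
#   -old line
#   +new line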
def apply_diff(
self,
file_name: str,
old_content: str,
hunks: list[str],
fallback: str
):
"""
Apply the diff to the original file content.
This uses the internal `_apply_patch` method to apply the
approved diff hunks to the original file content.
If patch apply fails, the fallback is the full new file content
with all the changes applied (as if the reviewer approved everything).
:param file_name: name of the file being modified
:param old_content: old file content
:param hunks: change hunks from the unified diff
:param fallback: proposed new file content (with all the changes applied)
"""
diff = "\n".join(
[
f"--- {file_name}",
f"+++ {file_name}",
] + hunks
) + "\n"
try:
fixed_content = self._apply_patch(old_content, diff)
except Exception as e:
# This should never happen but if it does, just use the new version from
# the LLM and hope for the best
print(f"Error applying diff: {e}; hoping all changes are valid")
trace_code_event(
"patch-apply-error",
{
"file": file_name,
"error": str(e),
"traceback": format_exc(),
"original": old_content,
"diff": diff
}
)
return fallback
return fixed_content
# Adapted from https://gist.github.com/noporpoise/16e731849eb1231e86d78f9dfeca3abc (Public Domain)
@staticmethod
def _apply_patch(original: str, patch: str, revert: bool = False):
"""
Apply a patch to a string to recover a newer version of the string.
:param original: The original string.
:param patch: The patch to apply.
:param revert: If True, treat the original string as the newer version and recover the older string.
:return: The updated string after applying the patch.
"""
original_lines = original.splitlines(True)
patch_lines = patch.splitlines(True)
updated_text = ''
index_original = start_line = 0
# Choose which group of the regex to use based on the revert flag
match_index, line_sign = (1, '+') if not revert else (3, '-')
# Skip header lines of the patch
while index_original < len(patch_lines) and patch_lines[index_original].startswith(("---", "+++")):
index_original += 1
while index_original < len(patch_lines):
match = PATCH_HEADER_PATTERN.match(patch_lines[index_original])
if not match:
raise Exception("Bad patch -- regex mismatch [line " + str(index_original) + "]")
line_number = int(match.group(match_index)) - 1 + (match.group(match_index + 1) == '0')
if start_line > line_number or line_number > len(original_lines):
raise Exception("Bad patch -- bad line number [line " + str(index_original) + "]")
updated_text += ''.join(original_lines[start_line:line_number])
start_line = line_number
index_original += 1
while index_original < len(patch_lines) and patch_lines[index_original][0] != '@':
if index_original + 1 < len(patch_lines) and patch_lines[index_original + 1][0] == '\\':
line_content = patch_lines[index_original][:-1]
index_original += 2
else:
line_content = patch_lines[index_original]
index_original += 1
if line_content:
if line_content[0] == line_sign or line_content[0] == ' ':
updated_text += line_content[1:]
start_line += (line_content[0] != line_sign)
updated_text += ''.join(original_lines[start_line:])
return updated_text
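# Illustrative usage (hypothetical strings): applying a single-hunk patch.
#   original = "a\nb\nc\n"
#   patch = "--- f\n+++ f\n@@ -2,1 +2,1 @@\n-b\n+B\n"
#   _apply_patch(original, patch) -> "a\nB\nc\n"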

File diff suppressed because it is too large

View File

@@ -1,172 +0,0 @@
import json
from utils.style import color_green_bold
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from logger.logger import logger
from database.database import get_app, save_progress, save_app, get_progress_steps
from utils.utils import should_execute_step, generate_app_data, step_already_finished, clean_filename
from utils.files import setup_workspace
from prompts.prompts import ask_for_app_type, ask_for_main_app_definition, ask_user
from const.llm import END_RESPONSE
from const.messages import MAX_PROJECT_NAME_LENGTH
from const.common import EXAMPLE_PROJECT_DESCRIPTION
PROJECT_DESCRIPTION_STEP = 'project_description'
USER_STORIES_STEP = 'user_stories'
USER_TASKS_STEP = 'user_tasks'
class ProductOwner(Agent):
def __init__(self, project):
super().__init__('product_owner', project)
self.is_example_project = False
def get_project_description(self, spec_writer):
print(json.dumps({
"project_stage": "project_description"
}), type='info', category='agent:product-owner')
self.project.app = get_app(self.project.args['app_id'], error_if_not_found=False)
# If this app_id already did this step, just get all data from DB and don't ask user again
if self.project.app is not None:
step = get_progress_steps(self.project.args['app_id'], PROJECT_DESCRIPTION_STEP)
if step and not should_execute_step(self.project.args['step'], PROJECT_DESCRIPTION_STEP):
step_already_finished(self.project.args, step)
self.project.set_root_path(setup_workspace(self.project.args))
self.project.project_description = step['summary']
self.project.project_description_messages = step['messages']
self.project.main_prompt = step['prompt']
return
# PROJECT DESCRIPTION
self.project.current_step = PROJECT_DESCRIPTION_STEP
self.is_example_project = False
if 'app_type' not in self.project.args:
self.project.args['app_type'] = ask_for_app_type()
if 'name' not in self.project.args:
while True:
question = 'What is the project name?'
print(question, type='ipc')
print('continue/start an example project', type='button')
project_name = ask_user(self.project, question)
if project_name is not None and project_name.lower() == 'continue':
continue
if len(project_name) <= MAX_PROJECT_NAME_LENGTH:
break
else:
print(f"Hold your horses cowboy! Please, give project NAME with max {MAX_PROJECT_NAME_LENGTH} characters.")
if project_name.lower() == 'start an example project':
self.is_example_project = True
project_name = 'Example Project'
self.project.args['name'] = clean_filename(project_name)
self.project.app = save_app(self.project)
self.project.set_root_path(setup_workspace(self.project.args))
if self.is_example_project:
print(EXAMPLE_PROJECT_DESCRIPTION)
self.project.main_prompt = EXAMPLE_PROJECT_DESCRIPTION
else:
print(color_green_bold(
"GPT Pilot currently works best for web app projects using Node, Express and MongoDB. "
"You can use it with other technologies, but you may run into problems "
"(eg. Svelte might not work as expected).\n"
))
self.project.main_prompt = ask_for_main_app_definition(self.project)
print(json.dumps({'open_project': {
#'uri': 'file:///' + self.project.root_path.replace('\\', '/'),
'path': self.project.root_path,
'name': self.project.args['name'],
}}), type='info')
high_level_messages = []
high_level_summary = spec_writer.create_spec(self.project.main_prompt)
save_progress(self.project.args['app_id'], self.project.current_step, {
"prompt": self.project.main_prompt,
"messages": high_level_messages,
"summary": high_level_summary,
"app_data": generate_app_data(self.project.args)
})
self.project.project_description = high_level_summary
self.project.project_description_messages = high_level_messages
return
# PROJECT DESCRIPTION END
def get_user_stories(self):
if not self.project.args.get('advanced', False):
return
print(json.dumps({
"project_stage": "user_stories"
}), type='info')
self.project.current_step = USER_STORIES_STEP
self.convo_user_stories = AgentConvo(self)
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], USER_STORIES_STEP)
if step and not should_execute_step(self.project.args['step'], USER_STORIES_STEP):
step_already_finished(self.project.args, step)
self.convo_user_stories.messages = step['messages']
self.project.user_stories = step['user_stories']
return
# USER STORIES
msg = "User Stories:\n"
print(color_green_bold(msg))
logger.info(msg)
self.project.user_stories = self.convo_user_stories.continuous_conversation('user_stories/specs.prompt', {
'name': self.project.args['name'],
'prompt': self.project.project_description,
'app_type': self.project.args['app_type'],
'END_RESPONSE': END_RESPONSE
})
logger.info(f"Final user stories: {self.project.user_stories}")
save_progress(self.project.args['app_id'], self.project.current_step, {
"messages": self.convo_user_stories.messages,
"user_stories": self.project.user_stories,
"app_data": generate_app_data(self.project.args)
})
return
# USER STORIES END
def get_user_tasks(self):
self.project.current_step = USER_TASKS_STEP
self.convo_user_stories.high_level_step = self.project.current_step
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], USER_TASKS_STEP)
if step and not should_execute_step(self.project.args['step'], USER_TASKS_STEP):
step_already_finished(self.project.args, step)
return step['user_tasks']
# USER TASKS
msg = "User Tasks:\n"
print(color_green_bold(msg))
logger.info(msg)
self.project.user_tasks = self.convo_user_stories.continuous_conversation('user_stories/user_tasks.prompt',
{'END_RESPONSE': END_RESPONSE})
logger.info(f"Final user tasks: {self.project.user_tasks}")
save_progress(self.project.args['app_id'], self.project.current_step, {
"messages": self.convo_user_stories.messages,
"user_tasks": self.project.user_tasks,
"app_data": generate_app_data(self.project.args)
})
return self.project.user_tasks
# USER TASKS END

View File

@@ -1,109 +0,0 @@
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from utils.telemetry import telemetry
from utils.style import color_green_bold, color_yellow_bold
from prompts.prompts import ask_user
from const.messages import AFFIRMATIVE_ANSWERS
from utils.exit import trace_code_event
INITIAL_PROJECT_HOWTO_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/How-to-write-a-good-initial-project-description"
class SpecWriter(Agent):
def __init__(self, project):
super().__init__('spec_writer', project)
self.save_dev_steps = True
def analyze_project(self, initial_prompt):
msg = (
"Your project description seems a bit short. "
"The better you can describe the project, the better GPT Pilot will understand what you'd like to build.\n\n"
f"Here are some tips on how to better describe the project: {INITIAL_PROJECT_HOWTO_URL}\n\n"
)
print(color_yellow_bold(msg))
print(color_green_bold("Let's start by refining your project idea:"))
convo = AgentConvo(self)
convo.construct_and_add_message_from_prompt('spec_writer/ask_questions.prompt', {})
num_questions = 0
skipped = False
user_response = initial_prompt
while True:
llm_response = convo.send_message('utils/python_string.prompt', {
"content": user_response,
})
if not llm_response:
continue
num_questions += 1
llm_response = llm_response.strip()
if len(llm_response) > 500:
print('continue', type='button')
user_response = ask_user(
self.project,
"Can we proceed with this project description? If so, just press ENTER. Otherwise, please tell me what's missing or what you'd like to add.",
hint="Does this sound good, and does it capture all the information about your project?",
require_some_input=False
)
if user_response:
user_response = user_response.strip()
if user_response.lower() in AFFIRMATIVE_ANSWERS + ['continue']:
break
else:
print('skip questions', type='button')
user_response = ask_user(self.project, llm_response)
if user_response and user_response.lower() == 'skip questions':
llm_response = convo.send_message(
'utils/python_string.prompt',
{
'content': 'This is enough clarification, you have all the information. Please output the spec now, without additional comments or questions.',
}
)
skipped = True
break
trace_code_event(
"spec-writer-questions",
{
"initial_prompt_length": len(initial_prompt),
"num_questions": num_questions,
"final_prompt_length": len(llm_response),
"skipped": skipped,
}
)
return llm_response
def review_spec(self, initial_prompt, spec):
convo = AgentConvo(self, temperature=0)
llm_response = convo.send_message('spec_writer/review_spec.prompt', {
"brief": initial_prompt,
"spec": spec,
})
if not llm_response:
return None
return llm_response.strip()
def check_app_complexity(self, initial_prompt):
convo = AgentConvo(self, temperature=0)
llm_response = convo.send_message('spec_writer/app_complexity.prompt', {
"app_summary": initial_prompt,
})
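# Assumption documented from the check below: the complexity prompt is
# expected to answer with a bare "1" for a simple app; any other response
# is treated as complex.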
if llm_response == '1':
return False
return True
def create_spec(self, initial_prompt):
self.project.is_complex_app = self.check_app_complexity(initial_prompt)
telemetry.set("is_complex_app", self.project.is_complex_app)
if len(initial_prompt) > 1500 or not self.project.is_complex_app:
return initial_prompt
print('', type='verbose', category='agent:spec-writer')
spec = self.analyze_project(initial_prompt)
missing_info = self.review_spec(initial_prompt, spec)
if missing_info:
spec += "\nAdditional info/examples:\n" + missing_info
return spec

View File

@@ -1,163 +0,0 @@
import json
import os
from utils.utils import step_already_finished
from helpers.Agent import Agent
from utils.style import color_green_bold
from helpers.AgentConvo import AgentConvo
from utils.utils import should_execute_step, generate_app_data
from database.database import save_progress, get_progress_steps, save_feature, edit_development_plan, edit_feature_plan
from logger.logger import logger
from const.function_calls import DEVELOPMENT_PLAN, UPDATE_DEVELOPMENT_PLAN
from const.common import EXAMPLE_PROJECT_PLAN
from templates import apply_project_template
DEVELOPMENT_PLANNING_STEP = 'development_planning'
class TechLead(Agent):
def __init__(self, project):
super().__init__('tech_lead', project)
self.save_dev_steps = False
self.convo_feature_plan = AgentConvo(self)
def create_development_plan(self):
self.project.current_step = DEVELOPMENT_PLANNING_STEP
self.convo_development_plan = AgentConvo(self)
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], DEVELOPMENT_PLANNING_STEP)
if step and not should_execute_step(self.project.args['step'], DEVELOPMENT_PLANNING_STEP):
step_already_finished(self.project.args, step)
self.project.development_plan = step['development_plan']
return
existing_summary = apply_project_template(self.project)
# DEVELOPMENT PLANNING
print(color_green_bold("Starting to create the action plan for development...\n"), category='agent:tech-lead')
logger.info("Starting to create the action plan for development...")
if self.project.project_manager.is_example_project:
llm_response = {"plan": EXAMPLE_PROJECT_PLAN}
else:
llm_response = self.convo_development_plan.send_message('development/plan.prompt',
{
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"architecture": self.project.architecture,
"technologies": self.project.system_dependencies + self.project.package_dependencies,
"existing_summary": existing_summary,
"files": self.project.get_all_coded_files(),
"task_type": 'app',
"is_complex_app": self.project.is_complex_app,
}, DEVELOPMENT_PLAN)
self.project.development_plan = llm_response['plan']
logger.info('Plan for development is created.')
save_progress(self.project.args['app_id'], self.project.current_step, {
"development_plan": self.project.development_plan, "app_data": generate_app_data(self.project.args)
})
return
def create_feature_plan(self, feature_description):
self.save_dev_steps = True
self.convo_feature_plan = AgentConvo(self)
llm_response = self.convo_feature_plan.send_message('development/feature_plan.prompt',
{
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"architecture": self.project.architecture,
"technologies": self.project.system_dependencies + self.project.package_dependencies,
"directory_tree": self.project.get_directory_tree(True),
"files": self.project.get_all_coded_files(),
"previous_features": self.project.previous_features,
"feature_description": feature_description,
"task_type": 'feature',
}, DEVELOPMENT_PLAN)
self.project.development_plan = llm_response['plan']
logger.info('Plan for feature development is created.')
return
def create_feature_summary(self, feature_description):
self.save_dev_steps = True
self.convo_feature_summary = AgentConvo(self)
llm_response = self.convo_feature_summary.send_message('development/feature_summary.prompt',
{
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"feature_description": feature_description,
"development_tasks": self.project.development_plan,
})
self.project.feature_summary = llm_response
if not self.project.skip_steps:
save_feature(self.project.args['app_id'],
self.project.feature_summary,
self.convo_feature_plan.messages,
self.project.checkpoints['last_development_step']['id'])
logger.info('Summary for new feature is created.')
return
def update_plan(self, task_source, llm_solutions, modified_files, i):
"""
Update the development plan after a task is finished.
:param task_source: The source of the task, one of: 'app', 'feature'.
:param llm_solutions: The LLM solutions (iterations) for the last finished task.
:param modified_files: The files that were modified during the last task.
:param i: The index of the last finished task in the development plan.
:return: True if the task was successfully updated, False otherwise.
"""
self.save_dev_steps = True
print('Updating development plan...', category='agent:tech-lead')
finished_tasks = [task for task in self.project.development_plan if task.get('finished', False)]
not_finished_tasks = [task for task in self.project.development_plan if not task.get('finished', False)]
files = [
file_dict for file_dict in self.project.get_all_coded_files()
if any(os.path.normpath(file_dict['full_path']).endswith(os.path.normpath(modified_file.lstrip('.'))) for
modified_file in modified_files)
]
update_task_convo = AgentConvo(self, temperature=0)
llm_response = update_task_convo.send_message('development/update_plan.prompt', {
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"finished_tasks": finished_tasks,
"not_finished_tasks": not_finished_tasks,
"last_finished_task": self.project.development_plan[i],
"task_source": task_source,
"llm_solutions": llm_solutions,
"files": files,
}, UPDATE_DEVELOPMENT_PLAN)
finished_tasks[-1]['description'] = llm_response['updated_current_task']['description']
self.project.development_plan = finished_tasks + llm_response['plan']
if task_source == 'app':
db_task_update = edit_development_plan(self.project.args['app_id'], {'development_plan': self.project.development_plan})
else:
db_task_update = edit_feature_plan(self.project.args['app_id'], {'llm_response': {'text': json.dumps({'plan': self.project.development_plan})}})
if db_task_update:
print('Successfully updated development plan.')
else:
print('Failed to update development plan.')
return db_task_update

View File

@@ -1,51 +0,0 @@
from const.function_calls import GET_DOCUMENTATION_FILE
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from utils.files import count_lines_of_code
from utils.style import color_green_bold, color_green
class TechnicalWriter(Agent):
def __init__(self, project):
super().__init__('technical_writer', project)
self.save_dev_steps = True
def document_project(self, percent):
files = self.project.get_all_coded_files()
print(f'{color_green_bold("CONGRATULATIONS!!!")}', category='success')
print(f'You reached {color_green(str(percent) + "%")} of your project generation!\n\n')
print('For now, you have created:\n')
print(f'{color_green(len(files))} files\n')
print(f'{color_green(count_lines_of_code(files))} lines of code\n\n')
print('Before continuing, GPT Pilot will create some documentation for the project...\n')
print('', type='verbose', category='agent:tech-writer')
self.create_license()
self.create_readme()
self.create_api_documentation()
def create_license(self):
# check if LICENSE file exists and if not create one. We want to create it only once.
return
def create_readme(self):
print(color_green('Creating README.md'))
convo = AgentConvo(self)
llm_response = convo.send_message('documentation/create_readme.prompt', {
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"directory_tree": self.project.get_directory_tree(True),
"files": self.project.get_all_coded_files(),
"previous_features": self.project.previous_features,
"current_feature": self.project.current_feature,
}, GET_DOCUMENTATION_FILE)
self.project.save_file(llm_response)
return convo
def create_api_documentation(self):
# create API documentation
return

View File

@@ -1,3 +0,0 @@
from .Architect import Architect, ARCHITECTURE_STEP
from .Developer import Developer, ENVIRONMENT_SETUP_STEP
from .TechLead import TechLead

View File

@@ -1,227 +0,0 @@
import builtins
import json
import os
import pytest
from unittest.mock import patch, MagicMock
import requests
from helpers.AgentConvo import AgentConvo
from dotenv import load_dotenv
load_dotenv(override=True)
from main import get_custom_print
from .Developer import Developer, ENVIRONMENT_SETUP_STEP
from test.mock_questionary import MockQuestionary
from helpers.test_Project import create_project
class TestDeveloper:
def setup_method(self):
builtins.print, ipc_client_instance = get_custom_print({})
name = 'TestDeveloper'
self.project = create_project()
self.project.app_id = 'test-developer'
self.project.name = name
self.project.set_root_path(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../workspace/TestDeveloper')))
self.project.technologies = []
self.project.current_step = ENVIRONMENT_SETUP_STEP
self.developer = Developer(self.project)
@pytest.mark.uses_tokens
@patch('helpers.AgentConvo.save_development_step')
@patch('helpers.AgentConvo.create_gpt_chat_completion',
return_value={'text': '{"command": "python --version", "timeout": 10}'})
@patch('helpers.cli.execute_command', return_value=('', 'DONE', None))
def test_install_technology(self, mock_execute_command,
mock_completion, mock_save):
# Given
self.developer.convo_os_specific_tech = AgentConvo(self.developer)
# When
llm_response = self.developer.check_system_dependency('python')
# Then
assert llm_response == 'DONE'
mock_execute_command.assert_called_once_with(self.project, 'python --version', timeout=10, command_id=None)
@patch('helpers.AgentConvo.save_development_step')
@patch('helpers.AgentConvo.create_gpt_chat_completion',
return_value={'text': '{"tasks": [{"command": "ls -al"}]}'})
def test_implement_task(self, mock_completion, mock_save):
# Given any project
project = create_project()
project.project_description = 'Test Project'
project.development_plan = [{
'description': 'Do stuff',
'user_review_goal': 'Do stuff',
}]
project.get_file_summaries = lambda: None
project.get_all_coded_files = lambda **kwargs: []
project.current_step = 'test'
# and a developer who will execute any task
developer = Developer(project)
developer.execute_task = MagicMock()
developer.execute_task.return_value = {'success': True}
# When
developer.implement_task(0, 'test', {'description': 'Do stuff'})
# Then we parse the response correctly and send list of steps to execute_task()
assert developer.execute_task.call_count == 1
assert developer.execute_task.call_args[0][1] == [{'command': 'ls -al'}]
@patch('helpers.AgentConvo.save_development_step')
@patch('helpers.AgentConvo.create_gpt_chat_completion',
return_value={'text': '{"tasks": [{"command": "ls -al"}, {"command": "ls -al src"}, {"command": "ls -al test"}, {"command": "ls -al build"}]}'})
def test_implement_task_reject_with_user_input(self, mock_completion, mock_save):
# Given any project
project = create_project()
project.project_description = 'Test Project'
project.development_plan = [{
'description': 'Do stuff',
'user_review_goal': 'Do stuff',
}]
project.get_file_summaries = lambda: None
project.get_all_coded_files = lambda **kwargs: []
project.current_step = 'test'
# and a developer who will execute any task except for `ls -al test`
developer = Developer(project)
developer.execute_task = MagicMock()
developer.execute_task.side_effect = [
{'success': False, 'step_index': 2, 'user_input': 'no, use a better command'},
{'success': True}
]
# When
developer.implement_task(0, 'test', {'description': 'Do stuff'})
# Then we include the user input in the conversation to update the task list
assert mock_completion.call_count == 3
prompt = mock_completion.call_args_list[2].args[0][2]['content']
assert prompt.startswith('{"tasks": [{"command": "ls -al"}, {"command": "ls -al src"}, {"command": "ls -al test"}, {"command": "ls -al build"}]}'.lstrip())
# and call `execute_task()` again
assert developer.execute_task.call_count == 2
@patch('helpers.AgentConvo.save_development_step')
# GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
@patch('helpers.AgentConvo.create_gpt_chat_completion',
return_value={'text': '{"type": "command_test", "command": {"command": "npm run test", "timeout": 3000}}'})
# 2nd arg of return_value: `None` to debug, 'DONE' if successful
@patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE', None))
# @patch('helpers.cli.ask_user', return_value='yes')
# @patch('helpers.cli.get_saved_command_run')
# Note: nested @patch mocks are passed bottom-up, so the innermost patch comes first.
def test_code_changes_command_test(self, mock_execute_command, mock_chat_completion,
# Note: the 2nd line below will use the LLM to debug, uncomment the @patches accordingly
mock_save):
# mock_ask_user, mock_get_saved_command_run):
# Given
convo = AgentConvo(self.developer)
convo.save_branch = lambda branch_name=None: branch_name
# When
# "Now, we need to verify if this change was successfully implemented...
result = self.developer.test_code_changes(convo)
# Then
assert result == {'success': True}
@patch('helpers.AgentConvo.save_development_step')
# GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
@patch('helpers.AgentConvo.create_gpt_chat_completion',
return_value={'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'})
@patch('helpers.Project.ask_user', return_value='continue')
def test_code_changes_manual_test_continue(self, mock_ask_user, mock_chat_completion, mock_save):  # mocks are passed bottom-up
# Given
convo = AgentConvo(self.developer)
convo.save_branch = lambda branch_name=None: branch_name
# When
result = self.developer.test_code_changes(convo)
# Then
assert result == {'success': True}
@pytest.mark.skip("endless loop in questionary")
@patch('helpers.AgentConvo.save_development_step')
@patch('helpers.AgentConvo.create_gpt_chat_completion')
@patch('utils.questionary.get_saved_user_input')
# https://github.com/Pythagora-io/gpt-pilot/issues/35
def test_code_changes_manual_test_no(self, mock_get_saved_user_input, mock_chat_completion, mock_save):
# Given
convo = AgentConvo(self.developer)
convo.save_branch = lambda branch_name=None: branch_name
convo.load_branch = lambda function_uuid=None: function_uuid
self.project.developer = self.developer
mock_chat_completion.side_effect = [
{'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'},
{'text': '{"thoughts": "hmmm...", "reasoning": "testing", "steps": [{"type": "command", "command": {"command": "something scary", "timeout": 3000}, "check_if_fixed": true}]}'},
{'text': 'do something else scary'},
]
mock_questionary = MockQuestionary(['no', 'no'])
with patch('utils.questionary.questionary', mock_questionary):
# When
result = self.developer.test_code_changes(convo)
# Then
assert result == {'success': True, 'user_input': 'no'}
@patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE', None))
@patch('helpers.AgentConvo.save_development_step')
@patch('utils.llm_connection.requests.post')
def test_test_code_changes_invalid_json(self,
mock_requests_post,
mock_save,
mock_execute,
monkeypatch):
# Given
convo = AgentConvo(self.developer)
convo.save_branch = lambda branch_name=None: branch_name
convo.load_branch = lambda function_uuid=None: function_uuid
self.project.developer = self.developer
# we send a GET_TEST_TYPE spec, but the 1st response is invalid
types_in_response = ['command', 'wrong_again', 'command_test']
json_received = []
def generate_response(*args, **kwargs):
# Copy messages, including the validation errors from the request
content = [msg['content'] for msg in kwargs['json']['messages']]
json_received.append(content)
gpt_response = json.dumps({
'type': types_in_response.pop(0),
'command': {
'command': 'node server.js',
'timeout': 3000
}
})
choice = json.dumps({'delta': {'content': gpt_response}})
line = json.dumps({'choices': [json.loads(choice)]}).encode('utf-8')
response = requests.Response()
response.status_code = 200
response.iter_lines = lambda: [line]
print(f'##### mock response: {response}')
return response
mock_requests_post.side_effect = generate_response
monkeypatch.setenv('OPENAI_API_KEY', 'secret')
# mock_questionary = MockQuestionary([''])
# with patch('utils.questionary.questionary', mock_questionary):
# When
result = self.developer.test_code_changes(convo)
# Then
assert result == {'success': True}
assert mock_requests_post.call_count == 0

View File

@@ -1,68 +0,0 @@
import builtins
import os
import pytest
from unittest.mock import patch
from dotenv import load_dotenv
load_dotenv(override=True)
from main import get_custom_print
from helpers.agents.TechLead import TechLead, DEVELOPMENT_PLANNING_STEP
from helpers.Project import Project
from test.test_utils import assert_non_empty_string
from test.mock_questionary import MockQuestionary
class TestTechLead:
def setup_method(self):
builtins.print, ipc_client_instance = get_custom_print({})
name = 'TestTechLead'
self.project = Project({
'app_id': 'test-tech-lead',
'name': name,
'app_type': ''
},
name=name,
architecture=[],
user_stories=[]
)
self.project.set_root_path(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../workspace/TestTechLead')))
self.project.technologies = []
self.project.project_description = '''
The project entails creating a web-based chat application, tentatively named "chat_app."
This application does not require user authentication or chat history storage.
It solely supports one-on-one messaging, excluding group chats or multimedia sharing like photos, videos, or files.
Additionally, there are no specific requirements for real-time functionality, like live typing indicators or read receipts.
The development of this application will strictly follow a monolithic structure, avoiding the use of microservices, as per the client's demand.
The development process will include the creation of user stories and tasks, based on detailed discussions with the client.
'''
self.project.user_stories = [
'User Story 1: As a user, I can access the web-based "chat_app" directly without needing to authenticate or log in. Do you want to add anything else? If not, just press ENTER.',
'User Story 2: As a user, I can start one-on-one conversations with another user on the "chat_app". Do you want to add anything else? If not, just press ENTER.',
'User Story 3: As a user, I can send and receive messages in real-time within my one-on-one conversation on the "chat_app". Do you want to add anything else? If not, just press ENTER.',
'User Story 4: As a user, I do not need to worry about deleting or storing my chats because the "chat_app" does not store chat histories. Do you want to add anything else? If not, just press ENTER.',
'User Story 5: As a user, I will only be able to send text messages, as the "chat_app" does not support any kind of multimedia sharing like photos, videos, or files. Do you want to add anything else? If not, just press ENTER.',
'User Story 6: As a user, I will not see any live typing indicators or read receipts since the "chat_app" does not provide any additional real-time functionality beyond message exchange. Do you want to add anything else? If not, just press ENTER.',
]
self.project.architecture = ['Node.js', 'Socket.io', 'Bootstrap', 'JavaScript', 'HTML5', 'CSS3']
self.project.current_step = DEVELOPMENT_PLANNING_STEP
@pytest.mark.uses_tokens
@patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
@patch('helpers.agents.TechLead.save_progress', return_value=None)
@patch('helpers.agents.TechLead.get_progress_steps', return_value=None)
def test_create_development_plan(self, mock_get_progress_steps, mock_save_progress, mock_get_saved_step):  # mocks are passed bottom-up
self.techLead = TechLead(self.project)
mock_questionary = MockQuestionary(['', '', 'no'])
with patch('utils.questionary.questionary', mock_questionary):
# When
development_plan = self.techLead.create_development_plan()
# Then
assert development_plan is not None
assert_non_empty_string(development_plan[0]['description'])
assert_non_empty_string(development_plan[0]['user_review_goal'])

View File

@@ -1,522 +0,0 @@
import psutil
import subprocess
import os
import signal
import threading
import queue
import time
import platform
from typing import Dict, Union
from logger.logger import logger
from utils.style import color_green, color_red, color_yellow_bold
from utils.ignore import IgnoreMatcher
from database.database import save_command_run
from helpers.exceptions import TooDeepRecursionError
from helpers.exceptions import TokenLimitError
from helpers.exceptions import CommandFinishedEarly
from prompts.prompts import ask_user
from const.code_execution import MIN_COMMAND_RUN_TIME, MAX_COMMAND_RUN_TIME, MAX_COMMAND_OUTPUT_LENGTH
from const.messages import AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS
interrupted = False
running_processes: Dict[str, tuple[str, int]] = {}
"""Holds a list of (command, process ID)s, mapped to the `command_id` provided in the call to `execute_command()`."""
def enqueue_output(out, q):
for line in iter(out.readline, ''):
if interrupted: # Check if the flag is set
break
q.put(line)
out.close()
def run_command(command, root_path, q_stdout, q_stderr) -> subprocess.Popen:
"""
Execute a command in a subprocess.
Args:
command (str): The command to run.
root_path (str): The directory in which to run the command.
q_stdout (Queue): A queue to capture stdout.
q_stderr (Queue): A queue to capture stderr.
Returns:
subprocess.Popen: The subprocess object.
"""
logger.info(f'Running `{command}` on {platform.system()}')
if platform.system() == 'Windows': # Check the operating system
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=root_path
)
else:
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
preexec_fn=os.setsid, # Use os.setsid only for Unix-like systems
cwd=root_path
)
t_stdout = threading.Thread(target=enqueue_output, args=(process.stdout, q_stdout))
t_stderr = threading.Thread(target=enqueue_output, args=(process.stderr, q_stderr))
t_stdout.daemon = True
t_stderr.daemon = True
t_stdout.start()
t_stderr.start()
return process
def terminate_named_process(command_id: str) -> None:
if command_id in running_processes:
terminate_process(running_processes[command_id][1], command_id)
def terminate_running_processes():
for command_id in list(running_processes.keys()):
terminate_process(running_processes[command_id][1], command_id)
def term_proc_windows(pid: int):
try:
subprocess.run(["taskkill", "/F", "/T", "/PID", str(pid)])
except subprocess.CalledProcessError as e:
logger.error(f'Error while terminating process: {e}')
def term_proc_unix_like(pid: int):
try:
os.killpg(pid, signal.SIGKILL)
except OSError as e:
logger.error(f'Error while terminating process: {e}')
def is_process_running(pid: int) -> bool:
"""Check if there is a running process with the given PID."""
try:
# psutil.NoSuchProcess will be raised if the process doesn't exist
process = psutil.Process(pid)
return process.is_running()
except psutil.NoSuchProcess:
return False
def terminate_process(pid: int, name=None) -> None:
# todo refactor terminate_process() using psutil for all OS. Check/terminate child processes and test on all OS
if name is None:
name = ''
if not is_process_running(pid):
logger.info('Process "%s" (pid: %s) is not running. Skipping termination.', name, pid)
# Also remove from running_processes if not running
for command_id, process_info in list(running_processes.items()):
if process_info[1] == pid:
del running_processes[command_id]
return
logger.info('Terminating process "%s" (pid: %s)', name, pid)
if platform.system() == "Windows":
term_proc_windows(pid)
else: # Unix-like systems
term_proc_unix_like(pid)
try:
# Wait for the process to terminate
process = psutil.Process(pid)
process.wait(timeout=10) # Adjust the timeout as necessary
except psutil.NoSuchProcess:
logger.info("Process already terminated.")
except psutil.TimeoutExpired:
logger.warning("Timeout expired while waiting for process to terminate.")
except Exception as e:
logger.error(f"Error waiting for process termination: {e}")
for command_id in list(running_processes.keys()):
if running_processes[command_id][1] == pid:
del running_processes[command_id]
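# A hedged sketch for the refactoring TODO above: a single cross-platform
# terminate_process() built on psutil alone, which also reaps child
# processes. The name `terminate_process_tree` and this code are
# illustrative only, not part of the original module.
def terminate_process_tree(pid: int, timeout: int = 10) -> None:
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    # Terminate the children first, then the parent, politely.
    procs = parent.children(recursive=True) + [parent]
    for proc in procs:
        try:
            proc.terminate()
        except psutil.NoSuchProcess:
            pass
    # Escalate to kill() for anything that ignored terminate().
    _, alive = psutil.wait_procs(procs, timeout=timeout)
    for proc in alive:
        try:
            proc.kill()
        except psutil.NoSuchProcess:
            pass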
def read_queue_line(q, stdout=True):
try:
line = q.get_nowait()
except queue.Empty:
return ''
if stdout:
print(color_green(line))
logger.info('CLI OUTPUT: ' + line)
# if success_message is not None and success_message in line:
# logger.info('Success message found: %s', success_message)
# # break # TODO background_command - this is if we want to leave the command running in the background, but sometimes processes keep hanging and the terminal gets bugged; also, if we do that, we have to change the user messages to make it clear that a command is running in the background
# raise CommandFinishedEarly()
if not stdout: # stderr
print(color_red(line))
logger.error('CLI ERROR: ' + line)
return line
def read_remaining_queue(q, stdout=True):
output = ''
while not q.empty():
output += read_queue_line(q, stdout)
return output
def execute_command(project, command, timeout=None, success_message=None, command_id: str = None, force=False) \
-> Tuple[str, str, int]:
"""
Execute a command and capture its output.
Args:
project: The project associated with the command.
command (str): The command to run.
timeout (int, optional): The maximum execution time in milliseconds. Default is None.
success_message: A message to look for in the output of the command to determine if successful or not.
command_id (str, optional): A unique identifier assigned by the LLM, can be used to terminate the process.
force (bool, optional): Whether to execute the command without confirmation. Default is False.
Returns:
cli_response (str): The command output
or: `None` if user did not authorise the command to run
done_or_error_response (str): 'DONE' if 'no', 'skip' or `success_message` matched.
Otherwise, if `cli_response` is None, the user's response to "Can I execute...".
If `cli_response` is not None: 'was interrupted by user', 'timed out', or `None` - the caller should send `cli_response` to the LLM.
exit_code (int): The exit code of the process.
"""
print('', type='verbose', category='exec-command')
project.finish_loading()
if timeout is not None:
if timeout < 0:
timeout = None
else:
if timeout < 1000:
timeout *= 1000
timeout = min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)
if not force:
question = f'Can I execute the command: `{color_yellow_bold(command)}`'
if timeout is not None:
question += f' with {timeout}ms timeout?'
else:
question += '?'
if project.check_ipc():
print(question, type='ipc', category='exec-command')
print('If yes, just press ENTER. Otherwise, type "no" and the command will be treated as if it executed successfully.', type='hint')
hint = None
else:
print(color_yellow_bold('\n--------- EXECUTE COMMAND ----------'))
hint = 'If yes, just press ENTER. Otherwise, type "no" and the command will be treated as if it executed successfully.'
print('yes/no', type='buttons-only')
logger.info('--------- EXECUTE COMMAND ---------- : %s', question)
answer = ask_user(project, question, False, hint=hint)
# TODO can we use .confirm(question, default='yes').ask() https://questionary.readthedocs.io/en/stable/pages/types.html#confirmation
if answer.lower() in NEGATIVE_ANSWERS:
return None, 'SKIP', None
elif answer.lower() not in AFFIRMATIVE_ANSWERS:
# "That's not going to work, let's do X instead"
# https://github.com/Pythagora-io/gpt-pilot/issues/198
# https://github.com/Pythagora-io/gpt-pilot/issues/43#issuecomment-1756352056
# TODO: https://github.com/Pythagora-io/gpt-pilot/issues/122
return None, answer, None
# TODO when a shell built-in command (like cd or source) is executed, the output is not captured properly - this will need to be changed at some point
if platform.system() != 'Windows' and ("cd " in command or "source " in command):
command = f"bash -c '{command}'"
project.command_runs_count += 1
return_value = None
done_or_error_response = None
q_stderr = queue.Queue()
q = queue.Queue()
process = run_command(command, project.root_path, q, q_stderr)
if command_id is not None:
terminate_named_process(command_id)
# TODO: We want to be able to send the initial stdout/err to the LLM, but it would also be handy to log ongoing output to a log file, named after `command_id`. Terminating an existing process with the same ID should reset the log file
running_processes[command_id] = (command, process.pid)
output = ''
stderr_output = ''
start_time = time.time()
# Note: If we don't need to log the output in real-time, we can remove q, q_stderr, the threads and this while loop.
# if timeout is not None:
# timeout /= 1000
# output, stderr_output = process.communicate(timeout=timeout)
try:
while True:
elapsed_time = time.time() - start_time
time.sleep(0.1) # TODO this shouldn't be used
# if timeout is not None:
# # TODO: print to IPC using a different message type so VS Code can ignore it or update the previous value
# print(color_white_bold(f'\rt: {round(elapsed_time * 1000)}ms : '), end='', flush=True)
# If timeout is reached, kill the process
if timeout is not None and elapsed_time * 1000 > timeout:
if command_id is not None:
logger.info(f'Process "{command_id}" running after timeout as pid: {process.pid}')
break
raise TimeoutError("Command exceeded the specified timeout.")
output += read_queue_line(q)
stderr_output += read_queue_line(q_stderr, False)
# Check if process has finished
if process.poll() is not None:
logger.info('process exited with return code: %d', process.returncode)
if command_id is not None:
del running_processes[command_id]
output += read_remaining_queue(q)
stderr_output += read_remaining_queue(q_stderr, False)
break
except (KeyboardInterrupt, TimeoutError, CommandFinishedEarly) as e:
if isinstance(e, KeyboardInterrupt):
print('\nCTRL+C detected. Stopping command execution...')
logger.info('CTRL+C detected. Stopping command execution...')
done_or_error_response = 'was interrupted by user'
elif isinstance(e, TimeoutError):
print('\nTimeout detected. Stopping command execution...')
logger.warning('Timeout detected. Stopping command execution...')
done_or_error_response = f'took longer than {timeout}ms so I killed it'
elif isinstance(e, CommandFinishedEarly):
print('\nCommand finished before timeout. Handling early completion...')
logger.info('Command finished before timeout. Handling early completion...')
done_or_error_response = 'DONE'
finally:
done_or_error_response = 'DONE'  # TODO: remove this if we want to return different responses
terminate_process(process.pid)  # TODO: background_command - remove this if we want to leave the command running in the background; see the TODO above
# update the return code
process.poll()
elapsed_time = time.time() - start_time
logger.info(f'`{command}` took {round(elapsed_time * 1000)}ms to execute.')
if return_value is None:
return_value = ''
if stderr_output != '':
return_value = 'stderr:\n```\n' + stderr_output[0:MAX_COMMAND_OUTPUT_LENGTH] + '\n```\n'
return_value += 'stdout:\n```\n' + output[-MAX_COMMAND_OUTPUT_LENGTH:] + '\n```'
save_command_run(project, command, return_value, done_or_error_response, process.returncode)
return return_value, done_or_error_response, process.returncode
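# Minimal usage sketch for execute_command(); the `project` object and the
# command below are hypothetical:
#
#   cli_response, done_or_error, exit_code = execute_command(
#       project, 'npm install', timeout=60000, force=True)
#   if cli_response is None:
#       # The user declined; done_or_error holds 'SKIP' or their objection.
#       ...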
def check_if_command_successful(convo, command, cli_response, response, exit_code, additional_message=None,
task_steps=None, step_index=None):
if cli_response is not None:
logger.info(f'`{command}` ended with exit code: {exit_code}')
if exit_code is None:
# TODO: this should never happen! The process is still running; investigate why, and decide how to handle it
print(color_red(f'Process for command {command} still running.'))
response = 'DONE'
else:
response = convo.send_message('dev_ops/ran_command.prompt',
{
'cli_response': cli_response,
'error_response': response,
'command': command,
'additional_message': additional_message,
'exit_code': exit_code,
'task_steps': task_steps,
'step_index': step_index,
})
logger.debug(f'LLM response to ran_command.prompt: {response}')
if response == 'DONE':
convo.remove_last_x_messages(2)
return response
def build_directory_tree(path, prefix='', root_path=None) -> str:
"""Build the directory tree structure in a simplified format.
:param path: The starting directory path.
:param prefix: Prefix for the current item, used for recursion.
:param root_path: The root directory path.
:return: A string representation of the directory tree.
"""
output = ""
indent = ' '
if root_path is None:
root_path = path
matcher = IgnoreMatcher(root_path=root_path)
if os.path.isdir(path):
if root_path == path:
output += '/'
else:
dir_name = os.path.basename(path)
output += f'{prefix}/{dir_name}'
# List items in the directory
items = os.listdir(path)
dirs = []
files = []
for item in items:
item_path = os.path.join(path, item)
if matcher.ignore(item_path):
continue
if os.path.isdir(item_path):
dirs.append(item)
elif os.path.isfile(item_path):
files.append(item)
dirs.sort()
files.sort()
if dirs:
output += '\n'
for index, dir_item in enumerate(dirs):
item_path = os.path.join(path, dir_item)
new_prefix = prefix + indent # Updated prefix for recursion
output += build_directory_tree(item_path, new_prefix, root_path)
if files:
output += f"{prefix} {', '.join(files)}\n"
elif files:
output += f": {', '.join(files)}\n"
else:
output += '\n'
return output
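# Illustrative output of build_directory_tree() for a small project, inferred
# from the logic above (exact spacing may differ):
#
#   /
#     /src: main.js, other.js
#    package.json, README.md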
def execute_command_and_check_cli_response(convo, command: dict, task_steps=None, step_index=None):
"""
Execute a command and check its CLI response.
Args:
convo (AgentConvo): The conversation object.
command (dict):
['command'] (str): The command to run.
['timeout'] (int): The maximum execution time in milliseconds.
task_steps (list, optional): The steps of the current task. Default is None.
step_index (int, optional): The index of the current step. Default is None.
Returns:
tuple: A tuple containing the CLI response and the agent's response.
- cli_response (str): The command output.
- response (str): 'DONE' or 'BUG'.
If `cli_response` is None, user's response to "Can I execute...".
"""
# TODO: Prompt mentions `command` could be `INSTALLED` or `NOT_INSTALLED`, where is this handled?
command_id = command.get('command_id')
cli_response, response, exit_code = execute_command(convo.agent.project,
command['command'],
timeout=command['timeout'],
command_id=command_id)
response = check_if_command_successful(convo, command['command'], cli_response, response, exit_code,
task_steps=task_steps, step_index=step_index)
return cli_response, response
def run_command_until_success(convo, command,
timeout: Union[int, None],
command_id: Union[str, None] = None,
success_message=None,
additional_message=None,
force=False,
return_cli_response=False,
success_with_cli_response=False,
is_root_task=False,
task_steps=None,
step_index=None):
"""
Run a command until it succeeds or reaches a timeout.
Args:
convo (AgentConvo): The conversation object.
command (str): The command to run.
timeout (int): The maximum execution time in milliseconds.
command_id: A name for the process.
If `timeout` is not provided, can be used to terminate the process.
success_message: A message to look for in the output of the command to determine if successful or not.
additional_message (str, optional): Additional message to include in the "I ran the command..." prompt.
force (bool, optional): Whether to execute the command without confirmation. Default is False.
return_cli_response (bool, optional): If True, may raise TooDeepRecursionError(cli_response)
success_with_cli_response (bool, optional): If True, simply send the cli_response back to the caller without checking with LLM.
The LLM has asked to see the output and may update the task step list.
is_root_task (bool, optional): If True and TokenLimitError is raised, will call `convo.load_branch(reset_branch_id)`
task_steps (list, optional): The steps of the current task. Default is None.
step_index (int, optional): The index of the current step. Default is None.
Returns:
- 'success': bool,
- 'cli_response': ```stdout: <stdout> stderr: <stderr>```
- 'user_input': `None` or user's objection to running the command
"""
cli_response, response, exit_code = execute_command(convo.agent.project,
command,
timeout=timeout,
success_message=success_message,
command_id=command_id,
force=force)
if success_with_cli_response and cli_response is not None:
return {'success': True, 'cli_response': cli_response}
if response == 'SKIP':
return {'success': True, 'user_input': response}
if cli_response is None and response != 'DONE':
return {'success': False, 'user_input': response}
response = check_if_command_successful(convo, command, cli_response, response, exit_code, additional_message,
task_steps=task_steps, step_index=step_index)
if response:
response = response.strip()
if response != 'DONE':
# 'BUG'
print(color_red('Got incorrect CLI response:'))
print(cli_response)
print(color_red('-------------------'))
reset_branch_id = convo.save_branch()
while True:
try:
# This catch is necessary to return the correct value (cli_response) to the continue-development flow, so
# the developer can debug the appropriate issue.
# This snippet is the first entry point into the debugging recursion, because of return_cli_response.
success = convo.agent.debugger.debug(convo, {
'command': command,
'timeout': timeout,
'command_id': command_id,
'success_message': success_message,
}, user_input=cli_response, is_root_task=is_root_task, ask_before_debug=True, task_steps=task_steps, step_index=step_index)
return {'success': success, 'cli_response': cli_response}
except TooDeepRecursionError as e:
# this is only to put appropriate message in the response after TooDeepRecursionError is raised
raise TooDeepRecursionError(cli_response) if return_cli_response else e
except TokenLimitError as e:
if is_root_task:
convo.load_branch(reset_branch_id)
else:
raise e
else:
return {'success': True, 'cli_response': cli_response}
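# Minimal usage sketch (the `convo` object, command, and `handle_failure`
# helper are hypothetical):
#
#   result = run_command_until_success(convo, 'npm test', timeout=60000)
#   if not result['success']:
#       handle_failure(result.get('user_input') or result.get('cli_response'))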

View File

@@ -1,48 +0,0 @@
import json
from const.llm import MAX_GPT_MODEL_TOKENS
class ApiKeyNotDefinedError(Exception):
def __init__(self, env_key: str):
self.env_key = env_key
super().__init__(f"API Key has not been configured: {env_key}")
class CommandFinishedEarly(Exception):
def __init__(self, message='Command finished before timeout. Handling early completion...'):
self.message = message
super().__init__(message)
class TokenLimitError(Exception):
def __init__(self, tokens_in_messages, max_tokens=MAX_GPT_MODEL_TOKENS):
self.tokens_in_messages = tokens_in_messages
self.max_tokens = max_tokens
super().__init__(f"Token limit error happened with {tokens_in_messages}/{max_tokens} tokens in messages!")
class TooDeepRecursionError(Exception):
def __init__(self, message='Recursion is too deep!'):
self.message = message
super().__init__(message)
class ApiError(Exception):
def __init__(self, message, response=None):
self.message = message
self.response = response
self.response_json = None
if response and hasattr(response, "text"):
try:
self.response_json = json.loads(response.text)
except Exception: # noqa
pass
super().__init__(message)
class GracefulExit(Exception):
def __init__(self, message='Graceful exit'):
self.message = message
super().__init__(message)

View File

@@ -1,162 +0,0 @@
from pathlib import Path
import os
from typing import Optional, Union
from utils.style import color_green
from utils.ignore import IgnoreMatcher
def update_file(path: str, new_content: Union[str, bytes], project=None):
"""
Update file with the new content.
:param path: Full path to the file
:param new_content: New content to write to the file
:param project: Optional; a Project object related to the file update. Default is None.
Any intermediate directories will be created if they don't exist.
If file is text, it will be written using UTF-8 encoding.
"""
# TODO: we should know where project root is and ensure no
# files are written outside of it.
os.makedirs(os.path.dirname(path), exist_ok=True)
if isinstance(new_content, str):
file_mode = "w"
encoding = "utf-8"
else:
file_mode = "wb"
encoding = None
with open(path, file_mode, encoding=encoding) as file:
file.write(new_content)
if project is not None: # project can be None only in tests
if not project.skip_steps:
print({"path": path, "line": None}, type='openFile')
if not project.check_ipc():
print(color_green(f"Updated file {path}"))
def get_file_contents(
path: str, project_root_path: str
) -> dict[str, Union[str, bytes]]:
"""
Get file content and metadata.
:param path: Full path to the file
:param project_root_path: Full path to the project root directory
:return: Object with the following keys:
- name: File name
- path: Relative path to the file
- content: File content (str or bytes)
- full_path: Full path to the file
If file is text, it will be read using UTF-8 encoding and `content`
will be a Python string. If that fails, it will be treated as a
binary file and `content` will be a Python bytes object.
"""
# Normalize the path to avoid issues with different path separators
full_path = os.path.normpath(path)
try:
# Assume it's a text file using UTF-8 encoding
with open(full_path, "r", encoding="utf-8") as file:
file_content = file.read()
except UnicodeDecodeError:
# If that fails, we'll treat it as a binary file
with open(full_path, "rb") as file:
file_content = file.read()
except NotADirectoryError:
raise ValueError(f"A component of the path is not a directory: {path}")
except FileNotFoundError:
raise ValueError(f"File not found: {full_path}")
except Exception as e:
raise ValueError(f"Exception in get_file_contents: {e}")
file_name = os.path.basename(path)
relative_path = str(Path(path).parent.relative_to(project_root_path))
if relative_path == '.':
relative_path = ''
return {
"name": file_name,
"path": relative_path,
"content": file_content,
"full_path": full_path,
"lines_of_code": len(file_content.splitlines()),
}
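# Hedged usage sketch for get_file_contents(); the paths are hypothetical:
#
#   info = get_file_contents('/workspace/app/src/index.js', '/workspace/app')
#   # -> {'name': 'index.js', 'path': 'src', 'content': '...',
#   #     'full_path': '/workspace/app/src/index.js', 'lines_of_code': 42}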
def get_directory_contents(
directory: str,
ignore: Optional[list[str]] = None,
) -> list[dict[str, Union[str, bytes]]]:
"""
Get the content of all files in the given directory.
:param directory: Full path to the directory to search
:param ignore: List of files or folders to ignore (optional)
:return: List of file objects as returned by `get_file_contents`
See `get_file_contents()` for the details on the output structure
and how files are read.
"""
return_array = []
matcher = IgnoreMatcher(ignore, root_path=directory)
# TODO: Convert to use pathlib.Path.walk()
for dpath, dirs, files in os.walk(directory):
# In-place update of dirs so that os.walk() doesn't traverse them
dirs[:] = [
d for d in dirs
if not matcher.ignore(os.path.join(dpath, d))
]
for file in files:
full_path = os.path.join(dpath, file)
if matcher.ignore(full_path):
continue
return_array.append(get_file_contents(full_path, directory))
return return_array
def clear_directory(directory: str, ignore: Optional[list[str]] = None):
"""
Delete all files and directories (except ignored ones) in the given directory.
:param directory: Full path to the directory to clear
:param ignore: List of files or folders to ignore (optional)
"""
matcher = IgnoreMatcher(ignore, root_path=directory)
# TODO: Convert to use pathlib.Path.walk()
for dpath, dirs, files in os.walk(directory, topdown=True):
# In-place update of dirs so that os.walk() doesn't traverse them
dirs[:] = [
d for d in dirs
if not matcher.ignore(os.path.join(dpath, d))
]
for file in files:
full_path = os.path.join(dpath, file)
if matcher.ignore(full_path):
continue
try:
os.remove(full_path)
except: # noqa
# Gracefully handle some weird edge cases instead of crashing
pass
# Delete empty subdirectories not in ignore list
for d in dirs:
try:
subdir_path = os.path.join(dpath, d)
if not os.listdir(subdir_path):
os.rmdir(subdir_path)
except: # noqa
# Gracefully handle some weird edge cases instead of crashing
pass

View File

@@ -1,52 +0,0 @@
# ipc.py
import socket
import json
import time
from utils.utils import json_serial
class IPCClient:
def __init__(self, port):
self.ready = False
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to the external process...")
try:
client.connect(('localhost', int(port)))
self.client = client
print("Connected!")
except ConnectionRefusedError:
self.client = None
print("Connection refused, make sure you started the external process")
def handle_request(self, message_content):
print(f"Received request from the external process: {message_content}")
return message_content # For demonstration, we're just echoing back the content
def listen(self):
if self.client is None:
print("Not connected to the external process!")
return
while True:
data = b''
while True:
data = data + self.client.recv(512 * 1024)
try:
message = json.loads(data)
break
except json.JSONDecodeError:
# This means we still got an incomplete message, so
# we should continue to receive more data.
continue
if message['type'] == 'response':
# self.client.close()
return message['content']
def send(self, data):
serialized_data = json.dumps(data, default=json_serial)
data_length = len(serialized_data)
if self.client is not None:
self.client.sendall(data_length.to_bytes(4, byteorder='big'))
self.client.sendall(serialized_data.encode('utf-8'))
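# Hedged sketch of the other side of this wire protocol: a server that reads
# the 4-byte big-endian length prefix written by send() above, then replies
# with a raw JSON message of type 'response', which listen() returns. This
# demo function is illustrative; the real external process (e.g. an IDE
# extension) implements the same framing.
def demo_serve_once(port: int):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind(('localhost', port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn:
            # Read exactly 4 header bytes, then exactly `length` payload bytes.
            header = b''
            while len(header) < 4:
                header += conn.recv(4 - len(header))
            length = int.from_bytes(header, byteorder='big')
            payload = b''
            while len(payload) < length:
                payload += conn.recv(length - len(payload))
            request = json.loads(payload)
            reply = {'type': 'response', 'content': request}
            conn.sendall(json.dumps(reply).encode('utf-8'))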

View File

@@ -1,66 +0,0 @@
import builtins
import os.path
from dotenv import load_dotenv
from database.database import database
from const.function_calls import IMPLEMENT_TASK
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
from utils.custom_print import get_custom_print
from .test_Project import create_project
load_dotenv(override=True)
builtins.print, ipc_client_instance = get_custom_print({})
# def test_format_message_content_json_response():
# # Given
# project = create_project()
# project.current_step = 'test'
# developer = Developer(project)
# convo = AgentConvo(developer)
#
# response = {
# 'files': [
# {
# 'name': 'package.json',
# 'path': '/package.json',
# 'content': '{\n "name": "complex_app",\n "version": "1.0.0",\n "description": "",\n "main": "index.js",\n "directories": {\n "test": "tests"\n },\n "scripts": {\n "test": "echo \\"Error: no test specified\\" && exit 1",\n "start": "node index.js"\n },\n "keywords": [],\n "author": "",\n "license": "ISC",\n "dependencies": {\n "axios": "^1.5.1",\n "express": "^4.18.2",\n "mongoose": "^7.6.1",\n "socket.io": "^4.7.2"\n },\n "devDependencies": {\n "nodemon": "^3.0.1"\n }\n}'
# }
# ]
# }
#
# # When
# message_content = convo.format_message_content(response, IMPLEMENT_TASK)
#
# # Then
# assert message_content == '''
# # files
# ##0
# name: package.json
# path: /package.json
# content: {
# "name": "complex_app",
# "version": "1.0.0",
# "description": "",
# "main": "index.js",
# "directories": {
# "test": "tests"
# },
# "scripts": {
# "test": "echo \\"Error: no test specified\\" && exit 1",
# "start": "node index.js"
# },
# "keywords": [],
# "author": "",
# "license": "ISC",
# "dependencies": {
# "axios": "^1.5.1",
# "express": "^4.18.2",
# "mongoose": "^7.6.1",
# "socket.io": "^4.7.2"
# },
# "devDependencies": {
# "nodemon": "^3.0.1"
# }
# }'''.lstrip()

View File

@@ -1,148 +0,0 @@
import builtins
import json
import pytest
from unittest.mock import patch, MagicMock
from dotenv import load_dotenv
load_dotenv()
from utils.custom_print import get_custom_print
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
from helpers.Debugger import Debugger
from helpers.test_Project import create_project
from test.mock_questionary import MockQuestionary
################## NOTE: this test needs to be run in debug mode with breakpoints ##################
@pytest.mark.uses_tokens
@patch('pilot.helpers.AgentConvo.get_saved_development_step')
@patch('pilot.helpers.AgentConvo.save_development_step')
@patch('utils.questionary.save_user_input')
@patch('helpers.cli.run_command')
@patch('helpers.cli.save_command_run')
# @patch('pilot.helpers.cli.execute_command', return_value=('', 'DONE', 0))
def test_debug(
# mock_execute_command,
mock_save_command, mock_run_command,
mock_save_input, mock_save_step, mock_get_saved_step):
# Given
builtins.print, ipc_client_instance = get_custom_print({})
project = create_project()
project.current_step = 'coding'
developer = Developer(project)
project.developer = developer
convo = AgentConvo(developer)
convo.load_branch = lambda x: None
debugger = Debugger(developer)
# TODO: mock agent.project.developer.execute_task
# convo.messages.append()
convo.construct_and_add_message_from_prompt('dev_ops/ran_command.prompt', {
'cli_response': '''
stderr:
```
node:internal/modules/cjs/loader:1080
throw err;
^
Error: Cannot find module 'mime'
Require stack:
- /workspace/chat_app/node_modules/send/index.js
- /workspace/chat_app/node_modules/express/lib/utils.js
- /workspace/chat_app/node_modules/express/lib/application.js
- /workspace/chat_app/node_modules/express/lib/express.js
- /workspace/chat_app/node_modules/express/index.js
- /workspace/chat_app/server.js
at Module._resolveFilename (node:internal/modules/cjs/loader:1077:15)
at Module._load (node:internal/modules/cjs/loader:922:27)
at Module.require (node:internal/modules/cjs/loader:1143:19)
at require (node:internal/modules/cjs/helpers:121:18)
at Object.<anonymous> (/workspace/chat_app/node_modules/send/index.js:24:12)
at Module._compile (node:internal/modules/cjs/loader:1256:14)
at Module._extensions..js (node:internal/modules/cjs/loader:1310:10)
at Module.load (node:internal/modules/cjs/loader:1119:32)
at Module._load (node:internal/modules/cjs/loader:960:12)
```
stdout:
```
> chat_app@1.0.0 start
> node server.js
```
'''
})
mock_questionary = MockQuestionary(['', ''])
with patch('utils.questionary.questionary', mock_questionary):
# When
result = debugger.debug(convo, command={'command': 'npm run start'}, is_root_task=True)
# Then
assert result == {'success': True}
@patch('helpers.AgentConvo.create_gpt_chat_completion')
@patch('helpers.AgentConvo.save_development_step')
def test_debug_need_to_see_output(mock_save_step, mock_get_completion):
# Given
builtins.print, ipc_client_instance = get_custom_print({})
project = create_project()
project.current_step = 'coding'
developer = Developer(project)
# hard-wired command output
developer.step_command_run = MagicMock()
developer.step_command_run.side_effect = [
{
'cli_response': 'stdout:\n```\n' + json.dumps({'dependencies': {'something': '0.1.2'}}) + '\n```\n',
'success': True,
}, {
'cli_response': 'app is running...',
'success': True,
}
]
developer.step_test = MagicMock()
developer.step_test.return_value = {'success': True}
project.developer = developer
debugger = Debugger(developer)
convo = AgentConvo(developer)
convo.load_branch = MagicMock()
convo.replace_files = MagicMock()
# hard-wired LLM responses, 1st response asks to see output
mock_get_completion.side_effect = [{'text': json.dumps(response)} for response in [{
'thoughts': 'Hmmm, testing',
'reasoning': 'I need to see the output of the command',
'steps': [
{
'type': 'command',
'command': 'cat package.json',
'need_to_see_output': True
}, {
'type': 'command',
'command': 'npm install something',
}
]
}, {
'thoughts': 'It is already installed',
'reasoning': 'I installed it earlier',
'steps': [
{
'type': 'command',
'command': 'npm start',
'command_id': 'app',
}
]
}]]
# When
debugger.debug(convo, command={'command': 'npm run start'}, is_root_task=True)
# Then we call the LLM twice, second time to show the output
assert mock_get_completion.call_count == 2
prompt = mock_get_completion.call_args_list[1].args[0][2]['content']
assert prompt.startswith('{"thoughts": "It is already installed", "reasoning": "I installed it earlier", "steps": [{"type": "command", "command": "npm start", "command_id": "app"}]}'.lstrip())
# And eventually we start the app
assert developer.step_command_run.call_count == 2
assert developer.step_command_run.call_args_list[1].args[1][1]['command'] == 'npm start'

View File

@@ -1,425 +0,0 @@
import os
import json
from pathlib import Path
import pytest
from unittest.mock import patch, MagicMock
from helpers.Project import Project
test_root = str(Path(__file__).parent.parent.parent / Path("workspace") / Path("gpt-pilot-test"))
def create_project():
os.environ["AUTOFIX_FILE_PATHS"] = "false"
project = Project({
'app_id': 'test-project',
'name': 'TestProject',
'app_type': ''
})
project.set_root_path(test_root)
project.app = 'test'
project.current_step = 'test'
return project
class TestProject:
@pytest.mark.parametrize('file_path, file_name, expected', [
('file.txt', 'file.txt', f'{test_root}/file.txt'),
('', 'file.txt', f'{test_root}/file.txt'),
('path/', 'file.txt', f'{test_root}/path/file.txt'),
('path/to/', 'file.txt', f'{test_root}/path/to/file.txt'),
('path/to/file.txt', 'file.txt', f'{test_root}/path/to/file.txt'),
('./path/to/file.txt', 'to/file.txt', f'{test_root}/path/to/file.txt'),
('./package.json', 'package.json', f'{test_root}/package.json'),
])
def test_get_full_path(self, file_path, file_name, expected):
# Given
project = create_project()
# When
relative_path, absolute_path = project.get_full_file_path(file_path, file_name)
# Then
assert absolute_path == str(Path(expected))
@pytest.mark.parametrize(
("file_path", "file_name", "expected_path", "expected_absolute_path"), [
('', '', '/', f'{test_root}/'),
('', '.', '/', f'{test_root}/'),
('', '.env', '/', f'{test_root}/.env'),
('', '~/', '/', f'{test_root}/'),
('', f'{test_root}/', '/', f'{test_root}/'),
('', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
('', 'server.js', '/', f'{test_root}/server.js'),
('', 'folder1', '/folder1', f'{test_root}/folder1/'),
('', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('.', '', '/', f'{test_root}/'),
('.', '.', '/', f'{test_root}/'),
('.', '.env', '/', f'{test_root}/.env'),
('.', '~/', '/', f'{test_root}/'),
('.', f'{test_root}/', '/', f'{test_root}/'),
('.', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('.', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
('.', 'server.js', '/', f'{test_root}/server.js'),
('.', 'folder1', '/folder1', f'{test_root}/folder1/'),
('.', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('.', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('.', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('.', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('.env', '', '/', f'{test_root}/.env'),
('.env', '.', '/', f'{test_root}/.env'),
('.env', '.env', '/', f'{test_root}/.env'),
('.env', '~/', '/', f'{test_root}/.env'),
('.env', f'{test_root}/', '/', f'{test_root}/.env'),
('.env', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/.env'),
('.env', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/.env'),
('.env', 'server.js', '/', f'{test_root}/server.js'),
('.env', 'folder1', '/folder1', f'{test_root}/folder1/.env'),
('.env', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('.env', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('.env', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.env', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.env', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('.env', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('.env', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('~/', '', '/', f'{test_root}/'),
('~/', '.', '/', f'{test_root}/'),
('~/', '.env', '/', f'{test_root}/.env'),
('~/', '~/', '/', f'{test_root}/'),
('~/', f'{test_root}/', '/', f'{test_root}/'),
('~/', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('~/', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
('~/', 'server.js', '/', f'{test_root}/server.js'),
('~/', 'folder1', '/folder1', f'{test_root}/folder1/'),
('~/', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('~/', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('~/', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('~/', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
(f'{test_root}/', '', '/', f'{test_root}/'),
(f'{test_root}/', '.', '/', f'{test_root}/'),
(f'{test_root}/', '.env', '/', f'{test_root}/.env'),
(f'{test_root}/', '~/', '/', f'{test_root}/'),
(f'{test_root}/', f'{test_root}/', '/', f'{test_root}/'),
(f'{test_root}/', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/', 'server.js', '/', f'{test_root}/server.js'),
(f'{test_root}/', 'folder1', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
(f'{test_root}/', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
(f'{test_root}/', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
(f'{test_root}/folder1', '', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', '.', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', '.env', '/folder1', f'{test_root}/folder1/.env'),
(f'{test_root}/folder1', '~/', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', f'{test_root}/', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', f'{test_root}/Folder With Space/', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/'),
(f'{test_root}/folder1', 'server.js', '/folder1', f'{test_root}/folder1/server.js'),
(f'{test_root}/folder1', 'folder1', '/folder1', f'{test_root}/folder1/'),
(f'{test_root}/folder1', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
(f'{test_root}/folder1', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
(f'{test_root}/folder1', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1', f'{test_root}/Folder With Space/server.js', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/server.js'),
(f'{test_root}/folder1', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
(f'{test_root}/Folder With Space/', '', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/Folder With Space/', '.', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/Folder With Space/', '.env', '/Folder With Space', f'{test_root}/Folder With Space/.env'),
(f'{test_root}/Folder With Space/', '~/', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/Folder With Space/', f'{test_root}/', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/Folder With Space/', f'{test_root}/folder1', '/Folder With Space/folder1', f'{test_root}/Folder With Space/folder1/'),
(f'{test_root}/Folder With Space/', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
(f'{test_root}/Folder With Space/', 'server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/', 'folder1', '/Folder With Space/folder1', f'{test_root}/Folder With Space/folder1/'),
(f'{test_root}/Folder With Space/', 'folder1/folder2', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/'),
(f'{test_root}/Folder With Space/', 'folder1/folder2/', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/'),
(f'{test_root}/Folder With Space/', 'folder1/folder2/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
(f'{test_root}/Folder With Space/', f'{test_root}/folder1/folder2/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
(f'{test_root}/Folder With Space/', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/', '~/folder1/folder2/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
(f'{test_root}/Folder With Space/', './folder1/server.js', '/Folder With Space/folder1', f'{test_root}/Folder With Space/folder1/server.js'),
('server.js', '', '/', f'{test_root}/server.js'),
('server.js', '.', '/', f'{test_root}/server.js'),
('server.js', '.env', '/', f'{test_root}/.env'),
('server.js', '~/', '/', f'{test_root}/server.js'),
('server.js', f'{test_root}/', '/', f'{test_root}/server.js'),
('server.js', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/server.js'),
('server.js', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('server.js', 'server.js', '/', f'{test_root}/server.js'),
('server.js', 'folder1', '/folder1', f'{test_root}/folder1/server.js'),
('server.js', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('server.js', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('server.js', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('server.js', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('server.js', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('folder1', '', '/folder1', f'{test_root}/folder1/'),
('folder1', '.', '/folder1', f'{test_root}/folder1/'),
('folder1', '.env', '/folder1', f'{test_root}/folder1/.env'),
('folder1', '~/', '/folder1', f'{test_root}/folder1/'),
('folder1', f'{test_root}/', '/folder1', f'{test_root}/folder1/'),
('folder1', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('folder1', f'{test_root}/Folder With Space/', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/'),
('folder1', 'server.js', '/folder1', f'{test_root}/folder1/server.js'),
('folder1', 'folder1', '/folder1', f'{test_root}/folder1/'),
('folder1', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1', f'{test_root}/Folder With Space/server.js', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/server.js'),
('folder1', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('folder1/folder2', '', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', '.', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', '.env', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('folder1/folder2', '~/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', f'{test_root}/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', f'{test_root}/folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', f'{test_root}/Folder With Space/', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/'),
('folder1/folder2', 'server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2', 'folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2', f'{test_root}/Folder With Space/server.js', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
('folder1/folder2', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2', './folder1/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/', '', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', '.', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', '.env', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('folder1/folder2/', '~/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', f'{test_root}/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', f'{test_root}/folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', f'{test_root}/Folder With Space/', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/'),
('folder1/folder2/', 'server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/', 'folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('folder1/folder2/', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/', f'{test_root}/Folder With Space/server.js', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
('folder1/folder2/', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/', './folder1/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', '', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', '.', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', '.env', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('folder1/folder2/server.js', '~/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', f'{test_root}/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', f'{test_root}/folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', f'{test_root}/Folder With Space/', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
('folder1/folder2/server.js', 'server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', 'folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
('folder1/folder2/server.js', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('folder1/folder2/server.js', './folder1/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', '', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', '.', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', '.env', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
(f'{test_root}/folder1/folder2/server.js', '~/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', f'{test_root}/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', f'{test_root}/folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', f'{test_root}/Folder With Space/', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', 'server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', 'folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/folder1/folder2/server.js', './folder1/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
(f'{test_root}/Folder With Space/server.js', '', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', '.', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', '.env', '/Folder With Space', f'{test_root}/Folder With Space/.env'),
(f'{test_root}/Folder With Space/server.js', '~/', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', f'{test_root}/', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', f'{test_root}/folder1', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', 'server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', 'folder1', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', 'folder1/folder2', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', 'folder1/folder2/', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', 'folder1/folder2/server.js', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', '~/folder1/folder2/server.js', '/folder1/folder2/Folder With Space', f'{test_root}/folder1/folder2/Folder With Space/server.js'),
(f'{test_root}/Folder With Space/server.js', './folder1/server.js', '/folder1/Folder With Space', f'{test_root}/folder1/Folder With Space/server.js'),
('~/folder1/folder2/server.js', '', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', '.', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', '.env', '/folder1/folder2', f'{test_root}/folder1/folder2/.env'),
('~/folder1/folder2/server.js', '~/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', f'{test_root}/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', f'{test_root}/folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', f'{test_root}/Folder With Space/', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', 'server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', 'folder1', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space/folder1/folder2', f'{test_root}/Folder With Space/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('~/folder1/folder2/server.js', './folder1/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', '', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', '.', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', '.env', '/folder1', f'{test_root}/folder1/.env'),
('./folder1/server.js', '~/', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', f'{test_root}/', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', f'{test_root}/Folder With Space/', '/Folder With Space/folder1', f'{test_root}/Folder With Space/folder1/server.js'),
('./folder1/server.js', 'server.js', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', 'folder1', '/folder1', f'{test_root}/folder1/server.js'),
('./folder1/server.js', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', f'{test_root}/Folder With Space/server.js', '/Folder With Space/folder1', f'{test_root}/Folder With Space/folder1/server.js'),
('./folder1/server.js', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('./folder1/server.js', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
])
def test_get_full_path_permutations(self, file_path, file_name, expected_path, expected_absolute_path):
"""
Test many different permutations of file path/name combinations.
"""
project = create_project()
relative_path, absolute_path = project.get_full_file_path(file_path, file_name)
# Normalize results before comparison, in case of Windows
assert str(Path(relative_path)) == str(Path(expected_path))
assert str(Path(absolute_path)) == str(Path(expected_absolute_path))
@pytest.mark.parametrize('test_data', [
{'name': 'package.json', 'path': 'package.json', 'saved_to': f'{test_root}/package.json'},
{'name': 'package.json', 'path': '', 'saved_to': f'{test_root}/package.json'},
{'name': 'package.json', 'path': '/', 'saved_to': f'{test_root}/package.json'},
{'name': 'package.json', 'path': None, 'saved_to': f'{test_root}/package.json'},
{'name': None, 'path': 'public/index.html', 'saved_to': f'{test_root}/public/index.html'},
{'name': '', 'path': 'public/index.html', 'saved_to': f'{test_root}/public/index.html'},
# TODO: Treatment of paths outside of the project workspace - https://github.com/Pythagora-io/gpt-pilot/issues/129
{'name': '/etc/hosts.txt', 'path': None, 'saved_to': f'{test_root}/etc/hosts.txt'},
# {'name': '.gitconfig', 'path': '~', 'saved_to': '~/.gitconfig'},
# {'name': '.gitconfig', 'path': '~/.gitconfig', 'saved_to': '~/.gitconfig'},
# {'name': 'gpt-pilot.log', 'path': '/temp/gpt-pilot.log', 'saved_to': '/temp/gpt-pilot.log'},
])
@patch('helpers.Project.update_file')
@patch('helpers.Project.File')
@patch('helpers.Project.describe_file')
def test_save_file(self, mock_describe_file, mock_file_insert, mock_update_file, test_data):
# Given
data = {'content': 'Hello World!'}
if test_data['name'] is not None:
data['name'] = str(Path(test_data['name']))
if test_data['path'] is not None:
data['path'] = str(Path(test_data['path']))
mock_describe_file.return_value = "test description"
project = create_project()
# When
project.save_file(data)
# Then assert that update_file was called with the correct path
expected_saved_to = str(Path(test_data['saved_to']))
mock_update_file.assert_called_once_with(expected_saved_to, 'Hello World!', project=project)
mock_describe_file.assert_called_once()
# Also assert that File.insert was called with the expected arguments
# expected_file_data = {'app': project.app, 'path': test_data['path'], 'name': test_data['name'],
# 'full_path': expected_saved_to}
# mock_file_insert.assert_called_once_with(**expected_file_data,
class TestProjectFileLists:
def setup_method(self):
# Given a project
project = create_project()
self.project = project
project.set_root_path(os.path.join(os.path.dirname(__file__), '../../workspace/directory_tree'))
project.project_description = 'Test Project'
project.development_plan = [{
'description': 'Test User Story',
'user_review_goal': 'Test User Review Goal',
}]
# with directories including common.IGNORE_PATHS
src = os.path.join(project.root_path, 'src')
foo = os.path.join(project.root_path, 'src/foo')
files_no_folders = os.path.join(foo, 'files_no_folders')
os.makedirs(src, exist_ok=True)
os.makedirs(foo, exist_ok=True)
os.makedirs(foo + '/empty1', exist_ok=True)
os.makedirs(foo + '/empty2', exist_ok=True)
os.makedirs(files_no_folders, exist_ok=True)
for dir_name in ['.git', '.idea', '.vscode', '__pycache__', 'node_modules', 'venv', 'dist', 'build']:
os.makedirs(os.path.join(project.root_path, dir_name), exist_ok=True)
# ...and files
with open(os.path.join(project.root_path, 'package.json'), 'w') as file:
json.dump({'name': 'test app'}, file, indent=2)
for path in [
os.path.join(src, 'main.js'),
os.path.join(src, 'other.js'),
os.path.join(foo, 'bar.js'),
os.path.join(foo, 'fighters.js'),
os.path.join(files_no_folders, 'file1.js'),
os.path.join(files_no_folders, 'file2.js'),
]:
with open(path, 'w') as file:
file.write('console.log("Hello World!");')
# and a non-empty .gpt-pilot directory
project.dot_pilot_gpt.write_project(project)
def test_get_directory_tree(self):
# When
tree = self.project.get_directory_tree()
# Then we should not be including the .gpt-pilot directory or other ignored directories
# print('\n' + tree)
assert tree == '''
/
/src
/foo
/empty1
/empty2
/files_no_folders: file1.js, file2.js
bar.js, fighters.js
main.js, other.js
package.json
'''.lstrip()
@patch('helpers.Project.DevelopmentSteps.get_or_create', return_value=('test', True))
@patch('helpers.Project.File.get_or_create', return_value=(MagicMock(), True))
@patch('helpers.Project.FileSnapshot.get_or_create', return_value=(MagicMock(), True))
def test_save_files_snapshot(self, mock_snap, mock_file, mock_step):
# Given a snapshot of the files in the project
# When we save the file snapshot
self.project.save_files_snapshot('test')
# Then the files should be saved to the project, but nothing from `.gpt-pilot/`
assert mock_file.call_count == 7
files = ['package.json', 'main.js', 'file1.js', 'file2.js', 'bar.js', 'fighters.js', 'other.js']
for i in range(7):
assert mock_file.call_args_list[i][1]['name'] in files

View File

@@ -1,221 +0,0 @@
import platform
from unittest.mock import patch, MagicMock, call
import pytest
from helpers.cli import execute_command, terminate_process, run_command_until_success
from helpers.test_Project import create_project
@pytest.mark.xfail()
@patch("helpers.cli.os")
@patch("helpers.cli.subprocess")
def test_terminate_process_not_running(mock_subprocess, mock_os):
terminate_process(1234, 'not running')
mock_subprocess.run.assert_not_called()
mock_os.killpg.assert_not_called()
@patch("helpers.cli.MIN_COMMAND_RUN_TIME", create=True, new=100)
@patch('helpers.cli.run_command')
@patch("helpers.cli.terminate_process")
def test_execute_command_timeout_exit_code(mock_terminate_process, mock_run):
# Given
project = create_project()
command = 'cat'
timeout = 0.1
mock_process = MagicMock()
mock_process.poll.return_value = None
mock_process.pid = 1234
mock_run.return_value = mock_process
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout, force=True)
# Then
assert cli_response is not None
assert llm_response == 'DONE'
assert exit_code is not None
mock_terminate_process.assert_called_once_with(1234)
def mock_run_command(command, path, q, q_stderr):
q.put('hello')
mock_process = MagicMock()
mock_process.returncode = 0
mock_process.pid = 1234
return mock_process
@patch('helpers.cli.ask_user', return_value='')
@patch('helpers.cli.run_command')
@patch("helpers.cli.terminate_process")
def test_execute_command_enter(mock_terminate_process, mock_run, mock_ask):
# Given
project = create_project()
command = 'echo hello'
timeout = 1000
mock_run.side_effect = mock_run_command
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)
# Then
assert 'hello' in cli_response
assert llm_response == 'DONE'
assert exit_code == 0
mock_terminate_process.assert_called_once_with(1234)
@patch('helpers.cli.ask_user', return_value='yes')
@patch('helpers.cli.run_command')
@patch('helpers.cli.terminate_process')
def test_execute_command_yes(mock_terminate_process, mock_run, mock_ask):
# Given
project = create_project()
command = 'echo hello'
timeout = 1000
mock_run.side_effect = mock_run_command
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)
# Then
assert 'hello' in cli_response
assert llm_response == 'DONE'
assert exit_code == 0
mock_terminate_process.assert_called_once_with(1234)
@patch('helpers.cli.ask_user', return_value='no')
def test_execute_command_rejected_with_no(mock_ask):
# Given
project = create_project()
command = 'ping www.google.com'
timeout = 1
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)
# Then
assert cli_response is None
assert llm_response == 'SKIP'
assert exit_code is None
@patch('helpers.cli.ask_user', return_value='no, my DNS is not working, ping 8.8.8.8 instead')
def test_execute_command_rejected_with_message(mock_ask):
# Given
project = create_project()
command = 'ping www.google.com'
timeout = 1
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)
# Then
assert cli_response is None
assert llm_response == 'no, my DNS is not working, ping 8.8.8.8 instead'
assert exit_code is None
@patch('helpers.cli.execute_command', return_value=('hello', None, 0))
def test_run_command_until_success(mock_execute):
# Given
convo = MagicMock()
command = 'ping www.google.com'
timeout = 1
# When
result = run_command_until_success(convo, command, timeout)
# Then
assert result['success']
assert result['cli_response'] == 'hello'
assert convo.send_message.call_count == 1
@patch('helpers.cli.execute_command', return_value=('running...', 'DONE', None))
def test_run_command_until_success_app(mock_execute):
# Given
convo = MagicMock()
command = 'npm run start'
command_id = 'app'
timeout = 1000
# When
result = run_command_until_success(convo, command, timeout, command_id=command_id)
# Then
assert result['success']
assert result['cli_response'] == 'running...'
assert convo.send_message.call_count == 0
@patch('helpers.cli.execute_command', return_value=('error', None, 2))
def test_run_command_until_success_error(mock_execute):
# Given
convo = MagicMock()
convo.send_message.return_value = 'NEEDS DEBUGGING'
convo.agent.debugger.debug.return_value = False
command = 'ping www.google.com'
timeout = 1
# When
result = run_command_until_success(convo, command, timeout)
# Then
assert convo.send_message.call_count == 1
assert not result['success']
assert result['cli_response'] == 'error'
@patch('helpers.cli.execute_command', return_value=('hell', 'took longer than 2000ms so I killed it', 0))
def test_run_command_until_success_timed_out(mock_execute):
# Given
convo = MagicMock()
convo.send_message.return_value = 'NEEDS DEBUGGING'
convo.agent.debugger.debug.return_value = False
command = 'ping www.google.com'
timeout = 1
# When
result = run_command_until_success(convo, command, timeout)
# Then
assert convo.send_message.call_count == 1
assert not result['success']
assert result['cli_response'] == 'hell'
@patch('helpers.cli.execute_command', return_value=(None, 'DONE', None))
def test_run_command_until_success_no(mock_execute):
# Given
convo = MagicMock()
command = 'ping www.google.com'
timeout = 1
# When
result = run_command_until_success(convo, command, timeout)
# Then
assert result['success']
assert result['cli_response'] is None
assert 'user_input' not in result or result['user_input'] is None
assert convo.send_message.call_count == 0
@patch('helpers.cli.execute_command', return_value=(None, 'no, my DNS is not working, ping 8.8.8.8 instead', None))
def test_run_command_until_success_rejected(mock_execute):
# Given
convo = MagicMock()
command = 'ping www.google.com'
timeout = 1
# When
result = run_command_until_success(convo, command, timeout)
# Then
assert not result['success']
assert 'cli_response' not in result or result['cli_response'] is None
assert result['user_input'] == 'no, my DNS is not working, ping 8.8.8.8 instead'
assert convo.send_message.call_count == 0

View File

@@ -1,65 +0,0 @@
# init CLI
# 1. show the type of the app that needs to be created
# 1.c ask user to press enter if it's ok, or to add the type of the app they want
# if it's not ok, check if the wanted app CAN be created
# if it can, print confirmation message and continue
# if it can't, print error message and exit
# 2. ask user for the main definition of the app
# start the processing queue
# 2. show the user flow of the app
# 2.c ask user to press enter if it's ok, or to add the user flow they want
# ask for input until they just press enter
# recompute the user flow and ask again
# 3. show the COMPONENTS of the app
# 3.1 frontend
# 3.2 backend
# 3.3 database
# 3.4 config
# 3.x ask user to press enter if it's ok, or to add the components they want
# ask for input until they just press enter
# recompute the components and ask again
# 4. break down the FILES that need to be created to support each of the components
# ask user to press enter if it's ok, or to add the files they want
# ask for input until they just press enter
# recompute the files and ask again
# 5. loop through components (IMPORTANT!!!)
# 5.1 loop through use cases
# 5.1.1 for each case in each component, break down the files, functions and dependencies that need to be created
# each function will have a description
# in each loop, we will send all the previous files and functions so that LLM can change them if needed
# 6. break down the tests that need to be created
# in the prompt, send all the files and functions
# start from the high level tests and go down to the unit tests
# 6.1 ask user to press enter if it's ok, or to add the tests they want
# ask for input until they just press enter
# recompute the tests and ask again
# 7. write the tests
# 8. write the files for each test
# 9. run each created test once the code is written
# start from low level tests and do the high level tests last
# track which test is related to which code
# GPT should first say which functions it will use for a test and then we check if any of those functions is already written and if so, we send it to LLM to change it
# track code coverage and increase to get to 100%
# if the code requires something from config, ask the user to add it
# if the code requires
# when files overlap, ask LLM to combine them
# 10. try debugging 5 times
# 10.1 if it doesn't work, ask the user to debug (!!!IMPORTANT!!!)
# show them the explanations
# ask for input if they want to enter something and retry 5 debugging attempts
# 11. create build/run script
# 12. RUN THE APP
# 4. show the components of the app setup
# a. installation process
# b. configuration process
# c. running process
# d. building process
# e. testing process
# comments
# 1. We could use an additional model that extracts from the GPT response what needs to be run, what needs to be updated, which comment to compose, etc. - so we don't also have to teach the original model that in context
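The "ask for input until they just press enter" pattern recurs throughout this plan; below is a minimal sketch of that confirm-or-refine loop. The `recompute` callback is a hypothetical stand-in for the LLM call, not code from this repo:

```python
def confirm_or_refine(item, recompute):
    """Show `item` and loop: empty input accepts it as-is, anything else is
    fed to `recompute` (a hypothetical LLM call) to produce a revised version."""
    while True:
        print(item)
        feedback = input("Press enter if this is ok, or describe changes: ").strip()
        if not feedback:
            return item
        item = recompute(item, feedback)
```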

View File

@@ -1,61 +0,0 @@
import os
import re
import logging
def setup_logger():
# Create a custom format for your logs
log_format = "%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(levelname)s: %(message)s"
# Create a log handler for file output
file_handler = logging.FileHandler(
filename=os.path.join(os.path.dirname(__file__), 'debug.log'),
mode='w',
encoding='utf-8',
)
# Apply the custom format to the handler
formatter = logging.Formatter(log_format)
file_handler.setFormatter(formatter)
# file_handler.addFilter(lambda record: record.levelno <= logging.INFO)
file_handler.addFilter(filter_sensitive_fields)
# Create a logger and add the handler
logger = logging.getLogger()
logger.addHandler(file_handler)
if os.getenv('DEBUG') == 'true':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
sensitive_fields = ['--api-key', 'password']
def filter_sensitive_fields(record):
# TODO: also remove escape sequences for colors, bold etc
if isinstance(record.args, dict): # check if args is a dictionary
args = record.args.copy()
for field in sensitive_fields:
if field in args:
args[field] = '*****'
record.args = args
elif isinstance(record.args, tuple): # check if args is a tuple
args_list = list(record.args)
# Convert the tuple to a list and replace sensitive fields
args_list = ['*****' if arg in sensitive_fields else arg for arg in args_list]
record.args = tuple(args_list)
# Remove ANSI escape sequences - colours & bold
# Peewee passes a tuple as record.msg
if isinstance(record.msg, str):
record.msg = re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', record.msg)
return True
logger = setup_logger()
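For illustration, a hedged example of what the filter above does to log records; the argument values here are made up, not real app output:

```python
# A dict argument is copied and '--api-key' (a sensitive field) is masked to '*****'
logger.info("starting with args: %s", {'--api-key': 'sk-secret', 'verbose': True})

# ANSI escape sequences (colors, bold) are stripped from the message string
# before it is written to debug.log
logger.info("\x1b[31merror:\x1b[0m something went wrong")
```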

View File

@@ -1,177 +0,0 @@
# main.py
import builtins
import json
import os
import sys
import traceback
try:
from dotenv import load_dotenv
except ImportError:
gpt_pilot_root = os.path.dirname(os.path.dirname(__file__))
venv_path = os.path.join(gpt_pilot_root, 'pilot-env')
requirements_path = os.path.join(gpt_pilot_root, 'requirements.txt')
if sys.prefix == sys.base_prefix:
venv_python_path = os.path.join(venv_path, 'scripts' if sys.platform == 'win32' else 'bin', 'python')
print('Python environment for GPT Pilot is not set up.')
print(f'Please create Python virtual environment: {sys.executable} -m venv {venv_path}')
print(f'Then install the required dependencies with: {venv_python_path} -m pip install -r {requirements_path}')
else:
print('Python environment for GPT Pilot is not completely set up.')
print(f'Please run `{sys.executable} -m pip install -r {requirements_path}` to finish Python setup, and rerun GPT Pilot.')
sys.exit(-1)
load_dotenv(override=True)
from utils.style import color_red
from utils.custom_print import get_custom_print
from helpers.Project import Project
from utils.arguments import get_arguments
from utils.exit import exit_gpt_pilot
from logger.logger import logger
from database.database import (
database_exists,
create_database,
tables_exist,
create_tables,
get_created_apps_with_steps,
delete_app,
)
from utils.settings import settings, loader, get_version
from utils.telemetry import telemetry
from helpers.exceptions import ApiError, TokenLimitError, GracefulExit
def init():
# Check if the "euclid" database exists, if not, create it
if not database_exists():
create_database()
# Check if the tables exist, if not, create them
if not tables_exist():
create_tables()
arguments = get_arguments()
logger.info('Starting with args: %s', arguments)
return arguments
if __name__ == "__main__":
ask_feedback = True
project = None
run_exit_fn = True
args = init()
try:
# sys.argv.append('--ux-test=' + 'continue_development')
builtins.print, ipc_client_instance = get_custom_print(args)
if '--api-key' in args:
os.environ["OPENAI_API_KEY"] = args['--api-key']
if '--model-name' in args:
os.environ['MODEL_NAME'] = args['--model-name']
if '--api-endpoint' in args:
os.environ["OPENAI_ENDPOINT"] = args['--api-endpoint']
if '--get-created-apps-with-steps' in args:
run_exit_fn = False
if ipc_client_instance is not None:
print({ 'db_data': get_created_apps_with_steps() }, type='info')
else:
print('----------------------------------------------------------------------------------------')
print('app_id step dev_step name')
print('----------------------------------------------------------------------------------------')
print('\n'.join(f"{app['id']}: {app['status']:20} "
f"{'' if len(app['development_steps']) == 0 else app['development_steps'][-1]['id']:3}"
f" {app['name']}" for app in get_created_apps_with_steps()))
elif '--delete-app' in args:
run_exit_fn = False
app_id = args['--delete-app']
delete_app(app_id)
elif '--version' in args:
print(get_version())
run_exit_fn = False
elif '--ux-test' in args:
from test.ux_tests import run_test
run_test(args['--ux-test'], args)
run_exit_fn = False
else:
if settings.telemetry is None:
telemetry.setup()
loader.save("telemetry")
if args.get("app_id"):
telemetry.set("is_continuation", True)
if "email" in args:
telemetry.set("user_contact", args["email"])
if "extension_version" in args:
telemetry.set("extension_version", args["extension_version"])
# TODO get checkpoint from database and fill the project with it
project = Project(args, ipc_client_instance=ipc_client_instance)
if project.check_ipc():
telemetry.set("is_extension", True)
started = project.start()
if started:
project.finish()
print('Thank you for using Pythagora!', type='ipc', category='pythagora')
telemetry.set("end_result", "success:exit")
else:
run_exit_fn = False
telemetry.set("end_result", "failure:api-error")
print('Exit', type='exit')
except (ApiError, TokenLimitError) as err:
telemetry.record_crash(err, end_result="failure:api-error")
telemetry.send()
run_exit_fn = False
if isinstance(err, TokenLimitError):
print('', type='verbose', category='error')
print(color_red(
"We sent too large request to the LLM, resulting in an error. "
"This is usually caused by including framework files in an LLM request. "
"Here's how you can get GPT Pilot to ignore those extra files: "
"https://bit.ly/faq-token-limit-error"
))
print('Exit', type='exit')
except KeyboardInterrupt:
telemetry.set("end_result", "interrupt")
if project is not None and project.check_ipc():
telemetry.send()
run_exit_fn = False
except GracefulExit:
# can't call project.finish_loading() here because project can be None
run_exit_fn = False
print('', type='loadingFinished')
print('Exit', type='exit')
except Exception as err:
print('', type='verbose', category='error')
print(color_red('---------- GPT PILOT EXITING WITH ERROR ----------'))
traceback.print_exc()
print(color_red('--------------------------------------------------'))
ask_feedback = False
telemetry.record_crash(err)
finally:
if project is not None:
if project.check_ipc():
ask_feedback = False
project.current_task.exit()
project.finish_loading(do_cleanup=False)
if run_exit_fn:
exit_gpt_pilot(project, ask_feedback)

View File

@@ -1,66 +0,0 @@
You're designing the architecture and technical specifications for a new project.
If the project requirements call out for specific technology, use that. Otherwise, if working on a web app, prefer Node.js for the backend (with Express if a web server is needed, and MongoDB if a database is needed), and Bootstrap for the front-end. You MUST NOT use Docker, Kubernetes, microservices and single-page app frameworks like React, Next.js, Angular, Vue or Svelte unless the project details explicitly require it.
Here are the details for the new project:
-----------------------------
{{ project_details }}
{{ features_list }}
-----------------------------
Based on these details, think step by step to design the architecture for the project and choose technologies to use in building it.
1. First, design and describe project architecture in general terms
2. Then, list any system dependencies that should be installed on the system prior to the start of development. For each system dependency, output a {{ os }} command to check whether it's installed.
3. Finally, list any other 3rd party packages or libraries that will be used (these will be installed later using a package manager in the project repository/environment).
4. {% if templates %}Optionally, choose a project starter template.{% else %}(for this project there are no available starter/boilerplate templates, so there's no template to choose){% endif %}
{% if templates %}You have an option to use a project template that implements standard boilerplate/scaffolding so you can start faster and be more productive. To be considered, a template must be compatible with the architecture and technologies you've chosen (it doesn't need to implement everything that will be used in the project, just a useful subset). If multiple templates can be considered, pick the one that's the best match.
If no project templates are a good match, don't pick any! It's better to start from scratch than to use a template that is not a good fit for the project and then spend time reworking it to fit the requirements.
Here are the available project templates:
{% for name, tpl in templates.items() %}
### {{ name }}
{{ tpl['description']}}
Contains:
{{ tpl['summary']}}
{% endfor %}{% endif %}
*IMPORTANT*: You must follow these rules while creating your project:
* You must only list *system* dependencies, i.e. the ones that need to be installed (typically as admin) to set up the programming language, database, etc. Any packages that will need to be installed via language/platform-specific package managers are *not* system dependencies.
* If there are several popular options (such as Nginx or Apache for the web server), pick the one that would be more suitable for the app in question.
* DO NOT include text editors, IDEs, shells, OpenSSL, CLI tools such as git, AWS, or Stripe clients, or other utilities in your list - only direct dependencies required to build and run the project.
* If a dependency (such as a database) has a cloud alternative or can be installed on another computer (i.e. isn't required on this computer), you must mark it as `required_locally: false`
Output only your response in JSON format like in this example, without other commentary:
```json
{
"architecture": "Detailed description of the architecture of the application",
"system_dependencies": [
{
"name": "Node.js",
"description": "JavaScript runtime for building apps. This is required to be able to run the app you're building.",
"test": "node --version",
"required_locally": true
},
{
"name": "MongoDB",
"description": "NoSQL database. If you don't want to install MongoDB locally, you can use a cloud version such as MongoDB Atlas.",
"test": "mongosh --version",
"required_locally": false
},
...
],
"package_dependencies": [
{
"name": "express",
"description": "Express web server for Node"
},
...
],
"template": "name of the project template to use" // or null if you decide not to use a project template
}
```
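For context, a minimal sketch of how a consumer of this prompt might parse the JSON above and run each system dependency's `test` command. This is illustrative only (it assumes the model returns exactly the shape shown, with the `...` placeholders removed), not the pipeline's actual code:

```python
import json
import subprocess

def check_system_dependencies(llm_response: str) -> None:
    """Parse the architecture JSON and run each dependency's check command."""
    spec = json.loads(llm_response)
    for dep in spec["system_dependencies"]:
        result = subprocess.run(dep["test"], shell=True, capture_output=True)
        status = "ok" if result.returncode == 0 else "missing"
        scope = "required locally" if dep["required_locally"] else "cloud alternative ok"
        print(f"{dep['name']}: {status} ({scope})")
```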

View File

@@ -1,9 +0,0 @@
{% if files|length > 0 %}Here are files that were modified during this task implementation:
---start_of_current_files---
{% for file in files %}
**{{ file.path }}/{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
{{ file.content }}
```
{% endfor %}
---end_of_current_files---{% endif %}

View File

@@ -1 +0,0 @@
All the steps will be executed in the order in which you give them, so it is very important that you think about all steps before you start listing them. For example, you should never code something before you install dependencies, and you should never try to access a file before it exists in the project.

View File

@@ -1,13 +0,0 @@
{% if previous_features %}
Here is the list of features that were previously implemented on top of initial high level description of "{{ name }}":
```
{% for feature in previous_features %}
- {{ loop.index }}. {{ feature['summary'] }}
{% endfor %}
```
{% endif %}{% if current_feature %}Here is the feature that you are implementing right now:
```
{{ current_feature }}
```
{% endif %}

View File

@@ -1 +0,0 @@
**IMPORTANT**: When creating and naming new files, ensure the file naming (camelCase, kebab-case, underscore_case, etc) is consistent with the best practices and coding style of the language.

View File

@@ -1,2 +0,0 @@
**IMPORTANT**
When you think about in which file should the new code go to, always try to make files as small as possible and put code in more smaller files rather than in one big file.

View File

@@ -1,11 +0,0 @@
{% if file_summaries %}These files are currently implemented:{% for fpath, summary in file_summaries.items() %}
* `{{ fpath }}`: {{ summary }}{% endfor %}
{% endif %}{% if files|length > 0 %}Here are the relevant files:
---START_OF_FILES---{% for file in files %}
**{% if file.path %}{{ file.path }}/{% endif %}{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
{{ file.content }}
```
{% endfor %}
---END_OF_FILES---
{% endif -%}

View File

@@ -1,38 +0,0 @@
**IMPORTANT**
You must not tell me to run a command in the database or anything OS-related - only if some dependencies need to be installed. If there is a need to run an OS-related command, specifically tell me that this should be labeled as "Human Intervention" and explain what the human needs to do.
Avoid using "Human Intervention" if possible. You should NOT use "Human Intervention" for anything other than steps that you can't execute. Also, you must not use "Human Intervention" to ask the user to test that the application works, because this will be done separately after all the steps are finished - no need to ask the user now.
Here are a few examples when and how to use "Human Intervention":
------------------------start_of_example_1---------------------------
Here is an example of a good response for the situation where it seems like a 3rd party API, in this case Facebook, is not working:
* "Human Intervention"
"1. Check latest Facebook API documentation for updates on endpoints, parameters, or authentication.
2. Verify Facebook API key/authentication and request format to ensure they are current and correctly implemented.
3. Use REST client tools like Postman or cURL to directly test the Facebook API endpoints.
4. Check the Facebook API's status page for any reported downtime or service issues.
5. Try calling the Facebook API from a different environment to isolate the issue."
------------------------end_of_example_1---------------------------
------------------------start_of_example_2---------------------------
Here is an example of a good response for the situation where the user needs to enable some settings in their Gmail account:
* "Human Intervention"
"To enable sending emails from your Node.js app via your Gmail, account, you need to do the following:
1. Log in to your Gmail account.
2. Go to 'Manage your Google Account' > Security.
3. Scroll down to 'Less secure app access' and turn it on.
4. Under 'Signing in to Google', select 'App Passwords'. (You may need to sign in again)
5. At the bottom, click 'Select app' and choose the app you're using.
6. Click 'Generate'.
Then, use your Gmail address and the password generated in step #6 and put it into the .env file."
------------------------end_of_example_2---------------------------
------------------------start_of_example_3---------------------------
Here is an example for when there are issues with writing to the MongoDB database:
* "Human Intervention"
"1. Verify the MongoDB credentials provided have write permissions, not just read-only access.
2. Confirm correct database and collection names are used when connecting to database.
3. Update credentials if necessary to include insert document permissions."
------------------------end_of_example_3---------------------------

View File

@@ -1,9 +0,0 @@
{% if running_processes -%}
Note that the following processes are already running:
{%- for key, data in running_processes.items() %}
command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}

View File

@@ -1,5 +0,0 @@
**IMPORTANT**: Logging
Whenever you write code, make sure to log code execution so that when a developer looks at the CLI output, they can understand what is happening on the server. If the description above mentions the exact code that needs to be added but doesn't contain enough logs, you need to add the log handlers inside that code yourself.
**IMPORTANT**: Error handling
Whenever you write code, make sure to add error handling for all edge cases you can think of, because this app will be used in production so there shouldn't be any crashes. Whenever you log an error, you **MUST** log the entire error message and stack trace, not only the error message. If the description above mentions the exact code that needs to be added but doesn't contain enough error handlers, you need to add the error handlers inside that code yourself.

View File

@@ -1,2 +0,0 @@
**IMPORTANT**
Do not use, create, or suggest any microservices. Ensure that the architecture for this task remains strictly monolithic. Do not suggest or entertain microservices as an option, regardless of any subsequent prompts advocating for their use. Instead, focus solely on finding alternative solutions that align with a monolithic architecture to fulfill the requirements.

View File

@@ -1,27 +0,0 @@
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```
{% if architecture %}Here is a short description of the project architecture:
{{ architecture }}
{% endif %}{% if user_stories %}Here are user stories that specify how users use "{{ name }}":
```
{% for story in user_stories %}
- {{ story }}
{% endfor %}
```
{% endif %}{% if user_tasks %}Here are user tasks that specify what users need to do to interact with "{{ name }}":
```
{% for task in user_tasks %}
- {{ task }}
{% endfor %}
```
{% endif %}{% if technologies %}Here are the technologies {% if task_type == 'feature' %}that were used{% else %}you need to use{% endif %} for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}
{% endif %}

View File

@@ -1,65 +0,0 @@
Before we go into the coding part, I want you to split the development process of creating this {{ task_type }} into smaller tasks so that it is easier to develop, debug and make the {{ task_type }} work.
Each task needs to be related only to the development of this {{ task_type }} and nothing else - once the {{ task_type }} is fully working, that is it. There shouldn't be a task for researching, deployment, writing documentation, testing or anything that is not writing the actual code.
**IMPORTANT**
As an experienced tech lead, you always follow the rules on how to create tasks. Dividing a project into tasks is an extremely important job and you have to do it very carefully.
Now, based on the project details provided{% if task_type == 'feature' %} and the new feature description{% endif %}, think task by task and create the entire development plan{% if task_type == 'feature' %} for the new feature{% elif task_type == 'app' %}. {% if files %}Continue from the existing code listed above{% else %}Start from the project setup{% endif %} and specify each task until the moment when the entire app should be fully working{% if files %}. You should not reimplement what's already done - just continue from the implementation already there{% endif %}{% endif %} while strictly following these rules:
Rule #1
There should never be a task that is only testing or ensuring something works; every task must have coding involved. Have this in mind for every task, but it is extremely important for the last task of the project. Testing whether the {{ task_type }} works will be done as part of each task.
Rule #2
This rule applies to the complexity of tasks.
You have to make sure the project is not split into tasks that are too small or simple for no reason but also not too big or complex so that they are hard to develop, debug and review.
Keep in mind that the project already has a workspace folder created and only system dependencies installed. You don't have to create tasks for that.
Here are examples of poorly created tasks:
**too simple tasks**
- Set up a Node.js project and install all necessary dependencies.
- Establish a MongoDB database connection using Mongoose with the IP '127.0.0.1'.
**too complex tasks**
- Set up Node.js project with /home, /profile, /register and /login routes that will have user authentication, connection to MongoDB with user schemas, mailing of new users and frontend with nice design.
You must avoid creating tasks that are too simple or too complex. Aim to create tasks of medium complexity. Here are examples of good tasks:
**good tasks**
- Set up a Node.js project, install all necessary dependencies and set up an express server with a simple route to `/ping` that returns the status 200.
- Establish a MongoDB database connection and implement the message schema using Mongoose for persistent storage of messages.
Rule #3
This rule applies to the number of tasks you will create.
Every {{ task_type }} should have a different number of tasks depending on its complexity. Think task by task and create the minimum number of tasks that are relevant for this specific {{ task_type }}.{% if task_type == 'feature' %} If the feature is small, it is ok to have only 1 task.{% endif %} Here are some examples of apps with different complexity that can give you guidance on how many tasks you should create:
Example #1:
app description: "I want to create an app that will just say 'Hello World' when I open it on my localhost:3000."
number of tasks: 1-2
Example #2:
app description: "Create a node.js app that enables users to register and log into the app. On frontend it should have /home (shows user data), /register and /login. It should use sessions to keep user logged in."
number of tasks: 2-4
Example #3:
app description: "A cool online shoe store, with a sleek look. In terms of data models, there are shoes, categories and user profiles. For web pages: product listing, details, shopping cart. It must look cool and jazzy."
number of tasks: 5-15
Rule #4
This rule applies to writing task 'description'.
Every task must have a clear and very detailed 'description' (a minimum of 4 sentences, but it can be more). It must be so clear that even developers who just moved to this project can execute the task without additional questions. It is not enough to just write something like "Create a route for /home". You have to describe what needs to be done in that route, what data needs to be returned, what the status code should be, etc. Give as many details as possible and make sure no information is missing that could be needed for this task.
Here is an example of good and bad task description:
**bad task**
{
"description": "Create a route for /dashboard"
}
**good task**
{
"description": "In 'route.js' add a route for /dashboard that returns the status 200. Route should be accessible only for logged in users. In 'middlewares.js' there should be a check if user is logged in using session. If user is not logged in, it should redirect to /login. If user is logged in, it should return the user data. User data should be fetched from database in 'users' collection using the user id from session."
}
Rule #5
When creating and naming new files, ensure the file naming (camelCase, kebab-case, underscore_case, etc) is consistent with the best practices and coding style of the language.
Pay attention to file paths: if the command or argument is a file or folder from the project, use paths relative to the project root (for example, use `./somefile` instead of `/somefile`).

View File

@@ -1 +0,0 @@
**IMPORTANT**: Pay attention to file paths: if the command or argument is a file or folder from the project, use paths relative to the project root (for example, use `./somefile` instead of `/somefile`).

View File

@@ -1,23 +0,0 @@
**IMPORTANT**
Here are the instructions for Asking Additional Questions:
Direct Questions Only: If there are any points that are not clear, you should draft direct questions to clarify them. Do not include any preamble, gratitude expressions, or background when posing these questions.
Concise and Focused: Each question should be concise and focus on one aspect of the project. Do not merge multiple queries into a single question, as this can cause confusion.
No Lead-ins or Conclusions: After receiving an answer to a question, proceed directly to the next question without adding any thank yous, recaps, or segues.
Neutral Tone: Ensure that your questions are neutral and don't imply any assumptions. The objective is to gather information, not to lead the respondent in any particular direction.
Examples:
Instead of "Thank you for that information. My next question is: Should A be bigger than B?", simply ask "Should A be bigger than B?".
Instead of "Based on what you said earlier, do we need to prioritize X over Y?", just ask "Do we need to prioritize X over Y?".
Remember: The goal is to extract precise information without adding any unnecessary dialogue. Your questions should be straightforward and to the point.
I want your response to be only one question at a time. I will ask you again when I am ready for the next question.
Ask a maximum of {{MAX_QUESTIONS}} questions and after that I want you to respond with "{{END_RESPONSE}}".
If everything is clear before asking those {{MAX_QUESTIONS}} questions, write the response in the following format:
"{{END_RESPONSE}}"

View File

@@ -1,48 +0,0 @@
{% if task_steps and step_index is not none -%}
The current task has been split into multiple steps, and each step is one of the following:
* `command` - command to run
* `save_file` - create or update a file
* `human_intervention` - if the human needs to do something
{% if step_index > 0 %}Here is the list of steps that have been executed:
{% for step in task_steps %}{% if loop.index0 < step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
{%- if 'code_change_description' in type_content -%}
{%- set _ = type_content.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- else -%}
{%- if 'code_change_description' in step -%}
{%- set _ = step.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{{ step }}
{% endif %}{% endfor %}{% endif %}
Here is the step you are currently debugging:
{{ task_steps[step_index] }}
{% if step_index < task_steps|length - 1 %}Here are steps that will be executed once debugging is done:
{% for step in task_steps %}{% if loop.index0 > step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
{%- if 'code_change_description' in type_content -%}
{%- set _ = type_content.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- else -%}
{%- if 'code_change_description' in step -%}
{%- set _ = step.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{{ step }}
{% endif %}{% endfor %}{% endif %}
{%- endif %}
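For context, the template above assumes each entry in `task_steps` is a dict keyed by its `type`; below is a hypothetical example of the shape being truncated (the nested `content` is replaced with `'...'` so full file bodies don't bloat the prompt):

```python
# hypothetical step shapes this template iterates over
task_steps = [
    {"type": "command", "command": {"command": "npm install express"}},
    {"type": "save_file", "save_file": {"path": "./server.js", "content": "<full file body>"}},
]
# after the template's truncation, the save_file step renders as:
# {'type': 'save_file', 'save_file': {'path': './server.js', 'content': '...'}}
```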

View File

@@ -1,20 +0,0 @@
**IMPORTANT**
Here are the instructions for Writing the Summary:
1. **Stick to the Facts**: Every sentence should be informative and relevant. Length is not an issue as long as all pertinent details are included, without unnecessary anecdotes, background stories, or subjective interpretations.
2. **Avoid Subjectivity and Mentioning The Client or Any External Entities**: Do not mention phrases like "the client wants" or "the client said". Do not provide personal opinions, interpretations, or unnecessary background stories. Summarize information in a direct and neutral manner.
3. **Use Active Voice**: Use active rather than passive voice. For instance, "The project includes 5 subdomains" instead of "It was decided that the project should include 5 subdomains."
4. **Be Direct**: Replace indirect phrases with direct statements. For example, instead of saying "The client said there might be a need for...", state "There will be...".
5. **Prioritize Clarity**: Each statement should be clear and easy to understand. Refrain from using jargon unless it's widely recognized in the project's context.
6. **Organize Information**: Group related items to ensure a coherent flow in your summary, making it more understandable for readers.
**Examples**:
- Instead of "The client expressed a preference for blue in our last meeting", write "The primary color is blue".
- Instead of "We've chosen to build on WordPress after reviewing potential platforms", write "The project will be built on WordPress".
Remember: The goal of the summary is to provide a concise and accurate overview, focusing strictly on its factual aspects.

View File

@@ -1,24 +0,0 @@
{{ steps_list }}
{% if issue_description -%}
You wanted me to check this - `{{ issue_description }}` - but there was a problem
{%- else -%}
Now, we need to debug this issue
{%- endif -%}
{% if command %} and we need to be able to execute `{{ command }}` successfully{% endif %}.
{% if user_input %}I looked into this issue. I want you to take my findings as important part of debugging this issue. Here are my findings:
```
{{ user_input }}
```{% endif -%}
I want you to create a list of steps that are needed to debug this issue.
Each step can be either:
* `command` - command to run (must be able to run on a {{ os }} machine, assume current working directory is project root folder)
* `save_file` - step will create or update a file, and you need to thoroughly describe what needs to be implemented. I will implement the requested changes and let you know.
* `human_intervention` - if you need the human to do something, use this type of step and explain in detail what you want the human to do. NEVER use `human_intervention` for testing, as testing will be done separately by a dedicated QA after all the steps are done.
{{ execution_order }}
Also, make sure that at least the last step has `check_if_fixed` set to TRUE.
{{ file_size_limit }}

View File

@@ -1,12 +0,0 @@
{{ steps_list }}
{%- if task_steps and step_index is not none -%}
When trying to see if the command ran successfully, take into consideration the steps that were executed before it and the steps that will be executed after the current step. It can happen that a command seems to have failed but will be fixed by the next steps. In that case you should consider the command to have executed successfully.
{%- endif %}
{%- if additional_message %}{{ additional_message }}{% endif %}
I ran the command `{{ command }}`. The output was:
{#%- if error_response %}, it {{ error_response }}{% endif %#}
{{ cli_response }}
Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE`. If it wasn't, respond with `BUG`.
Do not respond with anything other than these two keywords.
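A minimal sketch of how a caller might branch on this prompt's two allowed answers; `response` here is a hypothetical string returned by the LLM, not an API from this repo:

```python
def command_succeeded(response: str) -> bool:
    """Interpret the DONE/BUG keyword protocol defined by the prompt above."""
    answer = response.strip().upper()
    if answer == "DONE":
        return True
    if answer == "BUG":
        return False
    raise ValueError(f"unexpected LLM response: {response!r}")
```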

View File

@@ -1,2 +0,0 @@
Should I rerun the command `{{ command }}` or is this task done? If I should rerun `{{ command }}`, respond only with YES. If I don't need to rerun the command and the original problem is fixed, respond with NO.
{{ list_running_processes }}

View File

@@ -1,43 +0,0 @@
Here is description of app that you are working on:
```
{{ app_summary }}
```
{{ files_list }}
{% if task_review_description %}
The user was given instructions on how to test if the app is working correctly. Here are the instructions:
```
{{ task_review_description }}
```
{% endif %}
The user wrote this feedback:
```
{{ user_feedback }}
```
{% if questions_and_answers|length > 0 %}
Here are questions and answers that you already asked the user:
```{% for row in questions_and_answers %}
Q: {{ row.question }}
A: {{ row.answer }}
{% endfor %}
```{% endif %}
Your job is to identify whether the feedback is good enough for you to solve the problem and, if not, what information you need to solve it. Ask for any information that you need to solve the problem.
If you have enough information, don't ask any questions.
When thinking of questions, consider the following:
- After getting answers to your questions, you must be able to solve the problem.
- Ask only crucial questions. Do not ask for information that you do not need to solve the problem.
- Ask the fewest questions needed to get the most information and solve the problem.
- Ask only questions from the list provided below.
- Ask questions in the same order as they appear in the list.
- Never repeat the same question.
Here is the list of questions you can ask:
"Can you please provide more information on what exactly you mean?"
"Can you please provide logs from the frontend?"
"Can you please provide logs from the backend?"
"What is the expected behavior and what is current behaviour?"
"On what page does the issue happen?"

View File

@@ -1,14 +0,0 @@
{%- if directory_tree %}
The project directory tree looks like:
{{ directory_tree }}
{% endif -%}
{% if running_processes -%}
Note that the following processes are already running:
{%- for key, data in running_processes.items() %}
command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}

View File

@@ -1,34 +0,0 @@
How can a human user test if this task was completed successfully?
Please list actions, step by step, in order, that the user should take to verify the task. After each action, describe what the expected response is.
**IMPORTANT**
Follow these important rules when compiling a list of actions the user will take:
1. Actions must be as specific as possible. You don't want the user to have to think anything through; they should just follow your instructions.
2. In case this task can be tested by making an API request, you should always prefer to test the functionality in the browser. If you can't do that, do not suggest how a request could be made with Postman, but rather write a full cURL command that the user can just run.
3. Do not require the user to write any code or edit files to test this task.
4. If the user must run a command, assume the user already has a terminal opened in the project root directory (no need to instruct the user "open the terminal" or "make sure you're in the project directory")
5. The user is using {{ os }}, so the commands must run on that operating system
6. Assume system services, such as the database, are already set up and running. Don't ask user to install or run any software other than the app they're testing.
7. Don't ask the user to test things which aren't implemented yet (e.g. opening a theoretical web page that doesn't exist yet, or clicking on a button that isn't implemented yet)
Remember, these rules are very important and you must follow them!
Here is an example output with a few user steps:
---example---
### Step 1
Action: Start the server using `npm start`
Expected result: You should see the message "Connected to database" or similar
### Step 2
Action: Open your web browser and visit http://localhost:3000/
Expected result: Web page opens and you see a "Hello World" message with a contact form
### Step 3
Action: Click on the "Submit" button in the web form
Expected result: Form is submitted, page is reloaded and "Thank you" message is shown
---end_of_example---
If nothing needs to be tested for this task, instead of outputting the steps, just output a single word: DONE

View File

@@ -1,12 +0,0 @@
You are working in a software development agency, and a project manager and software architect approach you, telling you that you're assigned to work on a new project. You are working on a {{ app_type }} called "{{ name }}" and your first job is to set up the environment on a computer.
Here are the technologies that you need to use for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}
Let's set up the environment on my machine. Here are the details about my machine:
```
{{ os_info }}
```
First, filter the technologies from the list above and tell me which ones need to be installed on my machine - that is, everything OS-specific, not dependencies, libraries, etc. Do not respond with anything else except the list in a JSON array format.

View File

@@ -1,6 +0,0 @@
I got the following error:
```
{{ error }}
```
Specify what needs to be done to fix this error - either what needs to change in the code, or what command (or commands) needs to be run.

View File

@@ -1,18 +0,0 @@
You are working in a software development agency, and a project manager and software architect approach you, telling you that you're assigned to add a new feature to an existing project. You are working on a {{ app_type }} called "{{ name }}" and you need to create a detailed development plan so that developers can start developing the new feature.
{{ project_details }}
{{ features_list }}
Here is the directory tree that shows the current folder structure of the project:
```
{{ directory_tree }}
```
App and all its features are already finished and working.
{{ files_list }}
Finally, here is the description of the new feature that needs to be added to the {{ app_type }} "{{ name }}":
```
{{ feature_description }}
```
{{ project_tasks }}

View File

@@ -1,22 +0,0 @@
You are working on a {{ app_type }} called "{{ name }}" and you need to create a summary for a new feature.
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```
After the {{ app_type }} was successfully coded, the user asked for an improvement. Here is what the user asked:
```
{{ feature_description }}
```
Then a development plan for that feature was created and the feature was broken down into smaller tasks to make development easier. Here is the development plan:
```{% for task in development_tasks %}
- {{ loop.index }}. {{ task['description'] }}
{% endfor %}
```
Your goal is to create a summary describing ONLY this feature, in as few sentences as possible. Keep it as short as possible.
You are not making a summary of the project or its tasks - only a summary of this feature.
{{summary_instructions}}

View File

@@ -1,33 +0,0 @@
You are working on a {{ app_type }} called "{{ name }}", writing the code for the entire application.
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```
{{ features_list }}
You are currently working on, and have to focus only on, this task:
```
{{ current_task.description }}
```
A part of the app is already finished. Here is the list of files and descriptions that the app currently contains:
{% for fpath, summary in file_summaries.items() %}
* `{{ fpath }}`: {{ summary }}{% endfor %}
{% if user_feedback %}User who was using the app "{{ name }}" sent you this feedback:
```
{{ user_feedback }}
```
{% endif %}{% if next_solution_to_try %}
Focus on solving this issue in the following way:
```
{{ next_solution_to_try }}
```
{% endif %}
**IMPORTANT**
The files necessary for a developer to understand, modify, implement, and test the current task are considered to be relevant files.
Your job is to select which of the existing files are relevant for the current task. From the above list of files that the app currently contains, you must select ALL files that are relevant to the current task. Think step by step about everything that has to be done in this task and which files contain the needed information. If you are unsure whether a file is relevant, it is always better to include it in the list of relevant files.
{{ relative_paths }}

Some files were not shown because too many files have changed in this diff