Mirror of https://github.com/Pythagora-io/gpt-pilot.git
@@ -1,4 +1,4 @@
-# OPENAI or AZURE or OPENROUTER
+# OPENAI or AZURE or OPENROUTER (ignored for Anthropic)
 ENDPOINT=OPENAI

 # OPENAI_ENDPOINT=https://api.openai.com/v1/chat/completions
@@ -10,8 +10,15 @@ AZURE_ENDPOINT=

 OPENROUTER_API_KEY=

+ANTHROPIC_API_KEY=
+
+# You only need to set this if not using Anthropic API directly (eg. via proxy or AWS Bedrock)
+ANTHROPIC_ENDPOINT=
+
 # In case of Azure/OpenRouter endpoint, change this to your deployed model name
 MODEL_NAME=gpt-4-turbo-preview
+# In case of Anthropic, use "anthropic/" + the model name, example for Claude 3 Opus
+# MODEL_NAME=anthropic/claude-3-opus-20240229
 MAX_TOKENS=8192

 # Folders which shouldn't be tracked in workspace (useful to ignore folders created by compiler)
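Note (not part of the diff): putting the new settings together, a minimal .env for the Anthropic path would look roughly like the sketch below; the API key is a placeholder, and per the comment above, ENDPOINT is ignored once an "anthropic/" model is selected.

    # ENDPOINT is ignored when an "anthropic/" model is selected
    ENDPOINT=OPENAI
    ANTHROPIC_API_KEY=<your-anthropic-api-key>
    # ANTHROPIC_ENDPOINT is only needed when going through a proxy or AWS Bedrock
    MODEL_NAME=anthropic/claude-3-opus-20240229
    MAX_TOKENS=8192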
@@ -1,5 +1,5 @@
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv(override=True)
 from database.database import create_tables, drop_tables

 drop_tables()
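Note (not part of the diff): the load_dotenv() to load_dotenv(override=True) change above is repeated across several files in this diff; with override=True, values from the .env file take precedence over variables already set in the process environment. A minimal sketch of the difference, assuming .env contains MODEL_NAME=anthropic/claude-3-opus-20240229:

    import os
    from dotenv import load_dotenv

    os.environ["MODEL_NAME"] = "gpt-4"   # already set in the shell
    load_dotenv()                        # existing env value wins: still "gpt-4"
    load_dotenv(override=True)           # .env value wins from here on
    print(os.getenv("MODEL_NAME"))       # anthropic/claude-3-opus-20240229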
@@ -15,7 +15,7 @@ from templates import PROJECT_TEMPLATES

 ARCHITECTURE_STEP = 'architecture'
 WARN_SYSTEM_DEPS = ["docker", "kubernetes", "microservices"]
-WARN_FRAMEWORKS = ["react", "react.js", "next.js", "vue", "vue.js", "svelte", "angular"]
+WARN_FRAMEWORKS = ["next.js", "vue", "vue.js", "svelte", "angular"]
 WARN_FRAMEWORKS_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/Using-GPT-Pilot-with-frontend-frameworks"

@@ -74,7 +74,7 @@ class ProductOwner(Agent):
         print(color_green_bold(
             "GPT Pilot currently works best for web app projects using Node, Express and MongoDB. "
             "You can use it with other technologies, but you may run into problems "
-            "(eg. React might not work as expected).\n"
+            "(eg. Svelte might not work as expected).\n"
         ))
         self.project.main_prompt = ask_for_main_app_definition(self.project)

@@ -8,7 +8,7 @@ import requests

 from helpers.AgentConvo import AgentConvo
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv(override=True)

 from main import get_custom_print
 from .Developer import Developer, ENVIRONMENT_SETUP_STEP
@@ -3,7 +3,7 @@ import os
 import pytest
 from unittest.mock import patch
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv(override=True)

 from main import get_custom_print
 from helpers.agents.TechLead import TechLead, DEVELOPMENT_PLANNING_STEP
@@ -8,7 +8,7 @@ from helpers.AgentConvo import AgentConvo
 from utils.custom_print import get_custom_print
 from .test_Project import create_project

-load_dotenv()
+load_dotenv(override=True)

 builtins.print, ipc_client_instance = get_custom_print({})

@@ -22,7 +22,7 @@ except ImportError:
     print(f'Please run `{sys.executable} -m pip install -r {requirements_path}` to finish Python setup, and rerun GPT Pilot.')
     sys.exit(-1)

-load_dotenv()
+load_dotenv(override=True)

 from utils.style import color_red
 from utils.custom_print import get_custom_print
@@ -66,6 +66,8 @@ if __name__ == "__main__":

     if '--api-key' in args:
         os.environ["OPENAI_API_KEY"] = args['--api-key']
+    if '--model-name' in args:
+        os.environ['MODEL_NAME'] = args['--model-name']
     if '--api-endpoint' in args:
         os.environ["OPENAI_ENDPOINT"] = args['--api-endpoint']

@@ -6,6 +6,7 @@ from utils.style import color_green_bold
 from logger.logger import logger
 from utils.exit import trace_code_event

+from .javascript_react import JAVASCRIPT_REACT
 from .node_express_mongoose import NODE_EXPRESS_MONGOOSE
 from .render import Renderer

@@ -14,6 +15,7 @@ if TYPE_CHECKING:  # noqa

 PROJECT_TEMPLATES = {
     "node_express_mongoose": NODE_EXPRESS_MONGOOSE,
+    "javascript_react": JAVASCRIPT_REACT,
 }

@@ -75,10 +77,6 @@ def apply_project_template(
     print(color_green_bold(f"Applying project template {template['description']}...\n"))
     logger.info(f"Applying project template {template_name}...")

-    last_development_step = project.checkpoints.get('last_development_step')
-    if last_development_step:
-        project.save_files_snapshot(last_development_step['id'])
-
     try:
         if install_hook:
             install_hook(project)
@@ -88,6 +86,10 @@ def apply_project_template(
             exc_info=True,
         )

+    last_development_step = project.checkpoints.get('last_development_step')
+    if last_development_step:
+        project.save_files_snapshot(last_development_step['id'])
+
     trace_code_event('project-template', {'template': template_name})
     summary = "The code so far includes:\n" + template["summary"]
     return summary
pilot/templates/javascript_react.py (new file, 23 lines)
@@ -0,0 +1,23 @@
from helpers.cli import execute_command


def install_hook(project):
    """
    Command to run to complete the project scaffolding setup.

    :param project: the project object
    """
    execute_command(project, "npm install")


JAVASCRIPT_REACT = {
    "path": "javascript_react",
    "description": "React web app using Vite devserver/bundler",
    "summary": "\n".join([
        "* Initial setup with Vite for fast development",
        "* Basic project structure for React development",
        "* Development server setup for hot reloading",
        "* Minimal configuration to get started with React",
    ]),
    "install_hook": install_hook,
}
pilot/templates/tpl/javascript_react/.eslintrc.cjs (new file, 21 lines)
@@ -0,0 +1,21 @@
module.exports = {
  root: true,
  env: { browser: true, es2020: true },
  extends: [
    'eslint:recommended',
    'plugin:react/recommended',
    'plugin:react/jsx-runtime',
    'plugin:react-hooks/recommended',
  ],
  ignorePatterns: ['dist', '.eslintrc.cjs'],
  parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
  settings: { react: { version: '18.2' } },
  plugins: ['react-refresh'],
  rules: {
    'react/jsx-no-target-blank': 'off',
    'react-refresh/only-export-components': [
      'warn',
      { allowConstantExport: true },
    ],
  },
}
pilot/templates/tpl/javascript_react/.gitignore (new file, 24 lines)
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
pilot/templates/tpl/javascript_react/index.html (new file, 12 lines)
@@ -0,0 +1,12 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>{{ project_name }}</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.jsx"></script>
  </body>
</html>
pilot/templates/tpl/javascript_react/package.json (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  "name": "{{ project_name }}",
  "private": true,
  "version": "0.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "vite build",
    "lint": "eslint . --ext js,jsx --report-unused-disable-directives --max-warnings 0",
    "preview": "vite preview"
  },
  "dependencies": {
    "react": "^18.2.0",
    "react-dom": "^18.2.0"
  },
  "devDependencies": {
    "@types/react": "^18.2.64",
    "@types/react-dom": "^18.2.21",
    "@vitejs/plugin-react": "^4.2.1",
    "eslint": "^8.57.0",
    "eslint-plugin-react": "^7.34.0",
    "eslint-plugin-react-hooks": "^4.6.0",
    "eslint-plugin-react-refresh": "^0.4.5",
    "vite": "^5.1.6"
  }
}
pilot/templates/tpl/javascript_react/src/App.css (new file, 7 lines)
@@ -0,0 +1,7 @@
#root {
  max-width: 1280px;
  margin: 0 auto;
  padding: 2rem;
  text-align: center;
}
pilot/templates/tpl/javascript_react/src/App.jsx (new file, 11 lines)
@@ -0,0 +1,11 @@
import './App.css'

function App() {
  return (
    <>
      <h1>{{ project_name }}</h1>
    </>
  )
}

export default App
pilot/templates/tpl/javascript_react/src/index.css (new file, 20 lines)
@@ -0,0 +1,20 @@
:root {
  font-synthesis: none;
  text-rendering: optimizeLegibility;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

body {
  margin: 0;
  display: flex;
  place-items: center;
  min-width: 320px;
  min-height: 100vh;
}

h1 {
  font-size: 3.2em;
  line-height: 1.1;
}
pilot/templates/tpl/javascript_react/src/main.jsx (new file, 10 lines)
@@ -0,0 +1,10 @@
import React from 'react'
import ReactDOM from 'react-dom/client'
import App from './App.jsx'
import './index.css'

ReactDOM.createRoot(document.getElementById('root')).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
)
pilot/templates/tpl/javascript_react/vite.config.js (new file, 7 lines)
@@ -0,0 +1,7 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// https://vitejs.dev/config/
export default defineConfig({
  plugins: [react()],
})
@@ -3,7 +3,7 @@ import builtins
 import pytest
 from unittest.mock import patch
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv(override=True)

 from database.database import create_tables
 from helpers.Project import Project
@@ -4,7 +4,7 @@ import yaml
 from datetime import datetime
 from dotenv import load_dotenv

-load_dotenv()
+load_dotenv(override=True)

 USE_GPTPILOT_FOLDER = os.getenv('USE_GPTPILOT_FOLDER') == 'true'

@@ -109,8 +109,9 @@ def create_gpt_chat_completion(messages: List[dict], req_type, project,
         {'function_calls': {'name': str, arguments: {...}}}
     """

+    model_name = os.getenv('MODEL_NAME', 'gpt-4')
     gpt_data = {
-        'model': os.getenv('MODEL_NAME', 'gpt-4'),
+        'model': model_name,
         'n': 1,
         'temperature': temperature,
         'top_p': 1,
@@ -133,8 +134,18 @@ def create_gpt_chat_completion(messages: List[dict], req_type, project,
     if prompt_data is not None and function_call_message is not None:
         prompt_data['function_call_message'] = function_call_message

+    if '/' in model_name:
+        model_provider, model_name = model_name.split('/', 1)
+    else:
+        model_provider = 'openai'
+
     try:
-        response = stream_gpt_completion(gpt_data, req_type, project)
+        if model_provider == 'anthropic':
+            if not os.getenv('ANTHROPIC_API_KEY'):
+                os.environ['ANTHROPIC_API_KEY'] = os.getenv('OPENAI_API_KEY')
+            response = stream_anthropic(messages, function_call_message, gpt_data, model_name)
+        else:
+            response = stream_gpt_completion(gpt_data, req_type, project)

         # Remove JSON schema and any added retry messages
         while len(messages) > messages_length:
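Note (not part of the diff): the routing added above keys off a "provider/" prefix in MODEL_NAME and falls back to OPENAI_API_KEY when ANTHROPIC_API_KEY is unset. A standalone sketch of the prefix rule; split_model_name is a hypothetical helper for illustration only, not a function in the codebase:

    def split_model_name(model_name):
        # "anthropic/claude-3-opus-20240229" -> ("anthropic", "claude-3-opus-20240229")
        # "gpt-4-turbo-preview"              -> ("openai", "gpt-4-turbo-preview")
        if '/' in model_name:
            return tuple(model_name.split('/', 1))
        return ('openai', model_name)

    assert split_model_name("anthropic/claude-3-opus-20240229") == ("anthropic", "claude-3-opus-20240229")
    assert split_model_name("gpt-4-turbo-preview") == ("openai", "gpt-4-turbo-preview")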
@@ -143,7 +154,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, project,
     except TokenLimitError as e:
         raise e
     except Exception as e:
-        logger.error(f'The request to {os.getenv("ENDPOINT")} API failed: %s', e)
+        logger.error(f'The request to {os.getenv("ENDPOINT")} API for {model_provider}/{model_name} failed: %s', e, exc_info=True)
         print(color_red(f'The request to {os.getenv("ENDPOINT")} API failed with error: {e}. Please try again later.'))
         if isinstance(e, ApiError):
             raise e
@@ -588,3 +599,48 @@ def postprocessing(gpt_response: str, req_type) -> str:

 def load_data_to_json(string):
     return json.loads(fix_json(string))
+
+
+def stream_anthropic(messages, function_call_message, gpt_data, model_name = "claude-3-sonnet-20240229"):
+    try:
+        import anthropic
+    except ImportError as err:
+        raise RuntimeError("The 'anthropic' package is required to use the Anthropic Claude LLM.") from err
+
+    client = anthropic.Anthropic(
+        base_url=os.getenv('ANTHROPIC_ENDPOINT'),
+    )
+
+    claude_system = "You are a software development AI assistant."
+    claude_messages = messages
+    if messages[0]["role"] == "system":
+        claude_system = messages[0]["content"]
+        claude_messages = messages[1:]
+
+    if len(claude_messages):
+        cm2 = [claude_messages[0]]
+        for i in range(1, len(claude_messages)):
+            if cm2[-1]["role"] == claude_messages[i]["role"]:
+                cm2[-1]["content"] += "\n\n" + claude_messages[i]["content"]
+            else:
+                cm2.append(claude_messages[i])
+        claude_messages = cm2
+
+    response = ""
+    with client.messages.stream(
+        model=model_name,
+        max_tokens=4096,
+        temperature=0.5,
+        system=claude_system,
+        messages=claude_messages,
+    ) as stream:
+        for chunk in stream.text_stream:
+            print(chunk, type='stream', end='', flush=True)
+            response += chunk
+
+    if function_call_message is not None:
+        response = clean_json_response(response)
+        assert_json_schema(response, gpt_data["functions"])
+
+    return {"text": response}
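Note (not part of the diff): the cm2 loop in stream_anthropic above collapses consecutive same-role messages, presumably because the Anthropic Messages API expects strictly alternating user/assistant turns. A worked illustration on hypothetical input:

    messages = [
        {"role": "system", "content": "You are a software development AI assistant."},
        {"role": "user", "content": "Write a TODO app."},
        {"role": "user", "content": "Use Express and MongoDB."},
        {"role": "assistant", "content": "Understood."},
    ]
    # After the system prompt is split off and same-role messages are merged,
    # client.messages.stream() would receive:
    #   system   = "You are a software development AI assistant."
    #   messages = [
    #       {"role": "user", "content": "Write a TODO app.\n\nUse Express and MongoDB."},
    #       {"role": "assistant", "content": "Understood."},
    #   ]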
@@ -8,7 +8,7 @@ from typing import Any, Optional

 from dotenv import load_dotenv

-load_dotenv()
+load_dotenv(override=True)

 log = getLogger(__name__)

@@ -18,7 +18,7 @@ from utils.llm_connection import create_gpt_chat_completion, stream_gpt_completi
     assert_json_response, assert_json_schema, clean_json_response, retry_on_exception
 from main import get_custom_print

-load_dotenv()
+load_dotenv(override=True)
 os.environ.pop("AUTOFIX_FILE_PATHS", None)

@@ -25,3 +25,4 @@ tiktoken==0.5.2
 urllib3==1.26.7
 wcwidth==0.2.8
 yaspin==2.5.0
+anthropic==0.19.1