Mirror of https://github.com/Pythagora-io/gpt-pilot.git (synced 2026-01-09 13:17:55 -05:00)
clean up
.github/workflows/ci.yml (vendored, 2 changed lines)
@@ -15,7 +15,7 @@ jobs:
       matrix:
         # 3.10 - 04 Oct 2021
         # 3.11 - 24 Oct 2022
-        python-version: ['3.11']
+        python-version: ['3.8', '3.9', '3.10', '3.11']
 
     steps:
       - uses: actions/checkout@v4
@@ -51,7 +51,7 @@ https://github.com/Pythagora-io/gpt-pilot/assets/10895136/0495631b-511e-451b-93d
 
 # 🔌 Requirements
 
-- **Python >= 3.11**
+- **Python >= 3.8**
 - **PostgreSQL** (optional; the project's default is SQLite)
   - A DB is needed for several reasons: continuing app development if you had to stop at any point or the app crashed, going back to a specific step so you can change later steps in development, and easier debugging; in the future we will also add functionality to update an existing project (change parts of it, add new features, and so on).
 
@@ -1,12 +1,31 @@
 import json
 import re
-# from local_llm_function_calling import Generator
-# from local_llm_function_calling.model.llama import LlamaModel
-# from local_llm_function_calling.model.huggingface import HuggingfaceModel
-from local_llm_function_calling.prompter import FunctionType, CompletionModelPrompter, InstructModelPrompter
-# from local_llm_function_calling.model.llama import LlamaInstructPrompter
-from typing import Literal, NotRequired, TypedDict, Callable
+from typing import Literal, NotRequired, Protocol, TypeVar, TypedDict, Callable
 
+JsonType = str | int | float | bool | None | list["JsonType"] | dict[str, "JsonType"]
+
+
+class FunctionParameters(TypedDict):
+    """Function parameters"""
+
+    type: Literal["object"]
+    properties: dict[str, JsonType]
+    required: NotRequired[list[str]]
+
+
+class FunctionType(TypedDict):
+    """Function type"""
+
+    name: str
+    description: NotRequired[str]
+    parameters: FunctionParameters
+
+
+class FunctionCall(TypedDict):
+    """Function call"""
+
+    name: str
+    parameters: str
+
+
 class FunctionCallSet(TypedDict):
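These inlined TypedDicts replace the types previously imported from local_llm_function_calling (the dependency is also dropped from requirements.txt below). For orientation, here is a minimal, hedged sketch of defining and serializing a function schema with them; the `get_weather` schema is hypothetical, not from the codebase, and NotRequired needs Python 3.11+ (or typing_extensions).

```python
# Hedged sketch (not from this commit): build a function definition with
# the TypedDicts above and serialize it for a request payload.
import json
from typing import Literal, NotRequired, TypedDict

JsonType = str | int | float | bool | None | list["JsonType"] | dict[str, "JsonType"]


class FunctionParameters(TypedDict):
    type: Literal["object"]
    properties: dict[str, JsonType]
    required: NotRequired[list[str]]


class FunctionType(TypedDict):
    name: str
    description: NotRequired[str]
    parameters: FunctionParameters


# "get_weather" is a hypothetical schema, used purely for illustration.
get_weather: FunctionType = {
    "name": "get_weather",
    "description": "Look up the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

print(json.dumps(get_weather, indent=2))
```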
@@ -29,8 +48,6 @@ def add_function_calls_to_request(gpt_data, function_calls: FunctionCallSet | No
     # gpt_data['function_call'] = {'name': function_calls['definitions'][0]['name']}
     # return
 
-    # prompter = CompletionModelPrompter()
-    # prompter = InstructModelPrompter()
     prompter = JsonPrompter(is_llama)
 
     if len(function_calls['definitions']) > 1:
@@ -39,9 +56,6 @@ def add_function_calls_to_request(gpt_data, function_calls: FunctionCallSet | No
         function_call = function_calls['definitions'][0]['name']
 
     role = 'user' if '/' in model else 'system'
-    # role = 'user'
-    # role = 'system'
-    # is_llama = True
 
     gpt_data['messages'].append({
         'role': role,
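The role heuristic kept above sends the function-calling instructions as a 'user' message for OpenRouter-style model ids (which contain a slash, like the model names in the tests below) and as a 'system' message for plain OpenAI ids. A minimal, hedged restatement of that check:

```python
# Hedged illustration of the role heuristic above: OpenRouter model ids
# contain a "/" (e.g. "anthropic/claude-2"); plain OpenAI ids do not.
def instruction_role(model: str) -> str:
    return 'user' if '/' in model else 'system'


assert instruction_role('gpt-4') == 'system'
assert instruction_role('meta-llama/codellama-34b-instruct') == 'user'
```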
@@ -186,8 +200,6 @@ class JsonPrompter:
             "Help choose the appropriate function to call to answer the user's question."
             if function_to_call is None
             else f"Define the arguments for {function_to_call} to answer the user's question."
-        # ) + "\nYou must return a JSON object without notes or commentary."
-        # ) + " \nIn your response you must only use JSON output and provide no explanation or commentary."
         ) + " \nThe response should contain only the JSON object, with no additional text or explanation."
 
         data = (
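Even with this "JSON object only" instruction, some models wrap their reply in markdown fences (see the claude-2 TODO in the tests below). A hedged sketch, not the repo's code, of parsing such replies tolerantly:

```python
# Hedged sketch: strip an optional ```json fence before parsing, since
# some models ignore the "JSON only" instruction in the system message.
import json
import re


def parse_json_reply(text: str):
    match = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
    if match:
        text = match.group(1)
    return json.loads(text)


print(parse_json_reply('```json\n{"technologies": ["Node.js"]}\n```'))
```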
@@ -88,7 +88,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
     }
 
     # delete some keys if using "OpenRouter" API
-    if os.getenv('ENDPOINT') == "OPENROUTER":
+    if os.getenv('ENDPOINT') == 'OPENROUTER':
         keys_to_delete = ['n', 'max_tokens', 'temperature', 'top_p', 'presence_penalty', 'frequency_penalty']
         for key in keys_to_delete:
             if key in gpt_data:
@@ -1,5 +1,4 @@
 import builtins
-import os
 import pytest
 from dotenv import load_dotenv
 
@@ -21,42 +20,6 @@ class TestLlmConnection:
     def setup_method(self):
         builtins.print, ipc_client_instance = get_custom_print({})
 
-    # def test_break_down_development_task(self):
-    #     # Given
-    #     agent = Developer(project)
-    #     convo = AgentConvo(agent)
-    #     # convo.construct_and_add_message_from_prompt('architecture/technologies.prompt',
-    #     #                                             {
-    #     #                                                 'name': 'Test App',
-    #     #                                                 'prompt': '''
-    #
-    #     messages = convo.messages
-    #     function_calls = DEV_STEPS
-    #
-    #     # When
-    #     # response = create_gpt_chat_completion(messages, '', function_calls=function_calls)
-    #     response = {'function_calls': {
-    #         'name': 'break_down_development_task',
-    #         'arguments': {'tasks': [{'type': 'command', 'description': 'Run the app'}]}
-    #     }}
-    #     response = convo.postprocess_response(response, function_calls)
-    #
-    #     # Then
-    #     # assert len(convo.messages) == 2
-    #     assert response == ([{'type': 'command', 'description': 'Run the app'}], 'more_tasks')
-
-    # @pytest.fixture(params=[
-    #     {"endpoint": "OPENAI", "model": "gpt-4"},
-    #     {"endpoint": "OPENROUTER", "model": "openai/gpt-3.5-turbo"},
-    #     {"endpoint": "OPENROUTER", "model": "meta-llama/codellama-34b-instruct"},
-    #     {"endpoint": "OPENROUTER", "model": "anthropic/claude-2"},
-    #     {"endpoint": "OPENROUTER", "model": "google/palm-2-codechat-bison"},
-    #     {"endpoint": "OPENROUTER", "model": "google/palm-2-chat-bison"},
-    # ])
-    # def params(self, request):
-    #     return request.param
-
     @pytest.mark.slow
     @pytest.mark.uses_tokens
     @pytest.mark.parametrize("endpoint, model", [
         ("OPENAI", "gpt-4"),  # role: system
@@ -70,6 +33,9 @@ class TestLlmConnection:
     ])
     def test_chat_completion_Architect(self, endpoint, model, monkeypatch):
         # Given
+        monkeypatch.setenv('ENDPOINT', endpoint)
+        monkeypatch.setenv('MODEL_NAME', model)
+
         agent = Architect(project)
         convo = AgentConvo(agent)
         convo.construct_and_add_message_from_prompt('architecture/technologies.prompt',
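The @pytest.mark.slow and @pytest.mark.uses_tokens markers above gate these live-LLM tests. As a hedged usage note (standard pytest marker selection, nothing project-specific), they can be deselected programmatically or with the equivalent command line:

```python
# Skip the expensive, token-consuming tests; equivalent to running
# `pytest -m "not slow and not uses_tokens"` on the command line.
import pytest

pytest.main(["-m", "not slow and not uses_tokens"])
```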
@@ -99,22 +65,10 @@ solution-oriented decision-making in areas where precise instructions were not p
             ]
         })
 
-        # endpoint = 'OPENROUTER'
-        # monkeypatch.setattr('utils.llm_connection.endpoint', endpoint)
-        monkeypatch.setenv('ENDPOINT', endpoint)
-        monkeypatch.setenv('MODEL_NAME', model)
-        # monkeypatch.setenv('MODEL_NAME', 'meta-llama/codellama-34b-instruct')
-        # monkeypatch.setenv('MODEL_NAME', 'openai/gpt-3.5-turbo-16k-0613')
-        # monkeypatch.setenv('MODEL_NAME', 'anthropic/claude-2')  # TODO: remove ```json\n ... ```
-        # monkeypatch.setenv('MODEL_NAME', 'google/palm-2-codechat-bison')  # TODO: not JSON
-        # monkeypatch.setenv('MODEL_NAME', 'google/palm-2-chat-bison')  # TODO: not JSON
-
-        messages = convo.messages
         function_calls = ARCHITECTURE
 
-        # with patch('.llm_connection.endpoint', endpoint):
         # When
-        response = create_gpt_chat_completion(messages, '', function_calls=function_calls)
+        response = create_gpt_chat_completion(convo.messages, '', function_calls=function_calls)
 
         # Then
         assert convo.messages[0]['content'].startswith('You are an experienced software architect')
@@ -122,9 +76,30 @@ solution-oriented decision-making in areas where precise instructions were not p
 
         assert response is not None
         response = parse_agent_response(response, function_calls)
-        # response = response['function_calls']['arguments']['technologies']
         assert 'Node.js' in response
 
+    # def test_break_down_development_task(self):
+    #     # Given
+    #     agent = Developer(project)
+    #     convo = AgentConvo(agent)
+    #     # convo.construct_and_add_message_from_prompt('architecture/technologies.prompt',
+    #     #                                             {
+    #     #                                                 'name': 'Test App',
+    #     #                                                 'prompt': '''
+    #
+    #     function_calls = DEV_STEPS
+    #
+    #     # When
+    #     response = create_gpt_chat_completion(convo.messages, '', function_calls=function_calls)
+    #     # response = {'function_calls': {
+    #     #     'name': 'break_down_development_task',
+    #     #     'arguments': {'tasks': [{'type': 'command', 'description': 'Run the app'}]}
+    #     # }}
+    #     response = parse_agent_response(response, function_calls)
+    #
+    #     # Then
+    #     # assert len(convo.messages) == 2
+    #     assert response == ([{'type': 'command', 'description': 'Run the app'}], 'more_tasks')
+
     def _create_convo(self, agent):
         convo = AgentConvo(agent)
@@ -4,7 +4,6 @@ charset-normalizer==3.2.0
 distro==1.8.0
 idna==3.4
 Jinja2==3.1.2
-local_llm_function_calling==0.1.14
 MarkupSafe==2.1.3
 peewee==3.16.2
 prompt-toolkit==3.0.39