Merge branch 'master' into add_ingest_documents_script

Maiko Bossuyt
2023-04-14 18:12:23 +02:00
committed by GitHub
13 changed files with 274 additions and 178 deletions

@@ -70,8 +70,8 @@ class AIConfig:
"""
config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
with open(config_file, "w") as file:
yaml.dump(config, file)
with open(config_file, "w", encoding='utf-8') as file:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(self) -> str:
"""

@@ -13,7 +13,7 @@ def call_ai_function(function, args, description, model=None):
model = cfg.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma seperated string
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
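
The surrounding lines implement a small but important step: every argument is stringified (with None kept as the literal "None") and flattened into one comma-separated string before being placed in the prompt. A tiny self-contained illustration:

# Mirrors the argument-flattening step above.
args = [3.14, None, "hello"]
args = [str(arg) if arg is not None else "None" for arg in args]
args = ", ".join(args)
print(args)  # -> 3.14, None, hello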

@@ -62,6 +62,9 @@ class Config(metaclass=Singleton):
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
self.use_brian_tts = False
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
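
Worth noting for the new flags above: os.getenv returns the raw environment string (or None), which is why the speech code later in this diff compares against the literal 'True' rather than treating the value as a boolean. A short sketch of that gotcha, using the USE_BRIAN_TTS variable introduced here (the demo value is hypothetical):

import os

os.environ["USE_BRIAN_TTS"] = "False"        # demo value only
use_brian_tts = os.getenv("USE_BRIAN_TTS")

# Any non-empty string is truthy, so a plain bool() check would be wrong;
# the explicit string comparison is what the calling code relies on.
print(bool(use_brian_tts))      # True
print(use_brian_tts == 'True')  # False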

@@ -24,7 +24,8 @@ For console handler: simulates typing
class Logger(metaclass=Singleton):
def __init__(self):
# create log directory if it doesn't exist
log_dir = os.path.join('..', 'logs')
this_files_dir_path = os.path.dirname(__file__)
log_dir = os.path.join(this_files_dir_path, '../logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
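
The change above anchors the logs directory to the module's own location instead of whatever the current working directory happens to be. The same idea in isolation (a minimal sketch, not the project's logger):

import os

# __file__ is the path of this module, so the resulting directory is stable
# regardless of where the program is launched from.
this_files_dir_path = os.path.dirname(__file__)
log_dir = os.path.join(this_files_dir_path, "../logs")
os.makedirs(log_dir, exist_ok=True)   # same effect as the exists-check-then-makedirs above
print(os.path.abspath(log_dir))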

@@ -129,60 +129,6 @@ def print_assistant_thoughts(assistant_reply):
logger.error("Error: \n", call_stack)
def load_variables(config_file="config.yaml"):
"""Load variables from yaml file if it exists, otherwise prompt the user for input"""
try:
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
ai_name = config.get("ai_name")
ai_role = config.get("ai_role")
ai_goals = config.get("ai_goals")
except FileNotFoundError:
ai_name = ""
ai_role = ""
ai_goals = []
# Prompt the user for input if config file is missing or empty values
if not ai_name:
ai_name = utils.clean_input("Name your AI: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
if not ai_role:
ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
if not ai_goals:
print("Enter up to 5 goals for your AI: ")
print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
print("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = utils.clean_input(f"Goal {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if len(ai_goals) == 0:
ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
# Save variables to yaml file
config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
with open(config_file, "w") as file:
documents = yaml.dump(config, file)
prompt = get_prompt()
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
# Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(ai_goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{prompt}"
return full_prompt
def construct_prompt():
"""Construct the prompt for the AI to respond to"""
config = AIConfig.load()
@@ -318,10 +264,6 @@ def parse_arguments():
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_fast_llm_model(cfg.smart_llm_model)
if args.debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.memory_type:
supported_memory = get_supported_memory_backends()
chosen = args.memory_type
@@ -351,110 +293,148 @@ def main():
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
break
agent = Agent(
ai_name=ai_name,
memory=memory,
full_message_history=full_message_history,
next_action_count=next_action_count,
prompt=prompt,
user_input=user_input
)
agent.start_interaction_loop()
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
prompt,
user_input,
full_message_history,
memory,
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
class Agent:
"""Agent class for interacting with Auto-GPT.
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
Attributes:
ai_name: The name of the agent.
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
prompt: The prompt to use.
user_input: The user input.
if not cfg.continuous_mode and next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower().rstrip() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
next_action_count = abs(int(console_input.split(" ")[1]))
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
continue
break
elif console_input.lower() == "n":
user_input = "EXIT"
break
else:
user_input = console_input
command_name = "human_feedback"
break
"""
def __init__(self,
ai_name,
memory,
full_message_history,
next_action_count,
prompt,
user_input):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
self.prompt = prompt
self.user_input = user_input
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif user_input == "EXIT":
print("Exiting...", flush=True)
def start_interaction_loop(self):
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
if next_action_count > 0:
next_action_count -= 1
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
self.prompt,
self.user_input,
self.full_message_history,
self.memory,
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
memory_to_add = f"Assistant Reply: {assistant_reply} " \
f"\nResult: {result} " \
f"\nHuman Feedback: {user_input} "
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
memory.add(memory_to_add)
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
# Check if there's a result from the command append it to the message
# history
if result is not None:
full_message_history.append(chat.create_chat_message("system", result))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
full_message_history.append(
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if not cfg.continuous_mode and self.next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
flush=True)
while True:
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower().rstrip() == "y":
self.user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(int(console_input.split(" ")[1]))
self.user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
continue
break
elif console_input.lower() == "n":
self.user_input = "EXIT"
break
else:
self.user_input = console_input
command_name = "human_feedback"
break
if self.user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif self.user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {self.user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
if self.next_action_count > 0:
self.next_action_count -= 1
memory_to_add = f"Assistant Reply: {assistant_reply} " \
f"\nResult: {result} " \
f"\nHuman Feedback: {self.user_input} "
self.memory.add(memory_to_add)
# Check if there's a result from the command append it to the message
# history
if result is not None:
self.full_message_history.append(chat.create_chat_message("system", result))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.full_message_history.append(
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if __name__ == "__main__":
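
The large hunk above moves the interaction loop out of main() and into the new Agent class: loop state (message history, pending action count, user input) becomes instance attributes, and main() shrinks to constructing the agent and calling start_interaction_loop(). A compressed, self-contained sketch of that shape (attribute names follow the diff, but the loop body is reduced to a stub rather than the project's real chat/command logic):

class Agent:
    """Holds what used to be main()'s local loop state as attributes."""

    def __init__(self, ai_name, memory, full_message_history,
                 next_action_count, prompt, user_input):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.prompt = prompt
        self.user_input = user_input

    def start_interaction_loop(self):
        loop_count = 0
        while loop_count < 2:   # stand-in for the continuous-limit check
            loop_count += 1
            # real loop: chat_with_ai -> parse command -> authorise -> execute -> record
            self.full_message_history.append(f"system: iteration {loop_count}")


if __name__ == "__main__":
    agent = Agent(ai_name="Entrepreneur-GPT", memory=[], full_message_history=[],
                  next_action_count=0, prompt="...", user_input="")
    agent.start_interaction_loop()
    print(agent.full_message_history)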

@@ -53,6 +53,24 @@ def eleven_labs_speech(text, voice_index=0):
return False
def brian_speech(text):
"""Speak text using Brian with the streamelements API"""
tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
response = requests.get(tts_url)
if response.status_code == 200:
with mutex_lock:
with open("speech.mp3", "wb") as f:
f.write(response.content)
playsound("speech.mp3")
os.remove("speech.mp3")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False
def gtts_speech(text):
tts = gtts.gTTS(text)
with mutex_lock:
@@ -76,7 +94,11 @@ def say_text(text, voice_index=0):
def speak():
if not cfg.elevenlabs_api_key:
if cfg.use_mac_os_tts == 'True':
macos_tts_speech(text, voice_index)
macos_tts_speech(text)
elif cfg.use_brian_tts == 'True':
success = brian_speech(text)
if not success:
gtts_speech(text)
else:
gtts_speech(text)
else:
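
The new brian_speech helper above follows a simple fetch-check-write pattern against the streamelements endpoint, and say_text now falls back to gTTS when that request fails. A standalone sketch of just the fetch step (the endpoint comes from the diff; the function name is a stand-in, and urllib.parse.quote is added here only to show how the text could be made URL-safe, it is not in the original):

import urllib.parse

import requests

def fetch_brian_mp3(text: str, path: str = "speech.mp3") -> bool:
    # Same endpoint as brian_speech above; quote() keeps spaces and
    # punctuation from breaking the query string.
    url = ("https://api.streamelements.com/kappa/v2/speech"
           f"?voice=Brian&text={urllib.parse.quote(text)}")
    response = requests.get(url, timeout=30)
    if response.status_code != 200:
        print("Request failed with status code:", response.status_code)
        return False
    with open(path, "wb") as f:
        f.write(response.content)
    return True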

@@ -17,10 +17,10 @@ class Spinner:
def spin(self):
"""Spin the spinner"""
while self.running:
sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
def __enter__(self):
"""Start the spinner"""
@@ -32,5 +32,5 @@ class Spinner:
"""Stop the spinner"""
self.running = False
self.spinner_thread.join()
sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
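
The Spinner changes above are a straight conversion to f-strings; the behaviour still rests on the carriage-return trick, where '\r' moves the cursor back to column 0 so each frame (and the final run of spaces) overwrites the previous output instead of scrolling. A self-contained demo of that trick (not the project's Spinner class):

import itertools
import sys
import time

message = "Thinking... "
spinner = itertools.cycle(['-', '/', '|', '\\'])
for _ in range(20):
    # Write frame + message, then return to column 0 without a newline.
    sys.stdout.write(f"{next(spinner)} {message}\r")
    sys.stdout.flush()
    time.sleep(0.1)
# Overwrite the line with spaces, then return to column 0 again.
sys.stdout.write(f"\r{' ' * (len(message) + 2)}\r")
sys.stdout.flush()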