Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-01-09 15:17:59 -05:00
Merge branch 'Torantulino:master' into more_azure
@@ -3,7 +3,7 @@
 [](https://discord.gg/PQ7VX6TY4t)

-Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, autonomously develops and manages businesses to increase net worth. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.

 ### Demo (30/03/2023):
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
@@ -64,14 +64,14 @@ def chat_with_ai(
     model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response

-    if cfg.debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")

     send_token_limit = token_limit - 1000

     relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-    if cfg.debug:
+    if cfg.debug_mode:
         print('Memory Stats: ', permanent_memory.get_stats())

     next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
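Besides the flag rename, this hunk shows the token budgeting chat_with_ai works with: the context window is split into a send budget and a 1000-token reserve for the reply. A rough sketch of that logic under stated assumptions; the 4096-token window and placeholder history are illustrative, and the character-count heuristic stands in for a real tokenizer such as tiktoken:

    # Rough sketch, not Auto-GPT's actual code: reserve 1000 tokens for
    # the reply and fill the remaining budget with the newest history first.
    token_limit = 4096                       # assumed context window size
    send_token_limit = token_limit - 1000    # budget for prompt + history

    full_message_history = [                 # placeholder conversation
        {"role": "user", "content": "Summarise the project README."},
        {"role": "assistant", "content": "It is an autonomous GPT-4 agent."},
    ]

    def estimate_tokens(text: str) -> int:
        # Crude heuristic (~4 characters per token); a real implementation
        # would count with a proper tokenizer.
        return max(1, len(text) // 4)

    current_tokens_used = 0
    current_context = []
    for message in reversed(full_message_history):
        cost = estimate_tokens(message["content"])
        if current_tokens_used + cost > send_token_limit:
            break
        current_context.insert(0, message)   # keep chronological order
        current_tokens_used += cost

    tokens_remaining = token_limit - current_tokens_used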
@@ -110,7 +110,7 @@ def chat_with_ai(
     # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

     # Debug print the current context
-    if cfg.debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")
         print(f"Send Token Count: {current_tokens_used}")
         print(f"Tokens remaining for response: {tokens_remaining}")
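All of these hunks switch the same debug gate from cfg.debug to cfg.debug_mode. For orientation, a hypothetical, trimmed-down stand-in for the config object they read; the attribute names come from the diff, but sourcing them from environment variables is an assumption of this sketch:

    import os

    class Config:
        # Hypothetical, minimal stand-in for Auto-GPT's Config class; only
        # the attributes referenced in the hunks above are modelled.
        def __init__(self):
            self.debug_mode = os.getenv("DEBUG_MODE", "False") == "True"
            self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")

    cfg = Config()
    if cfg.debug_mode:
        print(f"Using model: {cfg.fast_llm_model}")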
@@ -53,7 +53,7 @@ def fix_and_parse_json(
             last_brace_index = json_str.rindex("}")
             json_str = json_str[:last_brace_index+1]
             return json.loads(json_str)
-        except json.JSONDecodeError as e: # noqa: F841
+        except (json.JSONDecodeError, ValueError) as e: # noqa: F841
             if try_to_fix_with_gpt:
                 print("Warning: Failed to parse AI output, attempting to fix."
                       "\n If you see this warning frequently, it's likely that"
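The point of widening the except clause is visible a few lines up: str.rindex raises ValueError, not json.JSONDecodeError, when the string contains no closing brace, so the old clause let that error escape. A small self-contained reproduction:

    import json

    def trim_to_last_brace(json_str: str) -> dict:
        # Mirrors the lines above: drop anything after the last "}".
        last_brace_index = json_str.rindex("}")  # ValueError if "}" is absent
        return json.loads(json_str[:last_brace_index + 1])

    try:
        trim_to_last_brace("model replied with no JSON at all")
    except (json.JSONDecodeError, ValueError) as e:
        print(f"caught: {e!r}")  # caught: ValueError('substring not found')

Note that json.JSONDecodeError is itself a subclass of ValueError, so catching ValueError alone would already cover both cases; the explicit tuple simply documents the two failure modes.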
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
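fix_json hands the broken string to call_ai_function, which is not shown in this diff. A hedged sketch of such a helper, matching the call site's signature; the prompt wording and the pre-1.0 openai API usage are assumptions, not the project's actual implementation:

    import openai  # openai<1.0 style API assumed for this sketch

    def call_ai_function(function: str, args: list, description: str,
                         model: str = "gpt-3.5-turbo") -> str:
        # Ask the model to role-play the given Python function and return
        # only the value it would produce for these arguments.
        messages = [
            {
                "role": "system",
                "content": f"You are now the following Python function:\n"
                           f"# {description}\n{function}\n"
                           f"Respond only with your return value.",
            },
            {"role": "user", "content": ", ".join(str(arg) for arg in args)},
        ]
        response = openai.ChatCompletion.create(model=model, messages=messages)
        return response.choices[0].message.content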
@@ -170,7 +170,7 @@ def load_variables(config_file="config.yaml"):
         documents = yaml.dump(config, file)

     prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""

     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
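The last context line shows how load_variables assembles the system prompt. A sketch of that assembly; ai_name, ai_role, the goal list, and the goal-numbering loop are illustrative stand-ins for values this diff does not show:

    ai_name = "Entrepreneur-GPT"                           # illustrative value
    ai_role = "an AI designed to run a business autonomously"  # illustrative
    ai_goals = ["Increase net worth", "Develop and manage a business"]

    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""

    # Construct full prompt (the goal loop is a plausible continuation,
    # not shown in the hunk above)
    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
    for i, goal in enumerate(ai_goals):
        full_prompt += f"{i + 1}. {goal}\n"

    print(full_prompt)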