Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-11 16:18:07 -05:00

Compare commits: ntindle/sa...self-feedb (22 commits)
| SHA1 |
|---|
| 1a609f8cd9 |
| 8b82421b9c |
| 75cc71f8d3 |
| f287282e8c |
| 2a93aff512 |
| 6d1653b84f |
| a7816b8c79 |
| 21913c4733 |
| 9d9c66d50f |
| a00a7a2bd0 |
| d6cb10432b |
| 0bea5e38a4 |
| 88b2d5fb2d |
| f1032926cc |
| e7ad51ce42 |
| a3522223d9 |
| 4e3035efe4 |
| a8cbf51489 |
| 317361da8c |
| 991bc77e0b |
| 83357f6c2f |
| acf48d2d4d |
autogpt/agent/agent.py

@@ -56,6 +56,10 @@ class Agent:
         cfg = Config()
         self.ai_name = ai_name
         self.memory = memory
+        self.summary_memory = (
+            "I was created."  # Initial memory necessary to avoid hallucination
+        )
+        self.last_memory_index = 0
         self.full_message_history = full_message_history
         self.next_action_count = next_action_count
         self.command_registry = command_registry
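The two new attributes seed the running-summary machinery wired into chat.py below: `summary_memory` holds the narrative the agent carries across loop iterations, and `last_memory_index` marks how far into `full_message_history` that summary already reaches. A minimal sketch of the state this hunk introduces; the message shape mirrors the return value of `update_running_summary` further down, and nothing else here comes from the diff:

```python
# Sketch of the per-agent summary state added by this hunk.
summary_memory = "I was created."  # seed summary; avoids a hallucinated past
last_memory_index = 0              # index of the last message already summarised

# Once chat.py refreshes the summary, it travels as a single system message
# shaped like update_running_summary's return value (defined later in this PR):
summary_message = {
    "role": "system",
    "content": f"This reminds you of these events from your past: \n{summary_memory}",
}
print(summary_message["content"])
```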
@@ -161,9 +165,9 @@ class Agent:
                     Fore.GREEN,
                     "",
                 )
                 thoughts = assistant_reply_json.get("thoughts", {})
-                self_feedback_resp = self.get_self_feedback(
-                    thoughts, cfg.fast_llm_model
-                )
+                self_feedback_resp = self.get_self_feedback(self.full_message_history,
+                    assistant_reply_json, cfg.fast_llm_model
+                )
                 logger.typewriter_log(
                     f"SELF FEEDBACK: {self_feedback_resp}",
@@ -174,6 +178,7 @@ class Agent:
                         user_input = "GENERATE NEXT COMMAND JSON"
                     else:
                         user_input = self_feedback_resp
+                        command_name = "human_feedback"
                     break
                 elif console_input.lower().strip() == "":
                     print("Invalid input format.")
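The `else:` branch above fires when the reviewer's reply does not signal approval; the condition itself sits outside this hunk. A sketch of the routing, assuming the "Y"-prefix convention stated in the feedback prompt (the function name and exact check are illustrative, not code from this PR):

```python
# Hypothetical verdict routing; only the two branch bodies appear in the diff.
def route_self_feedback(self_feedback_resp: str) -> tuple[str, str | None]:
    if self_feedback_resp.strip().upper().startswith("Y"):
        # Reviewer approved: proceed as if the user authorised the command.
        return "GENERATE NEXT COMMAND JSON", None
    # Reviewer objected: replay the critique to the agent as human feedback.
    return self_feedback_resp, "human_feedback"


user_input, command_name = route_self_feedback("Y the plan matches the role")
assert user_input == "GENERATE NEXT COMMAND JSON" and command_name is None
```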
@@ -276,7 +281,7 @@ class Agent:
         )
         return command_args

-    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
+    def get_self_feedback(self, full_message_history, latest_response_json, llm_model: str) -> str:
         """Generates a feedback response based on the provided thoughts dictionary.

         This method takes in a dictionary of thoughts containing keys such as 'reasoning',
         'plan', 'thoughts', and 'criticism'. It combines these elements into a single
@@ -289,14 +294,59 @@ class Agent:
             str: A feedback response generated using the provided thoughts dictionary.
         """
         ai_role = self.config.ai_role
+        thoughts = latest_response_json.get("thoughts", {})
+        command = latest_response_json.get("command", {})

-        feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
+        from autogpt.llm.token_counter import count_message_tokens
+        import json
+
+        # Get ~2000 tokens from the full message history
+        # !!WARNING: THIS IMPLEMENTATION IS BAD - CAUSES A BUG SIMILAR TO THIS: https://github.com/Significant-Gravitas/Auto-GPT/pull/3619
+        trimmed_message_history = []
+        for i in range(len(full_message_history) - 1, -1, -1):
+            message = full_message_history[i]
+            # Skip all messages from the user
+            if message["role"] == "user":
+                continue
+            # If the message is from the assistant, remove the "thoughts" dictionary from the content
+            elif message["role"] == "assistant":
+                try:
+                    content_dict = json.loads(message["content"])
+                    content_dict = content_dict.copy()
+                    if "thoughts" in content_dict:
+                        del content_dict["thoughts"]
+                    message["content"] = json.dumps(content_dict)
+                except Exception:
+                    pass
+            trimmed_message_history.append(message)
+
+        feedback_prompt = f"""Below is a message from an AI agent with the role: '{ai_role}'.
+Please review the provided Recent History, the Agent's Plan, the Agent's proposed action, and their Reasoning.
+
+If the agent's command makes sense and the agent is on the right track, respond with the letter 'Y' followed by a space.
+If the provided information is not suitable for achieving the role's objectives, or a red flag is raised, clearly and concisely tell the agent about the issue and suggest an alternative action.
+"""
         reasoning = thoughts.get("reasoning", "")
         plan = thoughts.get("plan", "")
-        thought = thoughts.get("thoughts", "")
-        criticism = thoughts.get("criticism", "")
-        feedback_thoughts = thought + reasoning + plan + criticism
+        # thought = thoughts.get("thoughts", "")
+        # criticism = thoughts.get("criticism", "")
+        # feedback_thoughts = thought + reasoning + plan + criticism
         return create_chat_completion(
-            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
+            [
+                {"role": "system", "content": """You are AgentReviewerGPT.
+
+Respond with Y if the agent passes your review.
+
+Be wary of the following red flags in the agent's behaviour:
+- The agent is repeating itself.
+- The agent is stuck in a loop.
+- The agent is using '<text>' instead of the actual text.
+- The agent is using the wrong command for the situation.
+- The agent is executing a python file that does not exist (it should check if the file exists and read its contents before executing it).
+
+Notes:
++ Hardcoded paths are okay"""},
+                {"role": "user", "content": f"{feedback_prompt}\n\nRecent History:\n{trimmed_message_history}\n\nAgent's Plan:\n{plan}\n\nAgent's Proposed Action:\n{command}\n\nAgent's Reasoning:\n{reasoning}"},
+            ],
             llm_model,
         )
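The `!!WARNING` comment above is accurate: the trimming loop rebinds `message["content"]` on the very dict objects that `full_message_history` owns, so stripping `thoughts` for the reviewer also strips them from the canonical history, the class of bug referenced in PR 3619. A small repro, followed by a copy-based variant (the `strip_thoughts` helper is a suggestion, not part of the diff):

```python
import json

history = [
    {
        "role": "assistant",
        "content": json.dumps(
            {"thoughts": {"plan": "read the file"}, "command": {"name": "read_file"}}
        ),
    }
]

# What the loop above does: mutate the shared message dict in place.
message = history[0]
content = json.loads(message["content"])
content.pop("thoughts", None)
message["content"] = json.dumps(content)
assert "thoughts" not in json.loads(history[0]["content"])  # history corrupted too

# Side-effect-free alternative: copy the message before editing it.
def strip_thoughts(msg: dict) -> dict:
    msg = dict(msg)  # shallow copy; rebinding "content" leaves the original intact
    try:
        content = json.loads(msg["content"])
        content.pop("thoughts", None)
        msg["content"] = json.dumps(content)
    except (json.JSONDecodeError, TypeError):
        pass
    return msg
```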
autogpt/llm/chat.py

@@ -12,6 +12,10 @@ from autogpt.logs import logger
 from autogpt.memory_management.store_memory import (
     save_memory_trimmed_from_context_window,
 )
+from autogpt.memory_management.summary_memory import (
+    get_newly_trimmed_messages,
+    update_running_summary,
+)

 cfg = Config()
@@ -36,10 +40,10 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
         create_chat_message(
             "system", f"The current time and date is {time.strftime('%c')}"
         ),
-        create_chat_message(
-            "system",
-            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
-        ),
+        # create_chat_message(
+        #     "system",
+        #     f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
+        # ),
     ]

     # Add messages from the full message history until we reach the token limit
@@ -81,21 +85,21 @@ def chat_with_ai(
     """
     model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response

     logger.debug(f"Token limit: {token_limit}")
     send_token_limit = token_limit - 1000
-    if len(full_message_history) == 0:
-        relevant_memory = ""
-    else:
-        recent_history = full_message_history[-5:]
-        shuffle(recent_history)
-        relevant_memories = permanent_memory.get_relevant(
-            str(recent_history), 5
-        )
-        if relevant_memories:
-            shuffle(relevant_memories)
-        relevant_memory = str(relevant_memories)
+    # if len(full_message_history) == 0:
+    #     relevant_memory = ""
+    # else:
+    #     recent_history = full_message_history[-5:]
+    #     shuffle(recent_history)
+    #     relevant_memories = permanent_memory.get_relevant(
+    #         str(recent_history), 5
+    #     )
+    #     if relevant_memories:
+    #         shuffle(relevant_memories)
+    #     relevant_memory = str(relevant_memories)
+    relevant_memory = ""
     logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

     (
@@ -105,33 +109,36 @@ def chat_with_ai(
         current_context,
     ) = generate_context(prompt, relevant_memory, full_message_history, model)

-    while current_tokens_used > 2500:
-        # remove memories until we are under 2500 tokens
-        relevant_memory = relevant_memory[:-1]
-        (
-            next_message_to_add_index,
-            current_tokens_used,
-            insertion_index,
-            current_context,
-        ) = generate_context(
-            prompt, relevant_memory, full_message_history, model
-        )
+    # while current_tokens_used > 2500:
+    #     # remove memories until we are under 2500 tokens
+    #     relevant_memory = relevant_memory[:-1]
+    #     (
+    #         next_message_to_add_index,
+    #         current_tokens_used,
+    #         insertion_index,
+    #         current_context,
+    #     ) = generate_context(
+    #         prompt, relevant_memory, full_message_history, model
+    #     )

     current_tokens_used += count_message_tokens(
         [create_chat_message("user", user_input)], model
     )  # Account for user input (appended later)

     current_tokens_used += 500  # Account for memory (appended later) TODO: The final memory may be less than 500 tokens

     # Add Messages until the token limit is reached or there are no more messages to add.
     while next_message_to_add_index >= 0:
         # print (f"CURRENT TOKENS USED: {current_tokens_used}")
         message_to_add = full_message_history[next_message_to_add_index]

         tokens_to_add = count_message_tokens([message_to_add], model)
         if current_tokens_used + tokens_to_add > send_token_limit:
-            save_memory_trimmed_from_context_window(
-                full_message_history,
-                next_message_to_add_index,
-                permanent_memory,
-            )
+            # save_memory_trimmed_from_context_window(
+            #     full_message_history,
+            #     next_message_to_add_index,
+            #     permanent_memory,
+            # )
             break

     # Add the most recent message to the start of the current context,
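To make the budget arithmetic concrete: 1000 tokens are reserved for the model's reply, a flat 500 for the memory message, and history is then added newest-first until the remainder runs out. A worked example, assuming an illustrative 4096-token limit (the real value arrives via `token_limit`):

```python
# Worked example of the token budget in chat_with_ai; numbers are illustrative.
token_limit = 4096
send_token_limit = token_limit - 1000  # 3096: reserve 1000 for the response
current_tokens_used = 987              # e.g. prompt + time/date system messages
current_tokens_used += 25              # user input, counted up front
current_tokens_used += 500             # flat reservation for the memory message
history_budget = send_token_limit - current_tokens_used
print(history_budget)                  # 1584 tokens left for history messages
```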
@@ -146,6 +153,22 @@ def chat_with_ai(
         # Move to the next most recent message in the full message history
         next_message_to_add_index -= 1

+    # Insert Memories
+    if len(full_message_history) > 0:
+        (
+            newly_trimmed_messages,
+            agent.last_memory_index,
+        ) = get_newly_trimmed_messages(
+            full_message_history=full_message_history,
+            current_context=current_context,
+            last_memory_index=agent.last_memory_index,
+        )
+        agent.summary_memory = update_running_summary(
+            current_memory=agent.summary_memory,
+            new_events=newly_trimmed_messages,
+        )
+        current_context.insert(insertion_index, agent.summary_memory)
+
     api_manager = ApiManager()
     # inform the AI about its remaining budget (if it has one)
     if api_manager.get_total_budget() > 0.0:
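Putting the new hunk together: each iteration collects the messages that no longer fit in the context window, folds them into the running summary, and re-inserts that summary as one system message. A toy walk-through using the helpers from summary_memory.py below; the data is illustrative, and `update_running_summary` calls the fast LLM, so running it needs API access:

```python
from autogpt.memory_management.summary_memory import (
    get_newly_trimmed_messages,
    update_running_summary,
)

full_message_history = [
    {"role": "assistant", "content": '{"command": {"name": "list_files"}}'},
    {"role": "system", "content": "Command list_files returned: notes.txt"},
]
current_context = full_message_history[-1:]  # only the newest message still fits

newly_trimmed, last_memory_index = get_newly_trimmed_messages(
    full_message_history=full_message_history,
    current_context=current_context,
    last_memory_index=-1,  # -1 so the very first message qualifies in this toy run
)
# newly_trimmed == [full_message_history[0]]: it fell out of the window, so it
# is summarised by the LLM and re-injected as a single system message.
summary_memory = update_running_summary(
    current_memory="I was created.",
    new_events=newly_trimmed,
)
current_context.insert(0, summary_memory)  # chat_with_ai uses insertion_index here
```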
autogpt/memory_management/summary_memory.py (new file, 112 lines)

@@ -0,0 +1,112 @@
+import json
+from typing import Dict, List, Tuple
+
+from autogpt.config import Config
+from autogpt.llm.llm_utils import create_chat_completion
+
+cfg = Config()
+
+
+def get_newly_trimmed_messages(
+    full_message_history: List[Dict[str, str]],
+    current_context: List[Dict[str, str]],
+    last_memory_index: int,
+) -> Tuple[List[Dict[str, str]], int]:
+    """
+    Returns the messages in full_message_history with an index higher than
+    last_memory_index that are absent from current_context.
+
+    Args:
+        full_message_history (list): A list of dictionaries representing the full message history.
+        current_context (list): A list of dictionaries representing the current context.
+        last_memory_index (int): An integer representing the previous index.
+
+    Returns:
+        list: The dictionaries in full_message_history with an index higher than last_memory_index and absent from current_context.
+        int: The new index value for use in the next loop.
+    """
+    # Select messages in full_message_history with an index higher than last_memory_index
+    new_messages = [
+        msg for i, msg in enumerate(full_message_history) if i > last_memory_index
+    ]
+
+    # Remove messages that are already present in current_context
+    new_messages_not_in_context = [
+        msg for msg in new_messages if msg not in current_context
+    ]
+
+    # Find the index of the last message processed
+    new_index = last_memory_index
+    if new_messages_not_in_context:
+        last_message = new_messages_not_in_context[-1]
+        new_index = full_message_history.index(last_message)
+
+    return new_messages_not_in_context, new_index
+
+
+def update_running_summary(current_memory: str, new_events: List[Dict]) -> Dict[str, str]:
+    """
+    Combines a list of dictionaries representing new events with the current summary,
+    focusing on key and potentially important information to remember. The updated
+    summary is returned in a message formatted in the 1st person past tense.
+
+    Args:
+        current_memory (str): The running summary so far.
+        new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
+
+    Returns:
+        Dict[str, str]: A system message containing the updated summary of actions, formatted in the 1st person past tense.
+
+    Example:
+        new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
+        update_running_summary(current_memory, new_events)
+        # Returns a system message whose content reads e.g.:
+        # "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
+    """
+    # Replace "assistant" with "you". This produces much better first-person past-tense results.
+    # Iterate over a snapshot so that removing user messages does not skip elements.
+    for event in list(new_events):
+        if event["role"].lower() == "assistant":
+            event["role"] = "you"
+
+            # Remove the "thoughts" dictionary from "content"
+            content_dict = json.loads(event["content"])
+            if "thoughts" in content_dict:
+                del content_dict["thoughts"]
+            event["content"] = json.dumps(content_dict)
+        elif event["role"].lower() == "system":
+            event["role"] = "your computer"
+        # Delete all user messages
+        elif event["role"] == "user":
+            new_events.remove(event)
+
+    # This can happen at any point during execution, not just the beginning
+    if len(new_events) == 0:
+        new_events = "Nothing new happened."
+
+    prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
+
+You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
+
+Summary So Far:
+"""
+{current_memory}
+"""
+
+Latest Development:
+"""
+{new_events}
+"""
+'''
+
+    messages = [
+        {
+            "role": "user",
+            "content": prompt,
+        }
+    ]
+
+    current_memory = create_chat_completion(messages, cfg.fast_llm_model)
+
+    message_to_return = {
+        "role": "system",
+        "content": f"This reminds you of these events from your past: \n{current_memory}",
+    }
+
+    return message_to_return
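One behavioural footnote on `update_running_summary`: the committed loop removed user messages from `new_events` while iterating over it, which silently skips whichever element slides into the freed slot; the cleaned-up version above iterates over a snapshot instead. The skip is easy to demonstrate:

```python
# Removing from a list while iterating over it skips the next element.
events = [
    {"role": "user", "content": "a"},
    {"role": "user", "content": "b"},
    {"role": "system", "content": "ok"},
]
for event in events:
    if event["role"] == "user":
        events.remove(event)
print(events)  # the "b" user message survives alongside the system message

# Iterating over a snapshot visits every element and drops both user messages.
events = [
    {"role": "user", "content": "a"},
    {"role": "user", "content": "b"},
    {"role": "system", "content": "ok"},
]
for event in list(events):
    if event["role"] == "user":
        events.remove(event)
print(events)  # [{'role': 'system', 'content': 'ok'}]
```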