Supporting callable message (#1852)

* add message field

* send

* message func doc str

* test dict message

* retiring soon

* generate_init_message docstr

* remove todo

* update notebook

* CompressibleAgent

* update notebook

* add test

* retrieve agent

* update test

* summary_method args

* summary

* carryover

* dict message

* update nested doc

* generate_init_message

* fix typo

* update docs for mathchat

* Fix missing message

* Add docstrings

* model

* notebook

* default naming

---------

Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: kevin666aa <yrwu000627@gmail.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
Co-authored-by: Li Jiang <lijiang1@microsoft.com>
This commit is contained in:
Qingyun Wu
2024-03-09 15:27:46 -05:00
committed by GitHub
parent 83e1789a50
commit c75655a340
22 changed files with 3082 additions and 2565 deletions

View File

@@ -73,7 +73,7 @@ def test_retrievechat():
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, silent=True)
ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=code_problem, silent=True)
print(conversations)

View File

@@ -68,7 +68,9 @@ def test_retrievechat():
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark", silent=True)
ragproxyagent.initiate_chat(
assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string="spark", silent=True
)
print(conversations)

View File

@@ -173,32 +173,36 @@ def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
"Can we add a new point to the graph? Its distance should be randomly between 0 - 5 to each of the existing points.",
]
class TSPUserProxyAgent(UserProxyAgent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with open(f"{here}/tsp_prompt.txt", "r") as f:
self._prompt = f.read()
def generate_init_message(self, question) -> str:
return self._prompt.format(question=question)
def tsp_message(sender, recipient, context):
filename = context.get("prompt_filename", "")
with open(filename, "r") as f:
prompt = f.read()
question = context.get("question", "")
return prompt.format(question=question)
# autogen.ChatCompletion.start_logging()
assistant = AssistantAgent("assistant", llm_config={"temperature": 0, "config_list": config_list})
user = TSPUserProxyAgent(
user = UserProxyAgent(
"user",
code_execution_config={"work_dir": here},
code_execution_config={
"work_dir": here,
},
human_input_mode=human_input_mode,
max_consecutive_auto_reply=max_consecutive_auto_reply,
)
user.initiate_chat(assistant, question=hard_questions[2])
chat_res = user.initiate_chat(
assistant, message=tsp_message, question=hard_questions[2], prompt_filename=f"{here}/tsp_prompt.txt"
)
# print(autogen.ChatCompletion.logged_history)
# autogen.ChatCompletion.stop_logging()
# print(chat_res.summary)
print(chat_res.cost)
if __name__ == "__main__":
test_gpt35()
# test_gpt35()
# test_create_execute_script(human_input_mode="TERMINATE")
# when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
# should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
# although the max_consecutive_auto_reply is set to 10.
# test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)
test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)

View File

@@ -59,7 +59,7 @@ async def test_async_chats():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
chat_res = await user.a_initiate_chats(

View File

@@ -16,14 +16,16 @@ from conftest import skip_openai # noqa: E402
def test_chat_messages_for_summary():
assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER")
user = UserProxyAgent(name="user", human_input_mode="NEVER")
assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER", code_execution_config={"use_docker": False})
user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config={"use_docker": False})
user.send("What is the capital of France?", assistant)
messages = assistant.chat_messages_for_summary(user)
assert len(messages) == 1
groupchat = GroupChat(agents=[user, assistant], messages=[], max_round=2)
manager = GroupChatManager(groupchat=groupchat, name="manager", llm_config=False)
manager = GroupChatManager(
groupchat=groupchat, name="manager", llm_config=False, code_execution_config={"use_docker": False}
)
user.initiate_chat(manager, message="What is the capital of France?")
messages = manager.chat_messages_for_summary(user)
assert len(messages) == 2
@@ -42,10 +44,10 @@ def test_chats_group():
)
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Pros and cons of the companies I'm interested in. Keep it short.""",
"""Give lucky numbers for them.""",
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
writing_tasks = ["""Make a joke."""]
user_proxy = UserProxyAgent(
name="User_proxy",
@@ -126,13 +128,15 @@ def test_chats_group():
"recipient": financial_assistant,
"message": financial_tasks[0],
"summary_method": "last_msg",
"max_turns": 1,
},
{
"recipient": manager_1,
"message": financial_tasks[1],
"summary_method": "reflection_with_llm",
"max_turns": 1,
},
{"recipient": manager_2, "message": writing_tasks[0]},
{"recipient": manager_2, "message": writing_tasks[0], "max_turns": 1},
]
)
@@ -148,26 +152,44 @@ def test_chats_group():
@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_chats():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def luck_number_message(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Give lucky numbers for them."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
luck_number_message,
luck_number_message,
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
writing_tasks = ["""Make a joke."""]
func = Function()
financial_assistant_1 = AssistantAgent(
name="Financial_assistant_1",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
financial_assistant_2 = AssistantAgent(
name="Financial_assistant_2",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
writer = AssistantAgent(
name="Writer",
@@ -192,9 +214,18 @@ def test_chats():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
# chat_res_play = user.initiate_chat(
# player,
# message= {"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
# max_turns=1,
# summary_method=my_summary,
# summary_args={"prefix": "This is the last message:"},
# )
# print(chat_res_play.summary)
chat_res = user.initiate_chats(
[
{
@@ -215,12 +246,24 @@ def test_chats():
"message": financial_tasks[2],
"summary_method": "last_msg",
"clear_history": False,
"max_turns": 1,
},
{
"recipient": financial_assistant_1,
"message": {
"content": "Let's play a game.",
"function_call": {"name": "get_random_number", "arguments": "{}"},
},
"carryover": "I like even number.",
"summary_method": "last_msg",
"max_turns": 1,
},
{
"recipient": writer,
"message": writing_tasks[0],
"carryover": "I want to include a figure or a table of data in the blogpost.",
"carryover": "Make the numbers relevant.",
"summary_method": "last_msg",
"max_turns": 1,
},
]
)
@@ -248,8 +291,8 @@ def test_chats_general():
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
"""Give lucky numbers for them.""",
"""Give lucky words for them.""",
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
@@ -297,7 +340,7 @@ def test_chats_general():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
chat_res = initiate_chats(
@@ -354,8 +397,8 @@ def test_chats_exceptions():
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
"""Give lucky numbers for them.""",
"""Give lucky words for them.""",
]
financial_assistant_1 = AssistantAgent(
@@ -491,10 +534,93 @@ def test_chats_w_func():
print(res.summary, res.cost, res.chat_history)
@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_udf_message_in_chats():
import autogen
config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")
llm_config = {"config_list": config_list}
research_task = """
## NVDA (NVIDIA Corporation)
- Current Stock Price: $822.79
- Performance over the past month: 24.36%
## TSLA (Tesla, Inc.)
- Current Stock Price: $202.64
- Performance over the past month: 7.84%
Save them to a file named stock_prices.md.
"""
def my_writing_task(sender, recipient, context):
carryover = context.get("carryover", "")
if isinstance(carryover, list):
carryover = carryover[-1]
try:
filename = context.get("work_dir", "") + "/stock_prices.md"
with open(filename, "r") as file:
data = file.read()
except Exception as e:
data = f"An error occurred while reading the file: {e}"
return """Make a joke. """ + "\nContext:\n" + carryover + "\nData:" + data
researcher = autogen.AssistantAgent(
name="Financial_researcher",
llm_config=llm_config,
)
writer = autogen.AssistantAgent(
name="Writer",
llm_config=llm_config,
system_message="""
You are a professional writer, known for
your insightful and engaging articles.
You transform complex concepts into compelling narratives.
Reply "TERMINATE" in the end when everything is done.
""",
)
user_proxy_auto = autogen.UserProxyAgent(
name="User_Proxy_Auto",
human_input_mode="NEVER",
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
code_execution_config={
"last_n_messages": 1,
"work_dir": "tasks",
"use_docker": False,
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
chat_results = autogen.initiate_chats(
[
{
"sender": user_proxy_auto,
"recipient": researcher,
"message": research_task,
"clear_history": True,
"silent": False,
},
{
"sender": user_proxy_auto,
"recipient": writer,
"message": my_writing_task,
"max_turns": 2,  # max number of turns for the conversation (added for demo purposes; generally not needed)
"summary_method": "reflection_with_llm",
"work_dir": "tasks",
},
]
)
print(chat_results[0].summary, chat_results[0].cost)
print(chat_results[1].summary, chat_results[1].cost)
if __name__ == "__main__":
test_chats()
test_chats_general()
# test_chats_general()
# test_chats_exceptions()
# test_chats_group()
# test_chats_w_func()
# test_chat_messages_for_summary()
# test_udf_message_in_chats()

View File

@@ -14,7 +14,7 @@ from unittest.mock import patch
from pydantic import BaseModel, Field
from typing_extensions import Annotated
import autogen
import os
from autogen.agentchat import ConversableAgent, UserProxyAgent
from autogen.agentchat.conversable_agent import register_function
from autogen.exception_utils import InvalidCarryOverType, SenderRequired
@@ -28,6 +28,8 @@ except ImportError:
else:
skip = False or skip_openai
here = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture
def conversable_agent():
@@ -1091,6 +1093,137 @@ def test_max_turn():
assert len(res.chat_history) <= 6
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_message_func():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def my_message_play(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Let's play a game."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
func = Function()
# autogen.ChatCompletion.start_logging()
user = UserProxyAgent(
"user",
code_execution_config={
"work_dir": here,
"use_docker": False,
},
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
)
player = autogen.AssistantAgent(
name="Player",
system_message="You will use function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
description="A player that makes function_calls.",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
chat_res_play = user.initiate_chat(
player,
message={"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
max_turns=1,
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
message=my_message_play,
max_turns=1,
)
print(chat_res_play.summary)
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_summary():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def my_message_play(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Let's play a game."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
def my_summary(sender, recipient, summary_args):
prefix = summary_args.get("prefix", "Summary:")
return prefix + recipient.chat_messages[sender][-1].get("content", "")
func = Function()
# autogen.ChatCompletion.start_logging()
user = UserProxyAgent(
"user",
code_execution_config={
"work_dir": here,
"use_docker": False,
},
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
)
player = autogen.AssistantAgent(
name="Player",
system_message="You will use function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
description="A player that makes function_calls.",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
chat_res_play = user.initiate_chat(
player,
message=my_message_play,
# message="Make a joke about AI",
max_turns=1,
summary_method="reflection_with_llm",
summary_args={"summary_prompt": "Summarize the conversation into less than five words."},
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
# message=my_message_play,
message="Make a joke about AI",
max_turns=1,
summary_method=my_summary,
summary_args={"prefix": "This is the last message:"},
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
message={"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
max_turns=1,
summary_method=my_summary,
summary_args={"prefix": "This is the last message:"},
)
print(chat_res_play.summary)
def test_process_before_send():
print_mock = unittest.mock.MagicMock()
@@ -1140,4 +1273,6 @@ if __name__ == "__main__":
# test_conversable_agent()
# test_no_llm_config()
# test_max_turn()
test_process_before_send()
# test_process_before_send()
test_message_func()
test_summary()

View File

@@ -53,11 +53,7 @@ def test_math_user_proxy_agent():
assistant.reset()
math_problem = "$x^3=125$. What is x?"
# assistant.receive(
# message=mathproxyagent.generate_init_message(math_problem),
# sender=mathproxyagent,
# )
res = mathproxyagent.initiate_chat(assistant, problem=math_problem)
res = mathproxyagent.initiate_chat(assistant, message=mathproxyagent.message_generator, problem=math_problem)
print(conversations)
print("Chat summary:", res.summary)
print("Chat history:", res.chat_history)
@@ -121,13 +117,13 @@ def test_execute_one_wolfram_query():
def test_generate_prompt():
mathproxyagent = MathUserProxyAgent(name="MathChatAgent", human_input_mode="NEVER")
assert "customized" in mathproxyagent.generate_init_message(
problem="2x=4", prompt_type="python", customized_prompt="customized"
assert "customized" in mathproxyagent.message_generator(
mathproxyagent, None, {"problem": "2x=4", "prompt_type": "python", "customized_prompt": "customized"}
)
if __name__ == "__main__":
# test_add_remove_print()
# test_execute_one_python_code()
# test_generate_prompt()
test_generate_prompt()
test_math_user_proxy_agent()