autogen/test/agentchat/test_tool_calls.py
Brian Finney 40dbf31a92 [Core] [Tool Call] adjust conversable agent to support tool_calls (#974)
* adjust conversable and compressible agents to support tool_calls

* split out tools into their own reply def

* copilot typo

* address review comments

* revert compressible_agent and token_count_utils calls

* clean up terminate check and remove unnecessary code

* doc search and update

* return function/tool calls as interrupted when user provides a reply to a tool call request

* fix tool name reference

* fix formatting

* fix initiate receiving a dict

* missed changed role

* ignore incoming role, more similar to existing code

* consistency

* redundant to_dict

* fix todo comment

* unneeded change

* handle dict reply in groupchat

* Fix generate_tool_calls_reply comment

* change method annotation for register_for_llm from functions to tools

* typo autogen/agentchat/conversable_agent.py

Co-authored-by: Chi Wang <wang.chi@microsoft.com>

* add deprecation comments for function_call

* tweak doc strings

* switch to ToolFunction type

* update the return to

* fix generate_init_message return type

* Revert "fix generate_init_message return type"

This reverts commit 645ba8b76a.

* undo force init to dict

* fix notebooks and groupchat tool handling

* fix type

* use get for key error

* fix teachable to pull content from dict

* change single message tool response

* clean up unnecessary changes

* little better tool response concatenation

* update tools tests

* add skip openai check to tools tests

* fix nits

* move func name normalization to oai_reply and assert configured names

* fix whitespace

* remove extra normalize

* tool name is now normalized in the generate_reply function, so it will not be incorrect when sent to receive

* validate function names in init and expand comments for validation methods

* fix dict comprehension

* Dummy llm config for unit tests

* handle tool_calls set to None

* fix tool name reference

* method operates on responses not calls

---------

Co-authored-by: Yiran Wu <32823396+kevin666aa@users.noreply.github.com>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2024-01-06 17:55:25 +00:00
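
The third test below depends on tool-name normalization: the assistant-side message carries the name "multi_tool_call_echo" with an inline note that it was normalized from "multi_tool_call.echo". As a minimal sketch of that kind of normalization (the actual helper in conversable_agent.py may differ), assuming OpenAI's constraint that tool names match [a-zA-Z0-9_-] and are at most 64 characters:

import re

def normalize_tool_name(name: str) -> str:
    # Replace disallowed characters and cap the length,
    # e.g. "multi_tool_call.echo" -> "multi_tool_call_echo".
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64]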


import inspect
import json
import sys

import pytest

import autogen
from autogen.math_utils import eval_math_responses
from autogen.oai.client import TOOL_ENABLED
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST

try:
    from openai import OpenAI
except ImportError:
    OpenAI = None
    skip = True
else:
    skip = skip_openai


@pytest.mark.skipif(skip_openai or not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
def test_eval_math_responses():
    config_list = autogen.config_list_from_models(
        KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
    )
    tools = [
        {
            "type": "function",
            "function": {
                "name": "eval_math_responses",
                "description": "Select a response for a math problem using voting, and check if the response is correct if the solution is provided",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "responses": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "The responses in a list",
                        },
                        "solution": {
                            "type": "string",
                            "description": "The canonical solution",
                        },
                    },
                    "required": ["responses"],
                },
            },
        },
    ]
    client = autogen.OpenAIWrapper(config_list=config_list)
    response = client.create(
        messages=[
            {
                "role": "user",
                "content": 'evaluate the math responses ["1", "5/2", "5/2"] against the true answer \\frac{5}{2}',
            },
        ],
        tools=tools,
    )
    print(response)
    responses = client.extract_text_or_completion_object(response)
    print(responses[0])
    tool_calls = responses[0].tool_calls
    function_call = tool_calls[0].function
    name, arguments = function_call.name, json.loads(function_call.arguments)
    assert name == "eval_math_responses"
    print(arguments["responses"])
    # if isinstance(arguments["responses"], str):
    #     arguments["responses"] = json.loads(arguments["responses"])
    arguments["responses"] = [f"\\boxed{{{x}}}" for x in arguments["responses"]]
    print(arguments["responses"])
    # The prompt supplies the true answer, so the model is expected to pass the optional "solution" argument.
    arguments["solution"] = f"\\boxed{{{arguments['solution']}}}"
    print(eval_math_responses(**arguments))
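
# Note: this test only checks that the model emits a tool call targeting the registered
# "eval_math_responses" schema with JSON-decodable arguments; the printed evaluation
# result itself is not asserted.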


@pytest.mark.skipif(
    skip_openai or not TOOL_ENABLED or not sys.version.startswith("3.10"),
    reason="do not run if openai is <1.1.0 or py!=3.10 or requested to skip",
)
def test_update_tool():
    config_list_gpt4 = autogen.config_list_from_json(
        OAI_CONFIG_LIST,
        filter_dict={
            "model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
        },
        file_location=KEY_LOC,
    )
    llm_config = {
        "config_list": config_list_gpt4,
        "seed": 42,
        "tools": [],
    }
    user_proxy = autogen.UserProxyAgent(
        name="user_proxy",
        human_input_mode="NEVER",
        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
    )
    assistant = autogen.AssistantAgent(name="test", llm_config=llm_config)
    # Define a new function *after* the assistant has been created
    assistant.update_tool_signature(
        {
            "type": "function",
            "function": {
                "name": "greet_user",
                "description": "Greets the user.",
                "parameters": {
                    "type": "object",
                    "properties": {},
                    "required": [],
                },
            },
        },
        is_remove=False,
    )
    user_proxy.initiate_chat(
        assistant,
        message="What functions do you know about in the context of this conversation? End your response with 'TERMINATE'.",
    )
    messages1 = assistant.chat_messages[user_proxy][-1]["content"]
    print(messages1)
    assistant.update_tool_signature("greet_user", is_remove=True)
    user_proxy.initiate_chat(
        assistant,
        message="What functions do you know about in the context of this conversation? End your response with 'TERMINATE'.",
    )
    messages2 = assistant.chat_messages[user_proxy][-1]["content"]
    print(messages2)
    # The model should know about the function in the context of the conversation
    assert "greet_user" in messages1
    assert "greet_user" not in messages2


@pytest.mark.skipif(not TOOL_ENABLED, reason="openai>=1.1.0 not installed")
def test_multi_tool_call():
    class FakeAgent(autogen.Agent):
        def __init__(self, name):
            super().__init__(name)
            self.received = []

        def receive(
            self,
            message,
            sender,
            request_reply=None,
            silent=False,
        ):
            message = message if isinstance(message, list) else [message]
            self.received.extend(message)

    user_proxy = autogen.UserProxyAgent(
        name="user_proxy",
        human_input_mode="NEVER",
        is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
    )
    # The parameter must be named "str" because the tool arguments below are passed as keyword arguments.
    user_proxy.register_function({"echo": lambda str: str})

    fake_agent = FakeAgent("fake_agent")

    user_proxy.receive(
        message={
            "content": "test multi tool call",
            "tool_calls": [
                {
                    "id": "tool_1",
                    "type": "function",
                    "function": {"name": "echo", "arguments": json.JSONEncoder().encode({"str": "hello world"})},
                },
                {
                    "id": "tool_2",
                    "type": "function",
                    "function": {
                        "name": "echo",
                        "arguments": json.JSONEncoder().encode({"str": "goodbye and thanks for all the fish"}),
                    },
                },
                {
                    "id": "tool_3",
                    "type": "function",
                    "function": {
                        "name": "multi_tool_call_echo",  # normalized "multi_tool_call.echo"
                        "arguments": json.JSONEncoder().encode({"str": "goodbye and thanks for all the fish"}),
                    },
                },
            ],
        },
        sender=fake_agent,
        request_reply=True,
    )

    assert fake_agent.received == [
        {
            "role": "tool",
            "tool_responses": [
                {"tool_call_id": "tool_1", "role": "tool", "name": "echo", "content": "hello world"},
                {
                    "tool_call_id": "tool_2",
                    "role": "tool",
                    "name": "echo",
                    "content": "goodbye and thanks for all the fish",
                },
                {
                    "tool_call_id": "tool_3",
                    "role": "tool",
                    "name": "multi_tool_call_echo",
                    "content": "Error: Function multi_tool_call_echo not found.",
                },
            ],
            "content": inspect.cleandoc(
                """
                Tool call: echo
                Id: tool_1
                hello world
                Tool call: echo
                Id: tool_2
                goodbye and thanks for all the fish
                Tool call: multi_tool_call_echo
                Id: tool_3
                Error: Function multi_tool_call_echo not found.
                """
            ),
        }
    ]
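
# Note: the replies to all three tool calls come back as a single role="tool" message;
# per-call results live in "tool_responses", and an unregistered tool name surfaces as
# an "Error: Function ... not found." result instead of raising.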


if __name__ == "__main__":
    test_update_tool()
    test_eval_math_responses()
    test_multi_tool_call()