consecutive auto reply, history, template, group chat, class-specific reply (#1165)

* max consecutive auto reply

* chess notebook

* link to notebook

* clear history

* filter

* **context -> context

* format str template

* groupchat

* register class specific reply

* groupchat notebook

* move human reply into generate_reply

* arg in config

* colab link

* remove room

* rename
This commit is contained in:
Chi Wang
2023-08-03 02:17:20 -07:00
committed by GitHub
parent c48babd02f
commit 45b7d908e4
20 changed files with 3031 additions and 762 deletions

View File

@@ -1,6 +1,14 @@
from .agent import Agent
from .assistant_agent import AssistantAgent
from .responsive_agent import ResponsiveAgent
from .assistant_agent import AssistantAgent
from .user_proxy_agent import UserProxyAgent
from .groupchat import GroupChatManager, GroupChatParticipant
__all__ = ["Agent", "ResponsiveAgent", "AssistantAgent", "UserProxyAgent"]
__all__ = [
"Agent",
"ResponsiveAgent",
"AssistantAgent",
"UserProxyAgent",
"GroupChatManager",
"GroupChatParticipant",
]

View File

@@ -36,14 +36,12 @@ class Agent:
def generate_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = "",
sender: Optional["Agent"] = None,
) -> Union[str, Dict, None]:
"""(Abstract method) Generate a reply based on the received messages.
Args:
messages (list[dict]): a list of messages received.
default_reply (str or dict): the default reply if no other reply is generated.
sender: sender of an Agent instance.
Returns:
str or dict or None: the generated reply. If None, no reply is generated.

View File

@@ -165,7 +165,7 @@ class MathUserProxyAgent(UserProxyAgent):
default_auto_reply=default_auto_reply,
**kwargs,
)
self.register_auto_reply(Agent, self._generate_math_reply)
# fixed var
self._max_invalid_q_per_step = max_invalid_q_per_step
@@ -276,12 +276,11 @@ class MathUserProxyAgent(UserProxyAgent):
is_success = False
return output, is_success
def generate_reply(
def _generate_math_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = DEFAULT_REPLY,
sender: Optional[Agent] = None,
) -> Union[str, Dict, None]:
):
"""Generate an auto reply."""
if messages is None:
messages = self._oai_messages[sender.name]
@@ -291,7 +290,7 @@ class MathUserProxyAgent(UserProxyAgent):
if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
# no code block is found, lang should be `UNKNOWN``
return default_reply
return True, self._default_auto_reply
is_success, all_success = True, True
reply = ""
for code_block in code_blocks:
@@ -323,7 +322,7 @@ class MathUserProxyAgent(UserProxyAgent):
self._accum_invalid_q_per_step = 0
reply = "Please revisit the problem statement and your reasoning. If you think this step is correct, solve it yourself and continue the next step. Otherwise, correct this step."
return reply
return True, reply
# Modified based on langchain. Langchain is licensed under MIT License:

View File

@@ -0,0 +1,143 @@
import sys
from typing import Dict, List, Optional, Tuple, Union
from .agent import Agent
from .responsive_agent import ResponsiveAgent
class GroupChatManager(ResponsiveAgent):
    """(WIP) A chat manager agent that can manage a group chat of multiple agents.

    The manager receives each participant's message, broadcasts it to the other
    participants, selects the next speaker, and stops after ``max_round`` rounds.
    """

    # Attributes expected to be assigned by the user after construction.
    agents: List["GroupChatParticipant"]  # the participants of the group chat
    max_round: int  # maximum number of rounds before the chat terminates

    def _participant_roles(self):
        # Build one "name: system_message" line per participant for the
        # speaker-selection system prompt.
        return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])

    def _select_speaker_msg(self):
        # System message instructing the LLM to pick the next role (speaker)
        # from the known participant names. Must return only the role name.
        return {
            "role": "system",
            "content": f"""You are in a role play game. The following roles are available:
{self._participant_roles()}. Read the following conversation.
Then select the next role from {self._agent_names} to play. Only return the role.""",
        }

    def __init__(
        self,
        max_round: Optional[int] = 10,
        name: Optional[str] = "chat_manager",
        # unlimited consecutive auto reply by default
        max_consecutive_auto_reply: Optional[int] = sys.maxsize,
        human_input_mode: Optional[str] = "NEVER",
        # seed: Optional[int] = 4,
        **kwargs,
    ):
        """
        Args:
            max_round (int): maximum number of chat rounds before termination.
            name (str): name of the chat manager agent.
            max_consecutive_auto_reply (int): maximum consecutive auto replies
                (effectively unlimited by default, i.e. ``sys.maxsize``).
            human_input_mode (str): when to prompt for human input; "NEVER" by default.
            **kwargs: forwarded to ``ResponsiveAgent.__init__``.
        """
        super().__init__(
            name=name,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            **kwargs,
        )
        # Messages coming from group chat participants are handled by the
        # class-specific reply function below.
        self.register_auto_reply(GroupChatParticipant, self._generate_reply_for_participant)
        self.max_round = max_round
        self._agent_names = []
        self._next_speaker = None
        self._round = 0
        self._messages = []
        # self._random = random.Random(seed)

    def _generate_reply_for_participant(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
    ) -> Union[str, Dict, None]:
        """Handle a message from a participant: broadcast it, advance the round,
        select the next speaker, and prompt that speaker to reply.

        Returns a ``(final, reply)`` tuple; ``final`` is always ``True`` so no
        further reply functions are tried, and the manager itself sends no content.
        """
        if messages is None:
            messages = self._oai_messages[sender.name]
        message = messages[-1]
        # set the name to sender's name if the role is not function
        if message["role"] != "function":
            message["name"] = sender.name
        self._messages.append(message)
        self._next_speaker = None
        # broadcast the message to all agents except the sender
        for agent in self.agents:
            if agent != sender:
                self.send(message, agent)
        if self._round == 0:
            # cache the participant names on the first round
            self._agent_names = [agent.name for agent in self.agents]
        self._round += 1
        if self._round >= self.max_round:
            # round limit reached: terminate without selecting another speaker
            return True, None
        # speaker selection msg from an agent
        self._next_speaker = self._select_speaker(sender)
        self._next_speaker.send(self._next_speaker.generate_reply(sender=self), self)
        return True, None

    @property
    def next_speaker(self):
        """Return the next speaker."""
        return self._next_speaker

    def _select_speaker(self, last_speaker: "GroupChatParticipant"):
        """Select the next speaker."""
        final, name = self._generate_oai_reply([self._select_speaker_msg()] + self._messages)
        if not final:
            # LLM-based selection unavailable (e.g. llm_config is False):
            # fall back to round-robin over the participant list.
            # i = self._random.randint(0, len(self._agent_names) - 1) # randomly pick an id
            name = self._agent_names[(self._agent_names.index(last_speaker.name) + 1) % len(self._agent_names)]
        return self.agent_by_name(name)

    def agent_by_name(self, name: str) -> "GroupChatParticipant":
        """Return the participant agent with the given name."""
        return self.agents[self._agent_names.index(name)]

    def reset(self):
        """Reset the group chat state in addition to the base agent state."""
        super().reset()
        self._round = 0
        self._messages.clear()
        self._next_speaker = None
class GroupChatParticipant(ResponsiveAgent):
    """(WIP) A group chat participant agent that can participate in a group chat."""

    # The manager coordinating the group chat this agent belongs to.
    group_chat_manager: GroupChatManager

    def __init__(
        self,
        name,
        group_chat_manager=None,
        **kwargs,
    ):
        """
        Args:
            name (str): name of the participant agent.
            group_chat_manager (GroupChatManager): the manager of the group chat
                this agent participates in.
            **kwargs: forwarded to ``ResponsiveAgent.__init__``.
        """
        super().__init__(
            name=name,
            **kwargs,
        )
        # Messages coming from the chat manager are handled by the
        # class-specific reply function below.
        self.register_auto_reply(GroupChatManager, self._generate_reply_for_chat_manager)
        self.group_chat_manager = group_chat_manager

    def _generate_reply_for_chat_manager(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        """Generate reply for the chat manager.

        Returns a ``(final, reply)`` tuple: ``final`` is ``True`` (stay silent)
        unless this participant is the manager's currently selected next speaker.
        """
        return self.group_chat_manager.next_speaker != self, None

    # def _speaker_selection(self, instruction):
    #     """Select the next speaker."""
    #     if self.llm_config is False:
    #         if self.human_input_mode == "NEVER":
    #             return self.name
    #         else:
    #             return self.get_human_input(instruction["content"])
    #     sender = self.chat_manager.room
    #     roles_msg = {
    #         "content": f"""The following roles are available:
    # {self._participant_roles()}""",
    #         "role": "system",
    #     }
    #     old_system_msg = self.system_message
    #     self.update_system_message(instruction["content"])
    #     reply = self._generate_oai_reply([roles_msg] + self.chat_messages[sender.name])
    #     self.update_system_message(old_system_msg)
    #     return reply

View File

@@ -1,6 +1,6 @@
from collections import defaultdict
import json
from typing import Callable, Dict, List, Optional, Union
from typing import Callable, Dict, List, Optional, Tuple, Union
from flaml.autogen import oai
from .agent import Agent
from flaml.autogen.code_utils import DEFAULT_MODEL, UNKNOWN, execute_code, extract_code, infer_lang
@@ -101,12 +101,33 @@ class ResponsiveAgent(Agent):
self._code_execution_config = {} if code_execution_config is None else code_execution_config
self.human_input_mode = human_input_mode
self.max_consecutive_auto_reply = (
self._max_consecutive_auto_reply = (
max_consecutive_auto_reply if max_consecutive_auto_reply is not None else self.MAX_CONSECUTIVE_AUTO_REPLY
)
self._consecutive_auto_reply_counter = defaultdict(int)
self._max_consecutive_auto_reply_dict = defaultdict(self.max_consecutive_auto_reply)
self._function_map = {} if function_map is None else function_map
self._default_auto_reply = default_auto_reply
self._class_specific_reply = []
self.register_auto_reply(Agent, self._generate_oai_reply)
self.register_auto_reply(Agent, self._generate_code_execution_reply)
self.register_auto_reply(Agent, self._generate_function_call_reply)
def register_auto_reply(self, class_type, reply_func: Callable):
"""Register a class-specific reply function.
The class-specific reply function will be called when the sender is an instance of the class_type.
The function registered later will be checked earlier.
Args:
class_type (Class): the class type.
reply_func (Callable): the reply function.
"""
self._class_specific_reply.append((class_type, reply_func))
def system_message(self):
"""Return the system message."""
return self._oai_system_message[0]["content"]
def update_system_message(self, system_message: str):
"""Update the system message.
@@ -116,6 +137,26 @@ class ResponsiveAgent(Agent):
"""
self._oai_system_message[0]["content"] = system_message
def update_max_consecutive_auto_reply(self, value: int, sender: Optional[Agent] = None):
"""Update the maximum number of consecutive auto replies.
Args:
value (int): the maximum number of consecutive auto replies.
sender (Agent): when the sender is provided, only update the max_consecutive_auto_reply for that sender.
"""
if sender is None:
self._max_consecutive_auto_reply = value
for k in self._max_consecutive_auto_reply_dict:
self._max_consecutive_auto_reply_dict[k] = value
else:
self._max_consecutive_auto_reply_dict[sender.name] = value
def max_consecutive_auto_reply(self, sender: Optional[Agent] = None) -> int:
"""The maximum number of consecutive auto replies."""
return (
self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender.name]
)
@property
def chat_messages(self) -> Dict[str, List[Dict]]:
"""A dictionary of conversations from name to list of ChatCompletion messages."""
@@ -200,7 +241,7 @@ class ResponsiveAgent(Agent):
For example, one agent can send a message A as:
```python
{
"content": "{use_tool_msg}",
"content": lambda context: context["use_tool_msg"],
"context": {
"use_tool_msg": "Use tool X if they are relevant."
}
@@ -234,8 +275,15 @@ class ResponsiveAgent(Agent):
print(message["content"], flush=True)
print(colored("*" * len(func_print), "green"), flush=True)
else:
if message.get("content") is not None:
print(message["content"], flush=True)
content = message.get("content")
if content is not None:
if "context" in message:
content = oai.ChatCompletion.instantiate(
content,
message["context"],
self.llm_config and self.llm_config.get("allow_format_str_template", False),
)
print(content, flush=True)
if "function_call" in message:
func_print = f"***** Suggested function Call: {message['function_call'].get('name', '(No function name found)')} *****"
print(colored(func_print, "green"), flush=True)
@@ -276,8 +324,77 @@ class ResponsiveAgent(Agent):
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
self._print_received_message(message, sender)
reply = self.generate_reply(sender=sender)
if reply is not None:
self.send(reply, sender)
# default reply is empty (i.e., no reply, in this case we will try to generate auto reply)
def initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bool] = True, **context):
"""Initiate a chat with the recipient agent.
Reset the consecutive auto reply counter.
If `clear_history` is True, the chat history with the recipient agent will be cleared.
`generate_init_message` is called to generate the initial message for the agent.
Args:
recipient: the recipient agent.
clear_history (bool): whether to clear the chat history with the agent.
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self.reset_consecutive_auto_reply_counter(recipient)
recipient.reset_consecutive_auto_reply_counter(self)
if clear_history:
self.clear_history(recipient)
recipient.clear_history(self)
self.send(self.generate_init_message(**context), recipient)
def reset(self):
"""Reset the agent."""
self.clear_history()
self.reset_consecutive_auto_reply_counter()
def reset_consecutive_auto_reply_counter(self, sender: Optional[Agent] = None):
"""Reset the consecutive_auto_reply_counter of the sender."""
if sender is None:
self._consecutive_auto_reply_counter.clear()
else:
self._consecutive_auto_reply_counter[sender.name] = 0
def clear_history(self, agent: Optional[Agent] = None):
"""Clear the chat history of the agent.
Args:
agent: the agent with whom the chat history to clear. If None, clear the chat history with all agents.
"""
if agent is None:
self._oai_messages.clear()
else:
self._oai_messages[agent.name].clear()
def _generate_oai_reply(
    self,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
    """Generate a reply using the configured LLM.

    Returns:
        (final, reply): ``(False, None)`` when no ``llm_config`` is set,
        otherwise ``(True, <extracted text or function call>)``.
    """
    if self.llm_config is False:
        return False, None
    if messages is None:
        messages = self._oai_messages[sender.name]
    # TODO: #1143 handle token limit exceeded error
    # NOTE: the "context" entry is consumed (popped) from the last message
    # and passed to the completion call for template instantiation.
    response = oai.ChatCompletion.create(
        context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **self.llm_config
    )
    return True, oai.ChatCompletion.extract_text_or_function_call(response)[0]
def _check_termination_and_human_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
if messages is None:
messages = self._oai_messages[sender.name]
message = messages[-1]
reply = ""
no_human_input_msg = ""
if self.human_input_mode == "ALWAYS":
@@ -288,7 +405,7 @@ class ResponsiveAgent(Agent):
# if the human input is empty, and the message is a termination message, then we will terminate the conversation
reply = reply if reply or not self._is_termination_msg(message) else "exit"
else:
if self._consecutive_auto_reply_counter[sender.name] >= self.max_consecutive_auto_reply:
if self._consecutive_auto_reply_counter[sender.name] >= self._max_consecutive_auto_reply_dict[sender.name]:
if self.human_input_mode == "NEVER":
reply = "exit"
else:
@@ -322,39 +439,59 @@ class ResponsiveAgent(Agent):
if reply == "exit":
# reset the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender.name] = 0
return
return True, None
# send the human reply
if reply or self.max_consecutive_auto_reply == 0:
if reply or self._max_consecutive_auto_reply_dict[sender.name] == 0:
# reset the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender.name] = 0
self.send(reply, sender)
return
return True, reply
# send the auto reply
# increment the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender.name] += 1
if self.human_input_mode != "NEVER":
print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
reply = self.generate_reply(sender=sender, default_reply=self._default_auto_reply)
if reply is not None:
self.send(reply, sender)
def reset(self):
"""Reset the agent."""
self._oai_messages.clear()
self._consecutive_auto_reply_counter.clear()
return False, None
def _oai_reply(self, messages: List[Dict]) -> Union[str, Dict]:
# TODO: #1143 handle token limit exceeded error
response = oai.ChatCompletion.create(
context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **self.llm_config
)
return oai.ChatCompletion.extract_text_or_function_call(response)[0]
def _generate_function_call_reply(
    self,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
):
    """Execute the function suggested in the last message, if any.

    Returns:
        (final, reply): ``(True, <function result>)`` when the last message
        contains a "function_call"; ``(False, None)`` otherwise, so the next
        registered reply function is tried.
    """
    if messages is None:
        messages = self._oai_messages[sender.name]
    message = messages[-1]
    if "function_call" in message:
        _, func_return = self.execute_function(message["function_call"])
        return True, func_return
    return False, None
def _generate_code_execution_reply(
    self,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
):
    """Execute code blocks found in the last message and report the outcome.

    Returns:
        (final, reply): ``(False, None)`` when code execution is disabled or no
        code block is found; otherwise ``(True, <exit code + output summary>)``.
    """
    if self._code_execution_config is False:
        return False, None
    if messages is None:
        messages = self._oai_messages[sender.name]
    message = messages[-1]
    code_blocks = extract_code(message["content"])
    if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
        # no code block is found, lang should be `UNKNOWN`
        return False, None
    # code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
    # if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
    #     return code_blocks[0][1]
    # try to execute the code
    exitcode, logs = self.execute_code_blocks(code_blocks)
    exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
    return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"
def generate_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = "",
sender: Optional[Agent] = None,
) -> Union[str, Dict, None]:
"""Reply based on the conversation history.
@@ -373,27 +510,16 @@ class ResponsiveAgent(Agent):
str or dict or None: reply. None if no reply is generated.
"""
assert messages is not None or sender is not None, "Either messages or sender must be provided."
if messages is None:
messages = self._oai_messages[sender.name]
message = messages[-1]
if "function_call" in message:
_, func_return = self.execute_function(message["function_call"])
return func_return
if self._code_execution_config is False:
return default_reply if self.llm_config is False else self._oai_reply(messages)
code_blocks = extract_code(message["content"])
if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
# no code block is found, lang should be `UNKNOWN`
if self.llm_config is False:
return default_reply
# code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
# if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
# return code_blocks[0][1]
return self._oai_reply(messages)
# try to execute the code
exitcode, logs = self.execute_code_blocks(code_blocks)
exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
return f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"
final, reply = self._check_termination_and_human_reply(sender=sender)
if final:
return reply
if sender is not None:
for class_specifc_reply in self._class_specific_reply[-1::-1]:
if isinstance(sender, class_specifc_reply[0]):
final, reply = class_specifc_reply[1](messages, sender)
if final:
return reply
return self._default_auto_reply
def get_human_input(self, prompt: str) -> str:
"""Get human input.
@@ -536,18 +662,6 @@ class ResponsiveAgent(Agent):
"""
return context["message"]
def initiate_chat(self, recipient, **context):
"""Initiate a chat with the recipient agent.
`generate_init_message` is called to generate the initial message for the agent.
Args:
recipient: the recipient agent.
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self.send(self.generate_init_message(**context), recipient)
def register_function(self, function_map: Dict[str, Callable]):
"""Register functions to the agent.

View File

@@ -8,7 +8,8 @@ class UserProxyAgent(ResponsiveAgent):
UserProxyAgent is a subclass of ResponsiveAgent configured with `human_input_mode` to ALWAYS
and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
Code execution is enabled by default. LLM-based auto reply is disabled by default.
To modify auto reply, override `generate_reply` method.
To modify auto reply, register a method with `register_auto_reply`.
The method should have a signature similar to that of the `_generate_oai_reply` method.
To modify the way to get human input, override `get_human_input` method.
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
`run_code`, and `execute_function` methods respectively.

View File

@@ -684,6 +684,7 @@ class Completion(openai_Completion):
config_list: Optional[List[Dict]] = None,
filter_func: Optional[Callable[[Dict, Dict, Dict], bool]] = None,
raise_on_ratelimit_or_timeout: Optional[bool] = True,
allow_format_str_template: Optional[bool] = False,
**config,
):
"""Make a completion for a given context.
@@ -738,6 +739,7 @@ class Completion(openai_Completion):
raise_on_ratelimit_or_timeout (bool, Optional): Whether to raise RateLimitError or Timeout when all configs fail.
When set to False, -1 will be returned when all configs fail.
allow_format_str_template (bool, Optional): Whether to allow format string template in the config.
**config: Configuration for the openai API call. This is used as parameters for calling openai API.
Besides the parameters for the openai API call, it can also contain a seed (int) for the cache.
This is useful when implementing "controlled randomness" for the completion.
@@ -753,6 +755,7 @@ class Completion(openai_Completion):
cost = 0
for i, each_config in enumerate(config_list):
base_config = config.copy()
base_config["allow_format_str_template"] = allow_format_str_template
base_config.update(each_config)
if i < last and filter_func is None and "retry_timeout" not in base_config:
# retry_timeout = 0 to avoid retrying when no filter is given
@@ -779,7 +782,7 @@ class Completion(openai_Completion):
logger.debug(f"failed with config {i}", exc_info=1)
if i == last:
raise
params = cls._construct_params(context, config)
params = cls._construct_params(context, config, allow_format_str_template=allow_format_str_template)
if not use_cache:
return cls._get_response(
params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False
@@ -792,15 +795,20 @@ class Completion(openai_Completion):
return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout)
@classmethod
def _instantiate(cls, template: Union[str, None], context: Optional[Dict] = None):
def instantiate(
cls,
template: Union[str, None],
context: Optional[Dict] = None,
allow_format_str_template: Optional[bool] = False,
):
if not context or template is None:
return template
if isinstance(template, str):
return template.format(**context)
return template.format(**context) if allow_format_str_template else template
return template(context)
@classmethod
def _construct_params(cls, context, config, prompt=None, messages=None):
def _construct_params(cls, context, config, prompt=None, messages=None, allow_format_str_template=False):
params = config.copy()
model = config["model"]
prompt = config.get("prompt") if prompt is None else prompt
@@ -815,7 +823,7 @@ class Completion(openai_Completion):
[
{
**m,
"content": cls._instantiate(m["content"], context),
"content": cls.instantiate(m["content"], context, allow_format_str_template),
}
if m.get("content")
else m
@@ -829,12 +837,12 @@ class Completion(openai_Completion):
params["messages"] = [
{
"role": "user",
"content": cls._instantiate(prompt, context),
"content": cls.instantiate(prompt, context, allow_format_str_template),
},
]
params.pop("prompt", None)
else:
params["prompt"] = cls._instantiate(prompt, context)
params["prompt"] = cls.instantiate(prompt, context, allow_format_str_template)
return params
@classmethod

View File

@@ -274,9 +274,6 @@
}
],
"source": [
"# reset the assistant. Always reset the assistant before starting a new conversation.\n",
"assistant.reset()\n",
"\n",
"# given a math problem, we use the mathproxyagent to generate a prompt to be sent to the assistant as the initial message.\n",
"# the assistant receives the message and generates a response. The response will be sent back to the mathproxyagent for processing.\n",
"# The conversation continues until the termination condition is met, in MathChat, the termination condition is the detect of \"\\boxed{}\" in the response.\n",
@@ -422,8 +419,6 @@
}
],
"source": [
"assistant.reset()\n",
"\n",
"math_problem = \"For what negative value of $k$ is there exactly one solution to the system of equations \\\\begin{align*}\\ny &= 2x^2 + kx + 6 \\\\\\\\\\ny &= -x + 4?\\n\\\\end{align*}\"\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem)"
]
@@ -545,8 +540,6 @@
}
],
"source": [
"assistant.reset()\n",
"\n",
"math_problem = \"Find all positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational. Express them in decreasing order, separated by commas.\"\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem)"
]
@@ -740,8 +733,6 @@
}
],
"source": [
"assistant.reset() # clear LLM assistant's message history\n",
"\n",
"# we set the prompt_type to \"python\", which is a simplied version of the default prompt.\n",
"math_problem = \"Problem: If $725x + 727y = 1500$ and $729x+ 731y = 1508$, what is the value of $x - y$ ?\"\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem, prompt_type=\"python\")"
@@ -855,8 +846,6 @@
}
],
"source": [
"assistant.reset() # clear LLM assistant's message history\n",
"\n",
"# The wolfram alpha appid is required for this example (the assistant may choose to query Wolfram Alpha).\n",
"import os\n",
"if \"WOLFRAM_ALPHA_APPID\" not in os.environ:\n",

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,505 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Auto Generated Agent Chat: Group Chat\n",
"\n",
"Modified based on https://github.com/microsoft/FLAML/blob/4ea686af5c3e8ff24d9076a7a626c8b28ab5b1d7/notebook/autogen_multiagent_roleplay_chat.ipynb\n",
"\n",
"## Requirements\n",
"\n",
"FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the [autogen] option:\n",
"```bash\n",
"pip install flaml[autogen]\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install flaml[autogen]~=2.0.0rc4"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set your API Endpoint\n",
"\n",
"The [`config_list_from_json`](https://microsoft.github.io/FLAML/docs/reference/autogen/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from flaml import autogen\n",
"\n",
"config_list_gpt4 = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
" \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
" },\n",
")\n",
"# config_list_gpt35 = autogen.config_list_from_json(\n",
"# \"OAI_CONFIG_LIST\",\n",
"# filter_dict={\n",
"# \"model\": {\n",
"# \"gpt-3.5-turbo\",\n",
"# \"gpt-3.5-turbo-16k\",\n",
"# \"gpt-3.5-turbo-0301\",\n",
"# \"chatgpt-35-turbo-0301\",\n",
"# \"gpt-35-turbo-v0301\",\n",
"# },\n",
"# },\n",
"# )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the gpt-4 models are kept in the list based on the filter condition.\n",
"\n",
"The config list looks like the following:\n",
"```python\n",
"config_list = [\n",
" {\n",
" 'model': 'gpt-4',\n",
" 'api_key': '<your OpenAI API key here>',\n",
" },\n",
" {\n",
" 'model': 'gpt-4',\n",
" 'api_key': '<your Azure OpenAI API key here>',\n",
" 'api_base': '<your Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" },\n",
" {\n",
" 'model': 'gpt-4-32k',\n",
" 'api_key': '<your Azure OpenAI API key here>',\n",
" 'api_base': '<your Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" },\n",
"]\n",
"```\n",
"\n",
"If you open this notebook in colab, you can upload your files by clicking the file icon on the left panel and then choose \"upload file\" icon.\n",
"\n",
"You can set the value of config_list in other ways you prefer, e.g., loading from a YAML file."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Construct Agents"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"\n",
"llm_config = {\"config_list\": config_list_gpt4}\n",
"group_chat_manager = autogen.GroupChatManager(max_round=4, llm_config=llm_config)\n",
"human = autogen.GroupChatParticipant(\n",
" name=\"Human\",\n",
" system_message=\"A human admin.\",\n",
" human_input_mode=\"ALWAYS\",\n",
" llm_config=False,\n",
" group_chat_manager=group_chat_manager,\n",
")\n",
"alice = autogen.GroupChatParticipant(\n",
" name=\"Alice\",\n",
" system_message=autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE,\n",
" max_consecutive_auto_reply=sys.maxsize,\n",
" human_input_mode=\"NEVER\",\n",
" llm_config=llm_config,\n",
" code_execution_config=False,\n",
" group_chat_manager=group_chat_manager,\n",
")\n",
"bob = autogen.GroupChatParticipant(\n",
" name=\"Bob\",\n",
" system_message=\"Code reviewer. Prevent code execution if unsafe or not well documented. Suggest changes. Otherwise, approve and return the final code to execute.\",\n",
" max_consecutive_auto_reply=sys.maxsize,\n",
" human_input_mode=\"NEVER\",\n",
" llm_config=llm_config,\n",
" code_execution_config=False,\n",
" group_chat_manager=group_chat_manager,\n",
")\n",
"\n",
"group_chat_manager.agents = [human, alice, bob]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Start Chat"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mHuman\u001b[0m (to chat_manager):\n",
"\n",
"find a latest paper about generative agents\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Alice):\n",
"\n",
"find a latest paper about generative agents\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Bob):\n",
"\n",
"find a latest paper about generative agents\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
"As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
"\n",
"Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
"\n",
"```bash\n",
"pip install beautifulsoup4 requests\n",
"```\n",
"\n",
"Then you can use this Python script to fetch and print the title of the latest paper:\n",
"\n",
"Python code:\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
"res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
"\n",
"# Parse the HTML content of the page\n",
"soup = BeautifulSoup(res.text, 'html.parser')\n",
"\n",
"# Find the first result (which is the latest) and print its title\n",
"title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
"print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
"```\n",
"Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
"\n",
"If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
"\n",
"Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Human):\n",
"\n",
"As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
"\n",
"Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
"\n",
"```bash\n",
"pip install beautifulsoup4 requests\n",
"```\n",
"\n",
"Then you can use this Python script to fetch and print the title of the latest paper:\n",
"\n",
"Python code:\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
"res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
"\n",
"# Parse the HTML content of the page\n",
"soup = BeautifulSoup(res.text, 'html.parser')\n",
"\n",
"# Find the first result (which is the latest) and print its title\n",
"title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
"print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
"```\n",
"Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
"\n",
"If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
"\n",
"Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[31m\n",
">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
"\u001b[31m\n",
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
"\u001b[33mchat_manager\u001b[0m (to Bob):\n",
"\n",
"As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
"\n",
"Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
"\n",
"```bash\n",
"pip install beautifulsoup4 requests\n",
"```\n",
"\n",
"Then you can use this Python script to fetch and print the title of the latest paper:\n",
"\n",
"Python code:\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
"res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
"\n",
"# Parse the HTML content of the page\n",
"soup = BeautifulSoup(res.text, 'html.parser')\n",
"\n",
"# Find the first result (which is the latest) and print its title\n",
"title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
"print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
"```\n",
"Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
"\n",
"If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
"\n",
"Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mBob\u001b[0m (to chat_manager):\n",
"\n",
"Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
"\n",
"Here is the more secure and documented code:\n",
"\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Function that uses requests.get to fetch an URL's content\n",
"def get_url_content(url):\n",
" try:\n",
" response = requests.get(url)\n",
" response.raise_for_status()\n",
" return response.text\n",
" except (requests.RequestException, ValueError) as error:\n",
" print(f'Google scholar cannot be accessed because of: {error}')\n",
" return None\n",
"\n",
"# Function to find the title of the latest paper about \"generative agents\"\n",
"def find_latest_paper(url):\n",
" html = get_url_content(url)\n",
" if html:\n",
" # Parse the HTML content of the page\n",
" soup = BeautifulSoup(html, 'html.parser')\n",
" # Find the first result (which is the latest one)\n",
" result = soup.find('h3', {'class': 'gs_rt'})\n",
" \n",
" # If result found, print its title; Otherwise, print paper not found\n",
" if result:\n",
" title = result.a.text\n",
" print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
" else:\n",
" print(\"No papers about 'generative agents' found.\")\n",
" else:\n",
" print(\"No internet or Google scholar is down.\")\n",
"\n",
"# URL of Google scholar with a search query \"generative agents\"\n",
"google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
"\n",
"find_latest_paper(google_scholar_url)\n",
"```\n",
"\n",
"Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Human):\n",
"\n",
"Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
"\n",
"Here is the more secure and documented code:\n",
"\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Function that uses requests.get to fetch an URL's content\n",
"def get_url_content(url):\n",
" try:\n",
" response = requests.get(url)\n",
" response.raise_for_status()\n",
" return response.text\n",
" except (requests.RequestException, ValueError) as error:\n",
" print(f'Google scholar cannot be accessed because of: {error}')\n",
" return None\n",
"\n",
"# Function to find the title of the latest paper about \"generative agents\"\n",
"def find_latest_paper(url):\n",
" html = get_url_content(url)\n",
" if html:\n",
" # Parse the HTML content of the page\n",
" soup = BeautifulSoup(html, 'html.parser')\n",
" # Find the first result (which is the latest one)\n",
" result = soup.find('h3', {'class': 'gs_rt'})\n",
" \n",
" # If result found, print its title; Otherwise, print paper not found\n",
" if result:\n",
" title = result.a.text\n",
" print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
" else:\n",
" print(\"No papers about 'generative agents' found.\")\n",
" else:\n",
" print(\"No internet or Google scholar is down.\")\n",
"\n",
"# URL of Google scholar with a search query \"generative agents\"\n",
"google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
"\n",
"find_latest_paper(google_scholar_url)\n",
"```\n",
"\n",
"Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[31m\n",
">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
"\u001b[31m\n",
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
"\u001b[33mchat_manager\u001b[0m (to Alice):\n",
"\n",
"Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
"\n",
"Here is the more secure and documented code:\n",
"\n",
"```python\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Function that uses requests.get to fetch an URL's content\n",
"def get_url_content(url):\n",
" try:\n",
" response = requests.get(url)\n",
" response.raise_for_status()\n",
" return response.text\n",
" except (requests.RequestException, ValueError) as error:\n",
" print(f'Google scholar cannot be accessed because of: {error}')\n",
" return None\n",
"\n",
"# Function to find the title of the latest paper about \"generative agents\"\n",
"def find_latest_paper(url):\n",
" html = get_url_content(url)\n",
" if html:\n",
" # Parse the HTML content of the page\n",
" soup = BeautifulSoup(html, 'html.parser')\n",
" # Find the first result (which is the latest one)\n",
" result = soup.find('h3', {'class': 'gs_rt'})\n",
" \n",
" # If result found, print its title; Otherwise, print paper not found\n",
" if result:\n",
" title = result.a.text\n",
" print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
" else:\n",
" print(\"No papers about 'generative agents' found.\")\n",
" else:\n",
" print(\"No internet or Google scholar is down.\")\n",
"\n",
"# URL of Google scholar with a search query \"generative agents\"\n",
"google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
"\n",
"find_latest_paper(google_scholar_url)\n",
"```\n",
"\n",
"Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[31m\n",
">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
"\u001b[31m\n",
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
"\u001b[31m\n",
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
"\u001b[33mHuman\u001b[0m (to chat_manager):\n",
"\n",
"exitcode: 0 (execution succeeded)\n",
"Code output: \n",
"The title of the latest paper about 'generative agents' is:\n",
"Generative agents for player decision modeling in games\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Alice):\n",
"\n",
"exitcode: 0 (execution succeeded)\n",
"Code output: \n",
"The title of the latest paper about 'generative agents' is:\n",
"Generative agents for player decision modeling in games\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mchat_manager\u001b[0m (to Bob):\n",
"\n",
"exitcode: 0 (execution succeeded)\n",
"Code output: \n",
"The title of the latest paper about 'generative agents' is:\n",
"Generative agents for player decision modeling in games\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
}
],
"source": [
"human.send(\"find a latest paper about generative agents\", group_chat_manager)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "flaml",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.17"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -571,8 +571,6 @@
}
],
"source": [
"# it is suggested to reset the assistant to clear the state if the new task is not related to the previous one.\n",
"assistant.reset()\n",
"user_proxy.initiate_chat(\n",
" assistant,\n",
" message=\"\"\"Show me the YTD gain of 10 largest technology companies as of today.\"\"\",\n",

File diff suppressed because it is too large Load Diff

View File

@@ -483,6 +483,7 @@
" ], # the prompt templates to choose from\n",
" stop=[[\"\\nclass\", \"\\ndef\", \"\\nif\", \"\\nprint\"], None], # the stop sequences\n",
" config_list=endpoint_list, # optional: a list of endpoints to use\n",
" allow_format_str_template=True, # whether to allow format string template\n",
")\n"
]
},

View File

@@ -369,7 +369,7 @@
" x, y = [], []\n",
" votes_success = defaultdict(lambda: [0, 0])\n",
" for i, data_i in enumerate(test_data[:50]):\n",
" response = oai.ChatCompletion.create(context=data_i, **config)\n",
" response = oai.ChatCompletion.create(context=data_i, allow_format_str_template=True, **config)\n",
" responses = oai.ChatCompletion.extract_text(response)\n",
" metrics.append(eval_math_responses(responses, **data_i))\n",
" votes = metrics[-1][\"votes\"]\n",

View File

@@ -100,12 +100,10 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
user.initiate_chat(assistant, message="TERMINATE")
# should terminate without sending any message
assert assistant.last_message()["content"] == assistant.last_message(user)["content"] == "TERMINATE"
assistant.reset()
coding_task = "Print hello world to a file called hello.txt"
user.initiate_chat(assistant, message=coding_task)
# coding_task = "Create a powerpoint with the text hello world in it."
# assistant.receive(coding_task, user)
assistant.reset()
coding_task = "Save a pandas df with 3 rows and 3 columns to disk."
user.initiate_chat(assistant, message=coding_task)
assert not isinstance(user.use_docker, bool) # None or str

View File

@@ -0,0 +1,35 @@
from flaml import autogen


def test_chat_manager():
    """Run a two-participant group chat and check message bookkeeping.

    Builds a GroupChatManager limited to two rounds with two auto-replying
    participants (no LLM, no human input), sends a starter message, and
    verifies the conversation length, then resets everything and starts a
    second chat from the other participant.
    """
    group_chat_manager = autogen.GroupChatManager(max_round=2, llm_config=False)
    agent1 = autogen.GroupChatParticipant(
        "alice",
        max_consecutive_auto_reply=2,
        human_input_mode="NEVER",
        llm_config=False,
        # fixed typo: "sepaking" -> "speaking"
        default_auto_reply="This is alice speaking.",
        group_chat_manager=group_chat_manager,
    )
    agent2 = autogen.GroupChatParticipant(
        "bob",
        max_consecutive_auto_reply=2,
        human_input_mode="NEVER",
        llm_config=False,
        default_auto_reply="This is bob speaking.",
        group_chat_manager=group_chat_manager,
    )
    group_chat_manager.agents = [agent1, agent2]
    agent1.send("start", group_chat_manager)
    # max_round=2 caps the exchange: alice's chat with the manager holds
    # exactly the initial message plus one reply.
    assert len(agent1.chat_messages[group_chat_manager.name]) == 2
    group_chat_manager.reset()
    agent1.reset()
    agent2.reset()
    # After a full reset, a fresh chat can be initiated by the other agent.
    agent2.send("start", group_chat_manager)


if __name__ == "__main__":
    # test_broadcast()
    test_chat_manager()

View File

@@ -4,6 +4,67 @@ import pytest
from flaml.autogen.agentchat import ResponsiveAgent
def test_context():
    """Verify context substitution in messages sent between agents.

    Three sends are exercised: a format-string template without
    ``allow_format_str_template`` (printed verbatim), a callable template
    (always materialized), and the same format-string template after
    enabling ``allow_format_str_template`` on the recipient's llm_config
    (now materialized).
    """
    receiver = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
    sender = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")

    template_msg = {
        "content": "hello {name}",
        "context": {
            "name": "there",
        },
    }
    # Without allow_format_str_template the placeholder is left as-is:
    # expect "hello {name}" to be printed.
    sender.send(template_msg, receiver)

    # A callable content is always evaluated against the context:
    # expect "hello there" to be printed.
    sender.send(
        {
            "content": lambda context: f"hello {context['name']}",
            "context": {
                "name": "there",
            },
        },
        receiver,
    )

    # Opting in via llm_config materializes the format-string template:
    # expect "hello there" to be printed.
    receiver.llm_config = {"allow_format_str_template": True}
    sender.send(template_msg, receiver)
def test_max_consecutive_auto_reply():
    """Check the max-consecutive-auto-reply limit and its counter semantics."""
    replier = ResponsiveAgent("a0", max_consecutive_auto_reply=2, llm_config=False, human_input_mode="NEVER")
    initiator = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")

    # The limit applies both globally and per-sender.
    assert replier.max_consecutive_auto_reply() == replier.max_consecutive_auto_reply(initiator) == 2
    replier.update_max_consecutive_auto_reply(1)
    assert replier.max_consecutive_auto_reply() == replier.max_consecutive_auto_reply(initiator) == 1

    initiator.initiate_chat(replier, message="hello")
    assert replier._consecutive_auto_reply_counter[initiator.name] == 1

    # initiate_chat clears history and resets the counter, so an auto reply
    # is produced again.
    initiator.initiate_chat(replier, message="hello again")
    assert initiator.last_message(replier)["role"] == "user"
    assert len(initiator.chat_messages[replier.name]) == 2
    assert len(replier.chat_messages[initiator.name]) == 2
    assert replier._consecutive_auto_reply_counter[initiator.name] == 1

    # A plain send does not reset the counter, so the limit suppresses the
    # auto reply this time.
    initiator.send(message="bye", recipient=replier)
    assert initiator.last_message(replier)["role"] == "assistant"

    # clear_history=False keeps accumulating onto the existing transcript.
    initiator.initiate_chat(replier, clear_history=False, message="hi")
    assert len(initiator.chat_messages[replier.name]) > 2
    assert len(replier.chat_messages[initiator.name]) > 2
def test_responsive_agent(monkeypatch):
dummy_agent_1 = ResponsiveAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
dummy_agent_2 = ResponsiveAgent(name="dummy_agent_2", human_input_mode="TERMINATE")
@@ -54,4 +115,6 @@ def test_responsive_agent(monkeypatch):
if __name__ == "__main__":
test_responsive_agent(pytest.monkeypatch)
test_context()
# test_max_consecutive_auto_reply()
# test_responsive_agent(pytest.monkeypatch)

View File

@@ -87,6 +87,19 @@ def test_chatcompletion():
prompt="hi",
)
assert "messages" in params
params = autogen.Completion._construct_params(
context={"name": "there"},
config={"model": "unknown"},
prompt="hi {name}",
allow_format_str_template=True,
)
assert params["prompt"] == "hi there"
params = autogen.Completion._construct_params(
context={"name": "there"},
config={"model": "unknown"},
prompt="hi {name}",
)
assert params["prompt"] != "hi there"
def test_multi_model():
@@ -213,6 +226,7 @@ def test_humaneval(num_samples=1):
eval_func=eval_function_completions,
n=1,
prompt="{definition}",
allow_format_str_template=True,
)
response = autogen.Completion.create(context=test_data[0], **config)
# a minimal tuning example for tuning chat completion models using the Completion class
@@ -224,6 +238,7 @@ def test_humaneval(num_samples=1):
n=1,
model="text-davinci-003",
prompt="{definition}",
allow_format_str_template=True,
)
response = autogen.Completion.create(context=test_data[0], **config)
# a minimal tuning example for tuning chat completion models using the ChatCompletion class
@@ -236,6 +251,7 @@ def test_humaneval(num_samples=1):
n=1,
messages=[{"role": "user", "content": "{definition}"}],
config_list=config_list,
allow_format_str_template=True,
)
response = autogen.ChatCompletion.create(context=test_data[0], config_list=config_list, **config)
print(response)
@@ -271,6 +287,7 @@ def test_humaneval(num_samples=1):
],
stop=[["\nclass", "\ndef", "\nif", "\nprint"], None], # the stop sequences
config_list=config_list,
allow_format_str_template=True,
)
print(config2)
print(analysis.best_result)
@@ -411,9 +428,9 @@ if __name__ == "__main__":
openai.api_key = os.environ["OPENAI_API_KEY"]
# test_filter()
# test_chatcompletion()
test_chatcompletion()
# test_multi_model()
# test_improve()
# test_nocontext()
test_humaneval(1)
# test_humaneval(1)
# test_math(1)

View File

@@ -101,6 +101,7 @@ config, analysis = autogen.Completion.tune(
"Complete the following Python function:{definition}",
], # the prompt templates to choose from
stop=[["\nclass", "\ndef", "\nif", "\nprint"], None], # the stop sequences
allow_format_str_template=True,
)
```

View File

@@ -23,7 +23,7 @@ By default, the automatically generated reply is crafted based on automatic code
When `llm_config` is set to a dict, `UserProxyAgent` can generate replies using an LLM when code execution is not performed.
The auto-reply capability of `ResponsiveAgent` allows for more autonomous multi-agent communication while retaining the possibility of human intervention.
One can also easily extend it by overriding the `generate_reply` function of the `UserProxyAgent` to add or modify responses.
One can also easily extend it by registering auto_reply functions with the `register_auto_reply()` method.
Example usage of the agents to solve a task with code:
```python
@@ -144,7 +144,7 @@ user_proxy.initiate_chat(
*Interested in trying it yourself? Please check the following notebook examples:*
* [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)
* [Task Solving with Auto Code Generation, Execution, Debugging and Human Feedback](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_human_feedback.ipynb)
* [Auto Code Generation, Execution, Debugging and Human Feedback](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_human_feedback.ipynb)
* [Solve Tasks Requiring Web Info](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_web_info.ipynb)
@@ -152,7 +152,9 @@ user_proxy.initiate_chat(
* [Automated Task Solving with Coding & Planning Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_planning.ipynb)
* [Automated Task Solving with Multiple Human Users](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_two_users.ipynb)
* [Automated Task Solving with GPT-4 + Multiple Human Users](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_two_users.ipynb)
* [Automated Chess Game Playing & Chitchatting by GPT-4 Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_chess.ipynb)
## Enhanced Inference
@@ -348,6 +350,7 @@ If the provided prompt or message is a template, it will be automatically materi
response = autogen.Completion.create(
context={"problem": "How many positive integers, not exceeding 100, are multiples of 2 or 3 but not 4?"},
prompt="{problem} Solve the problem carefully.",
allow_format_str_template=True,
**config
)
```
@@ -355,7 +358,7 @@ response = autogen.Completion.create(
A template is either a format str, like the example above, or a function which produces a str from several input fields, like the example below.
```python
def content(turn, **context):
def content(turn, context):
return "\n".join(
[
context[f"user_message_{turn}"],