improve validation of llm_config (#1946)

* improve validation of llm_config

* fixed test_register_for_llm_without_LLM

* docstr about llm_config=None

* Make None a sentinel

* pop tools

---------

Co-authored-by: Davor Runje <davor@airt.ai>
This commit is contained in:
Chi Wang
2024-03-11 15:36:00 -07:00
committed by GitHub
parent ce71d85e77
commit 523581825b
7 changed files with 44 additions and 27 deletions

View File

@@ -65,7 +65,7 @@ class ConversableAgent(LLMAgent):
`run_code`, and `execute_function` methods respectively.
"""
DEFAULT_CONFIG = {} # An empty configuration
DEFAULT_CONFIG = False # False or dict, the default config for llm inference
MAX_CONSECUTIVE_AUTO_REPLY = 100 # maximum number of consecutive auto replies (subject to future change)
DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
@@ -123,7 +123,9 @@ class ConversableAgent(LLMAgent):
llm_config (dict or False or None): llm inference configuration.
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
To disable llm-based auto reply, set to False.
When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
@@ -139,21 +141,7 @@ class ConversableAgent(LLMAgent):
else (lambda x: content_str(x.get("content")) == "TERMINATE")
)
if llm_config is False:
self.llm_config = False
self.client = None
else:
self.llm_config = self.DEFAULT_CONFIG.copy()
if isinstance(llm_config, dict):
self.llm_config.update(llm_config)
if "model" not in self.llm_config and (
not self.llm_config.get("config_list")
or any(not config.get("model") for config in self.llm_config["config_list"])
):
raise ValueError(
"Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
)
self.client = OpenAIWrapper(**self.llm_config)
self._validate_llm_config(llm_config)
if logging_enabled():
log_new_agent(self, locals())
@@ -246,6 +234,20 @@ class ConversableAgent(LLMAgent):
"process_message_before_send": [],
}
def _validate_llm_config(self, llm_config):
assert llm_config in (None, False) or isinstance(
llm_config, dict
), "llm_config must be a dict or False or None."
if llm_config is None:
llm_config = self.DEFAULT_CONFIG
self.llm_config = self.DEFAULT_CONFIG if llm_config is None else llm_config
# TODO: more complete validity check
if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
raise ValueError(
"When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
)
self.client = None if self.llm_config is False else OpenAIWrapper(**self.llm_config)
@property
def name(self) -> str:
"""Get the name of the agent."""

View File

@@ -514,7 +514,11 @@ class GroupChatManager(ConversableAgent):
system_message: Optional[Union[str, List]] = "Group chat manager.",
**kwargs,
):
if kwargs.get("llm_config") and (kwargs["llm_config"].get("functions") or kwargs["llm_config"].get("tools")):
if (
kwargs.get("llm_config")
and isinstance(kwargs["llm_config"], dict)
and (kwargs["llm_config"].get("functions") or kwargs["llm_config"].get("tools"))
):
raise ValueError(
"GroupChatManager is not allowed to make function/tool calls. Please remove the 'functions' or 'tools' config in 'llm_config' you passed in."
)

View File

@@ -70,10 +70,11 @@ class UserProxyAgent(ConversableAgent):
- timeout (Optional, int): The maximum execution time in seconds.
- last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm-based reply is generated.
llm_config (dict or False): llm inference configuration.
llm_config (dict or False or None): llm inference configuration.
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
Default to false, which disables llm-based auto reply.
Default to False, which disables llm-based auto reply.
When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
system_message (str or List): system message for ChatCompletion inference.
Only used when llm_config is not False. Use it to reprogram the agent.
description (str): a short description of the agent. This description is used by other agents

View File

@@ -356,6 +356,7 @@ class OpenAIWrapper:
base_config: base config. It can contain both keyword arguments for openai client
and additional kwargs.
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `base_config` or in each config of `config_list`.
"""
if logging_enabled():

View File

@@ -1 +1 @@
__version__ = "0.2.17"
__version__ = "0.2.18"

View File

@@ -240,6 +240,7 @@
"\n",
"llm_config_manager = llm_config.copy()\n",
"llm_config_manager.pop(\"functions\", None)\n",
"llm_config_manager.pop(\"tools\", None)\n",
"\n",
"manager = autogen.GroupChatManager(\n",
" groupchat=groupchat,\n",
@@ -361,8 +362,12 @@
],
"metadata": {
"front_matter": {
"tags": ["code generation", "function call", "async"],
"description": "Learn how to implement both synchronous and asynchronous function calls using AssistantAgent and UserProxyAgent in AutoGen, with examples of their application in individual and group chat settings for task execution with language models."
"description": "Learn how to implement both synchronous and asynchronous function calls using AssistantAgent and UserProxyAgent in AutoGen, with examples of their application in individual and group chat settings for task execution with language models.",
"tags": [
"code generation",
"function call",
"async"
]
},
"kernelspec": {
"display_name": "flaml_dev",

View File

@@ -817,17 +817,21 @@ def test_register_for_llm_without_description():
def test_register_for_llm_without_LLM():
agent = ConversableAgent(name="agent", llm_config=None)
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
AssertionError,
match="To update a tool signature, agent must have an llm_config",
):
ConversableAgent(name="agent", llm_config=None)
@agent.register_for_llm(description="do things.")
def do_stuff(s: str) -> str:
return f"{s} done"
def test_register_for_llm_without_configuration():
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
):
ConversableAgent(name="agent", llm_config={"config_list": []})
@@ -835,7 +839,7 @@ def test_register_for_llm_without_configuration():
def test_register_for_llm_without_model_name():
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
):
ConversableAgent(name="agent", llm_config={"config_list": [{"model": ""}]})