Mirror of https://github.com/microsoft/autogen.git (synced 2026-02-06 15:44:57 -05:00)
return None instead of tuple in _generate_oai_reply_from_client (#1644)
* return None instead of tuple
* return final=False if extracted response is None
@@ -1108,9 +1108,9 @@ class ConversableAgent(LLMAgent):
         extracted_response = self._generate_oai_reply_from_client(
             client, self._oai_system_message + messages, self.client_cache
         )
-        return True, extracted_response
+        return (False, None) if extracted_response is None else (True, extracted_response)

-    def _generate_oai_reply_from_client(self, llm_client, messages, cache):
+    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
         # unroll tool_responses
         all_messages = []
         for message in messages:
@@ -1132,8 +1132,8 @@ class ConversableAgent(LLMAgent):
         extracted_response = llm_client.extract_text_or_completion_object(response)[0]

         if extracted_response is None:
-            warnings.warn("Extracted_response is None.", UserWarning)
-            return False, None
+            warnings.warn("Extracted_response from {response} is None.", UserWarning)
+            return None
         # ensure function and tool calls will be accepted when sent back to the LLM
         if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
             extracted_response = model_dump(extracted_response)
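The net effect of the two hunks above: the helper now signals failure with a bare None instead of a (False, None) tuple, and the caller converts that into the (final, reply) pair the rest of the agent loop expects. A minimal sketch of the new contract, with fake_extract standing in for the LLM client call (it is illustrative, not part of the patch):

from typing import Dict, Tuple, Union

def fake_extract(reply_ok: bool) -> Union[str, Dict, None]:
    # Illustrative stand-in for llm_client.extract_text_or_completion_object(response)[0]
    return "hello" if reply_ok else None

def generate_reply(reply_ok: bool) -> Tuple[bool, Union[str, Dict, None]]:
    extracted_response = fake_extract(reply_ok)
    # Same caller-side pattern as the patched generate_oai_reply: only mark
    # the reply as final when the helper actually produced one.
    return (False, None) if extracted_response is None else (True, extracted_response)

print(generate_reply(True))   # (True, 'hello')
print(generate_reply(False))  # (False, None)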
@@ -68,18 +68,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
-        },
+        filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
     )
     llm_config = {
         "cache_seed": 42,
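The test now selects configurations by tag instead of enumerating every model-name spelling. This assumes the entries in OAI_CONFIG_LIST carry a "tags" field; a sketch of the pattern (the file contents shown in the comment are illustrative):

import autogen

# Illustrative OAI_CONFIG_LIST contents; only the first entry carries a
# matching tag, so only it survives the filter below.
# [
#   {"model": "gpt-35-turbo", "api_key": "...", "tags": ["gpt-3.5-turbo"]},
#   {"model": "gpt-4", "api_key": "..."}
# ]
config_list = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
)

An entry matches when at least one of its tags appears in the requested list, so a single tag can stand in for the OpenAI and Azure model-name variants the old filter listed individually.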
@@ -206,8 +195,8 @@ def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):


 if __name__ == "__main__":
-    # test_gpt35()
-    test_create_execute_script(human_input_mode="TERMINATE")
+    test_gpt35()
+    # test_create_execute_script(human_input_mode="TERMINATE")
     # when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
     # should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
     # although the max_consecutive_auto_reply is set to 10.
@@ -6,6 +6,7 @@ import pytest
 import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent
 from autogen.cache import Cache
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST, here

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 from conftest import skip_openai, skip_redis  # noqa: E402
@@ -107,23 +108,11 @@ def test_disk_cache():


 def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_reply=5, cache=None):
-    KEY_LOC = "notebook"
-    OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
-    here = os.path.abspath(os.path.dirname(__file__))
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
@@ -159,7 +148,7 @@ def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_

     # track how long this takes
     user.initiate_chat(assistant, message=coding_task, cache=cache)
-    return user.chat_messages[list(user.chat_messages.keys())[-0]]
+    return user.chat_messages[assistant]


 def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_auto_reply=5):
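The old return line used list(user.chat_messages.keys())[-0], but -0 is just 0 in Python, so it silently picked the first key in the dict rather than the intended conversation. Since chat_messages is keyed by the counterpart agent, indexing with assistant directly is unambiguous. A tiny illustration of the pitfall (the list contents are stand-ins for agent keys):

keys = ["assistant", "critic"]  # stand-ins for the agent keys in chat_messages
assert keys[-0] == keys[0] == "assistant"  # -0 == 0: this is NOT "last element"
assert keys[-1] == "critic"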
@@ -170,16 +159,7 @@ def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
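Both cache-test helpers above exercise the cache= argument to initiate_chat together with a Cache backend from autogen.cache, both of which appear in the diff. A condensed sketch of that pattern, assuming a working OAI_CONFIG_LIST; the prompt and agent names are illustrative:

import autogen
from autogen.cache import Cache

config_list = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
)
assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
user = autogen.UserProxyAgent(
    "user",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
    code_execution_config=False,
)

with Cache.disk(cache_seed=49) as cache:
    # Re-running with the same seed and prompt should be served from the
    # on-disk cache instead of a fresh LLM call.
    user.initiate_chat(assistant, message="What is 2 + 2?", cache=cache)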
@@ -233,7 +233,7 @@ def test_chat_tools_stream() -> None:
     config_list = config_list_from_json(
         env_or_file=OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
+        filter_dict={"tags": ["multitool"]},
     )
     tools = [
         {
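The tools list that follows is truncated in the hunk; it holds OpenAI-style function-calling schemas. A generic example of the shape such an entry takes (the weather tool is illustrative, not the one used by test_chat_tools_stream):

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    },
]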