diff --git a/python/docs/src/guides/azure-openai-with-aad-auth.md b/python/docs/src/guides/azure-openai-with-aad-auth.md index 2700e6d20..ebb2f929f 100644 --- a/python/docs/src/guides/azure-openai-with-aad-auth.md +++ b/python/docs/src/guides/azure-openai-with-aad-auth.md @@ -15,7 +15,7 @@ pip install azure-identity ## Using the Model Client ```python -from agnext.components.models import AzureOpenAI +from agnext.components.models import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider # Create the token provider @@ -23,7 +23,7 @@ token_provider = get_bearer_token_provider( DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" ) -client = AzureOpenAI( +client = AzureOpenAIChatCompletionClient( model="{your-azure-deployment}", api_version="2024-02-01", azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/", diff --git a/python/docs/src/guides/group-chat-coder-reviewer.md b/python/docs/src/guides/group-chat-coder-reviewer.md index ce413c666..5b0c39822 100644 --- a/python/docs/src/guides/group-chat-coder-reviewer.md +++ b/python/docs/src/guides/group-chat-coder-reviewer.md @@ -24,7 +24,7 @@ from agnext.chat.agents import ChatCompletionAgent from agnext.chat.memory import BufferedChatMemory from agnext.chat.patterns import GroupChatManager from agnext.chat.types import TextMessage -from agnext.components.models import OpenAI, SystemMessage +from agnext.components.models import OpenAIChatCompletionClient, SystemMessage ``` Next, let's create the runtime: @@ -49,7 +49,7 @@ coder = runtime.register_and_get_proxy( "Work with the reviewer to improve your code." 
) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=BufferedChatMemory(buffer_size=10), ), ) @@ -69,7 +69,7 @@ reviewer = runtime.register_and_get_proxy( "Suggested Changes: " ) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=BufferedChatMemory(buffer_size=10), ), ) diff --git a/python/examples/core/one_agent_direct.py b/python/examples/core/one_agent_direct.py index a17ade106..61ad4706d 100644 --- a/python/examples/core/one_agent_direct.py +++ b/python/examples/core/one_agent_direct.py @@ -5,7 +5,7 @@ from agnext.application import SingleThreadedAgentRuntime from agnext.components import TypeRoutedAgent, message_handler from agnext.components.models import ( ChatCompletionClient, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -34,7 +34,7 @@ class ChatCompletionAgent(TypeRoutedAgent): async def main() -> None: runtime = SingleThreadedAgentRuntime() agent = runtime.register_and_get( - "chat_agent", lambda: ChatCompletionAgent("Chat agent", OpenAI(model="gpt-3.5-turbo")) + "chat_agent", lambda: ChatCompletionAgent("Chat agent", OpenAIChatCompletionClient(model="gpt-3.5-turbo")) ) # Send a message to the agent. 
diff --git a/python/examples/core/two_agents_pub_sub_termination.py b/python/examples/core/two_agents_pub_sub_termination.py index 3db1ed526..31da9136f 100644 --- a/python/examples/core/two_agents_pub_sub_termination.py +++ b/python/examples/core/two_agents_pub_sub_termination.py @@ -8,7 +8,7 @@ from agnext.components.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -90,7 +90,7 @@ async def main() -> None: "Jack", lambda: ChatCompletionAgent( description="Jack a comedian", - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), system_messages=[ SystemMessage("You are a comedian likes to make jokes. " "When you are done talking, say 'TERMINATE'.") ], @@ -101,7 +101,7 @@ async def main() -> None: "Cathy", lambda: ChatCompletionAgent( description="Cathy a poet", - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), system_messages=[ SystemMessage("You are a poet likes to write poems. 
" "When you are done talking, say 'TERMINATE'.") ], diff --git a/python/examples/demos/chat_room.py b/python/examples/demos/chat_room.py index b0ffb13eb..06ce8d1bc 100644 --- a/python/examples/demos/chat_room.py +++ b/python/examples/demos/chat_room.py @@ -11,7 +11,7 @@ from agnext.chat.types import Message, TextMessage from agnext.chat.utils import convert_messages_to_llm_messages from agnext.components import TypeRoutedAgent, message_handler from agnext.components.memory import ChatMemory -from agnext.components.models import ChatCompletionClient, OpenAI, SystemMessage +from agnext.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage from agnext.core import AgentRuntime, CancellationToken sys.path.append(os.path.abspath(os.path.dirname(__file__))) @@ -101,7 +101,7 @@ def chat_room(runtime: AgentRuntime, app: TextualChatApp) -> None: description="Alice in the chat room.", background_story="Alice is a software engineer who loves to code.", memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) bob = runtime.register_and_get_proxy( @@ -111,7 +111,7 @@ def chat_room(runtime: AgentRuntime, app: TextualChatApp) -> None: description="Bob in the chat room.", background_story="Bob is a data scientist who loves to analyze data.", memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) charlie = runtime.register_and_get_proxy( @@ -121,7 +121,7 @@ def chat_room(runtime: AgentRuntime, app: TextualChatApp) -> None: description="Charlie in the chat room.", background_story="Charlie is a designer who loves to create art.", memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) app.welcoming_notice = f"""Welcome to the chat room demo with the 
following participants: diff --git a/python/examples/demos/chess_game.py b/python/examples/demos/chess_game.py index be61a953a..7df8c810a 100644 --- a/python/examples/demos/chess_game.py +++ b/python/examples/demos/chess_game.py @@ -12,7 +12,7 @@ from agnext.chat.agents._chat_completion_agent import ChatCompletionAgent from agnext.chat.memory import BufferedChatMemory from agnext.chat.patterns._group_chat_manager import GroupChatManager from agnext.chat.types import TextMessage -from agnext.components.models import OpenAI, SystemMessage +from agnext.components.models import OpenAIChatCompletionClient, SystemMessage from agnext.components.tools import FunctionTool from agnext.core import AgentRuntime from chess import BLACK, SQUARE_NAMES, WHITE, Board, Move @@ -163,7 +163,7 @@ def chess_game(runtime: AgentRuntime) -> None: # type: ignore ), ], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), tools=black_tools, ), ) @@ -180,7 +180,7 @@ def chess_game(runtime: AgentRuntime) -> None: # type: ignore ), ], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), tools=white_tools, ), ) diff --git a/python/examples/demos/illustrator_critics.py b/python/examples/demos/illustrator_critics.py index 76722a45c..557c0d5f1 100644 --- a/python/examples/demos/illustrator_critics.py +++ b/python/examples/demos/illustrator_critics.py @@ -9,7 +9,7 @@ from agnext.application import SingleThreadedAgentRuntime from agnext.chat.agents import ChatCompletionAgent, ImageGenerationAgent from agnext.chat.memory import BufferedChatMemory from agnext.chat.patterns._group_chat_manager import GroupChatManager -from agnext.components.models import OpenAI, SystemMessage +from agnext.components.models import OpenAIChatCompletionClient, SystemMessage from agnext.core import AgentRuntime 
sys.path.append(os.path.abspath(os.path.dirname(__file__))) @@ -41,7 +41,7 @@ def illustrator_critics(runtime: AgentRuntime, app: TextualChatApp) -> None: ), ], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo", max_tokens=500), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo", max_tokens=500), ), ) illustrator = runtime.register_and_get_proxy( @@ -69,7 +69,7 @@ def illustrator_critics(runtime: AgentRuntime, app: TextualChatApp) -> None: ), ], memory=BufferedChatMemory(buffer_size=2), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) runtime.register( diff --git a/python/examples/demos/software_consultancy.py b/python/examples/demos/software_consultancy.py index 5a23cf4e6..9a973b212 100644 --- a/python/examples/demos/software_consultancy.py +++ b/python/examples/demos/software_consultancy.py @@ -20,7 +20,7 @@ from agnext.application import SingleThreadedAgentRuntime from agnext.chat.agents import ChatCompletionAgent from agnext.chat.memory import HeadAndTailChatMemory from agnext.chat.patterns._group_chat_manager import GroupChatManager -from agnext.components.models import OpenAI, SystemMessage +from agnext.components.models import OpenAIChatCompletionClient, SystemMessage from agnext.components.tools import FunctionTool from agnext.core import AgentRuntime from markdownify import markdownify # type: ignore @@ -126,7 +126,7 @@ def software_consultancy(runtime: AgentRuntime, app: TextualChatApp) -> None: # "Be concise and deliver now." ) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=HeadAndTailChatMemory(head_size=1, tail_size=10), tools=[ FunctionTool( @@ -166,7 +166,7 @@ def software_consultancy(runtime: AgentRuntime, app: TextualChatApp) -> None: # "Be VERY concise." 
) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=HeadAndTailChatMemory(head_size=1, tail_size=10), tools=[ FunctionTool( @@ -194,7 +194,7 @@ def software_consultancy(runtime: AgentRuntime, app: TextualChatApp) -> None: # "Be concise and deliver now." ) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=HeadAndTailChatMemory(head_size=1, tail_size=10), tools=[ FunctionTool( @@ -226,7 +226,7 @@ def software_consultancy(runtime: AgentRuntime, app: TextualChatApp) -> None: # "Be concise and deliver now." ) ], - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), memory=HeadAndTailChatMemory(head_size=1, tail_size=10), tools=[ FunctionTool( @@ -243,7 +243,7 @@ def software_consultancy(runtime: AgentRuntime, app: TextualChatApp) -> None: # lambda: GroupChatManager( description="A group chat manager.", memory=HeadAndTailChatMemory(head_size=1, tail_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), participants=[developer, product_manager, ux_designer, illustrator, user_agent], ), ) diff --git a/python/examples/orchestrator.py b/python/examples/orchestrator.py index ab4272e0f..f3df179fc 100644 --- a/python/examples/orchestrator.py +++ b/python/examples/orchestrator.py @@ -14,7 +14,7 @@ from agnext.chat.agents._oai_assistant import OpenAIAssistantAgent from agnext.chat.memory import BufferedChatMemory from agnext.chat.patterns._orchestrator_chat import OrchestratorChat from agnext.chat.types import TextMessage -from agnext.components.models import OpenAI, SystemMessage +from agnext.components.models import OpenAIChatCompletionClient, SystemMessage from agnext.components.tools import BaseTool from agnext.core import AgentRuntime, CancellationToken from pydantic import BaseModel, Field @@ -57,7 +57,7 @@ def 
software_development(runtime: AgentRuntime) -> OrchestratorChat: # type: ig description="A developer that writes code.", system_messages=[SystemMessage("You are a Python developer.")], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) @@ -88,7 +88,7 @@ def software_development(runtime: AgentRuntime) -> OrchestratorChat: # type: ig SystemMessage("You can use the search tool to find information on the web."), ], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), tools=[SearchTool()], ), ) @@ -99,7 +99,7 @@ def software_development(runtime: AgentRuntime) -> OrchestratorChat: # type: ig description="A planner that organizes and schedules tasks.", system_messages=[SystemMessage("You are a planner of complex tasks.")], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) @@ -111,7 +111,7 @@ def software_development(runtime: AgentRuntime) -> OrchestratorChat: # type: ig SystemMessage("You are an orchestrator that coordinates the team to complete a complex task.") ], memory=BufferedChatMemory(buffer_size=10), - model_client=OpenAI(model="gpt-4-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-4-turbo"), ), ) diff --git a/python/examples/patterns/coder_reviewer_direct.py b/python/examples/patterns/coder_reviewer_direct.py index 0e018a810..9203c7e4c 100644 --- a/python/examples/patterns/coder_reviewer_direct.py +++ b/python/examples/patterns/coder_reviewer_direct.py @@ -10,7 +10,7 @@ from agnext.components.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -197,14 +197,14 @@ async def main() -> None: "ReviewerAgent", lambda: ReviewerAgent( description="Code Reviewer", - 
model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) coder = runtime.register_and_get( "CoderAgent", lambda: CoderAgent( description="Coder", - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), reviewer=reviewer, ), ) diff --git a/python/examples/patterns/coder_reviewer_pub_sub.py b/python/examples/patterns/coder_reviewer_pub_sub.py index a20f0a073..dde558978 100644 --- a/python/examples/patterns/coder_reviewer_pub_sub.py +++ b/python/examples/patterns/coder_reviewer_pub_sub.py @@ -11,7 +11,7 @@ from agnext.components.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -272,14 +272,14 @@ async def main() -> None: "ReviewerAgent", lambda: ReviewerAgent( description="Code Reviewer", - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) runtime.register( "CoderAgent", lambda: CoderAgent( description="Coder", - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) runtime.register( diff --git a/python/examples/patterns/group_chat_pub_sub.py b/python/examples/patterns/group_chat_pub_sub.py index 58666d6c4..cc70026e2 100644 --- a/python/examples/patterns/group_chat_pub_sub.py +++ b/python/examples/patterns/group_chat_pub_sub.py @@ -8,7 +8,7 @@ from agnext.components.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -120,7 +120,7 @@ async def main() -> None: lambda: GroupChatParticipant( description="A data scientist", system_messages=[SystemMessage("You are a data scientist.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) agent2 = runtime.register_and_get( @@ -128,7 +128,7 @@ async 
def main() -> None: lambda: GroupChatParticipant( description="An engineer", system_messages=[SystemMessage("You are an engineer.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) agent3 = runtime.register_and_get( @@ -136,7 +136,7 @@ async def main() -> None: lambda: GroupChatParticipant( description="An artist", system_messages=[SystemMessage("You are an artist.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), ), ) diff --git a/python/examples/patterns/mixture_of_agents_direct.py b/python/examples/patterns/mixture_of_agents_direct.py index b34e76fca..84774fd23 100644 --- a/python/examples/patterns/mixture_of_agents_direct.py +++ b/python/examples/patterns/mixture_of_agents_direct.py @@ -8,7 +8,7 @@ from typing import List from agnext.application import SingleThreadedAgentRuntime from agnext.components import TypeRoutedAgent, message_handler -from agnext.components.models import ChatCompletionClient, OpenAI, SystemMessage, UserMessage +from agnext.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage, UserMessage from agnext.core import AgentId, CancellationToken @@ -94,7 +94,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 1", system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=0.1), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=0.1), ), ) ref2 = runtime.register_and_get( @@ -102,7 +102,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 2", system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=0.5), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=0.5), ), ) ref3 = 
runtime.register_and_get( @@ -110,7 +110,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 3", system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=1.0), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=1.0), ), ) agg = runtime.register_and_get( @@ -122,7 +122,7 @@ async def main() -> None: "...synthesize these responses into a single, high-quality response... Responses from models:" ) ], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), references=[ref1, ref2, ref3], ), ) diff --git a/python/examples/patterns/mixture_of_agents_pub_sub.py b/python/examples/patterns/mixture_of_agents_pub_sub.py index b134929b4..5cebb60eb 100644 --- a/python/examples/patterns/mixture_of_agents_pub_sub.py +++ b/python/examples/patterns/mixture_of_agents_pub_sub.py @@ -8,7 +8,7 @@ from typing import Any, Dict, List from agnext.application import SingleThreadedAgentRuntime from agnext.components import TypeRoutedAgent, message_handler -from agnext.components.models import ChatCompletionClient, OpenAI, SystemMessage, UserMessage +from agnext.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage, UserMessage from agnext.core import AgentId, CancellationToken from agnext.core.intervention import DefaultInterventionHandler @@ -140,7 +140,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 1", system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=0.1), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=0.1), ), ) runtime.register( @@ -148,7 +148,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 2", system_messages=[SystemMessage("You are a helpful assistant 
that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=0.5), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=0.5), ), ) runtime.register( @@ -156,7 +156,7 @@ async def main() -> None: lambda: ReferenceAgent( description="Reference Agent 3", system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")], - model_client=OpenAI(model="gpt-3.5-turbo", temperature=1.0), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo", temperature=1.0), ), ) runtime.register( @@ -168,7 +168,7 @@ async def main() -> None: "...synthesize these responses into a single, high-quality response... Responses from models:" ) ], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), num_references=3, ), ) diff --git a/python/examples/tool-use/coding_one_agent_direct.py b/python/examples/tool-use/coding_one_agent_direct.py index 217d040e1..a0d318a5c 100644 --- a/python/examples/tool-use/coding_one_agent_direct.py +++ b/python/examples/tool-use/coding_one_agent_direct.py @@ -12,7 +12,7 @@ from agnext.components.models import ( FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -118,7 +118,7 @@ async def main() -> None: lambda: ToolEnabledAgent( description="Tool Use Agent", system_messages=[SystemMessage("You are a helpful AI Assistant. 
Use your tools to solve problems.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), tools=tools, ), ) diff --git a/python/examples/tool-use/coding_two_agent_direct.py b/python/examples/tool-use/coding_two_agent_direct.py index 2e9d479c2..1e3dbedf7 100644 --- a/python/examples/tool-use/coding_two_agent_direct.py +++ b/python/examples/tool-use/coding_two_agent_direct.py @@ -12,7 +12,7 @@ from agnext.components.models import ( FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -133,7 +133,7 @@ async def main() -> None: lambda: ToolUserAgent( description="Tool Use Agent", system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), tools=tools, tool_executor=executor, ), diff --git a/python/examples/tool-use/coding_two_agent_pub_sub.py b/python/examples/tool-use/coding_two_agent_pub_sub.py index b399d04e7..2523b3c8f 100644 --- a/python/examples/tool-use/coding_two_agent_pub_sub.py +++ b/python/examples/tool-use/coding_two_agent_pub_sub.py @@ -13,7 +13,7 @@ from agnext.components.models import ( FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, - OpenAI, + OpenAIChatCompletionClient, SystemMessage, UserMessage, ) @@ -210,7 +210,7 @@ async def main() -> None: lambda: ToolUseAgent( description="Tool Use Agent", system_messages=[SystemMessage("You are a helpful AI Assistant. 
Use your tools to solve problems.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), tools=tools, ), ) diff --git a/python/examples/tool-use/custom_function_tool_one_agent_direct.py b/python/examples/tool-use/custom_function_tool_one_agent_direct.py index e49ff4a15..a59b43cba 100644 --- a/python/examples/tool-use/custom_function_tool_one_agent_direct.py +++ b/python/examples/tool-use/custom_function_tool_one_agent_direct.py @@ -5,7 +5,7 @@ import sys from agnext.application import SingleThreadedAgentRuntime from agnext.components.models import ( - OpenAI, + OpenAIChatCompletionClient, SystemMessage, ) from agnext.components.tools import FunctionTool @@ -31,7 +31,7 @@ async def main() -> None: lambda: ToolEnabledAgent( description="Tool Use Agent", system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")], - model_client=OpenAI(model="gpt-3.5-turbo"), + model_client=OpenAIChatCompletionClient(model="gpt-3.5-turbo"), tools=[ # Define a tool that gets the stock price. 
FunctionTool( diff --git a/python/src/agnext/components/models/__init__.py b/python/src/agnext/components/models/__init__.py index 05e149408..a21e4367f 100644 --- a/python/src/agnext/components/models/__init__.py +++ b/python/src/agnext/components/models/__init__.py @@ -1,7 +1,7 @@ from ._model_client import ChatCompletionClient, ModelCapabilities from ._openai_client import ( - AzureOpenAI, - OpenAI, + AzureOpenAIChatCompletionClient, + OpenAIChatCompletionClient, ) from ._types import ( AssistantMessage, @@ -16,8 +16,8 @@ from ._types import ( ) __all__ = [ - "AzureOpenAI", - "OpenAI", + "AzureOpenAIChatCompletionClient", + "OpenAIChatCompletionClient", "ModelCapabilities", "ChatCompletionClient", "SystemMessage", diff --git a/python/src/agnext/components/models/_openai_client.py b/python/src/agnext/components/models/_openai_client.py index 34cd5e933..7d74ca4bd 100644 --- a/python/src/agnext/components/models/_openai_client.py +++ b/python/src/agnext/components/models/_openai_client.py @@ -74,7 +74,7 @@ def _azure_openai_client_from_config(config: Mapping[str, Any]) -> AsyncAzureOpe copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") copied_config["azure_endpoint"] = copied_config.get("azure_endpoint", copied_config.pop("base_url", None)) - # Shave down the config to just the AzureOpenAI kwargs + # Shave down the config to just the AzureOpenAIChatCompletionClient kwargs azure_config = {k: v for k, v in copied_config.items() if k in aopenai_init_kwargs} return AsyncAzureOpenAI(**azure_config) @@ -249,7 +249,7 @@ def assert_valid_name(name: str) -> str: return name -class BaseOpenAI(ChatCompletionClient): +class BaseOpenAIChatCompletionClient(ChatCompletionClient): def __init__( self, client: Union[AsyncOpenAI, AsyncAzureOpenAI], @@ -258,7 +258,7 @@ class BaseOpenAI(ChatCompletionClient): ): self._client = client if model_capabilities is None and isinstance(client, AsyncAzureOpenAI): - raise ValueError("AzureOpenAI requires 
explicit model capabilities") + raise ValueError("AzureOpenAIChatCompletionClient requires explicit model capabilities") elif model_capabilities is None: self._model_capabilities = _model_info.get_capabilties(create_args["model"]) else: @@ -281,7 +281,7 @@ class BaseOpenAI(ChatCompletionClient): @classmethod def create_from_config(cls, config: Dict[str, Any]) -> ChatCompletionClient: - return OpenAI(**config) + return OpenAIChatCompletionClient(**config) async def create( self, @@ -517,10 +517,10 @@ class BaseOpenAI(ChatCompletionClient): return self._model_capabilities -class OpenAI(BaseOpenAI): +class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): def __init__(self, **kwargs: Unpack[OpenAIClientConfiguration]): if "model" not in kwargs: - raise ValueError("model is required for OpenAI") + raise ValueError("model is required for OpenAIChatCompletionClient") model_capabilities: Optional[ModelCapabilities] = None copied_args = dict(kwargs).copy() @@ -543,10 +543,10 @@ class OpenAI(BaseOpenAI): self._client = _openai_client_from_config(state["_raw_config"]) -class AzureOpenAI(BaseOpenAI): +class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): if "model" not in kwargs: - raise ValueError("model is required for OpenAI") + raise ValueError("model is required for AzureOpenAIChatCompletionClient") model_capabilities: Optional[ModelCapabilities] = None copied_args = dict(kwargs).copy() diff --git a/python/src/agnext/core/_agent_runtime.py b/python/src/agnext/core/_agent_runtime.py index 0236878ae..5b423086c 100644 --- a/python/src/agnext/core/_agent_runtime.py +++ b/python/src/agnext/core/_agent_runtime.py @@ -73,7 +73,7 @@ class AgentRuntime(Protocol): lambda: ChatCompletionAgent( description="A generic chat agent.", system_messages=[SystemMessage("You are a helpful assistant")], - model_client=OpenAI(model="gpt-4o"), + model_client=OpenAIChatCompletionClient(model="gpt-4o"),
memory=BufferedChatMemory(buffer_size=10), ), )