From 253fe216fdaf13e65a8702a6df546733cd389d9b Mon Sep 17 00:00:00 2001 From: Leonardo Pinheiro Date: Tue, 10 Dec 2024 13:18:09 +1000 Subject: [PATCH] Add models.openai and tools.langchain namespaces (#4601) * add models.openai namespace * refactor tools namespace * update lock file * revert pyproject changes * update docs and add cast * update ext models doc ref * increase underline * add reply models namespace * update imports * fix test * linting * fix missing conflicts * revert pydantic changes * rename to replay * replay * fix reply * Fix test * formatting * example --------- Co-authored-by: Leonardo Pinheiro Co-authored-by: Jack Gerrits Co-authored-by: Jack Gerrits --- .../agents/_assistant_agent.py | 6 +- .../agents/_society_of_mind_agent.py | 2 +- .../src/autogen_agentchat/base/_handoff.py | 12 +- .../teams/_group_chat/_base_group_chat.py | 10 +- .../_magentic_one/_magentic_one_group_chat.py | 2 +- .../_group_chat/_round_robin_group_chat.py | 4 +- .../teams/_group_chat/_selector_group_chat.py | 4 +- .../teams/_group_chat/_swarm_group_chat.py | 4 +- .../tests/test_assistant_agent.py | 2 +- .../tests/test_group_chat.py | 3 +- .../tests/test_magentic_one_group_chat.py | 2 +- .../tests/test_society_of_mind_agent.py | 2 +- .../autogen-core/docs/src/reference/index.md | 5 +- .../python/autogen_ext.models.openai.rst | 8 + .../python/autogen_ext.models.replay.rst | 8 + .../reference/python/autogen_ext.models.rst | 8 - ...ls.rst => autogen_ext.tools.langchain.rst} | 0 .../examples/company-research.ipynb | 2 +- .../examples/literature-review.ipynb | 2 +- .../examples/travel-planning.ipynb | 2 +- .../agentchat-user-guide/quickstart.ipynb | 2 +- .../tutorial/agents.ipynb | 2 +- .../tutorial/models.ipynb | 4 +- .../tutorial/selector-group-chat.ipynb | 2 +- .../agentchat-user-guide/tutorial/state.ipynb | 2 +- .../agentchat-user-guide/tutorial/swarm.ipynb | 2 +- .../agentchat-user-guide/tutorial/teams.ipynb | 6 +- .../tutorial/termination.ipynb | 2 +- .../cookbook/azure-openai-with-aad-auth.md | 2 +- .../cookbook/local-llms-ollama-litellm.ipynb | 2 +- .../cookbook/structured-output-agent.ipynb | 2 +- .../cookbook/tool-use-with-intervention.ipynb | 2 +- .../design-patterns/group-chat.ipynb | 2 +- .../design-patterns/handoffs.ipynb | 4 +- .../design-patterns/mixture-of-agents.ipynb | 1034 ++++++++--------- .../design-patterns/multi-agent-debate.ipynb | 2 +- .../design-patterns/reflection.ipynb | 2 +- .../design-patterns/sequential-workflow.ipynb | 2 +- .../src/user-guide/core-user-guide/faqs.md | 2 +- .../framework/model-clients.ipynb | 6 +- .../core-user-guide/framework/tools.ipynb | 2 +- .../core-user-guide/quickstart.ipynb | 2 +- .../autogen-core/samples/common/utils.py | 2 +- .../samples/distributed-group-chat/_types.py | 2 +- .../samples/distributed-group-chat/_utils.py | 2 +- .../run_editor_agent.py | 2 +- .../run_group_chat_manager.py | 2 +- .../run_writer_agent.py | 2 +- .../agents/video_surfer/_video_surfer.py | 4 +- .../models/{ => openai}/__init__.py | 6 +- .../models/{_openai => openai}/_model_info.py | 0 .../{_openai => openai}/_openai_client.py | 15 +- .../{_openai => openai}/config/__init__.py | 0 .../src/autogen_ext/models/replay/__init__.py | 5 + .../_replay_chat_completion_client.py} | 6 +- .../tools/{ => langchain}/__init__.py | 0 .../{ => langchain}/_langchain_adapter.py | 0 .../tests/models/test_openai_model_client.py | 10 +- .../test_reply_chat_completion_client.py | 8 +- .../packages/autogen-ext/tests/test_tools.py | 7 +- .../src/autogen_magentic_one/utils.py | 2 +- 
.../database/component_factory.py | 2 +- .../autogen-studio/notebooks/tutorial.ipynb | 2 +- 63 files changed, 634 insertions(+), 621 deletions(-) create mode 100644 python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst create mode 100644 python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.replay.rst delete mode 100644 python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.rst rename python/packages/autogen-core/docs/src/reference/python/{autogen_ext.tools.rst => autogen_ext.tools.langchain.rst} (100%) rename python/packages/autogen-ext/src/autogen_ext/models/{ => openai}/__init__.py (50%) rename python/packages/autogen-ext/src/autogen_ext/models/{_openai => openai}/_model_info.py (100%) rename python/packages/autogen-ext/src/autogen_ext/models/{_openai => openai}/_openai_client.py (99%) rename python/packages/autogen-ext/src/autogen_ext/models/{_openai => openai}/config/__init__.py (100%) create mode 100644 python/packages/autogen-ext/src/autogen_ext/models/replay/__init__.py rename python/packages/autogen-ext/src/autogen_ext/models/{_reply_chat_completion_client.py => replay/_replay_chat_completion_client.py} (97%) rename python/packages/autogen-ext/src/autogen_ext/tools/{ => langchain}/__init__.py (100%) rename python/packages/autogen-ext/src/autogen_ext/tools/{ => langchain}/_langchain_adapter.py (100%) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 9450d4512..74a80ac7b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -121,7 +121,7 @@ class AssistantAgent(BaseChatAgent): import asyncio from autogen_core import CancellationToken - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.messages import TextMessage @@ -149,7 +149,7 @@ class AssistantAgent(BaseChatAgent): .. 
code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.messages import TextMessage from autogen_agentchat.ui import Console @@ -183,7 +183,7 @@ class AssistantAgent(BaseChatAgent): import asyncio from autogen_core import CancellationToken - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.messages import TextMessage diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index 26593c1eb..9074760c5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -40,7 +40,7 @@ class SocietyOfMindAgent(BaseChatAgent): import asyncio from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.teams import RoundRobinGroupChat from autogen_agentchat.conditions import MaxMessageTermination diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_handoff.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_handoff.py index 33efc5677..91b78a759 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_handoff.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_handoff.py @@ -15,23 +15,23 @@ class Handoff(BaseModel): target: str """The name of the target agent to handoff to.""" - description: str = Field(default=None) + description: str = Field(default="") """The description of the handoff such as the condition under which it should happen and the target agent's ability. If not provided, it is generated from the target agent's name.""" - name: str = Field(default=None) + name: str = Field(default="") """The name of this handoff configuration. If not provided, it is generated from the target agent's name.""" - message: str = Field(default=None) + message: str = Field(default="") """The message to the target agent. If not provided, it is generated from the target agent's name.""" @model_validator(mode="before") @classmethod def set_defaults(cls, values: Dict[str, Any]) -> Dict[str, Any]: - if values.get("description") is None: + if not values.get("description"): values["description"] = f"Handoff to {values['target']}." - if values.get("name") is None: + if not values.get("name"): values["name"] = f"transfer_to_{values['target']}".lower() else: name = values["name"] @@ -40,7 +40,7 @@ class Handoff(BaseModel): # Check if name is a valid identifier. if not name.isidentifier(): raise ValueError(f"Handoff name must be a valid identifier: {values['name']}") - if values.get("message") is None: + if not values.get("message"): values["message"] = ( f"Transferred to {values['target']}, adopting the role of {values['target']} immediately." 
) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index 2c38f8449..fe405d52e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -188,7 +188,7 @@ class BaseGroupChat(Team, ABC): from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: @@ -219,7 +219,7 @@ class BaseGroupChat(Team, ABC): from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.teams import RoundRobinGroupChat from autogen_core import CancellationToken - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: @@ -286,7 +286,7 @@ class BaseGroupChat(Team, ABC): from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: @@ -320,7 +320,7 @@ class BaseGroupChat(Team, ABC): from autogen_agentchat.ui import Console from autogen_agentchat.teams import RoundRobinGroupChat from autogen_core import CancellationToken - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: @@ -437,7 +437,7 @@ class BaseGroupChat(Team, ABC): from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py index c98f04334..fbd336ae0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py @@ -38,7 +38,7 @@ class MagenticOneGroupChat(BaseGroupChat): .. 
code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import MagenticOneGroupChat from autogen_agentchat.ui import Console diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 9ac06ac0b..8330c89a3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -83,7 +83,7 @@ class RoundRobinGroupChat(BaseGroupChat): .. code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import RoundRobinGroupChat from autogen_agentchat.conditions import TextMentionTermination @@ -113,7 +113,7 @@ class RoundRobinGroupChat(BaseGroupChat): .. code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import RoundRobinGroupChat from autogen_agentchat.conditions import TextMentionTermination diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 13deaa88d..a42b6fc2b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -219,7 +219,7 @@ class SelectorGroupChat(BaseGroupChat): .. code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination @@ -273,7 +273,7 @@ class SelectorGroupChat(BaseGroupChat): import asyncio from typing import Sequence - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index 10574e0a9..a2659636f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -108,7 +108,7 @@ class Swarm(BaseGroupChat): .. 
code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import Swarm from autogen_agentchat.conditions import MaxMessageTermination @@ -143,7 +143,7 @@ class Swarm(BaseGroupChat): .. code-block:: python import asyncio - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import Swarm from autogen_agentchat.conditions import HandoffTermination, MaxMessageTermination diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index d5673b0a9..8f0b2d00c 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -16,7 +16,7 @@ from autogen_agentchat.messages import ( ) from autogen_core import Image from autogen_core.tools import FunctionTool -from autogen_ext.models import OpenAIChatCompletionClient +from autogen_ext.models.openai import OpenAIChatCompletionClient from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index c94ef3191..c64fcbe32 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -35,7 +35,8 @@ from autogen_agentchat.ui import Console from autogen_core import AgentId, CancellationToken from autogen_core.tools import FunctionTool from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor -from autogen_ext.models import OpenAIChatCompletionClient, ReplayChatCompletionClient +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_ext.models.replay import ReplayChatCompletionClient from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py index 2aa2ba617..b6e24c6f5 100644 --- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py @@ -18,7 +18,7 @@ from autogen_agentchat.teams import ( ) from autogen_agentchat.teams._group_chat._magentic_one._magentic_one_orchestrator import MagenticOneOrchestrator from autogen_core import AgentId, CancellationToken -from autogen_ext.models import ReplayChatCompletionClient +from autogen_ext.models.replay import ReplayChatCompletionClient from utils import FileLogHandler logger = logging.getLogger(EVENT_LOGGER_NAME) diff --git a/python/packages/autogen-agentchat/tests/test_society_of_mind_agent.py b/python/packages/autogen-agentchat/tests/test_society_of_mind_agent.py index a62d6c1db..ec4bf08b1 100644 --- a/python/packages/autogen-agentchat/tests/test_society_of_mind_agent.py +++ 
b/python/packages/autogen-agentchat/tests/test_society_of_mind_agent.py @@ -5,7 +5,7 @@ import pytest from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.teams import RoundRobinGroupChat -from autogen_ext.models import OpenAIChatCompletionClient +from autogen_ext.models.openai import OpenAIChatCompletionClient from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk diff --git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md index 6441e86ef..905521deb 100644 --- a/python/packages/autogen-core/docs/src/reference/index.md +++ b/python/packages/autogen-core/docs/src/reference/index.md @@ -45,8 +45,9 @@ python/autogen_ext.agents.web_surfer python/autogen_ext.agents.file_surfer python/autogen_ext.agents.video_surfer python/autogen_ext.agents.video_surfer.tools -python/autogen_ext.models -python/autogen_ext.tools +python/autogen_ext.models.openai +python/autogen_ext.models.replay +python/autogen_ext.tools.langchain python/autogen_ext.code_executors.local python/autogen_ext.code_executors.docker python/autogen_ext.code_executors.azure diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst new file mode 100644 index 000000000..44703cb70 --- /dev/null +++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst @@ -0,0 +1,8 @@ +autogen\_ext.models.openai +========================== + + +.. automodule:: autogen_ext.models.openai + :members: + :undoc-members: + :show-inheritance: diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.replay.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.replay.rst new file mode 100644 index 000000000..a3630970f --- /dev/null +++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.replay.rst @@ -0,0 +1,8 @@ +autogen\_ext.models.replay +========================== + + +.. automodule:: autogen_ext.models.replay + :members: + :undoc-members: + :show-inheritance: diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.rst deleted file mode 100644 index 3025c28dc..000000000 --- a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.rst +++ /dev/null @@ -1,8 +0,0 @@ -autogen\_ext.models -=================== - - -.. 
automodule:: autogen_ext.models - :members: - :undoc-members: - :show-inheritance: diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.langchain.rst similarity index 100% rename from python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.rst rename to python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.langchain.rst diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb index 44d1b089a..a9959ccc1 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb @@ -27,7 +27,7 @@ "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", "from autogen_core.tools import FunctionTool\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb index 4613b6dc2..c4d22fa6e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb @@ -27,7 +27,7 @@ "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", "from autogen_core.tools import FunctionTool\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/travel-planning.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/travel-planning.ipynb index b802127c4..beb4ceac6 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/travel-planning.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/travel-planning.ipynb @@ -21,7 +21,7 @@ "from autogen_agentchat.conditions import TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb index 907dfed8f..f16c7f263 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb @@ -76,7 +76,7 @@ "from autogen_agentchat.conditions import TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import 
OpenAIChatCompletionClient\n", "\n", "\n", "# Define a tool\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index c4f78fc05..90f3f169c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -33,7 +33,7 @@ "from autogen_agentchat.agents import AssistantAgent\n", "from autogen_agentchat.messages import TextMessage\n", "from autogen_core import CancellationToken\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "\n", "# Define a tool that searches the web for information.\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index efc3b6969..917b87799 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "opneai_model_client = OpenAIChatCompletionClient(\n", " model=\"gpt-4o-2024-08-06\",\n", @@ -128,7 +128,7 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n", "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "\n", "# Create the token provider\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb index fb71cdc2a..c51b2c7a8 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb @@ -67,7 +67,7 @@ "from autogen_agentchat.messages import AgentMessage\n", "from autogen_agentchat.teams import SelectorGroupChat\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb index e09c255f5..c7bff2cc6 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb @@ -39,7 +39,7 @@ "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", "from autogen_core import CancellationToken\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "assistant_agent = AssistantAgent(\n", " name=\"assistant_agent\",\n", diff 
--git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb index 480ca60d8..b0feb00d0 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb @@ -100,7 +100,7 @@ "from autogen_agentchat.messages import HandoffMessage\n", "from autogen_agentchat.teams import Swarm\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index 97d09cda2..3f8ece4bf 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -33,7 +33,7 @@ "from autogen_agentchat.agents import AssistantAgent\n", "from autogen_agentchat.conditions import TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "# Create an OpenAI model client.\n", "model_client = OpenAIChatCompletionClient(\n", @@ -260,7 +260,7 @@ "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "# Create an OpenAI model client.\n", "model_client = OpenAIChatCompletionClient(\n", @@ -633,7 +633,7 @@ "from autogen_agentchat.base import Handoff\n", "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "# Create an OpenAI model client.\n", "model_client = OpenAIChatCompletionClient(\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index 45fd79046..8646ed38a 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -58,7 +58,7 @@ "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "model_client = OpenAIChatCompletionClient(\n", " model=\"gpt-4o\",\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md 
b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md index b347ed7de..4ad6d35a3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md @@ -15,7 +15,7 @@ pip install azure-identity ## Using the Model Client ```python -from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider # Create the token provider diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb index aef515cf4..7e236d100 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb @@ -55,7 +55,7 @@ " SystemMessage,\n", " UserMessage,\n", ")\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb index 159cea6e1..2a6fcc432 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb @@ -66,7 +66,7 @@ "from typing import Optional\n", "\n", "from autogen_core.models import UserMessage\n", - "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n", "\n", "\n", "# Function to get environment variable and ensure it is not None\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb index 4a8d8d494..27f9bfc6a 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb @@ -38,7 +38,7 @@ "from autogen_core.tool_agent import ToolAgent, ToolException, tool_agent_caller_loop\n", "from autogen_core.tools import PythonCodeExecutionTool, ToolSchema\n", "from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb index 6e8169881..b418025f5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb @@ -91,7 +91,7 @@ " 
UserMessage,\n", ")\n", "from autogen_core.tools import FunctionTool\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "from IPython.display import display # type: ignore\n", "from pydantic import BaseModel\n", "from rich.console import Console\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb index b40443cee..31885a10e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb @@ -75,7 +75,7 @@ " UserMessage,\n", ")\n", "from autogen_core.tools import FunctionTool, Tool\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "from pydantic import BaseModel" ] }, @@ -296,7 +296,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb index 887e151ae..68b7a43a9 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb @@ -1,519 +1,519 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Mixture of Agents\n", - "\n", - "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", - "that models after the feed-forward neural network architecture.\n", - "\n", - "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", - "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", - "Messages from the worker agents in a previous layer are concatenated and sent to\n", - "all the worker agents in the next layer.\n", - "\n", - "This example implements the Mixture of Agents pattern using the core library\n", - "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", - "\n", - "Here is a high-level procedure overview of the pattern:\n", - "1. The orchestrator agent takes input a user task and first dispatches it to the worker agents in the first layer.\n", - "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", - "3. The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", - "4. The process continues until the final layer is reached.\n", - "5. In the final layer, the orchestrator agent aggregates the results from previous layer and returns a single final result to the user.\n", - "\n", - "We use the direct messaging API {py:meth}`~autogen_core.BaseAgent.send_message` to implement this pattern.\n", - "This makes it easier to add more features like worker task cancellation and error handling in the future." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "from dataclasses import dataclass\n", - "from typing import List\n", - "\n", - "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", - "from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Protocol\n", - "\n", - "The agents communicate using the following messages:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class WorkerTask:\n", - " task: str\n", - " previous_results: List[str]\n", - "\n", - "\n", - "@dataclass\n", - "class WorkerTaskResult:\n", - " result: str\n", - "\n", - "\n", - "@dataclass\n", - "class UserTask:\n", - " task: str\n", - "\n", - "\n", - "@dataclass\n", - "class FinalResult:\n", - " result: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Worker Agent\n", - "\n", - "Each worker agent receives a task from the orchestrator agent and processes them\n", - "indepedently.\n", - "Once the task is completed, the worker agent returns the result." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "class WorkerAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " ) -> None:\n", - " super().__init__(description=\"Worker Agent\")\n", - " self._model_client = model_client\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", - " if message.previous_results:\n", - " # If previous results are provided, we need to synthesize them to create a single prompt.\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " else:\n", - " # If no previous results are provided, we can simply pass the user query to the model.\n", - " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", - " assert isinstance(model_result.content, str)\n", - " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", - " return WorkerTaskResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Orchestrator Agent\n", - "\n", - "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", - "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", - "the orchestrator agent aggregates the results and publishes the final result." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "class OrchestratorAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " worker_agent_types: List[str],\n", - " num_layers: int,\n", - " ) -> None:\n", - " super().__init__(description=\"Aggregator Agent\")\n", - " self._model_client = model_client\n", - " self._worker_agent_types = worker_agent_types\n", - " self._num_layers = num_layers\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", - " # Create task for the first layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[])\n", - " # Iterate over layers.\n", - " for i in range(self._num_layers - 1):\n", - " # Assign workers for this layer.\n", - " worker_ids = [\n", - " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", - " for j, worker_type in enumerate(self._worker_agent_types)\n", - " ]\n", - " # Dispatch tasks to workers.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", - " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", - " # Prepare task for the next layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", - " # Perform final aggregation.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " assert isinstance(model_result.content, str)\n", - " return FinalResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running Mixture of Agents\n", - "\n", - "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "task = (\n", - " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", - "We only need to register a single worker agent types, \"worker\", because we are using\n", - "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", - "If you want to use different models, you will need to register multiple worker agent types,\n", - "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", - "factory function.\n", - "\n", - "The instances of worker agents are automatically created when the orchestrator agent\n", - "dispatches tasks to them.\n", - "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", - "for more information on agent lifecycle." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_1:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", - "\n", - "Add the parts together:\n", - "\\[ 3 + 4 + 2 = 9 \\]\n", - "\n", - "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", - "\n", - "Now, multiply the value of one part by the number of parts for each person:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[ 3 \\times 48 = 144 \\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[ 4 \\times 48 = 192 \\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[ 2 \\times 48 = 96 \\]\n", - "\n", - "Thus, the number of cookies each person gets is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_0:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", - "\n", - "\\[\n", - "3 + 4 + 2 = 9 \\text{ parts}\n", - "\\]\n", - "\n", - "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\n", - "\\[\n", - "\\text{Value of one part} = \\frac{432}{9} = 48\n", - "\\]\n", - "\n", - "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[\n", - "3 \\times 48 = 144 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[\n", - "4 \\times 48 = 192 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[\n", - "2 \\times 48 = 96 \\text{ cookies}\n", - "\\]\n", - "\n", - "Thus, the number of cookies each person gets is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_2:\n", - "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. 
\n", - "\n", - "The total parts are:\n", - "- Alice: 3 parts\n", - "- Bob: 4 parts\n", - "- Charlie: 2 parts\n", - "\n", - "Adding these parts together gives:\n", - "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", - "\n", - "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", - "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", - "\n", - "Now we can calculate the number of cookies for each person:\n", - "- Alice's share: \n", - "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", - "- Bob's share: \n", - "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", - "- Charlie's share: \n", - "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", - "\n", - "So, the final distribution of cookies is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_2:\n", - "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", - "\n", - "1. **Determine the total number of parts in the ratio**:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48\n", - " \\]\n", - "\n", - "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", - " - **For Alice (3 parts)**:\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **For Bob (4 parts)**:\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **For Charlie (2 parts)**:\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "Thus, the final distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_0:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", - "\n", - "1. **Calculate the Total Parts**: \n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part**: \n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. 
**Calculate Each Person's Share**:\n", - " - **Alice's Share** (3 parts):\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share** (4 parts):\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie's Share** (2 parts):\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Final Distribution**:\n", - " - Alice: 144 cookies\n", - " - Bob: 192 cookies\n", - " - Charlie: 96 cookies\n", - "\n", - "Thus, the distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_1:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", - "\n", - "1. **Calculate Total Parts:**\n", - " \\[\n", - " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. **Calculate the Share for Each Person:**\n", - " - **Alice's Share (3 parts):**\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share (4 parts):**\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie’s Share (2 parts):**\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Summary of the Distribution:**\n", - " - **Alice:** 144 cookies\n", - " - **Bob:** 192 cookies\n", - " - **Charlie:** 96 cookies\n", - "\n", - "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Performing final aggregation\n", - "--------------------------------------------------------------------------------\n", - "Final result:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", - "\n", - "1. **Calculate the Total Parts in the Ratio:**\n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. 
**Calculate Each Person's Share:**\n", - " - **Alice's Share (3 parts):**\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share (4 parts):**\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie's Share (2 parts):**\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "Therefore, the distribution of cookies is as follows:\n", - "- **Alice:** 144 cookies\n", - "- **Bob:** 192 cookies\n", - "- **Charlie:** 96 cookies\n", - "\n", - "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await WorkerAgent.register(\n", - " runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", - ")\n", - "await OrchestratorAgent.register(\n", - " runtime,\n", - " \"orchestrator\",\n", - " lambda: OrchestratorAgent(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", - " ),\n", - ")\n", - "\n", - "runtime.start()\n", - "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", - "await runtime.stop_when_idle()\n", - "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mixture of Agents\n", + "\n", + "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", + "modeled after the feed-forward neural network architecture.\n", + "\n", + "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", + "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", + "Messages from the worker agents in a previous layer are concatenated and sent to\n", + "all the worker agents in the next layer.\n", + "\n", + "This example implements the Mixture of Agents pattern using the core library\n", + "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", + "\n", + "Here is a high-level overview of the pattern's procedure:\n", + "1. The orchestrator agent takes a user task as input and first dispatches it to the worker agents in the first layer.\n", + "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", + "3. The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", + "4. The process continues until the final layer is reached.\n", + "5. In the final layer, the orchestrator agent aggregates the results from the previous layer and returns a single final result to the user.\n", + "\n", + "We use the direct messaging API {py:meth}`~autogen_core.BaseAgent.send_message` to implement this pattern.\n", + "This makes it easier to add more features like worker task cancellation and error handling in the future."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Protocol\n", + "\n", + "The agents communicate using the following messages:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class WorkerTask:\n", + "    task: str\n", + "    previous_results: List[str]\n", + "\n", + "\n", + "@dataclass\n", + "class WorkerTaskResult:\n", + "    result: str\n", + "\n", + "\n", + "@dataclass\n", + "class UserTask:\n", + "    task: str\n", + "\n", + "\n", + "@dataclass\n", + "class FinalResult:\n", + "    result: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Worker Agent\n", + "\n", + "Each worker agent receives a task from the orchestrator agent and processes it\n", + "independently.\n", + "Once the task is completed, the worker agent returns the result." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "class WorkerAgent(RoutedAgent):\n", + "    def __init__(\n", + "        self,\n", + "        model_client: ChatCompletionClient,\n", + "    ) -> None:\n", + "        super().__init__(description=\"Worker Agent\")\n", + "        self._model_client = model_client\n", + "\n", + "    @message_handler\n", + "    async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", + "        if message.previous_results:\n", + "            # If previous results are provided, we need to synthesize them to create a single prompt.\n", + "            system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + "            system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " else:\n", + " # If no previous results are provided, we can simply pass the user query to the model.\n", + " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", + " assert isinstance(model_result.content, str)\n", + " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", + " return WorkerTaskResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Orchestrator Agent\n", + "\n", + "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", + "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", + "the orchestrator agent aggregates the results and publishes the final result." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "class OrchestratorAgent(RoutedAgent):\n", + " def __init__(\n", + " self,\n", + " model_client: ChatCompletionClient,\n", + " worker_agent_types: List[str],\n", + " num_layers: int,\n", + " ) -> None:\n", + " super().__init__(description=\"Aggregator Agent\")\n", + " self._model_client = model_client\n", + " self._worker_agent_types = worker_agent_types\n", + " self._num_layers = num_layers\n", + "\n", + " @message_handler\n", + " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", + " # Create task for the first layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[])\n", + " # Iterate over layers.\n", + " for i in range(self._num_layers - 1):\n", + " # Assign workers for this layer.\n", + " worker_ids = [\n", + " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", + " for j, worker_type in enumerate(self._worker_agent_types)\n", + " ]\n", + " # Dispatch tasks to workers.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", + " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", + " # Prepare task for the next layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", + " # Perform final aggregation.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", + " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " assert isinstance(model_result.content, str)\n", + " return FinalResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running Mixture of Agents\n", + "\n", + "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "task = (\n", + " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", + "We only need to register a single worker agent types, \"worker\", because we are using\n", + "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", + "If you want to use different models, you will need to register multiple worker agent types,\n", + "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", + "factory function.\n", + "\n", + "The instances of worker agents are automatically created when the orchestrator agent\n", + "dispatches tasks to them.\n", + "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", + "for more information on agent lifecycle." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_1:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", + "\n", + "Add the parts together:\n", + "\\[ 3 + 4 + 2 = 9 \\]\n", + "\n", + "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", + "\n", + "Now, multiply the value of one part by the number of parts for each person:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[ 3 \\times 48 = 144 \\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[ 4 \\times 48 = 192 \\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[ 2 \\times 48 = 96 \\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_0:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", + "\n", + "\\[\n", + "3 + 4 + 2 = 9 \\text{ parts}\n", + "\\]\n", + "\n", + "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\n", + "\\[\n", + "\\text{Value of one part} = \\frac{432}{9} = 48\n", + "\\]\n", + "\n", + "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[\n", + "3 \\times 48 = 144 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[\n", + "4 \\times 48 = 192 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[\n", + "2 \\times 48 = 96 \\text{ cookies}\n", + "\\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_2:\n", + "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. 
\n", + "\n", + "The total parts are:\n", + "- Alice: 3 parts\n", + "- Bob: 4 parts\n", + "- Charlie: 2 parts\n", + "\n", + "Adding these parts together gives:\n", + "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", + "\n", + "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", + "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", + "\n", + "Now we can calculate the number of cookies for each person:\n", + "- Alice's share: \n", + "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", + "- Bob's share: \n", + "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", + "- Charlie's share: \n", + "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", + "\n", + "So, the final distribution of cookies is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_2:\n", + "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", + "\n", + "1. **Determine the total number of parts in the ratio**:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48\n", + " \\]\n", + "\n", + "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", + " - **For Alice (3 parts)**:\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **For Bob (4 parts)**:\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **For Charlie (2 parts)**:\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Thus, the final distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_0:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", + "\n", + "1. **Calculate the Total Parts**: \n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part**: \n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share**:\n", + " - **Alice's Share** (3 parts):\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share** (4 parts):\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share** (2 parts):\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Final Distribution**:\n", + " - Alice: 144 cookies\n", + " - Bob: 192 cookies\n", + " - Charlie: 96 cookies\n", + "\n", + "Thus, the distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_1:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", + "\n", + "1. **Calculate Total Parts:**\n", + " \\[\n", + " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. **Calculate the Share for Each Person:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie’s Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Summary of the Distribution:**\n", + " - **Alice:** 144 cookies\n", + " - **Bob:** 192 cookies\n", + " - **Charlie:** 96 cookies\n", + "\n", + "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Performing final aggregation\n", + "--------------------------------------------------------------------------------\n", + "Final result:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", + "\n", + "1. **Calculate the Total Parts in the Ratio:**\n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Therefore, the distribution of cookies is as follows:\n", + "- **Alice:** 144 cookies\n", + "- **Bob:** 192 cookies\n", + "- **Charlie:** 96 cookies\n", + "\n", + "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await WorkerAgent.register(\n", + " runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", + ")\n", + "await OrchestratorAgent.register(\n", + " runtime,\n", + " \"orchestrator\",\n", + " lambda: OrchestratorAgent(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", + " ),\n", + ")\n", + "\n", + "runtime.start()\n", + "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", + "await runtime.stop_when_idle()\n", + "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb index 852f35f07..6d34f7e58 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb @@ -59,7 +59,7 @@ " SystemMessage,\n", " UserMessage,\n", ")\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb index 7fb2566fd..bae4b92a4 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb @@ -442,7 +442,7 @@ ], "source": [ "from autogen_core import DefaultTopicId, SingleThreadedAgentRuntime\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", "await ReviewerAgent.register(\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/sequential-workflow.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/sequential-workflow.ipynb index f95fd2072..91e837713 100644 --- 
a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/sequential-workflow.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/sequential-workflow.ipynb @@ -58,7 +58,7 @@ " type_subscription,\n", ")\n", "from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient" + "from autogen_ext.models.openai import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md index d1fae6cad..cbec0e6af 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md @@ -46,7 +46,7 @@ Model capabilites are additional capabilities an LLM may have beyond the standar Model capabilities can be passed into a model, which will override the default definitions. These capabilities will not affect what the underlying model is actually capable of, but will allow or disallow behaviors associated with them. This is particularly useful when [using local LLMs](cookbook/local-llms-ollama-litellm.ipynb). ```python -from autogen_ext.models import OpenAIChatCompletionClient +from autogen_ext.models.openai import OpenAIChatCompletionClient client = OpenAIChatCompletionClient( model="gpt-4o", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb index c8ac3d163..73998b02e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb @@ -33,7 +33,7 @@ "outputs": [], "source": [ "from autogen_core.models import UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "# Create an OpenAI model client.\n", "model_client = OpenAIChatCompletionClient(\n", @@ -290,7 +290,7 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n", "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "\n", "# Create the token provider\n", @@ -334,7 +334,7 @@ "\n", "from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", "from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "\n", "@dataclass\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index 26f140302..7e7ea0ef9 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -171,7 +171,7 @@ ")\n", "from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop\n", "from autogen_core.tools import FunctionTool, Tool, ToolSchema\n", - "from autogen_ext.models import 
OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "\n", "@dataclass\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb index f75fb5298..d6b49453c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb @@ -324,7 +324,7 @@ "\n", "from autogen_core import SingleThreadedAgentRuntime\n", "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "work_dir = tempfile.mkdtemp()\n", "\n", diff --git a/python/packages/autogen-core/samples/common/utils.py b/python/packages/autogen-core/samples/common/utils.py index b86106860..d43283ab7 100644 --- a/python/packages/autogen-core/samples/common/utils.py +++ b/python/packages/autogen-core/samples/common/utils.py @@ -9,7 +9,7 @@ from autogen_core.models import ( LLMMessage, UserMessage, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider from typing_extensions import Literal diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_types.py b/python/packages/autogen-core/samples/distributed-group-chat/_types.py index 4bbdf08cc..cf5d8e752 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_types.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_types.py @@ -4,7 +4,7 @@ from typing import Dict from autogen_core.models import ( LLMMessage, ) -from autogen_ext.models import AzureOpenAIClientConfiguration +from autogen_ext.models.openai import AzureOpenAIClientConfiguration from pydantic import BaseModel diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py index 3869b88c2..2e329e745 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py @@ -5,7 +5,7 @@ from typing import Any, Iterable, Type import yaml from _types import AppConfig from autogen_core import MessageSerializer, try_get_known_serializers_for_type -from autogen_ext.models import AzureOpenAIClientConfiguration +from autogen_ext.models.openai import AzureOpenAIClientConfiguration from azure.identity import DefaultAzureCredential, get_bearer_token_provider diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py index f516f20a3..2fdfa72c7 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py @@ -8,7 +8,7 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from 
rich.console import Console from rich.markdown import Markdown diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py index ac0507891..ad9d06c02 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py @@ -8,7 +8,7 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py index a7172d15c..0455fb8da 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py @@ -8,7 +8,7 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py index 21146c3c7..455c8b749 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py @@ -42,7 +42,7 @@ class VideoSurfer(AssistantAgent): from autogen_agentchat.ui import Console from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.agents.video_surfer import VideoSurfer async def main() -> None: @@ -76,7 +76,7 @@ class VideoSurfer(AssistantAgent): from autogen_agentchat.ui import Console from autogen_agentchat.teams import MagenticOneGroupChat from autogen_agentchat.agents import UserProxyAgent - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.agents.video_surfer import VideoSurfer async def main() -> None: diff --git a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py similarity index 50% rename from python/packages/autogen-ext/src/autogen_ext/models/__init__.py rename to python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py index 80533f805..bad5690e3 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py @@ -1,14 +1,12 @@ -from ._openai._openai_client import ( +from ._openai_client import ( AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, ) -from ._openai.config import 
AzureOpenAIClientConfiguration, OpenAIClientConfiguration -from ._reply_chat_completion_client import ReplayChatCompletionClient +from .config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration __all__ = [ "AzureOpenAIClientConfiguration", "AzureOpenAIChatCompletionClient", "OpenAIClientConfiguration", "OpenAIChatCompletionClient", - "ReplayChatCompletionClient", ] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_model_info.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py similarity index 100% rename from python/packages/autogen-ext/src/autogen_ext/models/_openai/_model_info.py rename to python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py similarity index 99% rename from python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py rename to python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 56bd96708..f84a30e0e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -43,6 +43,9 @@ from autogen_core.models import ( UserMessage, ) from autogen_core.tools import Tool, ToolSchema +from pydantic import BaseModel +from typing_extensions import Unpack + from openai import AsyncAzureOpenAI, AsyncOpenAI from openai.types.chat import ( ChatCompletion, @@ -63,8 +66,6 @@ from openai.types.chat import ( from openai.types.chat.chat_completion import Choice from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice from openai.types.shared_params import FunctionDefinition, FunctionParameters -from pydantic import BaseModel -from typing_extensions import Unpack from . import _model_info from .config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration @@ -909,13 +910,13 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. code-block:: bash - pip install 'autogen-ext[openai]==0.4.0.dev9' + pip install 'autogen-ext[openai]==0.4.0.dev8' The following code snippet shows how to use the client with an OpenAI model: .. code-block:: python - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_core.models import UserMessage openai_client = OpenAIChatCompletionClient( @@ -931,7 +932,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. code-block:: python - from autogen_ext.models import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient custom_model_client = OpenAIChatCompletionClient( model="custom-model-name", @@ -989,7 +990,7 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. code-block:: bash - pip install 'autogen-ext[openai,azure]==0.4.0.dev9' + pip install 'autogen-ext[openai,azure]==0.4.0.dev8' To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities. @@ -1000,7 +1001,7 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. 
code-block:: python - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider # Create the token provider diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py similarity index 100% rename from python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py rename to python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py diff --git a/python/packages/autogen-ext/src/autogen_ext/models/replay/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/replay/__init__.py new file mode 100644 index 000000000..6e6da6f0a --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/models/replay/__init__.py @@ -0,0 +1,5 @@ +from ._replay_chat_completion_client import ReplayChatCompletionClient + +__all__ = [ + "ReplayChatCompletionClient", +] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py b/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py similarity index 97% rename from python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py rename to python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py index e3e0ff35a..d0cd159a3 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py @@ -37,7 +37,7 @@ class ReplayChatCompletionClient: .. code-block:: python - from autogen_ext.models import ReplayChatCompletionClient + from autogen_ext.models.replay import ReplayChatCompletionClient from autogen_core.models import UserMessage @@ -57,7 +57,7 @@ class ReplayChatCompletionClient: .. code-block:: python import asyncio - from autogen_ext.models import ReplayChatCompletionClient + from autogen_ext.models.replay import ReplayChatCompletionClient from autogen_core.models import UserMessage @@ -83,7 +83,7 @@ class ReplayChatCompletionClient: .. 
code-block:: python import asyncio - from autogen_ext.models import ReplayChatCompletionClient + from autogen_ext.models.replay import ReplayChatCompletionClient from autogen_core.models import UserMessage diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/__init__.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py similarity index 100% rename from python/packages/autogen-ext/src/autogen_ext/tools/__init__.py rename to python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py similarity index 100% rename from python/packages/autogen-ext/src/autogen_ext/tools/_langchain_adapter.py rename to python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py diff --git a/python/packages/autogen-ext/tests/models/test_openai_model_client.py b/python/packages/autogen-ext/tests/models/test_openai_model_client.py index 76b481aa2..9f2144c5d 100644 --- a/python/packages/autogen-ext/tests/models/test_openai_model_client.py +++ b/python/packages/autogen-ext/tests/models/test_openai_model_client.py @@ -15,9 +15,9 @@ from autogen_core.models import ( UserMessage, ) from autogen_core.tools import BaseTool, FunctionTool -from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient -from autogen_ext.models._openai._model_info import resolve_model -from autogen_ext.models._openai._openai_client import calculate_vision_tokens, convert_tools +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient +from autogen_ext.models.openai._model_info import resolve_model +from autogen_ext.models.openai._openai_client import calculate_vision_tokens, convert_tools from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta @@ -275,9 +275,7 @@ async def test_openai_chat_completion_client_count_tokens(monkeypatch: pytest.Mo tools = [FunctionTool(tool1, description="example tool 1"), FunctionTool(tool2, description="example tool 2")] mockcalculate_vision_tokens = MagicMock() - monkeypatch.setattr( - "autogen_ext.models._openai._openai_client.calculate_vision_tokens", mockcalculate_vision_tokens - ) + monkeypatch.setattr("autogen_ext.models.openai._openai_client.calculate_vision_tokens", mockcalculate_vision_tokens) num_tokens = client.count_tokens(messages, tools=tools) assert num_tokens diff --git a/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py b/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py index 40b27848a..7c3fe584b 100644 --- a/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py +++ b/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py @@ -13,7 +13,7 @@ from autogen_core import ( message_handler, ) from autogen_core.models import ChatCompletionClient, CreateResult, SystemMessage, UserMessage -from autogen_ext.models import ReplayChatCompletionClient +from autogen_ext.models.replay import ReplayChatCompletionClient @dataclass @@ -48,7 +48,7 @@ class LLMAgentWithDefaultSubscription(LLMAgent): ... 
@pytest.mark.asyncio -async def test_reply_chat_completion_client() -> None: +async def test_replay_chat_completion_client() -> None: num_messages = 5 messages = [f"Message {i}" for i in range(num_messages)] reply_model_client = ReplayChatCompletionClient(messages) @@ -61,7 +61,7 @@ async def test_reply_chat_completion_client() -> None: @pytest.mark.asyncio -async def test_reply_chat_completion_client_create_stream() -> None: +async def test_replay_chat_completion_client_create_stream() -> None: num_messages = 5 messages = [f"Message {i}" for i in range(num_messages)] reply_model_client = ReplayChatCompletionClient(messages) @@ -155,7 +155,7 @@ async def test_token_count_logics() -> None: @pytest.mark.asyncio -async def test_reply_chat_completion_client_reset() -> None: +async def test_replay_chat_completion_client_reset() -> None: """Test that reset functionality properly resets the client state.""" messages = ["First message", "Second message", "Third message"] client = ReplayChatCompletionClient(messages) diff --git a/python/packages/autogen-ext/tests/test_tools.py b/python/packages/autogen-ext/tests/test_tools.py index 3c9cf4159..58896642a 100644 --- a/python/packages/autogen-ext/tests/test_tools.py +++ b/python/packages/autogen-ext/tests/test_tools.py @@ -1,8 +1,9 @@ -from typing import Optional, Type +from typing import Optional, Type, cast import pytest from autogen_core import CancellationToken -from autogen_ext.tools import LangChainToolAdapter # type: ignore +from autogen_core.tools import Tool +from autogen_ext.tools.langchain import LangChainToolAdapter # type: ignore from langchain_core.callbacks.manager import AsyncCallbackManagerForToolRun, CallbackManagerForToolRun from langchain_core.tools import BaseTool as LangChainTool from langchain_core.tools import tool # pyright: ignore @@ -46,7 +47,7 @@ async def test_langchain_tool_adapter() -> None: langchain_tool = add # type: ignore # Create an adapter - adapter = LangChainToolAdapter(langchain_tool) # type: ignore + adapter = cast(Tool, LangChainToolAdapter(langchain_tool)) # type: ignore # Test schema generation schema = adapter.schema diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py index 46ebe9d66..e8df40bae 100644 --- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py +++ b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py @@ -11,7 +11,7 @@ from autogen_core.models import ( ChatCompletionClient, ModelCapabilities, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from .messages import ( AgentEvent, diff --git a/python/packages/autogen-studio/autogenstudio/database/component_factory.py b/python/packages/autogen-studio/autogenstudio/database/component_factory.py index 140112ede..edabb8291 100644 --- a/python/packages/autogen-studio/autogenstudio/database/component_factory.py +++ b/python/packages/autogen-studio/autogenstudio/database/component_factory.py @@ -13,7 +13,7 @@ from autogen_core.tools import FunctionTool from autogen_ext.agents.file_surfer import FileSurfer from autogen_ext.agents.magentic_one import MagenticOneCoderAgent from autogen_ext.agents.web_surfer import MultimodalWebSurfer -from autogen_ext.models import OpenAIChatCompletionClient +from autogen_ext.models.openai import OpenAIChatCompletionClient from 
..datamodel.types import ( AgentConfig, diff --git a/python/packages/autogen-studio/notebooks/tutorial.ipynb b/python/packages/autogen-studio/notebooks/tutorial.ipynb index 17dbab020..47dc0220d 100644 --- a/python/packages/autogen-studio/notebooks/tutorial.ipynb +++ b/python/packages/autogen-studio/notebooks/tutorial.ipynb @@ -210,7 +210,7 @@ "from autogen_agentchat.agents import AssistantAgent\n", "from autogen_agentchat.conditions import TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat, SelectorGroupChat\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "planner_agent = AssistantAgent(\n", " \"planner_agent\",\n",
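
Below is a minimal, runnable sketch (not part of the patch itself) illustrating the import paths after this change. The question string and canned responses are made up for illustration; the example exercises only the relocated `ReplayChatCompletionClient`, since it replays fixed responses and needs no API key, while the other new namespaces are noted in comments:

```python
# New namespaced imports introduced by this patch. Also available, with the
# matching extras installed:
#   from autogen_ext.models.openai import OpenAIChatCompletionClient, AzureOpenAIChatCompletionClient
#   from autogen_ext.tools.langchain import LangChainToolAdapter
# The old flat paths (e.g. `from autogen_ext.models import ReplayChatCompletionClient`)
# no longer exist after this patch lands.
import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def main() -> None:
    # ReplayChatCompletionClient returns the canned responses in order,
    # so this runs offline and is handy for deterministic tests.
    client = ReplayChatCompletionClient(["Paris.", "Lyon."])
    result = await client.create([UserMessage(content="Capital of France?", source="user")])
    print(result.content)  # -> "Paris."


asyncio.run(main())
```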