diff --git a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py index 09925ebe1..004bb0c38 100644 --- a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py @@ -9,7 +9,7 @@ from openai import AzureOpenAI from typing import List from autogen_core import AgentId, AgentProxy, TopicId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core.components.models import ( ChatCompletionClient, diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py index 15b312c51..4d7135819 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py @@ -9,7 +9,7 @@ from openai import AzureOpenAI from typing import List from autogen_core import AgentId, AgentProxy, TopicId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core.components.models import ( ChatCompletionClient, diff --git a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py index a0a2707aa..63643c84a 100644 --- a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py @@ -2,7 +2,7 @@ import asyncio import logging from autogen_core import AgentId, AgentProxy, TopicId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core import DefaultSubscription, DefaultTopicId from autogen_core.components.code_executor import LocalCommandLineCodeExecutor diff --git a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py index 22a681851..159003459 100644 --- a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py @@ -8,7 +8,7 @@ import nltk from typing import Any, Dict, List, Tuple, Union from autogen_core import AgentId, AgentProxy, TopicId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core import DefaultSubscription, DefaultTopicId from autogen_core.components.code_executor import LocalCommandLineCodeExecutor diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index 5a798d2b0..1e46c83fd 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ 
b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -12,10 +12,10 @@ from autogen_core import ( CancellationToken, ClosureAgent, MessageContext, + SingleThreadedAgentRuntime, TypeSubscription, ) from autogen_core._closure_agent import ClosureContext -from autogen_core.application import SingleThreadedAgentRuntime from ... import EVENT_LOGGER_NAME from ...base import ChatAgent, TaskResult, Team, TerminationCondition diff --git a/python/packages/autogen-agentchat/tests/test_sequential_routed_agent.py b/python/packages/autogen-agentchat/tests/test_sequential_routed_agent.py index 9291882cc..165c68e6d 100644 --- a/python/packages/autogen-agentchat/tests/test_sequential_routed_agent.py +++ b/python/packages/autogen-agentchat/tests/test_sequential_routed_agent.py @@ -5,8 +5,14 @@ from typing import List import pytest from autogen_agentchat.teams._group_chat._sequential_routed_agent import SequentialRoutedAgent -from autogen_core import AgentId, DefaultTopicId, MessageContext, default_subscription, message_handler -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import ( + AgentId, + DefaultTopicId, + MessageContext, + SingleThreadedAgentRuntime, + default_subscription, + message_handler, +) @dataclass diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/extracting-results-with-an-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/extracting-results-with-an-agent.ipynb index 81cdaecac..c73b1930c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/extracting-results-with-an-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/extracting-results-with-an-agent.ipynb @@ -25,8 +25,14 @@ "import asyncio\n", "from dataclasses import dataclass\n", "\n", - "from autogen_core import ClosureAgent, ClosureContext, DefaultSubscription, DefaultTopicId, MessageContext\n", - "from autogen_core.application import SingleThreadedAgentRuntime" + "from autogen_core import (\n", + " ClosureAgent,\n", + " ClosureContext,\n", + " DefaultSubscription,\n", + " DefaultTopicId,\n", + " MessageContext,\n", + " SingleThreadedAgentRuntime,\n", + ")" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/langgraph-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/langgraph-agent.ipynb index 513ec5576..349bc3948 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/langgraph-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/langgraph-agent.ipynb @@ -35,15 +35,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from dataclasses import dataclass\n", "from typing import Any, Callable, List, Literal\n", "\n", - "from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.tools import tool # pyright: ignore\n", diff --git 
a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llamaindex-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llamaindex-agent.ipynb index 71e1dc196..e75293fce 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llamaindex-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llamaindex-agent.ipynb @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -41,8 +41,7 @@ "from dataclasses import dataclass\n", "from typing import List, Optional\n", "\n", - "from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "from llama_index.core import Settings\n", "from llama_index.core.agent import ReActAgent\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb index 69edc8478..ea248500b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb @@ -39,8 +39,15 @@ "source": [ "from dataclasses import dataclass\n", "\n", - "from autogen_core import AgentId, DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import (\n", + " AgentId,\n", + " DefaultTopicId,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " default_subscription,\n", + " message_handler,\n", + ")\n", "from autogen_core.components.model_context import BufferedChatCompletionContext\n", "from autogen_core.components.models import (\n", " AssistantMessage,\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/openai-assistant-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/openai-assistant-agent.ipynb index 337e754fa..000336c33 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/openai-assistant-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/openai-assistant-agent.ipynb @@ -386,7 +386,7 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import SingleThreadedAgentRuntime\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", "await OpenAIAssistantAgent.register(\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb index 1d5387e7c..be4a3b895 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb @@ -22,8 +22,15 @@ "from 
dataclasses import dataclass\n", "from typing import Any\n", "\n", - "from autogen_core import AgentId, DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import (\n", + " AgentId,\n", + " DefaultTopicId,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " default_subscription,\n", + " message_handler,\n", + ")\n", "from autogen_core.base.intervention import DefaultInterventionHandler" ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb index 98fd748d3..a46ccdfbc 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb @@ -1,283 +1,290 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# User Approval for Tool Execution using Intervention Handler\n", - "\n", - "This cookbook shows how to intercept the tool execution using\n", - "an intervention hanlder, and prompt the user for permission to execute the tool." - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "from typing import Any, List\n", - "\n", - "from autogen_core import AgentId, AgentType, FunctionCall, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage\n", - "from autogen_core.components.models import (\n", - " ChatCompletionClient,\n", - " LLMMessage,\n", - " SystemMessage,\n", - " UserMessage,\n", - ")\n", - "from autogen_core.components.tools import PythonCodeExecutionTool, ToolSchema\n", - "from autogen_core.tool_agent import ToolAgent, ToolException, tool_agent_caller_loop\n", - "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's define a simple message type that carries a string content." - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class Message:\n", - " content: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create a simple tool use agent that is capable of using tools through a\n", - "{py:class}`~autogen_core.components.tool_agent.ToolAgent`." - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "class ToolUseAgent(RoutedAgent):\n", - " \"\"\"An agent that uses tools to perform tasks. 
It executes the tools\n", - " by itself by sending the tool execution task to a ToolAgent.\"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " description: str,\n", - " system_messages: List[SystemMessage],\n", - " model_client: ChatCompletionClient,\n", - " tool_schema: List[ToolSchema],\n", - " tool_agent_type: AgentType,\n", - " ) -> None:\n", - " super().__init__(description)\n", - " self._model_client = model_client\n", - " self._system_messages = system_messages\n", - " self._tool_schema = tool_schema\n", - " self._tool_agent_id = AgentId(type=tool_agent_type, key=self.id.key)\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " \"\"\"Handle a user message, execute the model and tools, and returns the response.\"\"\"\n", - " session: List[LLMMessage] = [UserMessage(content=message.content, source=\"User\")]\n", - " # Use the tool agent to execute the tools, and get the output messages.\n", - " output_messages = await tool_agent_caller_loop(\n", - " self,\n", - " tool_agent_id=self._tool_agent_id,\n", - " model_client=self._model_client,\n", - " input_messages=session,\n", - " tool_schema=self._tool_schema,\n", - " cancellation_token=ctx.cancellation_token,\n", - " )\n", - " # Extract the final response from the output messages.\n", - " final_response = output_messages[-1].content\n", - " assert isinstance(final_response, str)\n", - " return Message(content=final_response)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The tool use agent sends tool call requests to the tool agent to execute tools,\n", - "so we can intercept the messages sent by the tool use agent to the tool agent\n", - "to prompt the user for permission to execute the tool.\n", - "\n", - "Let's create an intervention handler that intercepts the messages and prompts\n", - "user for before allowing the tool execution." - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "class ToolInterventionHandler(DefaultInterventionHandler):\n", - " async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]:\n", - " if isinstance(message, FunctionCall):\n", - " # Request user prompt for tool execution.\n", - " user_input = input(\n", - " f\"Function call: {message.name}\\nArguments: {message.arguments}\\nDo you want to execute the tool? (y/n): \"\n", - " )\n", - " if user_input.strip().lower() != \"y\":\n", - " raise ToolException(content=\"User denied tool execution.\", call_id=message.id)\n", - " return message" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we can create a runtime with the intervention handler registered." 
- ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "# Create the runtime with the intervention handler.\n", - "runtime = SingleThreadedAgentRuntime(intervention_handlers=[ToolInterventionHandler()])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example, we will use a tool for Python code execution.\n", - "First, we create a Docker-based command-line code executor\n", - "using {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`,\n", - "and then use it to instantiate a built-in Python code execution tool\n", - "{py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`\n", - "that runs code in a Docker container." - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "# Create the docker executor for the Python code execution tool.\n", - "docker_executor = DockerCommandLineCodeExecutor()\n", - "\n", - "# Create the Python code execution tool.\n", - "python_tool = PythonCodeExecutionTool(executor=docker_executor)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Register the agents with tools and tool schema." - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='tool_enabled_agent')" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Register agents.\n", - "tool_agent_type = await ToolAgent.register(\n", - " runtime,\n", - " \"tool_executor_agent\",\n", - " lambda: ToolAgent(\n", - " description=\"Tool Executor Agent\",\n", - " tools=[python_tool],\n", - " ),\n", - ")\n", - "await ToolUseAgent.register(\n", - " runtime,\n", - " \"tool_enabled_agent\",\n", - " lambda: ToolUseAgent(\n", - " description=\"Tool Use Agent\",\n", - " system_messages=[SystemMessage(content=\"You are a helpful AI Assistant. Use your tools to solve problems.\")],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " tool_schema=[python_tool.schema],\n", - " tool_agent_type=tool_agent_type,\n", - " ),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Run the agents by starting the runtime and sending a message to the tool use agent.\n", - "The intervention handler will prompt you for permission to execute the tool." 
- ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The output of the code is: **Hello, World!**\n" - ] - } - ], - "source": [ - "# Start the runtime and the docker executor.\n", - "await docker_executor.start()\n", - "runtime.start()\n", - "\n", - "# Send a task to the tool user.\n", - "response = await runtime.send_message(\n", - "    Message(\"Run the following Python code: print('Hello, World!')\"), AgentId(\"tool_enabled_agent\", \"default\")\n", - ")\n", - "print(response.content)\n", - "\n", - "# Stop the runtime and the docker executor.\n", - "await runtime.stop()\n", - "await docker_executor.stop()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# User Approval for Tool Execution using Intervention Handler\n", + "\n", + "This cookbook shows how to intercept the tool execution using\n", + "an intervention handler and prompt the user for permission to execute the tool." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "from typing import Any, List\n", + "\n", + "from autogen_core import (\n", + "    AgentId,\n", + "    AgentType,\n", + "    FunctionCall,\n", + "    MessageContext,\n", + "    RoutedAgent,\n", + "    SingleThreadedAgentRuntime,\n", + "    message_handler,\n", + ")\n", + "from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage\n", + "from autogen_core.components.models import (\n", + "    ChatCompletionClient,\n", + "    LLMMessage,\n", + "    SystemMessage,\n", + "    UserMessage,\n", + ")\n", + "from autogen_core.components.tools import PythonCodeExecutionTool, ToolSchema\n", + "from autogen_core.tool_agent import ToolAgent, ToolException, tool_agent_caller_loop\n", + "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's define a simple message type that carries a string content." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Message:\n", + "    content: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create a simple tool use agent that is capable of using tools through a\n", + "{py:class}`~autogen_core.components.tool_agent.ToolAgent`." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "class ToolUseAgent(RoutedAgent):\n", + "    \"\"\"An agent that uses tools to perform tasks. 
It executes the tools\n", + "    by sending the tool execution task to a ToolAgent.\"\"\"\n", + "\n", + "    def __init__(\n", + "        self,\n", + "        description: str,\n", + "        system_messages: List[SystemMessage],\n", + "        model_client: ChatCompletionClient,\n", + "        tool_schema: List[ToolSchema],\n", + "        tool_agent_type: AgentType,\n", + "    ) -> None:\n", + "        super().__init__(description)\n", + "        self._model_client = model_client\n", + "        self._system_messages = system_messages\n", + "        self._tool_schema = tool_schema\n", + "        self._tool_agent_id = AgentId(type=tool_agent_type, key=self.id.key)\n", + "\n", + "    @message_handler\n", + "    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + "        \"\"\"Handle a user message, execute the model and tools, and return the response.\"\"\"\n", + "        session: List[LLMMessage] = [UserMessage(content=message.content, source=\"User\")]\n", + "        # Use the tool agent to execute the tools, and get the output messages.\n", + "        output_messages = await tool_agent_caller_loop(\n", + "            self,\n", + "            tool_agent_id=self._tool_agent_id,\n", + "            model_client=self._model_client,\n", + "            input_messages=session,\n", + "            tool_schema=self._tool_schema,\n", + "            cancellation_token=ctx.cancellation_token,\n", + "        )\n", + "        # Extract the final response from the output messages.\n", + "        final_response = output_messages[-1].content\n", + "        assert isinstance(final_response, str)\n", + "        return Message(content=final_response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tool use agent sends tool call requests to the tool agent to execute tools,\n", + "so we can intercept the messages sent by the tool use agent to the tool agent\n", + "to prompt the user for permission to execute the tool.\n", + "\n", + "Let's create an intervention handler that intercepts the messages and prompts the\n", + "user before allowing the tool execution." + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "class ToolInterventionHandler(DefaultInterventionHandler):\n", + "    async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]:\n", + "        if isinstance(message, FunctionCall):\n", + "            # Request user prompt for tool execution.\n", + "            user_input = input(\n", + "                f\"Function call: {message.name}\\nArguments: {message.arguments}\\nDo you want to execute the tool? (y/n): \"\n", + "            )\n", + "            if user_input.strip().lower() != \"y\":\n", + "                raise ToolException(content=\"User denied tool execution.\", call_id=message.id)\n", + "        return message" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we can create a runtime with the intervention handler registered." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the runtime with the intervention handler.\n", + "runtime = SingleThreadedAgentRuntime(intervention_handlers=[ToolInterventionHandler()])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will use a tool for Python code execution.\n", + "First, we create a Docker-based command-line code executor\n", + "using {py:class}`~autogen_ext.code_executors.DockerCommandLineCodeExecutor`,\n", + "and then use it to instantiate a built-in Python code execution tool\n", + "{py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`\n", + "that runs code in a Docker container." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the docker executor for the Python code execution tool.\n", + "docker_executor = DockerCommandLineCodeExecutor()\n", + "\n", + "# Create the Python code execution tool.\n", + "python_tool = PythonCodeExecutionTool(executor=docker_executor)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Register the agents with tools and tool schema." + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='tool_enabled_agent')" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Register agents.\n", + "tool_agent_type = await ToolAgent.register(\n", + "    runtime,\n", + "    \"tool_executor_agent\",\n", + "    lambda: ToolAgent(\n", + "        description=\"Tool Executor Agent\",\n", + "        tools=[python_tool],\n", + "    ),\n", + ")\n", + "await ToolUseAgent.register(\n", + "    runtime,\n", + "    \"tool_enabled_agent\",\n", + "    lambda: ToolUseAgent(\n", + "        description=\"Tool Use Agent\",\n", + "        system_messages=[SystemMessage(content=\"You are a helpful AI Assistant. Use your tools to solve problems.\")],\n", + "        model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + "        tool_schema=[python_tool.schema],\n", + "        tool_agent_type=tool_agent_type,\n", + "    ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run the agents by starting the runtime and sending a message to the tool use agent.\n", + "The intervention handler will prompt you for permission to execute the tool." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The output of the code is: **Hello, World!**\n" + ] + } + ], + "source": [ + "# Start the runtime and the docker executor.\n", + "await docker_executor.start()\n", + "runtime.start()\n", + "\n", + "# Send a task to the tool user.\n", + "response = await runtime.send_message(\n", + " Message(\"Run the following Python code: print('Hello, World!')\"), AgentId(\"tool_enabled_agent\", \"default\")\n", + ")\n", + "print(response.content)\n", + "\n", + "# Stop the runtime and the docker executor.\n", + "await runtime.stop()\n", + "await docker_executor.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/topic-subscription-scenarios.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/topic-subscription-scenarios.ipynb index fafb88b6d..6a09277b3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/topic-subscription-scenarios.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/topic-subscription-scenarios.ipynb @@ -1,601 +1,607 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Topic and Subscription Example Scenarios\n", - "\n", - "### Introduction\n", - "\n", - "In this cookbook, we explore how broadcasting works for agent communication in AutoGen using four different broadcasting scenarios. These scenarios illustrate various ways to handle and distribute messages among agents. We'll use a consistent example of a tax management company processing client requests to demonstrate each scenario.\n", - "\n", - "### Scenario Overview\n", - "\n", - "Imagine a tax management company that offers various services to clients, such as tax planning, dispute resolution, compliance, and preparation. The company employs a team of tax specialists, each with expertise in one of these areas, and a tax system manager who oversees the operations.\n", - "\n", - "Clients submit requests that need to be processed by the appropriate specialists. The communication between the clients, the tax system manager, and the tax specialists is handled through broadcasting in this system.\n", - "\n", - "We'll explore how different broadcasting scenarios affect the way messages are distributed among agents and how they can be used to tailor the communication flow to specific needs.\n", - "\n", - "---\n", - "\n", - "### Broadcasting Scenarios Overview\n", - "\n", - "We will cover the following broadcasting scenarios:\n", - "\n", - "1. **Single-Tenant, Single Scope of Publishing**\n", - "2. **Multi-Tenant, Single Scope of Publishing**\n", - "3. **Single-Tenant, Multiple Scopes of Publishing**\n", - "4. **Multi-Tenant, Multiple Scopes of Publishing**\n", - "\n", - "\n", - "Each scenario represents a different approach to message distribution and agent interaction within the system. 
By understanding these scenarios, you can design agent communication strategies that best fit your application's requirements." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "from dataclasses import dataclass\n", - "from enum import Enum\n", - "from typing import List\n", - "\n", - "from autogen_core import MessageContext, RoutedAgent, TopicId, TypeSubscription, message_handler\n", - "from autogen_core._default_subscription import DefaultSubscription\n", - "from autogen_core._default_topic import DefaultTopicId\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.components.models import (\n", - " SystemMessage,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "class TaxSpecialty(str, Enum):\n", - " PLANNING = \"planning\"\n", - " DISPUTE_RESOLUTION = \"dispute_resolution\"\n", - " COMPLIANCE = \"compliance\"\n", - " PREPARATION = \"preparation\"\n", - "\n", - "\n", - "@dataclass\n", - "class ClientRequest:\n", - " content: str\n", - "\n", - "\n", - "@dataclass\n", - "class RequestAssessment:\n", - " content: str\n", - "\n", - "\n", - "class TaxSpecialist(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " description: str,\n", - " specialty: TaxSpecialty,\n", - " system_messages: List[SystemMessage],\n", - " ) -> None:\n", - " super().__init__(description)\n", - " self.specialty = specialty\n", - " self._system_messages = system_messages\n", - " self._memory: List[ClientRequest] = []\n", - "\n", - " @message_handler\n", - " async def handle_message(self, message: ClientRequest, ctx: MessageContext) -> None:\n", - " # Process the client request.\n", - " print(f\"\\n{'='*50}\\nTax specialist {self.id} with specialty {self.specialty}:\\n{message.content}\")\n", - " # Send a response back to the manager\n", - " if ctx.topic_id is None:\n", - " raise ValueError(\"Topic ID is required for broadcasting\")\n", - " await self.publish_message(\n", - " message=RequestAssessment(content=f\"I can handle this request in {self.specialty}.\"),\n", - " topic_id=ctx.topic_id,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1. 
Single-Tenant, Single Scope of Publishing\n", - "\n", - "#### Scenarios Explanation\n", - "In the single-tenant, single scope of publishing scenario:\n", - "\n", - "- All agents operate within a single tenant (e.g., one client or user session).\n", - "- Messages are published to a single topic, and all agents subscribe to this topic.\n", - "- Every agent receives every message that gets published to the topic.\n", - "\n", - "This scenario is suitable for situations where all agents need to be aware of all messages, and there's no need to isolate communication between different groups of agents or sessions.\n", - "\n", - "#### Application in the Tax Specialist Company\n", - "\n", - "In our tax specialist company, this scenario implies:\n", - "\n", - "- All tax specialists receive every client request and internal message.\n", - "- All agents collaborate closely, with full visibility of all communications.\n", - "- Useful for tasks or teams where all agents need to be aware of all messages.\n", - "\n", - "#### How the Scenario Works\n", - "\n", - "- Subscriptions: All agents use the default subscription(e.g., \"default\").\n", - "- Publishing: Messages are published to the default topic.\n", - "- Message Handling: Each agent decides whether to act on a message based on its content and available handlers.\n", - "\n", - "#### Benefits\n", - "- Simplicity: Easy to set up and understand.\n", - "- Collaboration: Promotes transparency and collaboration among agents.\n", - "- Flexibility: Agents can dynamically decide which messages to process.\n", - "\n", - "#### Considerations\n", - "- Scalability: May not scale well with a large number of agents or messages.\n", - "- Efficiency: Agents may receive many irrelevant messages, leading to unnecessary processing." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_1:default with specialty TaxSpecialty.PLANNING:\n", - "I need to have my tax for 2024 prepared.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_2:default with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "I need to have my tax for 2024 prepared.\n" - ] - } - ], - "source": [ - "async def run_single_tenant_single_scope() -> None:\n", - " # Create the runtime.\n", - " runtime = SingleThreadedAgentRuntime()\n", - "\n", - " # Register TaxSpecialist agents for each specialty\n", - " specialist_agent_type_1 = \"TaxSpecialist_1\"\n", - " specialist_agent_type_2 = \"TaxSpecialist_2\"\n", - " await TaxSpecialist.register(\n", - " runtime=runtime,\n", - " type=specialist_agent_type_1,\n", - " factory=lambda: TaxSpecialist(\n", - " description=\"A tax specialist 1\",\n", - " specialty=TaxSpecialty.PLANNING,\n", - " system_messages=[SystemMessage(content=\"You are a tax specialist.\")],\n", - " ),\n", - " )\n", - "\n", - " await TaxSpecialist.register(\n", - " runtime=runtime,\n", - " type=specialist_agent_type_2,\n", - " factory=lambda: TaxSpecialist(\n", - " description=\"A tax specialist 2\",\n", - " specialty=TaxSpecialty.DISPUTE_RESOLUTION,\n", - " system_messages=[SystemMessage(content=\"You are a tax specialist.\")],\n", - " ),\n", - " )\n", - "\n", - " # Add default subscriptions for each agent type\n", - " await runtime.add_subscription(DefaultSubscription(agent_type=specialist_agent_type_1))\n", - " await runtime.add_subscription(DefaultSubscription(agent_type=specialist_agent_type_2))\n", - "\n", - " # Start the runtime and send a message to agents on default topic\n", - " runtime.start()\n", - " await runtime.publish_message(ClientRequest(\"I need to have my tax for 2024 prepared.\"), topic_id=DefaultTopicId())\n", - " await runtime.stop_when_idle()\n", - "\n", - "\n", - "await run_single_tenant_single_scope()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2. Multi-Tenant, Single Scope of Publishing\n", - "\n", - "#### Scenario Explanation\n", - "\n", - "In the multi-tenant, single scope of publishing scenario:\n", - "\n", - "- There are multiple tenants (e.g., multiple clients or user sessions).\n", - "- Each tenant has its own isolated topic through the topic source.\n", - "- All agents within a tenant subscribe to the tenant's topic. 
If needed, new agent instances are created for each tenant.\n", - "- Messages are only visible to agents within the same tenant.\n", - "\n", - "This scenario is useful when you need to isolate communication between different tenants but want all agents within a tenant to be aware of all messages.\n", - "\n", - "#### Application in the Tax Specialist Company\n", - "\n", - "In this scenario:\n", - "\n", - "- The company serves multiple clients (tenants) simultaneously.\n", - "- For each client, a dedicated set of agent instances is created.\n", - "- Each client's communication is isolated from others.\n", - "- All agents for a client receive messages published to that client's topic.\n", - "\n", - "#### How the Scenario Works\n", - "\n", - "- Subscriptions: Agents subscribe to topics based on the tenant's identity.\n", - "- Publishing: Messages are published to the tenant-specific topic.\n", - "- Message Handling: Agents only receive messages relevant to their tenant.\n", - "\n", - "#### Benefits\n", - "- Tenant Isolation: Ensures data privacy and separation between clients.\n", - "- Collaboration Within Tenant: Agents can collaborate freely within their tenant.\n", - "\n", - "#### Considerations\n", - "- Complexity: Requires managing multiple sets of agents and topics.\n", - "- Resource Usage: More agent instances may consume additional resources." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_planning:ClientABC with specialty TaxSpecialty.PLANNING:\n", - "ClientABC requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_dispute_resolution:ClientABC with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "ClientABC requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_compliance:ClientABC with specialty TaxSpecialty.COMPLIANCE:\n", - "ClientABC requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_preparation:ClientABC with specialty TaxSpecialty.PREPARATION:\n", - "ClientABC requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_planning:ClientXYZ with specialty TaxSpecialty.PLANNING:\n", - "ClientXYZ requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_dispute_resolution:ClientXYZ with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "ClientXYZ requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_compliance:ClientXYZ with specialty TaxSpecialty.COMPLIANCE:\n", - "ClientXYZ requires tax services.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_preparation:ClientXYZ with specialty TaxSpecialty.PREPARATION:\n", - "ClientXYZ requires tax services.\n" - ] - } - ], - "source": [ - "async def run_multi_tenant_single_scope() -> None:\n", - " # Create the runtime\n", - " runtime = SingleThreadedAgentRuntime()\n", - "\n", - " # List of clients (tenants)\n", - " tenants = [\"ClientABC\", \"ClientXYZ\"]\n", - "\n", - " # Initialize sessions and map the topic type to each TaxSpecialist agent type\n", - " for specialty in 
TaxSpecialty:\n", - " specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", - " await TaxSpecialist.register(\n", - " runtime=runtime,\n", - " type=specialist_agent_type,\n", - " factory=lambda specialty=specialty: TaxSpecialist( # type: ignore\n", - " description=f\"A tax specialist in {specialty.value}.\",\n", - " specialty=specialty,\n", - " system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", - " ),\n", - " )\n", - " specialist_subscription = DefaultSubscription(agent_type=specialist_agent_type)\n", - " await runtime.add_subscription(specialist_subscription)\n", - "\n", - " # Start the runtime\n", - " runtime.start()\n", - "\n", - " # Publish client requests to their respective topics\n", - " for tenant in tenants:\n", - " topic_source = tenant # The topic source is the client name\n", - " topic_id = DefaultTopicId(source=topic_source)\n", - " await runtime.publish_message(\n", - " ClientRequest(f\"{tenant} requires tax services.\"),\n", - " topic_id=topic_id,\n", - " )\n", - "\n", - " # Allow time for message processing\n", - " await asyncio.sleep(1)\n", - "\n", - " # Stop the runtime when idle\n", - " await runtime.stop_when_idle()\n", - "\n", - "\n", - "await run_multi_tenant_single_scope()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. Single-Tenant, Multiple Scopes of Publishing\n", - "\n", - "#### Scenario Explanation\n", - "\n", - "In the single-tenant, multiple scopes of publishing scenario:\n", - "\n", - "- All agents operate within a single tenant.\n", - "- Messages are published to different topics.\n", - "- Agents subscribe to specific topics relevant to their role or specialty.\n", - "- Messages are directed to subsets of agents based on the topic.\n", - "\n", - "This scenario allows for targeted communication within a tenant, enabling more granular control over message distribution.\n", - "\n", - "#### Application in the Tax Management Company\n", - "\n", - "In this scenario:\n", - "\n", - "- The tax system manager communicates with specific specialists based on their specialties.\n", - "- Different topics represent different specialties (e.g., \"planning\", \"compliance\").\n", - "- Specialists subscribe only to the topic that matches their specialty.\n", - "- The manager publishes messages to specific topics to reach the intended specialists.\n", - "\n", - "#### How the Scenario Works\n", - "\n", - "- Subscriptions: Agents subscribe to topics corresponding to their specialties.\n", - "- Publishing: Messages are published to topics based on the intended recipients.\n", - "- Message Handling: Only agents subscribed to a topic receive its messages.\n", - "#### Benefits\n", - "\n", - "- Targeted Communication: Messages reach only the relevant agents.\n", - "- Efficiency: Reduces unnecessary message processing by agents.\n", - "\n", - "#### Considerations\n", - "\n", - "- Setup Complexity: Requires careful management of topics and subscriptions.\n", - "- Flexibility: Changes in communication scenarios may require updating subscriptions." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_planning:default with specialty TaxSpecialty.PLANNING:\n", - "I need assistance with planning taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_dispute_resolution:default with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "I need assistance with dispute_resolution taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_compliance:default with specialty TaxSpecialty.COMPLIANCE:\n", - "I need assistance with compliance taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_preparation:default with specialty TaxSpecialty.PREPARATION:\n", - "I need assistance with preparation taxes.\n" - ] - } - ], - "source": [ - "async def run_single_tenant_multiple_scope() -> None:\n", - " # Create the runtime\n", - " runtime = SingleThreadedAgentRuntime()\n", - " # Register TaxSpecialist agents for each specialty and add subscriptions\n", - " for specialty in TaxSpecialty:\n", - " specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", - " await TaxSpecialist.register(\n", - " runtime=runtime,\n", - " type=specialist_agent_type,\n", - " factory=lambda specialty=specialty: TaxSpecialist( # type: ignore\n", - " description=f\"A tax specialist in {specialty.value}.\",\n", - " specialty=specialty,\n", - " system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", - " ),\n", - " )\n", - " specialist_subscription = TypeSubscription(topic_type=specialty.value, agent_type=specialist_agent_type)\n", - " await runtime.add_subscription(specialist_subscription)\n", - "\n", - " # Start the runtime\n", - " runtime.start()\n", - "\n", - " # Publish a ClientRequest to each specialist's topic\n", - " for specialty in TaxSpecialty:\n", - " topic_id = TopicId(type=specialty.value, source=\"default\")\n", - " await runtime.publish_message(\n", - " ClientRequest(f\"I need assistance with {specialty.value} taxes.\"),\n", - " topic_id=topic_id,\n", - " )\n", - "\n", - " # Allow time for message processing\n", - " await asyncio.sleep(1)\n", - "\n", - " # Stop the runtime when idle\n", - " await runtime.stop_when_idle()\n", - "\n", - "\n", - "await run_single_tenant_multiple_scope()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. 
Multi-Tenant, Multiple Scopes of Publishing\n", - "\n", - "#### Scenario Explanation\n", - "\n", - "In the multi-tenant, multiple scopes of publishing scenario:\n", - "\n", - "- There are multiple tenants, each with their own set of agents.\n", - "- Messages are published to multiple topics within each tenant.\n", - "- Agents subscribe to tenant-specific topics relevant to their role.\n", - "- Combines tenant isolation with targeted communication.\n", - "\n", - "This scenario provides the highest level of control over message distribution, suitable for complex systems with multiple clients and specialized communication needs.\n", - "\n", - "#### Application in the Tax Management Company\n", - "\n", - "In this scenario:\n", - "\n", - "- The company serves multiple clients, each with dedicated agent instances.\n", - "- Within each client, agents communicate using multiple topics based on specialties.\n", - "- For example, Client A's planning specialist subscribes to the \"planning\" topic with source \"ClientA\".\n", - "- The tax system manager for each client communicates with their specialists using tenant-specific topics.\n", - "\n", - "#### How the Scenario Works\n", - "\n", - "- Subscriptions: Agents subscribe to topics based on both tenant identity and specialty.\n", - "- Publishing: Messages are published to tenant-specific and specialty-specific topics.\n", - "- Message Handling: Only agents matching the tenant and topic receive messages.\n", - "\n", - "#### Benefits\n", - "\n", - "- Complete Isolation: Ensures both tenant and communication isolation.\n", - "- Granular Control: Enables precise routing of messages to intended agents.\n", - "\n", - "#### Considerations\n", - "\n", - "- Complexity: Requires careful management of topics, tenants, and subscriptions.\n", - "- Resource Usage: Increased number of agent instances and topics may impact resources." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_planning:ClientABC with specialty TaxSpecialty.PLANNING:\n", - "ClientABC needs assistance with planning taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_dispute_resolution:ClientABC with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "ClientABC needs assistance with dispute_resolution taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_compliance:ClientABC with specialty TaxSpecialty.COMPLIANCE:\n", - "ClientABC needs assistance with compliance taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_preparation:ClientABC with specialty TaxSpecialty.PREPARATION:\n", - "ClientABC needs assistance with preparation taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_planning:ClientXYZ with specialty TaxSpecialty.PLANNING:\n", - "ClientXYZ needs assistance with planning taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_dispute_resolution:ClientXYZ with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", - "ClientXYZ needs assistance with dispute_resolution taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_compliance:ClientXYZ with specialty TaxSpecialty.COMPLIANCE:\n", - "ClientXYZ needs assistance with compliance taxes.\n", - "\n", - "==================================================\n", - "Tax specialist TaxSpecialist_preparation:ClientXYZ with specialty TaxSpecialty.PREPARATION:\n", - "ClientXYZ needs assistance with preparation taxes.\n" - ] - } - ], - "source": [ - "async def run_multi_tenant_multiple_scope() -> None:\n", - " # Create the runtime\n", - " runtime = SingleThreadedAgentRuntime()\n", - "\n", - " # Define TypeSubscriptions for each specialty and tenant\n", - " tenants = [\"ClientABC\", \"ClientXYZ\"]\n", - "\n", - " # Initialize agents for all specialties and add type subscriptions\n", - " for specialty in TaxSpecialty:\n", - " specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", - " await TaxSpecialist.register(\n", - " runtime=runtime,\n", - " type=specialist_agent_type,\n", - " factory=lambda specialty=specialty: TaxSpecialist( # type: ignore\n", - " description=f\"A tax specialist in {specialty.value}.\",\n", - " specialty=specialty,\n", - " system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", - " ),\n", - " )\n", - " for tenant in tenants:\n", - " specialist_subscription = TypeSubscription(\n", - " topic_type=f\"{tenant}_{specialty.value}\", agent_type=specialist_agent_type\n", - " )\n", - " await runtime.add_subscription(specialist_subscription)\n", - "\n", - " # Start the runtime\n", - " runtime.start()\n", - "\n", - " # Send messages for each tenant to each specialty\n", - " for tenant in tenants:\n", - " for specialty in TaxSpecialty:\n", - " topic_id = TopicId(type=f\"{tenant}_{specialty.value}\", source=tenant)\n", - " await runtime.publish_message(\n", - " ClientRequest(f\"{tenant} needs assistance with {specialty.value} taxes.\"),\n", - " topic_id=topic_id,\n", - " )\n", - "\n", - " # Allow time for message processing\n", 
- " await asyncio.sleep(1)\n", - "\n", - " # Stop the runtime when idle\n", - " await runtime.stop_when_idle()\n", - "\n", - "\n", - "await run_multi_tenant_multiple_scope()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.6" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Topic and Subscription Example Scenarios\n", + "\n", + "### Introduction\n", + "\n", + "In this cookbook, we explore how broadcasting works for agent communication in AutoGen using four different broadcasting scenarios. These scenarios illustrate various ways to handle and distribute messages among agents. We'll use a consistent example of a tax management company processing client requests to demonstrate each scenario.\n", + "\n", + "### Scenario Overview\n", + "\n", + "Imagine a tax management company that offers various services to clients, such as tax planning, dispute resolution, compliance, and preparation. The company employs a team of tax specialists, each with expertise in one of these areas, and a tax system manager who oversees the operations.\n", + "\n", + "Clients submit requests that need to be processed by the appropriate specialists. The communication between the clients, the tax system manager, and the tax specialists is handled through broadcasting in this system.\n", + "\n", + "We'll explore how different broadcasting scenarios affect the way messages are distributed among agents and how they can be used to tailor the communication flow to specific needs.\n", + "\n", + "---\n", + "\n", + "### Broadcasting Scenarios Overview\n", + "\n", + "We will cover the following broadcasting scenarios:\n", + "\n", + "1. **Single-Tenant, Single Scope of Publishing**\n", + "2. **Multi-Tenant, Single Scope of Publishing**\n", + "3. **Single-Tenant, Multiple Scopes of Publishing**\n", + "4. **Multi-Tenant, Multiple Scopes of Publishing**\n", + "\n", + "\n", + "Each scenario represents a different approach to message distribution and agent interaction within the system. By understanding these scenarios, you can design agent communication strategies that best fit your application's requirements." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from dataclasses import dataclass\n", + "from enum import Enum\n", + "from typing import List\n", + "\n", + "from autogen_core import (\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " TopicId,\n", + " TypeSubscription,\n", + " message_handler,\n", + ")\n", + "from autogen_core._default_subscription import DefaultSubscription\n", + "from autogen_core._default_topic import DefaultTopicId\n", + "from autogen_core.components.models import (\n", + " SystemMessage,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "class TaxSpecialty(str, Enum):\n", + " PLANNING = \"planning\"\n", + " DISPUTE_RESOLUTION = \"dispute_resolution\"\n", + " COMPLIANCE = \"compliance\"\n", + " PREPARATION = \"preparation\"\n", + "\n", + "\n", + "@dataclass\n", + "class ClientRequest:\n", + " content: str\n", + "\n", + "\n", + "@dataclass\n", + "class RequestAssessment:\n", + " content: str\n", + "\n", + "\n", + "class TaxSpecialist(RoutedAgent):\n", + " def __init__(\n", + " self,\n", + " description: str,\n", + " specialty: TaxSpecialty,\n", + " system_messages: List[SystemMessage],\n", + " ) -> None:\n", + " super().__init__(description)\n", + " self.specialty = specialty\n", + " self._system_messages = system_messages\n", + " self._memory: List[ClientRequest] = []\n", + "\n", + " @message_handler\n", + " async def handle_message(self, message: ClientRequest, ctx: MessageContext) -> None:\n", + " # Process the client request.\n", + " print(f\"\\n{'='*50}\\nTax specialist {self.id} with specialty {self.specialty}:\\n{message.content}\")\n", + " # Send a response back to the manager\n", + " if ctx.topic_id is None:\n", + " raise ValueError(\"Topic ID is required for broadcasting\")\n", + " await self.publish_message(\n", + " message=RequestAssessment(content=f\"I can handle this request in {self.specialty}.\"),\n", + " topic_id=ctx.topic_id,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. 
Single-Tenant, Single Scope of Publishing\n", + "\n", + "#### Scenario Explanation\n", + "In the single-tenant, single scope of publishing scenario:\n", + "\n", + "- All agents operate within a single tenant (e.g., one client or user session).\n", + "- Messages are published to a single topic, and all agents subscribe to this topic.\n", + "- Every agent receives every message that gets published to the topic.\n", + "\n", + "This scenario is suitable for situations where all agents need to be aware of all messages, and there's no need to isolate communication between different groups of agents or sessions.\n", + "\n", + "#### Application in the Tax Specialist Company\n", + "\n", + "In our tax specialist company, this scenario implies:\n", + "\n", + "- All tax specialists receive every client request and internal message.\n", + "- All agents collaborate closely, with full visibility of all communications.\n", + "- Useful for tasks or teams where all agents need to be aware of all messages.\n", + "\n", + "#### How the Scenario Works\n", + "\n", + "- Subscriptions: All agents use the default subscription (e.g., \"default\").\n", + "- Publishing: Messages are published to the default topic.\n", + "- Message Handling: Each agent decides whether to act on a message based on its content and available handlers.\n", + "\n", + "#### Benefits\n", + "- Simplicity: Easy to set up and understand.\n", + "- Collaboration: Promotes transparency and collaboration among agents.\n", + "- Flexibility: Agents can dynamically decide which messages to process.\n", + "\n", + "#### Considerations\n", + "- Scalability: May not scale well with a large number of agents or messages.\n", + "- Efficiency: Agents may receive many irrelevant messages, leading to unnecessary processing." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_1:default with specialty TaxSpecialty.PLANNING:\n", + "I need to have my tax for 2024 prepared.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_2:default with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "I need to have my tax for 2024 prepared.\n" + ] + } + ], + "source": [ + "async def run_single_tenant_single_scope() -> None:\n", + " # Create the runtime.\n", + " runtime = SingleThreadedAgentRuntime()\n", + "\n", + " # Register TaxSpecialist agents for each specialty\n", + " specialist_agent_type_1 = \"TaxSpecialist_1\"\n", + " specialist_agent_type_2 = \"TaxSpecialist_2\"\n", + " await TaxSpecialist.register(\n", + " runtime=runtime,\n", + " type=specialist_agent_type_1,\n", + " factory=lambda: TaxSpecialist(\n", + " description=\"A tax specialist 1\",\n", + " specialty=TaxSpecialty.PLANNING,\n", + " system_messages=[SystemMessage(content=\"You are a tax specialist.\")],\n", + " ),\n", + " )\n", + "\n", + " await TaxSpecialist.register(\n", + " runtime=runtime,\n", + " type=specialist_agent_type_2,\n", + " factory=lambda: TaxSpecialist(\n", + " description=\"A tax specialist 2\",\n", + " specialty=TaxSpecialty.DISPUTE_RESOLUTION,\n", + " system_messages=[SystemMessage(content=\"You are a tax specialist.\")],\n", + " ),\n", + " )\n", + "\n", + " # Add default subscriptions for each agent type\n", + " await runtime.add_subscription(DefaultSubscription(agent_type=specialist_agent_type_1))\n", + " await runtime.add_subscription(DefaultSubscription(agent_type=specialist_agent_type_2))\n", + "\n", + " # Start the runtime and send a message to agents on default topic\n", + " runtime.start()\n", + " await runtime.publish_message(ClientRequest(\"I need to have my tax for 2024 prepared.\"), topic_id=DefaultTopicId())\n", + " await runtime.stop_when_idle()\n", + "\n", + "\n", + "await run_single_tenant_single_scope()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Multi-Tenant, Single Scope of Publishing\n", + "\n", + "#### Scenario Explanation\n", + "\n", + "In the multi-tenant, single scope of publishing scenario:\n", + "\n", + "- There are multiple tenants (e.g., multiple clients or user sessions).\n", + "- Each tenant has its own isolated topic through the topic source.\n", + "- All agents within a tenant subscribe to the tenant's topic. 
If needed, new agent instances are created for each tenant.\n", + "- Messages are only visible to agents within the same tenant.\n", + "\n", + "This scenario is useful when you need to isolate communication between different tenants but want all agents within a tenant to be aware of all messages.\n", + "\n", + "#### Application in the Tax Specialist Company\n", + "\n", + "In this scenario:\n", + "\n", + "- The company serves multiple clients (tenants) simultaneously.\n", + "- For each client, a dedicated set of agent instances is created.\n", + "- Each client's communication is isolated from others.\n", + "- All agents for a client receive messages published to that client's topic.\n", + "\n", + "#### How the Scenario Works\n", + "\n", + "- Subscriptions: Agents subscribe to topics based on the tenant's identity.\n", + "- Publishing: Messages are published to the tenant-specific topic.\n", + "- Message Handling: Agents only receive messages relevant to their tenant.\n", + "\n", + "#### Benefits\n", + "- Tenant Isolation: Ensures data privacy and separation between clients.\n", + "- Collaboration Within Tenant: Agents can collaborate freely within their tenant.\n", + "\n", + "#### Considerations\n", + "- Complexity: Requires managing multiple sets of agents and topics.\n", + "- Resource Usage: More agent instances may consume additional resources." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_planning:ClientABC with specialty TaxSpecialty.PLANNING:\n", + "ClientABC requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_dispute_resolution:ClientABC with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "ClientABC requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_compliance:ClientABC with specialty TaxSpecialty.COMPLIANCE:\n", + "ClientABC requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_preparation:ClientABC with specialty TaxSpecialty.PREPARATION:\n", + "ClientABC requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_planning:ClientXYZ with specialty TaxSpecialty.PLANNING:\n", + "ClientXYZ requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_dispute_resolution:ClientXYZ with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "ClientXYZ requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_compliance:ClientXYZ with specialty TaxSpecialty.COMPLIANCE:\n", + "ClientXYZ requires tax services.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_preparation:ClientXYZ with specialty TaxSpecialty.PREPARATION:\n", + "ClientXYZ requires tax services.\n" + ] + } + ], + "source": [ + "async def run_multi_tenant_single_scope() -> None:\n", + " # Create the runtime\n", + " runtime = SingleThreadedAgentRuntime()\n", + "\n", + " # List of clients (tenants)\n", + " tenants = [\"ClientABC\", \"ClientXYZ\"]\n", + "\n", + " # Initialize sessions and map the topic type to each TaxSpecialist agent type\n", + " for specialty in 
TaxSpecialty:\n", + "        specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", + "        await TaxSpecialist.register(\n", + "            runtime=runtime,\n", + "            type=specialist_agent_type,\n", + "            factory=lambda specialty=specialty: TaxSpecialist(  # type: ignore\n", + "                description=f\"A tax specialist in {specialty.value}.\",\n", + "                specialty=specialty,\n", + "                system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", + "            ),\n", + "        )\n", + "        specialist_subscription = DefaultSubscription(agent_type=specialist_agent_type)\n", + "        await runtime.add_subscription(specialist_subscription)\n", + "\n", + "    # Start the runtime\n", + "    runtime.start()\n", + "\n", + "    # Publish client requests to their respective topics\n", + "    for tenant in tenants:\n", + "        topic_source = tenant  # The topic source is the client name\n", + "        topic_id = DefaultTopicId(source=topic_source)\n", + "        await runtime.publish_message(\n", + "            ClientRequest(f\"{tenant} requires tax services.\"),\n", + "            topic_id=topic_id,\n", + "        )\n", + "\n", + "    # Allow time for message processing\n", + "    await asyncio.sleep(1)\n", + "\n", + "    # Stop the runtime when idle\n", + "    await runtime.stop_when_idle()\n", + "\n", + "\n", + "await run_multi_tenant_single_scope()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Single-Tenant, Multiple Scopes of Publishing\n", + "\n", + "#### Scenario Explanation\n", + "\n", + "In the single-tenant, multiple scopes of publishing scenario:\n", + "\n", + "- All agents operate within a single tenant.\n", + "- Messages are published to different topics.\n", + "- Agents subscribe to specific topics relevant to their role or specialty.\n", + "- Messages are directed to subsets of agents based on the topic.\n", + "\n", + "This scenario allows for targeted communication within a tenant, enabling more granular control over message distribution.\n", + "\n", + "#### Application in the Tax Specialist Company\n", + "\n", + "In this scenario:\n", + "\n", + "- The tax system manager communicates with specific specialists based on their specialties.\n", + "- Different topics represent different specialties (e.g., \"planning\", \"compliance\").\n", + "- Specialists subscribe only to the topic that matches their specialty.\n", + "- The manager publishes messages to specific topics to reach the intended specialists.\n", + "\n", + "#### How the Scenario Works\n", + "\n", + "- Subscriptions: Agents subscribe to topics corresponding to their specialties.\n", + "- Publishing: Messages are published to topics based on the intended recipients.\n", + "- Message Handling: Only agents subscribed to a topic receive its messages.\n", + "\n", + "#### Benefits\n", + "\n", + "- Targeted Communication: Messages reach only the relevant agents.\n", + "- Efficiency: Reduces unnecessary message processing by agents.\n", + "\n", + "#### Considerations\n", + "\n", + "- Setup Complexity: Requires careful management of topics and subscriptions.\n", + "- Flexibility: Changing communication patterns requires updating subscriptions."
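+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A brief aside (a sketch, under the assumption that the decorator attaches the same subscription as a manual `TypeSubscription`): the per-specialty subscriptions created in the next cell can also be expressed with the `type_subscription` class decorator from `autogen_core`, at the cost of defining one class per topic type." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core import type_subscription\n", + "\n", + "\n", + "# Sketch only: a specialist subscribed to the \"planning\" topic type via the\n", + "# decorator; the subscription is attached when the agent type is registered.\n", + "@type_subscription(topic_type=TaxSpecialty.PLANNING.value)\n", + "class PlanningOnlySpecialist(TaxSpecialist):\n", + "    pass"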
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_planning:default with specialty TaxSpecialty.PLANNING:\n", + "I need assistance with planning taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_dispute_resolution:default with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "I need assistance with dispute_resolution taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_compliance:default with specialty TaxSpecialty.COMPLIANCE:\n", + "I need assistance with compliance taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_preparation:default with specialty TaxSpecialty.PREPARATION:\n", + "I need assistance with preparation taxes.\n" + ] + } + ], + "source": [ + "async def run_single_tenant_multiple_scope() -> None:\n", + " # Create the runtime\n", + " runtime = SingleThreadedAgentRuntime()\n", + " # Register TaxSpecialist agents for each specialty and add subscriptions\n", + " for specialty in TaxSpecialty:\n", + " specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", + " await TaxSpecialist.register(\n", + " runtime=runtime,\n", + " type=specialist_agent_type,\n", + " factory=lambda specialty=specialty: TaxSpecialist( # type: ignore\n", + " description=f\"A tax specialist in {specialty.value}.\",\n", + " specialty=specialty,\n", + " system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", + " ),\n", + " )\n", + " specialist_subscription = TypeSubscription(topic_type=specialty.value, agent_type=specialist_agent_type)\n", + " await runtime.add_subscription(specialist_subscription)\n", + "\n", + " # Start the runtime\n", + " runtime.start()\n", + "\n", + " # Publish a ClientRequest to each specialist's topic\n", + " for specialty in TaxSpecialty:\n", + " topic_id = TopicId(type=specialty.value, source=\"default\")\n", + " await runtime.publish_message(\n", + " ClientRequest(f\"I need assistance with {specialty.value} taxes.\"),\n", + " topic_id=topic_id,\n", + " )\n", + "\n", + " # Allow time for message processing\n", + " await asyncio.sleep(1)\n", + "\n", + " # Stop the runtime when idle\n", + " await runtime.stop_when_idle()\n", + "\n", + "\n", + "await run_single_tenant_multiple_scope()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. 
Multi-Tenant, Multiple Scopes of Publishing\n", + "\n", + "#### Scenario Explanation\n", + "\n", + "In the multi-tenant, multiple scopes of publishing scenario:\n", + "\n", + "- There are multiple tenants, each with its own set of agents.\n", + "- Messages are published to multiple topics within each tenant.\n", + "- Agents subscribe to tenant-specific topics relevant to their role.\n", + "- Combines tenant isolation with targeted communication.\n", + "\n", + "This scenario provides the highest level of control over message distribution, suitable for complex systems with multiple clients and specialized communication needs.\n", + "\n", + "#### Application in the Tax Specialist Company\n", + "\n", + "In this scenario:\n", + "\n", + "- The company serves multiple clients, each with dedicated agent instances.\n", + "- Within each client, agents communicate using multiple topics based on specialties.\n", + "- For example, ClientABC's planning specialist subscribes to the \"ClientABC_planning\" topic type, and messages published to it use \"ClientABC\" as the topic source.\n", + "- The tax system manager for each client communicates with their specialists using tenant-specific topics.\n", + "\n", + "#### How the Scenario Works\n", + "\n", + "- Subscriptions: Agents subscribe to topics based on both tenant identity and specialty.\n", + "- Publishing: Messages are published to tenant-specific and specialty-specific topics.\n", + "- Message Handling: Only agents matching the tenant and topic receive messages.\n", + "\n", + "#### Benefits\n", + "\n", + "- Complete Isolation: Ensures both tenant and communication isolation.\n", + "- Granular Control: Enables precise routing of messages to intended agents.\n", + "\n", + "#### Considerations\n", + "\n", + "- Complexity: Requires careful management of topics, tenants, and subscriptions.\n", + "- Resource Usage: Increased number of agent instances and topics may impact resources."
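+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One design note before the demo (an illustrative variant, not the approach used in the next cell): the demo encodes the tenant into the topic *type* (e.g., \"ClientABC_planning\"). Based on the source-keyed agent instances seen in scenario 2, a sketch of an alternative is to keep the topic type equal to the specialty and carry the tenant in the topic *source*:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "async def run_multi_tenant_multiple_scope_alt() -> None:\n", + "    # Hypothetical variant: the specialty is the topic type and the tenant is\n", + "    # the topic source, so each (type, source) pair maps to a per-tenant agent\n", + "    # instance and tenants stay isolated without tenant-specific topic types.\n", + "    runtime = SingleThreadedAgentRuntime()\n", + "    for specialty in TaxSpecialty:\n", + "        agent_type = f\"TaxSpecialistAlt_{specialty.value}\"\n", + "        await TaxSpecialist.register(\n", + "            runtime=runtime,\n", + "            type=agent_type,\n", + "            factory=lambda specialty=specialty: TaxSpecialist(  # type: ignore\n", + "                description=f\"A tax specialist in {specialty.value}.\",\n", + "                specialty=specialty,\n", + "                system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", + "            ),\n", + "        )\n", + "        await runtime.add_subscription(TypeSubscription(topic_type=specialty.value, agent_type=agent_type))\n", + "    runtime.start()\n", + "    for tenant in [\"ClientABC\", \"ClientXYZ\"]:\n", + "        for specialty in TaxSpecialty:\n", + "            await runtime.publish_message(\n", + "                ClientRequest(f\"{tenant} needs assistance with {specialty.value} taxes.\"),\n", + "                topic_id=TopicId(type=specialty.value, source=tenant),\n", + "            )\n", + "    await runtime.stop_when_idle()\n", + "\n", + "\n", + "# To try it: await run_multi_tenant_multiple_scope_alt()"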
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_planning:ClientABC with specialty TaxSpecialty.PLANNING:\n", + "ClientABC needs assistance with planning taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_dispute_resolution:ClientABC with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "ClientABC needs assistance with dispute_resolution taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_compliance:ClientABC with specialty TaxSpecialty.COMPLIANCE:\n", + "ClientABC needs assistance with compliance taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_preparation:ClientABC with specialty TaxSpecialty.PREPARATION:\n", + "ClientABC needs assistance with preparation taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_planning:ClientXYZ with specialty TaxSpecialty.PLANNING:\n", + "ClientXYZ needs assistance with planning taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_dispute_resolution:ClientXYZ with specialty TaxSpecialty.DISPUTE_RESOLUTION:\n", + "ClientXYZ needs assistance with dispute_resolution taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_compliance:ClientXYZ with specialty TaxSpecialty.COMPLIANCE:\n", + "ClientXYZ needs assistance with compliance taxes.\n", + "\n", + "==================================================\n", + "Tax specialist TaxSpecialist_preparation:ClientXYZ with specialty TaxSpecialty.PREPARATION:\n", + "ClientXYZ needs assistance with preparation taxes.\n" + ] + } + ], + "source": [ + "async def run_multi_tenant_multiple_scope() -> None:\n", + " # Create the runtime\n", + " runtime = SingleThreadedAgentRuntime()\n", + "\n", + " # Define TypeSubscriptions for each specialty and tenant\n", + " tenants = [\"ClientABC\", \"ClientXYZ\"]\n", + "\n", + " # Initialize agents for all specialties and add type subscriptions\n", + " for specialty in TaxSpecialty:\n", + " specialist_agent_type = f\"TaxSpecialist_{specialty.value}\"\n", + " await TaxSpecialist.register(\n", + " runtime=runtime,\n", + " type=specialist_agent_type,\n", + " factory=lambda specialty=specialty: TaxSpecialist( # type: ignore\n", + " description=f\"A tax specialist in {specialty.value}.\",\n", + " specialty=specialty,\n", + " system_messages=[SystemMessage(content=f\"You are a tax specialist in {specialty.value}.\")],\n", + " ),\n", + " )\n", + " for tenant in tenants:\n", + " specialist_subscription = TypeSubscription(\n", + " topic_type=f\"{tenant}_{specialty.value}\", agent_type=specialist_agent_type\n", + " )\n", + " await runtime.add_subscription(specialist_subscription)\n", + "\n", + " # Start the runtime\n", + " runtime.start()\n", + "\n", + " # Send messages for each tenant to each specialty\n", + " for tenant in tenants:\n", + " for specialty in TaxSpecialty:\n", + " topic_id = TopicId(type=f\"{tenant}_{specialty.value}\", source=tenant)\n", + " await runtime.publish_message(\n", + " ClientRequest(f\"{tenant} needs assistance with {specialty.value} taxes.\"),\n", + " topic_id=topic_id,\n", + " )\n", + "\n", + " # Allow time for message processing\n", 
+ " await asyncio.sleep(1)\n", + "\n", + " # Stop the runtime when idle\n", + " await runtime.stop_when_idle()\n", + "\n", + "\n", + "await run_multi_tenant_multiple_scope()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/concurrent-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/concurrent-agents.ipynb index 6ba7efc45..63626202f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/concurrent-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/concurrent-agents.ipynb @@ -34,13 +34,13 @@ " DefaultTopicId,\n", " MessageContext,\n", " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", " TopicId,\n", " TypeSubscription,\n", " default_subscription,\n", " message_handler,\n", " type_subscription,\n", - ")\n", - "from autogen_core.application import SingleThreadedAgentRuntime" + ")" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb index 1f1d06f70..3a472fca8 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/group-chat.ipynb @@ -78,11 +78,11 @@ " Image,\n", " MessageContext,\n", " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", " TopicId,\n", " TypeSubscription,\n", " message_handler,\n", ")\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", "from autogen_core.components.models import (\n", " AssistantMessage,\n", " ChatCompletionClient,\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb index c8379e97f..9e2a4f797 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb @@ -56,8 +56,15 @@ "import uuid\n", "from typing import List, Tuple\n", "\n", - "from autogen_core import FunctionCall, MessageContext, RoutedAgent, TopicId, TypeSubscription, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import (\n", + " FunctionCall,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " TopicId,\n", + " TypeSubscription,\n", + " message_handler,\n", + ")\n", "from autogen_core.components.models import (\n", " AssistantMessage,\n", " ChatCompletionClient,\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb index 735f5a61f..0cfc6a36c 100644 --- 
a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb @@ -1,520 +1,519 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Mixture of Agents\n", - "\n", - "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", - "that models after the feed-forward neural network architecture.\n", - "\n", - "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", - "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", - "Messages from the worker agents in a previous layer are concatenated and sent to\n", - "all the worker agents in the next layer.\n", - "\n", - "This example implements the Mixture of Agents pattern using the core library\n", - "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", - "\n", - "Here is a high-level procedure overview of the pattern:\n", - "1. The orchestrator agent takes input a user task and first dispatches it to the worker agents in the first layer.\n", - "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", - "3. The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", - "4. The process continues until the final layer is reached.\n", - "5. In the final layer, the orchestrator agent aggregates the results from previous layer and returns a single final result to the user.\n", - "\n", - "We use the direct messaging API {py:meth}`~autogen_core.base.BaseAgent.send_message` to implement this pattern.\n", - "This makes it easier to add more features like worker task cancellation and error handling in the future." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "from dataclasses import dataclass\n", - "from typing import List\n", - "\n", - "from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Protocol\n", - "\n", - "The agents communicate using the following messages:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class WorkerTask:\n", - " task: str\n", - " previous_results: List[str]\n", - "\n", - "\n", - "@dataclass\n", - "class WorkerTaskResult:\n", - " result: str\n", - "\n", - "\n", - "@dataclass\n", - "class UserTask:\n", - " task: str\n", - "\n", - "\n", - "@dataclass\n", - "class FinalResult:\n", - " result: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Worker Agent\n", - "\n", - "Each worker agent receives a task from the orchestrator agent and processes them\n", - "indepedently.\n", - "Once the task is completed, the worker agent returns the result." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "class WorkerAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " ) -> None:\n", - " super().__init__(description=\"Worker Agent\")\n", - " self._model_client = model_client\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", - " if message.previous_results:\n", - " # If previous results are provided, we need to synthesize them to create a single prompt.\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. {r}\" for i, r in enumerate(message.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " else:\n", - " # If no previous results are provided, we can simply pass the user query to the model.\n", - " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", - " assert isinstance(model_result.content, str)\n", - " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", - " return WorkerTaskResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Orchestrator Agent\n", - "\n", - "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", - "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", - "the orchestrator agent aggregates the results and publishes the final result." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "class OrchestratorAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " worker_agent_types: List[str],\n", - " num_layers: int,\n", - " ) -> None:\n", - " super().__init__(description=\"Aggregator Agent\")\n", - " self._model_client = model_client\n", - " self._worker_agent_types = worker_agent_types\n", - " self._num_layers = num_layers\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", - " # Create task for the first layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[])\n", - " # Iterate over layers.\n", - " for i in range(self._num_layers - 1):\n", - " # Assign workers for this layer.\n", - " worker_ids = [\n", - " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", - " for j, worker_type in enumerate(self._worker_agent_types)\n", - " ]\n", - " # Dispatch tasks to workers.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", - " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", - " # Prepare task for the next layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", - " # Perform final aggregation.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. {r}\" for i, r in enumerate(worker_task.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " assert isinstance(model_result.content, str)\n", - " return FinalResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running Mixture of Agents\n", - "\n", - "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "task = (\n", - " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", - "We only need to register a single worker agent types, \"worker\", because we are using\n", - "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", - "If you want to use different models, you will need to register multiple worker agent types,\n", - "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", - "factory function.\n", - "\n", - "The instances of worker agents are automatically created when the orchestrator agent\n", - "dispatches tasks to them.\n", - "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", - "for more information on agent lifecycle." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_1:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", - "\n", - "Add the parts together:\n", - "\\[ 3 + 4 + 2 = 9 \\]\n", - "\n", - "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", - "\n", - "Now, multiply the value of one part by the number of parts for each person:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[ 3 \\times 48 = 144 \\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[ 4 \\times 48 = 192 \\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[ 2 \\times 48 = 96 \\]\n", - "\n", - "Thus, the number of cookies each person gets is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_0:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", - "\n", - "\\[\n", - "3 + 4 + 2 = 9 \\text{ parts}\n", - "\\]\n", - "\n", - "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\n", - "\\[\n", - "\\text{Value of one part} = \\frac{432}{9} = 48\n", - "\\]\n", - "\n", - "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[\n", - "3 \\times 48 = 144 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[\n", - "4 \\times 48 = 192 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[\n", - "2 \\times 48 = 96 \\text{ cookies}\n", - "\\]\n", - "\n", - "Thus, the number of cookies each 
person gets is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_2:\n", - "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. \n", - "\n", - "The total parts are:\n", - "- Alice: 3 parts\n", - "- Bob: 4 parts\n", - "- Charlie: 2 parts\n", - "\n", - "Adding these parts together gives:\n", - "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", - "\n", - "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", - "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", - "\n", - "Now we can calculate the number of cookies for each person:\n", - "- Alice's share: \n", - "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", - "- Bob's share: \n", - "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", - "- Charlie's share: \n", - "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", - "\n", - "So, the final distribution of cookies is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_2:\n", - "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", - "\n", - "1. **Determine the total number of parts in the ratio**:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48\n", - " \\]\n", - "\n", - "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", - " - **For Alice (3 parts)**:\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **For Bob (4 parts)**:\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **For Charlie (2 parts)**:\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "Thus, the final distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_0:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", - "\n", - "1. **Calculate the Total Parts**: \n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. 
**Determine the Value of One Part**: \n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. **Calculate Each Person's Share**:\n", - " - **Alice's Share** (3 parts):\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share** (4 parts):\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie's Share** (2 parts):\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Final Distribution**:\n", - " - Alice: 144 cookies\n", - " - Bob: 192 cookies\n", - " - Charlie: 96 cookies\n", - "\n", - "Thus, the distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_1:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", - "\n", - "1. **Calculate Total Parts:**\n", - " \\[\n", - " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. **Calculate the Share for Each Person:**\n", - " - **Alice's Share (3 parts):**\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share (4 parts):**\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie’s Share (2 parts):**\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Summary of the Distribution:**\n", - " - **Alice:** 144 cookies\n", - " - **Bob:** 192 cookies\n", - " - **Charlie:** 96 cookies\n", - "\n", - "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Performing final aggregation\n", - "--------------------------------------------------------------------------------\n", - "Final result:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", - "\n", - "1. **Calculate the Total Parts in the Ratio:**\n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. 
**Calculate Each Person's Share:**\n", - "   - **Alice's Share (3 parts):**\n", - "     \\[\n", - "     3 \\times 48 = 144 \\text{ cookies}\n", - "     \\]\n", - "   - **Bob's Share (4 parts):**\n", - "     \\[\n", - "     4 \\times 48 = 192 \\text{ cookies}\n", - "     \\]\n", - "   - **Charlie's Share (2 parts):**\n", - "     \\[\n", - "     2 \\times 48 = 96 \\text{ cookies}\n", - "     \\]\n", - "\n", - "Therefore, the distribution of cookies is as follows:\n", - "- **Alice:** 144 cookies\n", - "- **Bob:** 192 cookies\n", - "- **Charlie:** 96 cookies\n", - "\n", - "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await WorkerAgent.register(\n", - "    runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", - ")\n", - "await OrchestratorAgent.register(\n", - "    runtime,\n", - "    \"orchestrator\",\n", - "    lambda: OrchestratorAgent(\n", - "        model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", - "    ),\n", - ")\n", - "\n", - "runtime.start()\n", - "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", - "await runtime.stop_when_idle()\n", - "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mixture of Agents\n", + "\n", + "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", + "that is modeled after the feed-forward neural network architecture.\n", + "\n", + "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", + "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", + "Messages from the worker agents in a previous layer are concatenated and sent to\n", + "all the worker agents in the next layer.\n", + "\n", + "This example implements the Mixture of Agents pattern using the core library\n", + "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", + "\n", + "Here is a high-level procedure overview of the pattern:\n", + "1. The orchestrator agent takes a user task as input and first dispatches it to the worker agents in the first layer.\n", + "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", + "3. The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", + "4. The process continues until the final layer is reached.\n", + "5. In the final layer, the orchestrator agent aggregates the results from the previous layer and returns a single final result to the user.\n", + "\n", + "We use the direct messaging API {py:meth}`~autogen_core.base.BaseAgent.send_message` to implement this pattern.\n", + "This makes it easier to add more features like worker task cancellation and error handling in the future."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Protocol\n", + "\n", + "The agents communicate using the following messages:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class WorkerTask:\n", + "    task: str\n", + "    previous_results: List[str]\n", + "\n", + "\n", + "@dataclass\n", + "class WorkerTaskResult:\n", + "    result: str\n", + "\n", + "\n", + "@dataclass\n", + "class UserTask:\n", + "    task: str\n", + "\n", + "\n", + "@dataclass\n", + "class FinalResult:\n", + "    result: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Worker Agent\n", + "\n", + "Each worker agent receives a task from the orchestrator agent and processes it\n", + "independently.\n", + "Once the task is completed, the worker agent returns the result." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "class WorkerAgent(RoutedAgent):\n", + "    def __init__(\n", + "        self,\n", + "        model_client: ChatCompletionClient,\n", + "    ) -> None:\n", + "        super().__init__(description=\"Worker Agent\")\n", + "        self._model_client = model_client\n", + "\n", + "    @message_handler\n", + "    async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", + "        if message.previous_results:\n", + "            # If previous results are provided, we need to synthesize them to create a single prompt.\n", + "            system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + "            system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " else:\n", + " # If no previous results are provided, we can simply pass the user query to the model.\n", + " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", + " assert isinstance(model_result.content, str)\n", + " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", + " return WorkerTaskResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Orchestrator Agent\n", + "\n", + "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", + "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", + "the orchestrator agent aggregates the results and publishes the final result." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "class OrchestratorAgent(RoutedAgent):\n", + " def __init__(\n", + " self,\n", + " model_client: ChatCompletionClient,\n", + " worker_agent_types: List[str],\n", + " num_layers: int,\n", + " ) -> None:\n", + " super().__init__(description=\"Aggregator Agent\")\n", + " self._model_client = model_client\n", + " self._worker_agent_types = worker_agent_types\n", + " self._num_layers = num_layers\n", + "\n", + " @message_handler\n", + " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", + " # Create task for the first layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[])\n", + " # Iterate over layers.\n", + " for i in range(self._num_layers - 1):\n", + " # Assign workers for this layer.\n", + " worker_ids = [\n", + " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", + " for j, worker_type in enumerate(self._worker_agent_types)\n", + " ]\n", + " # Dispatch tasks to workers.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", + " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", + " # Prepare task for the next layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", + " # Perform final aggregation.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", + " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " assert isinstance(model_result.content, str)\n", + " return FinalResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running Mixture of Agents\n", + "\n", + "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "task = (\n", + " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", + "We only need to register a single worker agent types, \"worker\", because we are using\n", + "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", + "If you want to use different models, you will need to register multiple worker agent types,\n", + "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", + "factory function.\n", + "\n", + "The instances of worker agents are automatically created when the orchestrator agent\n", + "dispatches tasks to them.\n", + "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", + "for more information on agent lifecycle." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_1:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", + "\n", + "Add the parts together:\n", + "\\[ 3 + 4 + 2 = 9 \\]\n", + "\n", + "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", + "\n", + "Now, multiply the value of one part by the number of parts for each person:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[ 3 \\times 48 = 144 \\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[ 4 \\times 48 = 192 \\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[ 2 \\times 48 = 96 \\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_0:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", + "\n", + "\\[\n", + "3 + 4 + 2 = 9 \\text{ parts}\n", + "\\]\n", + "\n", + "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\n", + "\\[\n", + "\\text{Value of one part} = \\frac{432}{9} = 48\n", + "\\]\n", + "\n", + "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[\n", + "3 \\times 48 = 144 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[\n", + "4 \\times 48 = 192 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[\n", + "2 \\times 48 = 96 \\text{ cookies}\n", + "\\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_2:\n", + "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. 
\n", + "\n", + "The total parts are:\n", + "- Alice: 3 parts\n", + "- Bob: 4 parts\n", + "- Charlie: 2 parts\n", + "\n", + "Adding these parts together gives:\n", + "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", + "\n", + "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", + "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", + "\n", + "Now we can calculate the number of cookies for each person:\n", + "- Alice's share: \n", + "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", + "- Bob's share: \n", + "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", + "- Charlie's share: \n", + "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", + "\n", + "So, the final distribution of cookies is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_2:\n", + "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", + "\n", + "1. **Determine the total number of parts in the ratio**:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48\n", + " \\]\n", + "\n", + "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", + " - **For Alice (3 parts)**:\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **For Bob (4 parts)**:\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **For Charlie (2 parts)**:\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Thus, the final distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_0:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", + "\n", + "1. **Calculate the Total Parts**: \n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part**: \n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share**:\n", + " - **Alice's Share** (3 parts):\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share** (4 parts):\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share** (2 parts):\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Final Distribution**:\n", + " - Alice: 144 cookies\n", + " - Bob: 192 cookies\n", + " - Charlie: 96 cookies\n", + "\n", + "Thus, the distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_1:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", + "\n", + "1. **Calculate Total Parts:**\n", + " \\[\n", + " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. **Calculate the Share for Each Person:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie’s Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Summary of the Distribution:**\n", + " - **Alice:** 144 cookies\n", + " - **Bob:** 192 cookies\n", + " - **Charlie:** 96 cookies\n", + "\n", + "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Performing final aggregation\n", + "--------------------------------------------------------------------------------\n", + "Final result:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", + "\n", + "1. **Calculate the Total Parts in the Ratio:**\n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Therefore, the distribution of cookies is as follows:\n", + "- **Alice:** 144 cookies\n", + "- **Bob:** 192 cookies\n", + "- **Charlie:** 96 cookies\n", + "\n", + "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await WorkerAgent.register(\n", + " runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", + ")\n", + "await OrchestratorAgent.register(\n", + " runtime,\n", + " \"orchestrator\",\n", + " lambda: OrchestratorAgent(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", + " ),\n", + ")\n", + "\n", + "runtime.start()\n", + "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", + "await runtime.stop_when_idle()\n", + "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb index ad0137a46..7bbdac84f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb @@ -1,571 +1,571 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Multi-Agent Debate\n", - "\n", - "Multi-Agent Debate is a multi-agent design pattern that simulates a multi-turn interaction \n", - "where in each turn, agents exchange their responses with each other, and refine \n", - "their responses based on the responses from other agents.\n", - "\n", - "This example shows an implementation of the multi-agent debate pattern for solving\n", - "math problems from the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k).\n", - "\n", - "There are of two types of agents in this pattern: solver agents and an aggregator agent.\n", - "The solver agents are connected in a sparse manner following the technique described in\n", - "[Improving Multi-Agent Debate with Sparse Communication Topology](https://arxiv.org/abs/2406.11776).\n", - "The solver agents are responsible for solving math problems and exchanging responses with each other.\n", - "The aggregator agent is responsible for distributing math problems to the solver agents,\n", - "waiting for their final responses, and aggregating the responses to get the final answer.\n", - "\n", - "The pattern works as follows:\n", - "1. User sends a math problem to the aggregator agent.\n", - "2. 
The aggregator agent distributes the problem to the solver agents.\n", - "3. Each solver agent processes the problem, and publishes a response to its neighbors.\n", - "4. Each solver agent uses the responses from its neighbors to refine its response, and publishes a new response.\n", - "5. Repeat step 4 for a fixed number of rounds. In the final round, each solver agent publishes a final response.\n", - "6. The aggregator agent uses majority voting to aggregate the final responses from all solver agents to get a final answer, and publishes the answer.\n", - "\n", - "We will be using the broadcast API, i.e., {py:meth}`~autogen_core.base.BaseAgent.publish_message`,\n", - "and we will be using topic and subscription to implement the communication topology.\n", - "Read about [Topics and Subscriptions](../core-concepts/topic-and-subscription.md) to understand how they work." - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [], - "source": [ - "import re\n", - "from dataclasses import dataclass\n", - "from typing import Dict, List\n", - "\n", - "from autogen_core import (\n", - " DefaultTopicId,\n", - " MessageContext,\n", - " RoutedAgent,\n", - " TypeSubscription,\n", - " default_subscription,\n", - " message_handler,\n", - ")\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.components.models import (\n", - " AssistantMessage,\n", - " ChatCompletionClient,\n", - " LLMMessage,\n", - " SystemMessage,\n", - " UserMessage,\n", - ")\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Protocol\n", - "\n", - "First, we define the messages used by the agents.\n", - "`IntermediateSolverResponse` is the message exchanged among the solver agents in each round,\n", - "and `FinalSolverResponse` is the message published by the solver agents in the final round." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class Question:\n", - " content: str\n", - "\n", - "\n", - "@dataclass\n", - "class Answer:\n", - " content: str\n", - "\n", - "\n", - "@dataclass\n", - "class SolverRequest:\n", - " content: str\n", - " question: str\n", - "\n", - "\n", - "@dataclass\n", - "class IntermediateSolverResponse:\n", - " content: str\n", - " question: str\n", - " answer: str\n", - " round: int\n", - "\n", - "\n", - "@dataclass\n", - "class FinalSolverResponse:\n", - " answer: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solver Agent\n", - "\n", - "The solver agent is responsible for solving math problems and exchanging responses with other solver agents.\n", - "Upon receiving a `SolverRequest`, the solver agent uses an LLM to generate an answer.\n", - "Then, it publishes a `IntermediateSolverResponse`\n", - "or a `FinalSolverResponse` based on the round number.\n", - "\n", - "The solver agent is given a topic type, which is used to indicate the topic\n", - "to which the agent should publish intermediate responses. This topic is subscribed\n", - "to by its neighbors to receive responses from this agent -- we will show\n", - "how this is done later.\n", - "\n", - "We use {py:meth}`~autogen_core.components.default_subscription` to let\n", - "solver agents subscribe to the default topic, which is used by the aggregator agent\n", - "to collect the final responses from the solver agents." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "@default_subscription\n", - "class MathSolver(RoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n", - " super().__init__(\"A debator.\")\n", - " self._topic_type = topic_type\n", - " self._model_client = model_client\n", - " self._num_neighbors = num_neighbors\n", - " self._history: List[LLMMessage] = []\n", - " self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n", - " self._system_messages = [\n", - " SystemMessage(\n", - " content=(\n", - " \"You are a helpful assistant with expertise in mathematics and reasoning. \"\n", - " \"Your task is to assist in solving a math reasoning problem by providing \"\n", - " \"a clear and detailed solution. Limit your output within 100 words, \"\n", - " \"and your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response. \"\n", - " \"For example, 'The answer is {{42}}.'\"\n", - " )\n", - " )\n", - " ]\n", - " self._round = 0\n", - " self._max_round = max_round\n", - "\n", - " @message_handler\n", - " async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n", - " # Add the question to the memory.\n", - " self._history.append(UserMessage(content=message.content, source=\"user\"))\n", - " # Make an inference using the model.\n", - " model_result = await self._model_client.create(self._system_messages + self._history)\n", - " assert isinstance(model_result.content, str)\n", - " # Add the response to the memory.\n", - " self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n", - " print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n", - " # Extract the answer from the response.\n", - " match = re.search(r\"\\{\\{(\\-?\\d+(\\.\\d+)?)\\}\\}\", model_result.content)\n", - " if match is None:\n", - " raise ValueError(\"The model response does not contain the answer.\")\n", - " answer = match.group(1)\n", - " # Increment the counter.\n", - " self._round += 1\n", - " if self._round == self._max_round:\n", - " # If the counter reaches the maximum round, publishes a final response.\n", - " await self.publish_message(FinalSolverResponse(answer=answer), topic_id=DefaultTopicId())\n", - " else:\n", - " # Publish intermediate response to the topic associated with this solver.\n", - " await self.publish_message(\n", - " IntermediateSolverResponse(\n", - " content=model_result.content,\n", - " question=message.question,\n", - " answer=answer,\n", - " round=self._round,\n", - " ),\n", - " topic_id=DefaultTopicId(type=self._topic_type),\n", - " )\n", - "\n", - " @message_handler\n", - " async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n", - " # Add neighbor's response to the buffer.\n", - " self._buffer.setdefault(message.round, []).append(message)\n", - " # Check if all neighbors have responded.\n", - " if len(self._buffer[message.round]) == self._num_neighbors:\n", - " print(\n", - " f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n", - " )\n", - " # Prepare the prompt for the next question.\n", - " prompt = \"These are the solutions to the problem from other agents:\\n\"\n", - " for resp in self._buffer[message.round]:\n", - " prompt += f\"One agent solution: 
{resp.content}\\n\"\n", - " prompt += (\n", - " \"Using the solutions from other agents as additional information, \"\n", - " \"can you provide your answer to the math problem? \"\n", - " f\"The original math problem is {message.question}. \"\n", - " \"Your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response.\"\n", - " )\n", - " # Send the question to the agent itself to solve.\n", - " await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n", - " # Clear the buffer.\n", - " self._buffer.pop(message.round)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Aggregator Agent\n", - "\n", - "The aggregator agent is responsible for handling user question and \n", - "distributing math problems to the solver agents.\n", - "\n", - "The aggregator subscribes to the default topic using\n", - "{py:meth}`~autogen_core.components.default_subscription`. The default topic is used to\n", - "recieve user question, receive the final responses from the solver agents,\n", - "and publish the final answer back to the user.\n", - "\n", - "In a more complex application when you want to isolate the multi-agent debate into a\n", - "sub-component, you should use\n", - "{py:meth}`~autogen_core.components.type_subscription` to set a specific topic\n", - "type for the aggregator-solver communication, \n", - "and have the both the solver and aggregator publish and subscribe to that topic type." - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "@default_subscription\n", - "class MathAggregator(RoutedAgent):\n", - " def __init__(self, num_solvers: int) -> None:\n", - " super().__init__(\"Math Aggregator\")\n", - " self._num_solvers = num_solvers\n", - " self._buffer: List[FinalSolverResponse] = []\n", - "\n", - " @message_handler\n", - " async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n", - " print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n", - " prompt = (\n", - " f\"Can you solve the following math problem?\\n{message.content}\\n\"\n", - " \"Explain your reasoning. 
Your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response.\"\n", - " )\n", - " print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n", - " await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n", - "\n", - " @message_handler\n", - " async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n", - " self._buffer.append(message)\n", - " if len(self._buffer) == self._num_solvers:\n", - " print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n", - " # Find the majority answer.\n", - " answers = [resp.answer for resp in self._buffer]\n", - " majority_answer = max(set(answers), key=answers.count)\n", - " # Publish the aggregated response.\n", - " await self.publish_message(Answer(content=majority_answer), topic_id=DefaultTopicId())\n", - " # Clear the responses.\n", - " self._buffer.clear()\n", - " print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setting Up a Debate\n", - "\n", - "We will now set up a multi-agent debate with 4 solver agents and 1 aggregator agent.\n", - "The solver agents will be connected in a sparse manner as illustrated in the figure\n", - "below:\n", - "\n", - "```\n", - "A --- B\n", - "| |\n", - "| |\n", - "C --- D\n", - "```\n", - "\n", - "Each solver agent is connected to two other solver agents. \n", - "For example, agent A is connected to agents B and C.\n", - "\n", - "Let's first create a runtime and register the agent types." - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='MathAggregator')" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverA\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverA\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverB\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverB\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverC\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverC\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverD\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverD\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathAggregator.register(runtime, \"MathAggregator\", lambda: MathAggregator(num_solvers=4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we will create the solver agent topology using {py:class}`~autogen_core.components.TypeSubscription`,\n", - "which maps each solver agent's publishing topic type to its neighbors' agent types." 
- ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [], - "source": [ - "# Subscriptions for topic published to by MathSolverA.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverD\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverB\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverB.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverA\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverC\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverC.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverB\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverD\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverD.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverC\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverA\"))\n", - "\n", - "# All solvers and the aggregator subscribe to the default topic." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solving Math Problems\n", - "\n", - "Now let's run the debate to solve a math problem.\n", - "We publish a `SolverRequest` to the default topic, \n", - "and the aggregator agent will start the debate." - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default received question:\n", - "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default publishes initial solver request.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 0:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. To find the total number of clips sold in April and May, we add the amounts: 48 (April) + 24 (May) = 72 clips. \n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 0:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many clips, which is 48 / 2 = 24 clips. To find the total clips sold in April and May, we add both amounts: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Thus, the total number of clips sold altogether is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 0:\n", - "Natalia sold 48 clips in April. In May, she sold half as many, which is \\( \\frac{48}{2} = 24 \\) clips. 
To find the total clips sold in both months, we add the clips sold in April and May together:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Thus, Natalia sold a total of 72 clips.\n", - "\n", - "The answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 0:\n", - "In April, Natalia sold clips to 48 friends. In May, she sold half as many, which is calculated as follows:\n", - "\n", - "Half of 48 is \\( 48 \\div 2 = 24 \\).\n", - "\n", - "Now, to find the total clips sold in April and May, we add the totals from both months:\n", - "\n", - "\\( 48 + 24 = 72 \\).\n", - "\n", - "Thus, the total number of clips Natalia sold altogether in April and May is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. The total number of clips sold in April and May is calculated by adding the two amounts: 48 (April) + 24 (May) = 72 clips. \n", - "\n", - "Therefore, the answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is 48 / 2 = 24 clips. To find the total clips sold in both months, we sum the clips from April and May: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Thus, Natalia sold a total of {{72}} clips. \n", - "\n", - "The answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 1:\n", - "Natalia sold 48 clips in April. In May, she sold half of that, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold, we add the clips sold in both months:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Therefore, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half that amount, which is 48 / 2 = 24 clips. 
To find the total clips sold in both months, we add the amounts: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Therefore, the total number of clips sold altogether by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold in both months, we add the amounts from April and May:\n", - "\n", - "\\( 48 + 24 = 72 \\).\n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold in both months, we add the clips sold in April and May: \n", - "\n", - "48 (April) + 24 (May) = 72. \n", - "\n", - "Thus, the total number of clips sold altogether by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold over both months, we sum the totals: \n", - "\n", - "\\( 48 (April) + 24 (May) = 72 \\).\n", - "\n", - "Therefore, the total number of clips Natalia sold is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 2:\n", - "To solve the problem, we know that Natalia sold 48 clips in April. In May, she sold half that amount, which is calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold over both months, we add the two amounts together:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default received all final answers from 4 solvers.\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default publishes final answer:\n", - "72\n" - ] - } - ], - "source": [ - "question = \"Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. 
How many clips did Natalia sell altogether in April and May?\"\n", - "runtime.start()\n", - "await runtime.publish_message(Question(content=question), DefaultTopicId())\n", - "await runtime.stop_when_idle()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-Agent Debate\n", + "\n", + "Multi-Agent Debate is a multi-agent design pattern that simulates a multi-turn interaction \n", + "where, in each turn, agents exchange their responses with each other and refine \n", + "their responses based on the responses from other agents.\n", + "\n", + "This example shows an implementation of the multi-agent debate pattern for solving\n", + "math problems from the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k).\n", + "\n", + "There are two types of agents in this pattern: solver agents and an aggregator agent.\n", + "The solver agents are connected in a sparse manner following the technique described in\n", + "[Improving Multi-Agent Debate with Sparse Communication Topology](https://arxiv.org/abs/2406.11776).\n", + "The solver agents are responsible for solving math problems and exchanging responses with each other.\n", + "The aggregator agent is responsible for distributing math problems to the solver agents,\n", + "waiting for their final responses, and aggregating the responses to get the final answer.\n", + "\n", + "The pattern works as follows:\n", + "1. The user sends a math problem to the aggregator agent.\n", + "2. The aggregator agent distributes the problem to the solver agents.\n", + "3. Each solver agent processes the problem and publishes a response to its neighbors.\n", + "4. Each solver agent uses the responses from its neighbors to refine its response and publishes a new response.\n", + "5. Repeat step 4 for a fixed number of rounds. In the final round, each solver agent publishes a final response.\n", + "6. The aggregator agent uses majority voting to aggregate the final responses from all solver agents to get a final answer, and publishes the answer.\n", + "\n", + "We will be using the broadcast API, i.e., {py:meth}`~autogen_core.base.BaseAgent.publish_message`,\n", + "and we will be using topics and subscriptions to implement the communication topology.\n", + "Read about [Topics and Subscriptions](../core-concepts/topic-and-subscription.md) to understand how they work."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "from dataclasses import dataclass\n", + "from typing import Dict, List\n", + "\n", + "from autogen_core import (\n", + " DefaultTopicId,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " TypeSubscription,\n", + " default_subscription,\n", + " message_handler,\n", + ")\n", + "from autogen_core.components.models import (\n", + " AssistantMessage,\n", + " ChatCompletionClient,\n", + " LLMMessage,\n", + " SystemMessage,\n", + " UserMessage,\n", + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Protocol\n", + "\n", + "First, we define the messages used by the agents.\n", + "`IntermediateSolverResponse` is the message exchanged among the solver agents in each round,\n", + "and `FinalSolverResponse` is the message published by the solver agents in the final round." + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Question:\n", + " content: str\n", + "\n", + "\n", + "@dataclass\n", + "class Answer:\n", + " content: str\n", + "\n", + "\n", + "@dataclass\n", + "class SolverRequest:\n", + " content: str\n", + " question: str\n", + "\n", + "\n", + "@dataclass\n", + "class IntermediateSolverResponse:\n", + " content: str\n", + " question: str\n", + " answer: str\n", + " round: int\n", + "\n", + "\n", + "@dataclass\n", + "class FinalSolverResponse:\n", + " answer: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Solver Agent\n", + "\n", + "The solver agent is responsible for solving math problems and exchanging responses with other solver agents.\n", + "Upon receiving a `SolverRequest`, the solver agent uses an LLM to generate an answer.\n", + "Then, it publishes an `IntermediateSolverResponse`\n", + "or a `FinalSolverResponse` based on the round number.\n", + "\n", + "The solver agent is given a topic type, which is used to indicate the topic\n", + "to which the agent should publish intermediate responses. This topic is subscribed\n", + "to by its neighbors to receive responses from this agent -- we will show\n", + "how this is done later.\n", + "\n", + "We use {py:meth}`~autogen_core.components.default_subscription` to let\n", + "solver agents subscribe to the default topic, which is used by the aggregator agent\n", + "to collect the final responses from the solver agents." + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "@default_subscription\n", + "class MathSolver(RoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n", + " super().__init__(\"A debater.\")\n", + " self._topic_type = topic_type\n", + " self._model_client = model_client\n", + " self._num_neighbors = num_neighbors\n", + " self._history: List[LLMMessage] = []\n", + " self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n", + " self._system_messages = [\n", + " SystemMessage(\n", + " content=(\n", + " \"You are a helpful assistant with expertise in mathematics and reasoning. \"\n", + " \"Your task is to assist in solving a math reasoning problem by providing \"\n", + " \"a clear and detailed solution. 
Limit your output within 100 words, \"\n", + " \"and your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response. \"\n", + " \"For example, 'The answer is {{42}}.'\"\n", + " )\n", + " )\n", + " ]\n", + " self._round = 0\n", + " self._max_round = max_round\n", + "\n", + " @message_handler\n", + " async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n", + " # Add the question to the memory.\n", + " self._history.append(UserMessage(content=message.content, source=\"user\"))\n", + " # Make an inference using the model.\n", + " model_result = await self._model_client.create(self._system_messages + self._history)\n", + " assert isinstance(model_result.content, str)\n", + " # Add the response to the memory.\n", + " self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n", + " print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n", + " # Extract the answer from the response.\n", + " match = re.search(r\"\\{\\{(\\-?\\d+(\\.\\d+)?)\\}\\}\", model_result.content)\n", + " if match is None:\n", + " raise ValueError(\"The model response does not contain the answer.\")\n", + " answer = match.group(1)\n", + " # Increment the counter.\n", + " self._round += 1\n", + " if self._round == self._max_round:\n", + " # If the counter reaches the maximum round, publish a final response.\n", + " await self.publish_message(FinalSolverResponse(answer=answer), topic_id=DefaultTopicId())\n", + " else:\n", + " # Publish an intermediate response to the topic associated with this solver.\n", + " await self.publish_message(\n", + " IntermediateSolverResponse(\n", + " content=model_result.content,\n", + " question=message.question,\n", + " answer=answer,\n", + " round=self._round,\n", + " ),\n", + " topic_id=DefaultTopicId(type=self._topic_type),\n", + " )\n", + "\n", + " @message_handler\n", + " async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n", + " # Add the neighbor's response to the buffer.\n", + " self._buffer.setdefault(message.round, []).append(message)\n", + " # Check if all neighbors have responded.\n", + " if len(self._buffer[message.round]) == self._num_neighbors:\n", + " print(\n", + " f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n", + " )\n", + " # Prepare the prompt for the next question.\n", + " prompt = \"These are the solutions to the problem from other agents:\\n\"\n", + " for resp in self._buffer[message.round]:\n", + " prompt += f\"One agent solution: {resp.content}\\n\"\n", + " prompt += (\n", + " \"Using the solutions from other agents as additional information, \"\n", + " \"can you provide your answer to the math problem? \"\n", + " f\"The original math problem is {message.question}. 
\"\n", + " \"Your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response.\"\n", + " )\n", + " # Send the question to the agent itself to solve.\n", + " await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n", + " # Clear the buffer.\n", + " self._buffer.pop(message.round)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Aggregator Agent\n", + "\n", + "The aggregator agent is responsible for handling user question and \n", + "distributing math problems to the solver agents.\n", + "\n", + "The aggregator subscribes to the default topic using\n", + "{py:meth}`~autogen_core.components.default_subscription`. The default topic is used to\n", + "recieve user question, receive the final responses from the solver agents,\n", + "and publish the final answer back to the user.\n", + "\n", + "In a more complex application when you want to isolate the multi-agent debate into a\n", + "sub-component, you should use\n", + "{py:meth}`~autogen_core.components.type_subscription` to set a specific topic\n", + "type for the aggregator-solver communication, \n", + "and have the both the solver and aggregator publish and subscribe to that topic type." + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "@default_subscription\n", + "class MathAggregator(RoutedAgent):\n", + " def __init__(self, num_solvers: int) -> None:\n", + " super().__init__(\"Math Aggregator\")\n", + " self._num_solvers = num_solvers\n", + " self._buffer: List[FinalSolverResponse] = []\n", + "\n", + " @message_handler\n", + " async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n", + " print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n", + " prompt = (\n", + " f\"Can you solve the following math problem?\\n{message.content}\\n\"\n", + " \"Explain your reasoning. 
Your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response.\"\n", + " )\n", + " print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n", + " await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n", + "\n", + " @message_handler\n", + " async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n", + " self._buffer.append(message)\n", + " if len(self._buffer) == self._num_solvers:\n", + " print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n", + " # Find the majority answer.\n", + " answers = [resp.answer for resp in self._buffer]\n", + " majority_answer = max(set(answers), key=answers.count)\n", + " # Publish the aggregated response.\n", + " await self.publish_message(Answer(content=majority_answer), topic_id=DefaultTopicId())\n", + " # Clear the responses.\n", + " self._buffer.clear()\n", + " print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up a Debate\n", + "\n", + "We will now set up a multi-agent debate with 4 solver agents and 1 aggregator agent.\n", + "The solver agents will be connected in a sparse manner as illustrated in the figure\n", + "below:\n", + "\n", + "```\n", + "A --- B\n", + "| |\n", + "| |\n", + "C --- D\n", + "```\n", + "\n", + "Each solver agent is connected to two other solver agents. \n", + "For example, agent A is connected to agents B and C.\n", + "\n", + "Let's first create a runtime and register the agent types." + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='MathAggregator')" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverA\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverA\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverB\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverB\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverC\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverC\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverD\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverD\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathAggregator.register(runtime, \"MathAggregator\", lambda: MathAggregator(num_solvers=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we will create the solver agent topology using {py:class}`~autogen_core.components.TypeSubscription`,\n", + "which maps each solver agent's publishing topic type to its neighbors' agent types." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [], + "source": [ + "# Subscriptions for topic published to by MathSolverA.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverD\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverB\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverB.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverA\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverC\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverC.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverB\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverD\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverD.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverC\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverA\"))\n", + "\n", + "# All solvers and the aggregator subscribe to the default topic." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Solving Math Problems\n", + "\n", + "Now let's run the debate to solve a math problem.\n", + "We publish a `SolverRequest` to the default topic, \n", + "and the aggregator agent will start the debate." + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default received question:\n", + "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default publishes initial solver request.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 0:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. To find the total number of clips sold in April and May, we add the amounts: 48 (April) + 24 (May) = 72 clips. \n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 0:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many clips, which is 48 / 2 = 24 clips. To find the total clips sold in April and May, we add both amounts: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Thus, the total number of clips sold altogether is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 0:\n", + "Natalia sold 48 clips in April. In May, she sold half as many, which is \\( \\frac{48}{2} = 24 \\) clips. 
To find the total clips sold in both months, we add the clips sold in April and May together:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Thus, Natalia sold a total of 72 clips.\n", + "\n", + "The answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 0:\n", + "In April, Natalia sold clips to 48 friends. In May, she sold half as many, which is calculated as follows:\n", + "\n", + "Half of 48 is \\( 48 \\div 2 = 24 \\).\n", + "\n", + "Now, to find the total clips sold in April and May, we add the totals from both months:\n", + "\n", + "\\( 48 + 24 = 72 \\).\n", + "\n", + "Thus, the total number of clips Natalia sold altogether in April and May is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. The total number of clips sold in April and May is calculated by adding the two amounts: 48 (April) + 24 (May) = 72 clips. \n", + "\n", + "Therefore, the answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is 48 / 2 = 24 clips. To find the total clips sold in both months, we sum the clips from April and May: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Thus, Natalia sold a total of {{72}} clips. \n", + "\n", + "The answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 1:\n", + "Natalia sold 48 clips in April. In May, she sold half of that, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold, we add the clips sold in both months:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Therefore, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half that amount, which is 48 / 2 = 24 clips. 
To find the total clips sold in both months, we add the amounts: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Therefore, the total number of clips sold altogether by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold in both months, we add the amounts from April and May:\n", + "\n", + "\\( 48 + 24 = 72 \\).\n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold in both months, we add the clips sold in April and May: \n", + "\n", + "48 (April) + 24 (May) = 72. \n", + "\n", + "Thus, the total number of clips sold altogether by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold over both months, we sum the totals: \n", + "\n", + "\\( 48 (April) + 24 (May) = 72 \\).\n", + "\n", + "Therefore, the total number of clips Natalia sold is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 2:\n", + "To solve the problem, we know that Natalia sold 48 clips in April. In May, she sold half that amount, which is calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold over both months, we add the two amounts together:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default received all final answers from 4 solvers.\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default publishes final answer:\n", + "72\n" + ] + } + ], + "source": [ + "question = \"Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. 
How many clips did Natalia sell altogether in April and May?\"\n", + "runtime.start()\n", + "await runtime.publish_message(Question(content=question), DefaultTopicId())\n", + "await runtime.stop_when_idle()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb index 40a6dd680..14c704ede 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/reflection.ipynb @@ -441,8 +441,7 @@ } ], "source": [ - "from autogen_core import DefaultTopicId\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import DefaultTopicId, SingleThreadedAgentRuntime\n", "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md index 5e12f8603..9a63a9c0b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md @@ -18,7 +18,7 @@ The key can correspond to a user id, a session id, or could just be "default" if ## How do I increase the GRPC message size? -If you need to provide custom gRPC options, such as overriding the `max_send_message_length` and `max_receive_message_length`, you can define an `extra_grpc_config` variable and pass it to both the `WorkerAgentRuntimeHost` and `WorkerAgentRuntime` instances. +If you need to provide custom gRPC options, such as overriding the `max_send_message_length` and `max_receive_message_length`, you can define an `extra_grpc_config` variable and pass it to both the `GrpcWorkerAgentRuntimeHost` and `GrpcWorkerAgentRuntime` instances. ```python # Define custom gRPC options @@ -27,10 +27,10 @@ extra_grpc_config = [ ("grpc.max_receive_message_length", new_max_size), ] -# Create instances of WorkerAgentRuntimeHost and WorkerAgentRuntime with the custom gRPC options +# Create instances of GrpcWorkerAgentRuntimeHost and GrpcWorkerAgentRuntime with the custom gRPC options -host = WorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config) -worker1 = WorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config) +host = GrpcWorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config) +worker1 = GrpcWorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config) ``` -**Note**: When `WorkerAgentRuntime` creates a host connection for the clients, it uses `DEFAULT_GRPC_CONFIG` from `HostConnection` class as default set of values which will can be overriden if you pass parameters with the same name using `extra_grpc_config`. 
+**Note**: When `GrpcWorkerAgentRuntime` creates a host connection for the clients, it uses `DEFAULT_GRPC_CONFIG` from the `HostConnection` class as the default set of values, which can be overridden if you pass parameters with the same name using `extra_grpc_config`. diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index ac61eadd3..618700c8f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -117,7 +117,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -132,7 +132,7 @@ } ], "source": [ - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import SingleThreadedAgentRuntime\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 50f703610..4c54954bb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -28,18 +28,18 @@ "```\n", "````\n", "\n", - "We can start a host service using {py:class}`~autogen_core.application.WorkerAgentRuntimeHost`." + "We can start a host service using {py:class}`~autogen_core.application.GrpcWorkerAgentRuntimeHost`." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from autogen_core.application import WorkerAgentRuntimeHost\n", + "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n", "\n", - "host = WorkerAgentRuntimeHost(address=\"localhost:50051\")\n", + "host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n", "host.start() # Start a host service in the background." ] }, @@ -94,7 +94,7 @@ "metadata": {}, "source": [ "Now we can set up the worker agent runtimes.\n", - "We use {py:class}`~autogen_core.application.WorkerAgentRuntime`.\n", + "We use {py:class}`~autogen_core.application.GrpcWorkerAgentRuntime`.\n", "We set up two worker runtimes. 
diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index ac61eadd3..618700c8f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -117,7 +117,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -132,7 +132,7 @@ } ], "source": [ - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import SingleThreadedAgentRuntime\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 50f703610..4c54954bb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -28,18 +28,18 @@ "```\n", "````\n", "\n", - "We can start a host service using {py:class}`~autogen_core.application.WorkerAgentRuntimeHost`." + "We can start a host service using {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntimeHost`." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from autogen_core.application import WorkerAgentRuntimeHost\n", + "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n", "\n", - "host = WorkerAgentRuntimeHost(address=\"localhost:50051\")\n", + "host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n", "host.start() # Start a host service in the background." ] }, @@ -94,7 +94,7 @@ "metadata": {}, "source": [ "Now we can set up the worker agent runtimes.\n", - "We use {py:class}`~autogen_core.application.WorkerAgentRuntime`.\n", + "We use {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime`.\n", "We set up two worker runtimes. Each runtime hosts one agent.\n", "All agents publish and subscribe to the default topic, so they can see all\n", "messages being published.\n", @@ -104,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -127,13 +127,13 @@ "source": [ "import asyncio\n", "\n", - "from autogen_core.application import WorkerAgentRuntime\n", + "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime\n", "\n", - "worker1 = WorkerAgentRuntime(host_address=\"localhost:50051\")\n", + "worker1 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", "worker1.start()\n", "await MyAgent.register(worker1, \"worker1\", lambda: MyAgent(\"worker1\"))\n", "\n", - "worker2 = WorkerAgentRuntime(host_address=\"localhost:50051\")\n", + "worker2 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", "worker2.start()\n", "await MyAgent.register(worker2, \"worker2\", lambda: MyAgent(\"worker2\"))\n", "\n", @@ -149,7 +149,7 @@ "source": [ "We can see each agent published exactly 5 messages.\n", "\n", - "To stop the worker runtimes, we can call {py:meth}`~autogen_core.application.WorkerAgentRuntime.stop`." + "To stop the worker runtimes, we can call {py:meth}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime.stop`." ] }, { @@ -169,7 +169,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can call {py:meth}`~autogen_core.application.WorkerAgentRuntimeHost.stop`\n", + "We can call {py:meth}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntimeHost.stop`\n", "to stop the host service." ] },
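The `stop` calls referenced in this notebook are not part of the hunks shown above. As a minimal sketch, assuming the `host`, `worker1`, and `worker2` objects created in the cells above, the shutdown sequence would look like this:

```python
# Stop the worker runtimes first, then the host service.
# Sketch only: host, worker1, and worker2 come from the example above.
await worker1.stop()
await worker2.stop()
await host.stop()
```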
diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb index d4f5b0ed0..a12304bfe 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb @@ -90,8 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", "\n", "\n", "class MyAgent(RoutedAgent):\n", @@ -298,8 +297,7 @@ "source": [ "from dataclasses import dataclass\n", "\n", - "from autogen_core import MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", "\n", "\n", "@dataclass\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb index f430f1fff..0183954ef 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb @@ -1,621 +1,620 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Model Clients\n", - "\n", - "AutoGen provides the {py:mod}`autogen_core.components.models` module with a suite of built-in\n", - "model clients for using ChatCompletion API.\n", - "All model clients implement the {py:class}`~autogen_core.components.models.ChatCompletionClient` protocol class." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Built-in Model Clients\n", - "\n", - "Currently there are two built-in model clients:\n", - "{py:class}`~autogen_ext.models.OpenAIChatCompletionClient` and\n", - "{py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`.\n", - "Both clients are asynchronous.\n", - "\n", - "To use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`, you need to provide the API key\n", - "either through the environment variable `OPENAI_API_KEY` or through the `api_key` argument." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core.components.models import UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", - "\n", - "# Create an OpenAI model client.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"sk-...\", # Optional if you have an API key set in the environment.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can call the {py:meth}`~autogen_ext.models.OpenAIChatCompletionClient.create` method to create a\n", - "chat completion request, and await for an {py:class}`~autogen_core.components.models.CreateResult` object in return." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The capital of France is Paris.\n" - ] - } - ], - "source": [ - "# Send a message list to the model and await the response.\n", - "messages = [\n", - " UserMessage(content=\"What is the capital of France?\", source=\"user\"),\n", - "]\n", - "response = await model_client.create(messages=messages)\n", - "\n", - "# Print the response\n", - "print(response.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "RequestUsage(prompt_tokens=15, completion_tokens=7)\n" - ] - } - ], - "source": [ - "# Print the response token usage\n", - "print(response.usage)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Streaming Response\n", - "\n", - "You can use the {py:meth}`~autogen_ext.models.OpenAIChatCompletionClient.create_streaming` method to create a\n", - "chat completion request with streaming response." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streamed responses:\n", - "In the heart of an ancient forest, beneath the shadow of snow-capped peaks, a dragon named Elara lived secretly for centuries. Elara was unlike any dragon from the old tales; her scales shimmered with a deep emerald hue, each scale engraved with symbols of lost wisdom. The villagers in the nearby valley spoke of mysterious lights dancing across the night sky, but none dared venture close enough to solve the enigma.\n", - "\n", - "One cold winter's eve, a young girl named Lira, brimming with curiosity and armed with the innocence of youth, wandered into Elara’s domain. Instead of fire and fury, she found warmth and a gentle gaze.
The dragon shared stories of a world long forgotten and in return, Lira gifted her simple stories of human life, rich in laughter and scent of earth.\n", - "\n", - "From that night on, the villagers noticed subtle changes—the crops grew taller, and the air seemed sweeter. Elara had infused the valley with ancient magic, a guardian of balance, watching quietly as her new friend thrived under the stars. And so, Lira and Elara’s bond marked the beginning of a timeless friendship that spun tales of hope whispered through the leaves of the ever-verdant forest.\n", - "\n", - "------------\n", - "\n", - "The complete response:\n", - "In the heart of an ancient forest, beneath the shadow of snow-capped peaks, a dragon named Elara lived secretly for centuries. Elara was unlike any dragon from the old tales; her scales shimmered with a deep emerald hue, each scale engraved with symbols of lost wisdom. The villagers in the nearby valley spoke of mysterious lights dancing across the night sky, but none dared venture close enough to solve the enigma.\n", - "\n", - "One cold winter's eve, a young girl named Lira, brimming with curiosity and armed with the innocence of youth, wandered into Elara’s domain. Instead of fire and fury, she found warmth and a gentle gaze. The dragon shared stories of a world long forgotten and in return, Lira gifted her simple stories of human life, rich in laughter and scent of earth.\n", - "\n", - "From that night on, the villagers noticed subtle changes—the crops grew taller, and the air seemed sweeter. Elara had infused the valley with ancient magic, a guardian of balance, watching quietly as her new friend thrived under the stars. And so, Lira and Elara’s bond marked the beginning of a timeless friendship that spun tales of hope whispered through the leaves of the ever-verdant forest.\n", - "\n", - "\n", - "------------\n", - "\n", - "The token usage was:\n", - "RequestUsage(prompt_tokens=0, completion_tokens=0)\n" - ] - } - ], - "source": [ - "messages = [\n", - " UserMessage(content=\"Write a very short story about a dragon.\", source=\"user\"),\n", - "]\n", - "\n", - "# Create a stream.\n", - "stream = model_client.create_stream(messages=messages)\n", - "\n", - "# Iterate over the stream and print the responses.\n", - "print(\"Streamed responses:\")\n", - "async for response in stream: # type: ignore\n", - " if isinstance(response, str):\n", - " # A partial response is a string.\n", - " print(response, flush=True, end=\"\")\n", - " else:\n", - " # The last response is a CreateResult object with the complete message.\n", - " print(\"\\n\\n------------\\n\")\n", - " print(\"The complete response:\", flush=True)\n", - " print(response.content, flush=True)\n", - " print(\"\\n\\n------------\\n\")\n", - " print(\"The token usage was:\", flush=True)\n", - " print(response.usage, flush=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "The last response in the streaming response is always the final response\n", - "of the type {py:class}`~autogen_core.components.models.CreateResult`.\n", - "```\n", - "\n", - "**NB the default usage response is to return zero values**" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### A Note on Token usage counts with streaming example\n", - "Comparing usage returns in the above Non Streaming `model_client.create(messages=messages)` vs streaming `model_client.create_stream(messages=messages)` we see differences.\n", - "The non streaming response by default returns 
valid prompt and completion token usage counts. \n", - "The streamed response by default returns zero values.\n", - "\n", - "as documented in the OPENAI API Reference an additional parameter `stream_options` can be specified to return valid usage counts. see [stream_options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options)\n", - "\n", - "Only set this when you using streaming ie , using `create_stream` \n", - "\n", - "to enable this in `create_stream` set `extra_create_args={\"stream_options\": {\"include_usage\": True}},`\n", - "\n", - "- **Note whilst other API's like LiteLLM also support this, it is not always guarenteed that it is fully supported or correct**\n", - "\n", - "#### Streaming example with token usage\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streamed responses:\n", - "In a lush, emerald valley hidden by towering peaks, there lived a dragon named Ember. Unlike others of her kind, Ember cherished solitude over treasure, and the songs of the stream over the roar of flames. One misty dawn, a young shepherd stumbled into her sanctuary, lost and frightened. \n", - "\n", - "Instead of fury, he was met with kindness as Ember extended a wing, guiding him back to safety. In gratitude, the shepherd visited yearly, bringing tales of his world beyond the mountains. Over time, a friendship blossomed, binding man and dragon in shared stories and laughter.\n", - "\n", - "As the years passed, the legend of Ember the gentle-hearted spread far and wide, forever changing the way dragons were seen in the hearts of many.\n", - "\n", - "------------\n", - "\n", - "The complete response:\n", - "In a lush, emerald valley hidden by towering peaks, there lived a dragon named Ember. Unlike others of her kind, Ember cherished solitude over treasure, and the songs of the stream over the roar of flames. One misty dawn, a young shepherd stumbled into her sanctuary, lost and frightened. \n", - "\n", - "Instead of fury, he was met with kindness as Ember extended a wing, guiding him back to safety. In gratitude, the shepherd visited yearly, bringing tales of his world beyond the mountains. 
Over time, a friendship blossomed, binding man and dragon in shared stories and laughter.\n", - "\n", - "As the years passed, the legend of Ember the gentle-hearted spread far and wide, forever changing the way dragons were seen in the hearts of many.\n", - "\n", - "\n", - "------------\n", - "\n", - "The token usage was:\n", - "RequestUsage(prompt_tokens=17, completion_tokens=146)\n" - ] - } - ], - "source": [ - "messages = [\n", - " UserMessage(content=\"Write a very short story about a dragon.\", source=\"user\"),\n", - "]\n", - "\n", - "# Create a stream.\n", - "stream = model_client.create_stream(messages=messages, extra_create_args={\"stream_options\": {\"include_usage\": True}})\n", - "\n", - "# Iterate over the stream and print the responses.\n", - "print(\"Streamed responses:\")\n", - "async for response in stream: # type: ignore\n", - " if isinstance(response, str):\n", - " # A partial response is a string.\n", - " print(response, flush=True, end=\"\")\n", - " else:\n", - " # The last response is a CreateResult object with the complete message.\n", - " print(\"\\n\\n------------\\n\")\n", - " print(\"The complete response:\", flush=True)\n", - " print(response.content, flush=True)\n", - " print(\"\\n\\n------------\\n\")\n", - " print(\"The token usage was:\", flush=True)\n", - " print(response.usage, flush=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Azure OpenAI\n", - "\n", - "To use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`, you need to provide\n", - "the deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n", - "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n", - "To use AAD authentication, you need to first install the `azure-identity` package." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "# pip install azure-identity" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following code snippet shows how to use AAD authentication.\n", - "The identity used must be assigned the [**Cognitive Services OpenAI User**](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "\n", - "# Create the token provider\n", - "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", - "\n", - "az_model_client = AzureOpenAIChatCompletionClient(\n", - " azure_deployment=\"{your-azure-deployment}\",\n", - " model=\"{model-name, such as gpt-4o}\",\n", - " api_version=\"2024-06-01\",\n", - " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", - " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", - " # api_key=\"sk-...\", # For key-based authentication.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Build Agent using Model Client\n", - "\n", - "Let's create a simple AI agent that can respond to messages using the ChatCompletion API." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from autogen_core import MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", - "\n", - "\n", - "@dataclass\n", - "class Message:\n", - " content: str\n", - "\n", - "\n", - "class SimpleAgent(RoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient) -> None:\n", - " super().__init__(\"A simple agent\")\n", - " self._system_messages = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", - " self._model_client = model_client\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # Prepare input to the chat completion model.\n", - " user_message = UserMessage(content=message.content, source=\"user\")\n", - " response = await self._model_client.create(\n", - " self._system_messages + [user_message], cancellation_token=ctx.cancellation_token\n", - " )\n", - " # Return with the model's response.\n", - " assert isinstance(response.content, str)\n", - " return Message(content=response.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `SimpleAgent` class is a subclass of the\n", - "{py:class}`autogen_core.components.RoutedAgent` class for the convenience of automatically routing messages to the appropriate handlers.\n", - "It has a single handler, `handle_user_message`, which handles message from the user. It uses the `ChatCompletionClient` to generate a response to the message.\n", - "It then returns the response to the user, following the direct communication model.\n", - "\n", - "```{note}\n", - "The `cancellation_token` of the type {py:class}`autogen_core.base.CancellationToken` is used to cancel\n", - "asynchronous operations. 
It is linked to async calls inside the message handlers\n", - "and can be used by the caller to cancel the handlers.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Seattle is a vibrant city with a wide range of activities and attractions. Here are some fun things to do in Seattle:\n", - "\n", - "1. **Space Needle**: Visit this iconic observation tower for stunning views of the city and surrounding mountains.\n", - "\n", - "2. **Pike Place Market**: Explore this historic market where you can see the famous fish toss, buy local produce, and find unique crafts and eateries.\n", - "\n", - "3. **Museum of Pop Culture (MoPOP)**: Dive into the world of contemporary culture, music, and science fiction at this interactive museum.\n", - "\n", - "4. **Chihuly Garden and Glass**: Marvel at the beautiful glass art installations by artist Dale Chihuly, located right next to the Space Needle.\n", - "\n", - "5. **Seattle Aquarium**: Discover the diverse marine life of the Pacific Northwest at this engaging aquarium.\n", - "\n", - "6. **Seattle Art Museum**: Explore a vast collection of art from around the world, including contemporary and indigenous art.\n", - "\n", - "7. **Kerry Park**: For one of the best views of the Seattle skyline, head to this small park on Queen Anne Hill.\n", - "\n", - "8. **Ballard Locks**: Watch boats pass through the locks and observe the salmon ladder to see salmon migrating.\n", - "\n", - "9. **Ferry to Bainbridge Island**: Take a scenic ferry ride across Puget Sound to enjoy charming shops, restaurants, and beautiful natural scenery.\n", - "\n", - "10. **Olympic Sculpture Park**: Stroll through this outdoor park with large-scale sculptures and stunning views of the waterfront and mountains.\n", - "\n", - "11. **Underground Tour**: Discover Seattle's history on this quirky tour of the city's underground passageways in Pioneer Square.\n", - "\n", - "12. **Seattle Waterfront**: Enjoy the shops, restaurants, and attractions along the waterfront, including the Seattle Great Wheel and the aquarium.\n", - "\n", - "13. **Discovery Park**: Explore the largest green space in Seattle, featuring trails, beaches, and views of Puget Sound.\n", - "\n", - "14. **Food Tours**: Try out Seattle’s diverse culinary scene, including fresh seafood, international cuisines, and coffee culture (don’t miss the original Starbucks!).\n", - "\n", - "15. 
**Attend a Sports Game**: Catch a Seahawks (NFL), Mariners (MLB), or Sounders (MLS) game for a lively local experience.\n", - "\n", - "Whether you're interested in culture, nature, food, or history, Seattle has something for everyone to enjoy!\n" - ] - } - ], - "source": [ - "# Create the runtime and register the agent.\n", - "from autogen_core import AgentId\n", - "\n", - "runtime = SingleThreadedAgentRuntime()\n", - "await SimpleAgent.register(\n", - " runtime,\n", - " \"simple_agent\",\n", - " lambda: SimpleAgent(\n", - " OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-mini\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n", - " )\n", - " ),\n", - ")\n", - "# Start the runtime processing messages.\n", - "runtime.start()\n", - "# Send a message to the agent and get the response.\n", - "message = Message(\"Hello, what are some fun things to do in Seattle?\")\n", - "response = await runtime.send_message(message, AgentId(\"simple_agent\", \"default\"))\n", - "print(response.content)\n", - "# Stop the runtime processing messages.\n", - "await runtime.stop()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Manage Model Context\n", - "\n", - "The above `SimpleAgent` always responds with a fresh context that contains only\n", - "the system message and the latest user's message.\n", - "We can use model context classes from {py:mod}`autogen_core.components.model_context`\n", - "to make the agent \"remember\" previous conversations.\n", - "A model context supports storage and retrieval of Chat Completion messages.\n", - "It is always used together with a model client to generate LLM-based responses.\n", - "\n", - "For example, {py:mod}`~autogen_core.components.model_context.BufferedChatCompletionContext`\n", - "is a most-recent-used (MRU) context that stores the most recent `buffer_size`\n", - "number of messages. This is useful to avoid context overflow in many LLMs.\n", - "\n", - "Let's update the previous example to use\n", - "{py:mod}`~autogen_core.components.model_context.BufferedChatCompletionContext`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core.components.model_context import BufferedChatCompletionContext\n", - "from autogen_core.components.models import AssistantMessage\n", - "\n", - "\n", - "class SimpleAgentWithContext(RoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient) -> None:\n", - " super().__init__(\"A simple agent\")\n", - " self._system_messages = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", - " self._model_client = model_client\n", - " self._model_context = BufferedChatCompletionContext(buffer_size=5)\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # Prepare input to the chat completion model.\n", - " user_message = UserMessage(content=message.content, source=\"user\")\n", - " # Add message to model context.\n", - " await self._model_context.add_message(user_message)\n", - " # Generate a response.\n", - " response = await self._model_client.create(\n", - " self._system_messages + (await self._model_context.get_messages()),\n", - " cancellation_token=ctx.cancellation_token,\n", - " )\n", - " # Return with the model's response.\n", - " assert isinstance(response.content, str)\n", - " # Add message to model context.\n", - " await self._model_context.add_message(AssistantMessage(content=response.content, source=self.metadata[\"type\"]))\n", - " return Message(content=response.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's try to ask follow up questions after the first one." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Question: Hello, what are some fun things to do in Seattle?\n", - "Response: Seattle offers a wide variety of fun activities and attractions for visitors. Here are some highlights:\n", - "\n", - "1. **Pike Place Market**: Explore this iconic market, where you can find fresh produce, unique crafts, and the famous fish-throwing vendors. Don’t forget to visit the original Starbucks!\n", - "\n", - "2. **Space Needle**: Enjoy breathtaking views of the city and Mount Rainier from the observation deck of this iconic structure. You can also dine at the SkyCity restaurant.\n", - "\n", - "3. **Chihuly Garden and Glass**: Admire the stunning glass art installations created by artist Dale Chihuly. The garden and exhibit are particularly beautiful, especially in good weather.\n", - "\n", - "4. **Museum of Pop Culture (MoPOP)**: Dive into the world of music, science fiction, and pop culture through interactive exhibits and memorabilia.\n", - "\n", - "5. **Seattle Aquarium**: Located on the waterfront, the aquarium features a variety of marine life native to the Pacific Northwest, including otters and diving birds.\n", - "\n", - "6. **Seattle Art Museum (SAM)**: Explore a diverse collection of art from around the world, including Native American art and contemporary pieces.\n", - "\n", - "7. **Ballard Locks**: Watch boats travel between the Puget Sound and Lake Union, and see salmon navigating the fish ladder during spawning season.\n", - "\n", - "8. **Fremont Troll**: Visit this quirky public art installation located under the Aurora Bridge, where you can take fun photos with the giant troll.\n", - "\n", - "9. 
**Kerry Park**: For a picturesque view of the Seattle skyline, head to Kerry Park on Queen Anne Hill, especially at sunset.\n", - "\n", - "10. **Take a Ferry Ride**: Enjoy the scenic views while taking a ferry to nearby Bainbridge Island or Vashon Island for a relaxing day trip.\n", - "\n", - "11. **Underground Tour**: Explore Seattle’s history on an entertaining underground tour in Pioneer Square, where you’ll learn about the city’s early days.\n", - "\n", - "12. **Attend a Sporting Event**: Depending on the season, catch a Seattle Seahawks (NFL) game, a Seattle Mariners (MLB) game, or a Seattle Sounders (MLS) match.\n", - "\n", - "13. **Explore Discovery Park**: Enjoy nature with hiking trails, beach access, and stunning views of the Puget Sound and Olympic Mountains.\n", - "\n", - "14. **West Seattle’s Alki Beach**: Relax at this beach with beautiful views of the Seattle skyline and enjoy beachside activities like biking or kayaking.\n", - "\n", - "15. **Dining and Craft Beer**: Seattle has a vibrant food scene and is known for its seafood, coffee culture, and craft breweries. Make sure to explore local restaurants and breweries.\n", - "\n", - "There’s something for everyone in Seattle, whether you’re interested in nature, art, history, or food!\n", - "-----\n", - "Question: What was the first thing you mentioned?\n", - "Response: The first thing I mentioned was **Pike Place Market**, an iconic market in Seattle where you can find fresh produce, unique crafts, and experience the famous fish-throwing vendors. It's also home to the original Starbucks and various charming shops and eateries.\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await SimpleAgentWithContext.register(\n", - " runtime,\n", - " \"simple_agent_context\",\n", - " lambda: SimpleAgentWithContext(\n", - " OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-mini\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n", - " )\n", - " ),\n", - ")\n", - "# Start the runtime processing messages.\n", - "runtime.start()\n", - "agent_id = AgentId(\"simple_agent_context\", \"default\")\n", - "\n", - "# First question.\n", - "message = Message(\"Hello, what are some fun things to do in Seattle?\")\n", - "print(f\"Question: {message.content}\")\n", - "response = await runtime.send_message(message, agent_id)\n", - "print(f\"Response: {response.content}\")\n", - "print(\"-----\")\n", - "\n", - "# Second question.\n", - "message = Message(\"What was the first thing you mentioned?\")\n", - "print(f\"Question: {message.content}\")\n", - "response = await runtime.send_message(message, agent_id)\n", - "print(f\"Response: {response.content}\")\n", - "\n", - "# Stop the runtime processing messages.\n", - "await runtime.stop()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the second response, you can see the agent now can recall its own previous responses." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Model Clients\n", + "\n", + "AutoGen provides the {py:mod}`autogen_core.components.models` module with a suite of built-in\n", + "model clients for using the ChatCompletion API.\n", + "All model clients implement the {py:class}`~autogen_core.components.models.ChatCompletionClient` protocol class." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Built-in Model Clients\n", + "\n", + "Currently there are two built-in model clients:\n", + "{py:class}`~autogen_ext.models.OpenAIChatCompletionClient` and\n", + "{py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`.\n", + "Both clients are asynchronous.\n", + "\n", + "To use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`, you need to provide the API key\n", + "either through the environment variable `OPENAI_API_KEY` or through the `api_key` argument." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.components.models import UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", + "\n", + "# Create an OpenAI model client.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"sk-...\", # Optional if you have an API key set in the environment.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can call the {py:meth}`~autogen_ext.models.OpenAIChatCompletionClient.create` method to create a\n", + "chat completion request and await a {py:class}`~autogen_core.components.models.CreateResult` object in return." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The capital of France is Paris.\n" + ] + } + ], + "source": [ + "# Send a message list to the model and await the response.\n", + "messages = [\n", + " UserMessage(content=\"What is the capital of France?\", source=\"user\"),\n", + "]\n", + "response = await model_client.create(messages=messages)\n", + "\n", + "# Print the response\n", + "print(response.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "RequestUsage(prompt_tokens=15, completion_tokens=7)\n" + ] + } + ], + "source": [ + "# Print the response token usage\n", + "print(response.usage)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Streaming Response\n", + "\n", + "You can use the {py:meth}`~autogen_ext.models.OpenAIChatCompletionClient.create_stream` method to create a\n", + "chat completion request with a streaming response." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Streamed responses:\n", + "In the heart of an ancient forest, beneath the shadow of snow-capped peaks, a dragon named Elara lived secretly for centuries.
Elara was unlike any dragon from the old tales; her scales shimmered with a deep emerald hue, each scale engraved with symbols of lost wisdom. The villagers in the nearby valley spoke of mysterious lights dancing across the night sky, but none dared venture close enough to solve the enigma.\n", + "\n", + "One cold winter's eve, a young girl named Lira, brimming with curiosity and armed with the innocence of youth, wandered into Elara’s domain. Instead of fire and fury, she found warmth and a gentle gaze. The dragon shared stories of a world long forgotten and in return, Lira gifted her simple stories of human life, rich in laughter and scent of earth.\n", + "\n", + "From that night on, the villagers noticed subtle changes—the crops grew taller, and the air seemed sweeter. Elara had infused the valley with ancient magic, a guardian of balance, watching quietly as her new friend thrived under the stars. And so, Lira and Elara’s bond marked the beginning of a timeless friendship that spun tales of hope whispered through the leaves of the ever-verdant forest.\n", + "\n", + "------------\n", + "\n", + "The complete response:\n", + "In the heart of an ancient forest, beneath the shadow of snow-capped peaks, a dragon named Elara lived secretly for centuries. Elara was unlike any dragon from the old tales; her scales shimmered with a deep emerald hue, each scale engraved with symbols of lost wisdom. The villagers in the nearby valley spoke of mysterious lights dancing across the night sky, but none dared venture close enough to solve the enigma.\n", + "\n", + "One cold winter's eve, a young girl named Lira, brimming with curiosity and armed with the innocence of youth, wandered into Elara’s domain. Instead of fire and fury, she found warmth and a gentle gaze. The dragon shared stories of a world long forgotten and in return, Lira gifted her simple stories of human life, rich in laughter and scent of earth.\n", + "\n", + "From that night on, the villagers noticed subtle changes—the crops grew taller, and the air seemed sweeter. Elara had infused the valley with ancient magic, a guardian of balance, watching quietly as her new friend thrived under the stars. 
And so, Lira and Elara’s bond marked the beginning of a timeless friendship that spun tales of hope whispered through the leaves of the ever-verdant forest.\n", + "\n", + "\n", + "------------\n", + "\n", + "The token usage was:\n", + "RequestUsage(prompt_tokens=0, completion_tokens=0)\n" + ] + } + ], + "source": [ + "messages = [\n", + " UserMessage(content=\"Write a very short story about a dragon.\", source=\"user\"),\n", + "]\n", + "\n", + "# Create a stream.\n", + "stream = model_client.create_stream(messages=messages)\n", + "\n", + "# Iterate over the stream and print the responses.\n", + "print(\"Streamed responses:\")\n", + "async for response in stream: # type: ignore\n", + " if isinstance(response, str):\n", + " # A partial response is a string.\n", + " print(response, flush=True, end=\"\")\n", + " else:\n", + " # The last response is a CreateResult object with the complete message.\n", + " print(\"\\n\\n------------\\n\")\n", + " print(\"The complete response:\", flush=True)\n", + " print(response.content, flush=True)\n", + " print(\"\\n\\n------------\\n\")\n", + " print(\"The token usage was:\", flush=True)\n", + " print(response.usage, flush=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "The last response in the streaming response is always the final response\n", + "of the type {py:class}`~autogen_core.components.models.CreateResult`.\n", + "```\n", + "\n", + "**Note: by default, the usage in a streamed response is reported as zero values.**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### A Note on Token Usage Counts with Streaming\n", + "Comparing the usage returned by the non-streaming `model_client.create(messages=messages)` call above with the streaming `model_client.create_stream(messages=messages)` call, we see a difference.\n", + "The non-streaming response by default returns valid prompt and completion token usage counts. \n", + "The streamed response by default returns zero values.\n", + "\n", + "As documented in the OpenAI API reference, an additional parameter `stream_options` can be specified to return valid usage counts. See [stream_options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options).\n", + "\n", + "Only set this when streaming, i.e., when using `create_stream`. \n", + "\n", + "To enable this in `create_stream`, set `extra_create_args={\"stream_options\": {\"include_usage\": True}},`.\n", + "\n", + "- **Note: while other APIs like LiteLLM also support this, it is not always guaranteed that it is fully supported or correct.**\n", + "\n", + "#### Streaming example with token usage\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Streamed responses:\n", + "In a lush, emerald valley hidden by towering peaks, there lived a dragon named Ember. Unlike others of her kind, Ember cherished solitude over treasure, and the songs of the stream over the roar of flames. One misty dawn, a young shepherd stumbled into her sanctuary, lost and frightened. \n", + "\n", + "Instead of fury, he was met with kindness as Ember extended a wing, guiding him back to safety. In gratitude, the shepherd visited yearly, bringing tales of his world beyond the mountains.
Over time, a friendship blossomed, binding man and dragon in shared stories and laughter.\n", + "\n", + "As the years passed, the legend of Ember the gentle-hearted spread far and wide, forever changing the way dragons were seen in the hearts of many.\n", + "\n", + "------------\n", + "\n", + "The complete response:\n", + "In a lush, emerald valley hidden by towering peaks, there lived a dragon named Ember. Unlike others of her kind, Ember cherished solitude over treasure, and the songs of the stream over the roar of flames. One misty dawn, a young shepherd stumbled into her sanctuary, lost and frightened. \n", + "\n", + "Instead of fury, he was met with kindness as Ember extended a wing, guiding him back to safety. In gratitude, the shepherd visited yearly, bringing tales of his world beyond the mountains. Over time, a friendship blossomed, binding man and dragon in shared stories and laughter.\n", + "\n", + "As the years passed, the legend of Ember the gentle-hearted spread far and wide, forever changing the way dragons were seen in the hearts of many.\n", + "\n", + "\n", + "------------\n", + "\n", + "The token usage was:\n", + "RequestUsage(prompt_tokens=17, completion_tokens=146)\n" + ] + } + ], + "source": [ + "messages = [\n", + " UserMessage(content=\"Write a very short story about a dragon.\", source=\"user\"),\n", + "]\n", + "\n", + "# Create a stream.\n", + "stream = model_client.create_stream(messages=messages, extra_create_args={\"stream_options\": {\"include_usage\": True}})\n", + "\n", + "# Iterate over the stream and print the responses.\n", + "print(\"Streamed responses:\")\n", + "async for response in stream: # type: ignore\n", + " if isinstance(response, str):\n", + " # A partial response is a string.\n", + " print(response, flush=True, end=\"\")\n", + " else:\n", + " # The last response is a CreateResult object with the complete message.\n", + " print(\"\\n\\n------------\\n\")\n", + " print(\"The complete response:\", flush=True)\n", + " print(response.content, flush=True)\n", + " print(\"\\n\\n------------\\n\")\n", + " print(\"The token usage was:\", flush=True)\n", + " print(response.usage, flush=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Azure OpenAI\n", + "\n", + "To use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`, you need to provide\n", + "the deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n", + "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n", + "To use AAD authentication, you need to first install the `azure-identity` package." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "# pip install azure-identity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code snippet shows how to use AAD authentication.\n", + "The identity used must be assigned the [**Cognitive Services OpenAI User**](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "\n", + "# Create the token provider\n", + "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", + "\n", + "az_model_client = AzureOpenAIChatCompletionClient(\n", + " azure_deployment=\"{your-azure-deployment}\",\n", + " model=\"{model-name, such as gpt-4o}\",\n", + " api_version=\"2024-06-01\",\n", + " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", + " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", + " # api_key=\"sk-...\", # For key-based authentication.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more information.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Build Agent using Model Client\n", + "\n", + "Let's create a simple AI agent that can respond to messages using the ChatCompletion API." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", + "\n", + "\n", + "@dataclass\n", + "class Message:\n", + " content: str\n", + "\n", + "\n", + "class SimpleAgent(RoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient) -> None:\n", + " super().__init__(\"A simple agent\")\n", + " self._system_messages = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", + " self._model_client = model_client\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # Prepare input to the chat completion model.\n", + " user_message = UserMessage(content=message.content, source=\"user\")\n", + " response = await self._model_client.create(\n", + " self._system_messages + [user_message], cancellation_token=ctx.cancellation_token\n", + " )\n", + " # Return with the model's response.\n", + " assert isinstance(response.content, str)\n", + " return Message(content=response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `SimpleAgent` class is a subclass of the\n", + "{py:class}`autogen_core.RoutedAgent` class, which automatically routes messages to the appropriate handlers.\n", + "It has a single handler, `handle_user_message`, which handles messages from the user. It uses the `ChatCompletionClient` to generate a response to the message.\n", + "It then returns the response to the user, following the direct communication model.\n", + "\n", + "```{note}\n", + "The `cancellation_token` of the type {py:class}`autogen_core.CancellationToken` is used to cancel\n", + "asynchronous operations.
It is linked to async calls inside the message handlers\n", + "and can be used by the caller to cancel the handlers.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Seattle is a vibrant city with a wide range of activities and attractions. Here are some fun things to do in Seattle:\n", + "\n", + "1. **Space Needle**: Visit this iconic observation tower for stunning views of the city and surrounding mountains.\n", + "\n", + "2. **Pike Place Market**: Explore this historic market where you can see the famous fish toss, buy local produce, and find unique crafts and eateries.\n", + "\n", + "3. **Museum of Pop Culture (MoPOP)**: Dive into the world of contemporary culture, music, and science fiction at this interactive museum.\n", + "\n", + "4. **Chihuly Garden and Glass**: Marvel at the beautiful glass art installations by artist Dale Chihuly, located right next to the Space Needle.\n", + "\n", + "5. **Seattle Aquarium**: Discover the diverse marine life of the Pacific Northwest at this engaging aquarium.\n", + "\n", + "6. **Seattle Art Museum**: Explore a vast collection of art from around the world, including contemporary and indigenous art.\n", + "\n", + "7. **Kerry Park**: For one of the best views of the Seattle skyline, head to this small park on Queen Anne Hill.\n", + "\n", + "8. **Ballard Locks**: Watch boats pass through the locks and observe the salmon ladder to see salmon migrating.\n", + "\n", + "9. **Ferry to Bainbridge Island**: Take a scenic ferry ride across Puget Sound to enjoy charming shops, restaurants, and beautiful natural scenery.\n", + "\n", + "10. **Olympic Sculpture Park**: Stroll through this outdoor park with large-scale sculptures and stunning views of the waterfront and mountains.\n", + "\n", + "11. **Underground Tour**: Discover Seattle's history on this quirky tour of the city's underground passageways in Pioneer Square.\n", + "\n", + "12. **Seattle Waterfront**: Enjoy the shops, restaurants, and attractions along the waterfront, including the Seattle Great Wheel and the aquarium.\n", + "\n", + "13. **Discovery Park**: Explore the largest green space in Seattle, featuring trails, beaches, and views of Puget Sound.\n", + "\n", + "14. **Food Tours**: Try out Seattle’s diverse culinary scene, including fresh seafood, international cuisines, and coffee culture (don’t miss the original Starbucks!).\n", + "\n", + "15. 
**Attend a Sports Game**: Catch a Seahawks (NFL), Mariners (MLB), or Sounders (MLS) game for a lively local experience.\n", + "\n", + "Whether you're interested in culture, nature, food, or history, Seattle has something for everyone to enjoy!\n" + ] + } + ], + "source": [ + "# Create the runtime and register the agent.\n", + "from autogen_core import AgentId\n", + "\n", + "runtime = SingleThreadedAgentRuntime()\n", + "await SimpleAgent.register(\n", + " runtime,\n", + " \"simple_agent\",\n", + " lambda: SimpleAgent(\n", + " OpenAIChatCompletionClient(\n", + " model=\"gpt-4o-mini\",\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n", + " )\n", + " ),\n", + ")\n", + "# Start the runtime processing messages.\n", + "runtime.start()\n", + "# Send a message to the agent and get the response.\n", + "message = Message(\"Hello, what are some fun things to do in Seattle?\")\n", + "response = await runtime.send_message(message, AgentId(\"simple_agent\", \"default\"))\n", + "print(response.content)\n", + "# Stop the runtime processing messages.\n", + "await runtime.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Manage Model Context\n", + "\n", + "The above `SimpleAgent` always responds with a fresh context that contains only\n", + "the system message and the latest user message.\n", + "We can use model context classes from {py:mod}`autogen_core.components.model_context`\n", + "to make the agent \"remember\" previous conversations.\n", + "A model context supports storage and retrieval of Chat Completion messages.\n", + "It is always used together with a model client to generate LLM-based responses.\n", + "\n", + "For example, {py:class}`~autogen_core.components.model_context.BufferedChatCompletionContext`\n", + "is a most-recently-used (MRU) context that stores the most recent `buffer_size`\n", + "number of messages. This is useful to avoid context overflow in many LLMs.\n", + "\n", + "Let's update the previous example to use\n", + "{py:class}`~autogen_core.components.model_context.BufferedChatCompletionContext`."
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.components.model_context import BufferedChatCompletionContext\n", + "from autogen_core.components.models import AssistantMessage\n", + "\n", + "\n", + "class SimpleAgentWithContext(RoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient) -> None:\n", + " super().__init__(\"A simple agent\")\n", + " self._system_messages = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", + " self._model_client = model_client\n", + " self._model_context = BufferedChatCompletionContext(buffer_size=5)\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # Prepare input to the chat completion model.\n", + " user_message = UserMessage(content=message.content, source=\"user\")\n", + " # Add message to model context.\n", + " await self._model_context.add_message(user_message)\n", + " # Generate a response.\n", + " response = await self._model_client.create(\n", + " self._system_messages + (await self._model_context.get_messages()),\n", + " cancellation_token=ctx.cancellation_token,\n", + " )\n", + " # Return with the model's response.\n", + " assert isinstance(response.content, str)\n", + " # Add message to model context.\n", + " await self._model_context.add_message(AssistantMessage(content=response.content, source=self.metadata[\"type\"]))\n", + " return Message(content=response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's try to ask follow up questions after the first one." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Question: Hello, what are some fun things to do in Seattle?\n", + "Response: Seattle offers a wide variety of fun activities and attractions for visitors. Here are some highlights:\n", + "\n", + "1. **Pike Place Market**: Explore this iconic market, where you can find fresh produce, unique crafts, and the famous fish-throwing vendors. Don’t forget to visit the original Starbucks!\n", + "\n", + "2. **Space Needle**: Enjoy breathtaking views of the city and Mount Rainier from the observation deck of this iconic structure. You can also dine at the SkyCity restaurant.\n", + "\n", + "3. **Chihuly Garden and Glass**: Admire the stunning glass art installations created by artist Dale Chihuly. The garden and exhibit are particularly beautiful, especially in good weather.\n", + "\n", + "4. **Museum of Pop Culture (MoPOP)**: Dive into the world of music, science fiction, and pop culture through interactive exhibits and memorabilia.\n", + "\n", + "5. **Seattle Aquarium**: Located on the waterfront, the aquarium features a variety of marine life native to the Pacific Northwest, including otters and diving birds.\n", + "\n", + "6. **Seattle Art Museum (SAM)**: Explore a diverse collection of art from around the world, including Native American art and contemporary pieces.\n", + "\n", + "7. **Ballard Locks**: Watch boats travel between the Puget Sound and Lake Union, and see salmon navigating the fish ladder during spawning season.\n", + "\n", + "8. **Fremont Troll**: Visit this quirky public art installation located under the Aurora Bridge, where you can take fun photos with the giant troll.\n", + "\n", + "9. 
**Kerry Park**: For a picturesque view of the Seattle skyline, head to Kerry Park on Queen Anne Hill, especially at sunset.\n", + "\n", + "10. **Take a Ferry Ride**: Enjoy the scenic views while taking a ferry to nearby Bainbridge Island or Vashon Island for a relaxing day trip.\n", + "\n", + "11. **Underground Tour**: Explore Seattle’s history on an entertaining underground tour in Pioneer Square, where you’ll learn about the city’s early days.\n", + "\n", + "12. **Attend a Sporting Event**: Depending on the season, catch a Seattle Seahawks (NFL) game, a Seattle Mariners (MLB) game, or a Seattle Sounders (MLS) match.\n", + "\n", + "13. **Explore Discovery Park**: Enjoy nature with hiking trails, beach access, and stunning views of the Puget Sound and Olympic Mountains.\n", + "\n", + "14. **West Seattle’s Alki Beach**: Relax at this beach with beautiful views of the Seattle skyline and enjoy beachside activities like biking or kayaking.\n", + "\n", + "15. **Dining and Craft Beer**: Seattle has a vibrant food scene and is known for its seafood, coffee culture, and craft breweries. Make sure to explore local restaurants and breweries.\n", + "\n", + "There’s something for everyone in Seattle, whether you’re interested in nature, art, history, or food!\n", + "-----\n", + "Question: What was the first thing you mentioned?\n", + "Response: The first thing I mentioned was **Pike Place Market**, an iconic market in Seattle where you can find fresh produce, unique crafts, and experience the famous fish-throwing vendors. It's also home to the original Starbucks and various charming shops and eateries.\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await SimpleAgentWithContext.register(\n", + " runtime,\n", + " \"simple_agent_context\",\n", + " lambda: SimpleAgentWithContext(\n", + " OpenAIChatCompletionClient(\n", + " model=\"gpt-4o-mini\",\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n", + " )\n", + " ),\n", + ")\n", + "# Start the runtime processing messages.\n", + "runtime.start()\n", + "agent_id = AgentId(\"simple_agent_context\", \"default\")\n", + "\n", + "# First question.\n", + "message = Message(\"Hello, what are some fun things to do in Seattle?\")\n", + "print(f\"Question: {message.content}\")\n", + "response = await runtime.send_message(message, agent_id)\n", + "print(f\"Response: {response.content}\")\n", + "print(\"-----\")\n", + "\n", + "# Second question.\n", + "message = Message(\"What was the first thing you mentioned?\")\n", + "print(f\"Question: {message.content}\")\n", + "response = await runtime.send_message(message, agent_id)\n", + "print(f\"Response: {response.content}\")\n", + "\n", + "# Stop the runtime processing messages.\n", + "await runtime.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the second response, you can see that the agent can now recall its own previous responses."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/telemetry.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/telemetry.md index 372ef8d3c..530243311 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/telemetry.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/telemetry.md @@ -48,7 +48,7 @@ Now you can send the trace_provider when creating your runtime: # for single threaded runtime single_threaded_runtime = SingleThreadedAgentRuntime(tracer_provider=tracer_provider) # or for worker runtime -worker_runtime = WorkerAgentRuntime(tracer_provider=tracer_provider) +worker_runtime = GrpcWorkerAgentRuntime(tracer_provider=tracer_provider) ``` And that's it! Your application is now instrumented with open telemetry. You can now view your telemetry data in your telemetry backend. @@ -65,5 +65,5 @@ tracer_provider = trace.get_tracer_provider() # for single threaded runtime single_threaded_runtime = SingleThreadedAgentRuntime(tracer_provider=tracer_provider) # or for worker runtime -worker_runtime = WorkerAgentRuntime(tracer_provider=tracer_provider) +worker_runtime = GrpcWorkerAgentRuntime(tracer_provider=tracer_provider) ``` diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index a5ddf267a..d8ea48c63 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -1,315 +1,321 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tools\n", - "\n", - "Tools are code that can be executed by an agent to perform actions. A tool\n", - "can be a simple function such as a calculator, or an API call to a third-party service\n", - "such as stock price lookup or weather forecast.\n", - "In the context of AI agents, tools are designed to be executed by agents in\n", - "response to model-generated function calls.\n", - "\n", - "AutoGen provides the {py:mod}`autogen_core.components.tools` module with a suite of built-in\n", - "tools and utilities for creating and running custom tools." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Built-in Tools\n", - "\n", - "One of the built-in tools is the {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`,\n", - "which allows agents to execute Python code snippets.\n", - "\n", - "Here is how you create the tool and use it." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello, world!\n", - "\n" - ] - } - ], - "source": [ - "from autogen_core import CancellationToken\n", - "from autogen_core.components.tools import PythonCodeExecutionTool\n", - "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", - "\n", - "# Create the tool.\n", - "code_executor = DockerCommandLineCodeExecutor()\n", - "await code_executor.start()\n", - "code_execution_tool = PythonCodeExecutionTool(code_executor)\n", - "cancellation_token = CancellationToken()\n", - "\n", - "# Use the tool directly without an agent.\n", - "code = \"print('Hello, world!')\"\n", - "result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n", - "print(code_execution_tool.return_value_as_string(result))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`\n", - "class is a built-in code executor that runs Python code snippets in a subprocess\n", - "in the local command line environment.\n", - "The {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool` class wraps the code executor\n", - "and provides a simple interface to execute Python code snippets.\n", - "\n", - "Other built-in tools will be added in the future." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Function Tools\n", - "\n", - "A tool can also be a simple Python function that performs a specific action.\n", - "To create a custom function tool, you just need to create a Python function\n", - "and use the {py:class}`~autogen_core.components.tools.FunctionTool` class to wrap it.\n", - "\n", - "The {py:class}`~autogen_core.components.tools.FunctionTool` class uses descriptions and type annotations\n", - "to inform the LLM when and how to use a given function. 
The description provides context\n", - "about the function’s purpose and intended use cases, while type annotations inform the LLM about\n", - "the expected parameters and return type.\n", - "\n", - "For example, a simple tool to obtain the stock price of a company might look like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "80.44429939059668\n" - ] - } - ], - "source": [ - "import random\n", - "\n", - "from autogen_core import CancellationToken\n", - "from autogen_core.components.tools import FunctionTool\n", - "from typing_extensions import Annotated\n", - "\n", - "\n", - "async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n", - " # Returns a random stock price for demonstration purposes.\n", - " return random.uniform(10, 200)\n", - "\n", - "\n", - "# Create a function tool.\n", - "stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n", - "\n", - "# Run the tool.\n", - "cancellation_token = CancellationToken()\n", - "result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n", - "\n", - "# Print the result.\n", - "print(stock_price_tool.return_value_as_string(result))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Tool-Equipped Agent\n", - "\n", - "To use tools with an agent, you can use {py:class}`~autogen_core.components.tool_agent.ToolAgent`,\n", - "by using it in a composition pattern.\n", - "Here is an example tool-use agent that uses {py:class}`~autogen_core.components.tool_agent.ToolAgent`\n", - "as an inner agent for executing tools." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "from typing import List\n", - "\n", - "from autogen_core import AgentId, AgentInstantiationContext, MessageContext, RoutedAgent, message_handler\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", - "from autogen_core.components.models import (\n", - " ChatCompletionClient,\n", - " LLMMessage,\n", - " SystemMessage,\n", - " UserMessage,\n", - ")\n", - "from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n", - "from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop\n", - "from autogen_ext.models import OpenAIChatCompletionClient\n", - "\n", - "\n", - "@dataclass\n", - "class Message:\n", - " content: str\n", - "\n", - "\n", - "class ToolUseAgent(RoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent_type: str) -> None:\n", - " super().__init__(\"An agent with tools\")\n", - " self._system_messages: List[LLMMessage] = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", - " self._model_client = model_client\n", - " self._tool_schema = tool_schema\n", - " self._tool_agent_id = AgentId(tool_agent_type, self.id.key)\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # Create a session of messages.\n", - " session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n", - " # Run the caller loop to handle tool calls.\n", - " messages = await tool_agent_caller_loop(\n", - " self,\n", - " tool_agent_id=self._tool_agent_id,\n", - " model_client=self._model_client,\n", - " 
input_messages=session,\n", - " tool_schema=self._tool_schema,\n", - " cancellation_token=ctx.cancellation_token,\n", - " )\n", - " # Return the final response.\n", - " assert isinstance(messages[-1].content, str)\n", - " return Message(content=messages[-1].content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `ToolUseAgent` class uses a convenience function {py:meth}`~autogen_core.components.tool_agent.tool_agent_caller_loop`, \n", - "to handle the interaction between the model and the tool agent.\n", - "The core idea can be described using a simple control flow graph:\n", - "\n", - "![ToolUseAgent control flow graph](tool-use-agent-cfg.svg)\n", - "\n", - "The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n", - "and determines whether the model has generated a tool call.\n", - "If the model has generated tool calls, then the handler sends a function call\n", - "message to the {py:class}`~autogen_core.components.tool_agent.ToolAgent` agent\n", - "to execute the tools,\n", - "and then queries the model again with the results of the tool calls.\n", - "This process continues until the model stops generating tool calls,\n", - "at which point the final response is returned to the user.\n", - "\n", - "By having the tool execution logic in a separate agent,\n", - "we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n", - "can be observed externally and intercepted if necessary.\n", - "\n", - "To run the agent, we need to create a runtime and register the agent." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='tool_use_agent')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Create a runtime.\n", - "runtime = SingleThreadedAgentRuntime()\n", - "# Create the tools.\n", - "tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n", - "# Register the agents.\n", - "await ToolAgent.register(runtime, \"tool_executor_agent\", lambda: ToolAgent(\"tool executor agent\", tools))\n", - "await ToolUseAgent.register(\n", - " runtime,\n", - " \"tool_use_agent\",\n", - " lambda: ToolUseAgent(\n", - " OpenAIChatCompletionClient(model=\"gpt-4o-mini\"), [tool.schema for tool in tools], \"tool_executor_agent\"\n", - " ),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example uses the {py:class}`autogen_core.components.models.OpenAIChatCompletionClient`,\n", - "for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n", - "Let's test the agent with a question about stock price." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The stock price of NVDA (NVIDIA Corporation) on June 1, 2024, was approximately $179.46.\n" - ] - } - ], - "source": [ - "# Start processing messages.\n", - "runtime.start()\n", - "# Send a direct message to the tool agent.\n", - "tool_use_agent = AgentId(\"tool_use_agent\", \"default\")\n", - "response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n", - "print(response.content)\n", - "# Stop processing messages.\n", - "await runtime.stop()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "autogen_core", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tools\n", + "\n", + "Tools are code that can be executed by an agent to perform actions. A tool\n", + "can be a simple function such as a calculator, or an API call to a third-party service\n", + "such as stock price lookup or weather forecast.\n", + "In the context of AI agents, tools are designed to be executed by agents in\n", + "response to model-generated function calls.\n", + "\n", + "AutoGen provides the {py:mod}`autogen_core.components.tools` module with a suite of built-in\n", + "tools and utilities for creating and running custom tools." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Built-in Tools\n", + "\n", + "One of the built-in tools is the {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`,\n", + "which allows agents to execute Python code snippets.\n", + "\n", + "Here is how you create the tool and use it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello, world!\n", + "\n" + ] + } + ], + "source": [ + "from autogen_core import CancellationToken\n", + "from autogen_core.components.tools import PythonCodeExecutionTool\n", + "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", + "\n", + "# Create the tool.\n", + "code_executor = DockerCommandLineCodeExecutor()\n", + "await code_executor.start()\n", + "code_execution_tool = PythonCodeExecutionTool(code_executor)\n", + "cancellation_token = CancellationToken()\n", + "\n", + "# Use the tool directly without an agent.\n", + "code = \"print('Hello, world!')\"\n", + "result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n", + "print(code_execution_tool.return_value_as_string(result))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`\n", + "class is a built-in code executor that runs Python code snippets in a subprocess\n", + "in the local command line environment.\n", + "The {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool` class wraps the code executor\n", + "and provides a simple interface to execute Python code snippets.\n", + "\n", + "Other built-in tools will be added in the future." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Function Tools\n", + "\n", + "A tool can also be a simple Python function that performs a specific action.\n", + "To create a custom function tool, you just need to create a Python function\n", + "and use the {py:class}`~autogen_core.components.tools.FunctionTool` class to wrap it.\n", + "\n", + "The {py:class}`~autogen_core.components.tools.FunctionTool` class uses descriptions and type annotations\n", + "to inform the LLM when and how to use a given function. 
The description provides context\n", + "about the function’s purpose and intended use cases, while type annotations inform the LLM about\n", + "the expected parameters and return type.\n", + "\n", + "For example, a simple tool to obtain the stock price of a company might look like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "80.44429939059668\n" + ] + } + ], + "source": [ + "import random\n", + "\n", + "from autogen_core import CancellationToken\n", + "from autogen_core.components.tools import FunctionTool\n", + "from typing_extensions import Annotated\n", + "\n", + "\n", + "async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n", + " # Returns a random stock price for demonstration purposes.\n", + " return random.uniform(10, 200)\n", + "\n", + "\n", + "# Create a function tool.\n", + "stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n", + "\n", + "# Run the tool.\n", + "cancellation_token = CancellationToken()\n", + "result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n", + "\n", + "# Print the result.\n", + "print(stock_price_tool.return_value_as_string(result))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tool-Equipped Agent\n", + "\n", + "To use tools with an agent, you can use {py:class}`~autogen_core.components.tool_agent.ToolAgent`\n", + "in a composition pattern.\n", + "Here is an example tool-use agent that uses {py:class}`~autogen_core.components.tool_agent.ToolAgent`\n", + "as an inner agent for executing tools." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "from autogen_core import (\n", + " AgentId,\n", + " AgentInstantiationContext,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " message_handler,\n", + ")\n", + "from autogen_core.components.models import (\n", + " ChatCompletionClient,\n", + " LLMMessage,\n", + " SystemMessage,\n", + " UserMessage,\n", + ")\n", + "from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n", + "from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", + "\n", + "\n", + "@dataclass\n", + "class Message:\n", + " content: str\n", + "\n", + "\n", + "class ToolUseAgent(RoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent_type: str) -> None:\n", + " super().__init__(\"An agent with tools\")\n", + " self._system_messages: List[LLMMessage] = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n", + " self._model_client = model_client\n", + " self._tool_schema = tool_schema\n", + " self._tool_agent_id = AgentId(tool_agent_type, self.id.key)\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # Create a session of messages.\n", + " session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n", + " # Run the caller loop to handle tool calls.\n", + " messages = await tool_agent_caller_loop(\n", + " self,\n", + " tool_agent_id=self._tool_agent_id,\n", + " model_client=self._model_client,\n", +
" input_messages=session,\n", + " tool_schema=self._tool_schema,\n", + " cancellation_token=ctx.cancellation_token,\n", + " )\n", + " # Return the final response.\n", + " assert isinstance(messages[-1].content, str)\n", + " return Message(content=messages[-1].content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `ToolUseAgent` class uses a convenience function {py:meth}`~autogen_core.components.tool_agent.tool_agent_caller_loop`, \n", + "to handle the interaction between the model and the tool agent.\n", + "The core idea can be described using a simple control flow graph:\n", + "\n", + "![ToolUseAgent control flow graph](tool-use-agent-cfg.svg)\n", + "\n", + "The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n", + "and determines whether the model has generated a tool call.\n", + "If the model has generated tool calls, then the handler sends a function call\n", + "message to the {py:class}`~autogen_core.components.tool_agent.ToolAgent` agent\n", + "to execute the tools,\n", + "and then queries the model again with the results of the tool calls.\n", + "This process continues until the model stops generating tool calls,\n", + "at which point the final response is returned to the user.\n", + "\n", + "By having the tool execution logic in a separate agent,\n", + "we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n", + "can be observed externally and intercepted if necessary.\n", + "\n", + "To run the agent, we need to create a runtime and register the agent." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='tool_use_agent')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create a runtime.\n", + "runtime = SingleThreadedAgentRuntime()\n", + "# Create the tools.\n", + "tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n", + "# Register the agents.\n", + "await ToolAgent.register(runtime, \"tool_executor_agent\", lambda: ToolAgent(\"tool executor agent\", tools))\n", + "await ToolUseAgent.register(\n", + " runtime,\n", + " \"tool_use_agent\",\n", + " lambda: ToolUseAgent(\n", + " OpenAIChatCompletionClient(model=\"gpt-4o-mini\"), [tool.schema for tool in tools], \"tool_executor_agent\"\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This example uses the {py:class}`autogen_core.components.models.OpenAIChatCompletionClient`,\n", + "for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n", + "Let's test the agent with a question about stock price." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The stock price of NVDA (NVIDIA Corporation) on June 1, 2024, was approximately $179.46.\n" + ] + } + ], + "source": [ + "# Start processing messages.\n", + "runtime.start()\n", + "# Send a direct message to the tool agent.\n", + "tool_use_agent = AgentId(\"tool_use_agent\", \"default\")\n", + "response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n", + "print(response.content)\n", + "# Stop processing messages.\n", + "await runtime.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "autogen_core", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb index 1ad55133f..4f6c5511b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb @@ -310,7 +310,7 @@ "source": [ "import tempfile\n", "\n", - "from autogen_core.application import SingleThreadedAgentRuntime\n", + "from autogen_core import SingleThreadedAgentRuntime\n", "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 67f9bbc54..143c7fc40 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -25,7 +25,6 @@ dependencies = [ "opentelemetry-api~=1.27.0", "asyncio_atexit", "jsonref~=1.1.0", - "grpcio~=1.62.0", # TODO: update this once we have a stable version. 
] [project.optional-dependencies] @@ -35,6 +34,7 @@ grpc = [ [tool.uv] dev-dependencies = [ + "autogen_test_utils", "aiofiles", "azure-identity", "chess", @@ -75,6 +75,8 @@ dev-dependencies = [ "autodoc_pydantic~=2.2", "pygments", + "autogen_ext==0.4.0.dev8", + # Documentation tooling "sphinx-autobuild", ] diff --git a/python/packages/autogen-core/samples/chess_game.py b/python/packages/autogen-core/samples/chess_game.py index 4936c3242..686748c7e 100644 --- a/python/packages/autogen-core/samples/chess_game.py +++ b/python/packages/autogen-core/samples/chess_game.py @@ -7,8 +7,14 @@ import asyncio import logging from typing import Annotated, Literal -from autogen_core import AgentId, AgentInstantiationContext, AgentRuntime, DefaultSubscription, DefaultTopicId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import ( + AgentId, + AgentInstantiationContext, + AgentRuntime, + DefaultSubscription, + DefaultTopicId, + SingleThreadedAgentRuntime, +) from autogen_core.components.model_context import BufferedChatCompletionContext from autogen_core.components.models import SystemMessage from autogen_core.components.tools import FunctionTool diff --git a/python/packages/autogen-core/samples/distributed-group-chat/README.md b/python/packages/autogen-core/samples/distributed-group-chat/README.md index b4cf16583..25722e6eb 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/README.md +++ b/python/packages/autogen-core/samples/distributed-group-chat/README.md @@ -1,8 +1,6 @@ # Distributed Group Chat -from autogen_core.application import WorkerAgentRuntimeHost - -This example runs a gRPC server using [WorkerAgentRuntimeHost](../../src/autogen_core/application/_worker_runtime_host.py) and instantiates three distributed runtimes using [WorkerAgentRuntime](../../src/autogen_core/application/_worker_runtime.py). These runtimes connect to the gRPC server as hosts and facilitate a round-robin distributed group chat. This example leverages the [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service) to implement writer and editor LLM agents. Agents are instructed to provide concise answers, as the primary goal of this example is to showcase the distributed runtime rather than the quality of agent responses. +This example runs a gRPC server using [GrpcWorkerAgentRuntimeHost](../../src/autogen_core/application/_worker_runtime_host.py) and instantiates three distributed runtimes using [GrpcWorkerAgentRuntime](../../src/autogen_core/application/_worker_runtime.py). These worker runtimes connect to the gRPC server host and facilitate a round-robin distributed group chat. This example leverages the [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service) to implement writer and editor LLM agents. Agents are instructed to provide concise answers, as the primary goal of this example is to showcase the distributed runtime rather than the quality of agent responses.
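To see how the renamed classes fit together, here is a minimal single-process sketch (not part of this sample); the address, serializer helper, and the `CascadingMessage`/`ObserverAgent` types are borrowed from the worker samples later in this diff, and in the real sample the host and workers run in separate processes:

```python
import asyncio

from agents import CascadingMessage, ObserverAgent  # message/agent types from the worker sample
from autogen_core import try_get_known_serializers_for_type
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost


async def main() -> None:
    # The host runs the gRPC service that brokers messages between worker runtimes.
    host = GrpcWorkerAgentRuntimeHost(address="localhost:50051")
    host.start()

    # Each worker connects to the host, registers serializers for its message
    # types, and then registers its agents.
    worker = GrpcWorkerAgentRuntime(host_address="localhost:50051")
    worker.add_message_serializer(try_get_known_serializers_for_type(CascadingMessage))
    worker.start()
    await ObserverAgent.register(worker, "observer_agent", lambda: ObserverAgent())

    # Keep the host running until interrupted, as run_host.py does.
    await host.stop_when_signal()


asyncio.run(main())
```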
## Setup diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_agents.py b/python/packages/autogen-core/samples/distributed-group-chat/_agents.py index f819bf726..45d2f8d5d 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_agents.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_agents.py @@ -5,7 +5,6 @@ from uuid import uuid4 from _types import GroupChatMessage, MessageChunk, RequestToSpeak, UIAgentConfig from autogen_core import DefaultTopicId, MessageContext, RoutedAgent, message_handler -from autogen_core.application import WorkerAgentRuntime from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -13,6 +12,7 @@ from autogen_core.components.models import ( SystemMessage, UserMessage, ) +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown @@ -168,7 +168,7 @@ class UIAgent(RoutedAgent): async def publish_message_to_ui( - runtime: RoutedAgent | WorkerAgentRuntime, + runtime: RoutedAgent | GrpcWorkerAgentRuntime, source: str, user_message: str, ui_config: UIAgentConfig, @@ -193,7 +193,7 @@ async def publish_message_to_ui( async def publish_message_to_ui_and_backend( - runtime: RoutedAgent | WorkerAgentRuntime, + runtime: RoutedAgent | GrpcWorkerAgentRuntime, source: str, user_message: str, ui_config: UIAgentConfig, diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py index fee5e91e6..f516f20a3 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py @@ -8,15 +8,15 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_core.application import WorkerAgentRuntime from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown async def main(config: AppConfig): set_all_log_levels(logging.ERROR) - editor_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) + editor_agent_runtime = GrpcWorkerAgentRuntime(host_address=config.host.address) editor_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(4) Console().print(Markdown("Starting **`Editor Agent`**")) diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py index aa68f3c19..ac0507891 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py @@ -8,8 +8,8 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_core.application import WorkerAgentRuntime from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown @@ -18,7 +18,7 @@ set_all_log_levels(logging.ERROR) async def main(config: AppConfig): set_all_log_levels(logging.ERROR) - group_chat_manager_runtime = 
WorkerAgentRuntime(host_address=config.host.address) + group_chat_manager_runtime = GrpcWorkerAgentRuntime(host_address=config.host.address) group_chat_manager_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(1) diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_host.py b/python/packages/autogen-core/samples/distributed-group-chat/run_host.py index 6f1d1f646..27b7b91db 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_host.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_host.py @@ -2,13 +2,13 @@ import asyncio from _types import HostConfig from _utils import load_config -from autogen_core.application import WorkerAgentRuntimeHost +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost from rich.console import Console from rich.markdown import Markdown async def main(host_config: HostConfig): - host = WorkerAgentRuntimeHost(address=host_config.address) + host = GrpcWorkerAgentRuntimeHost(address=host_config.address) host.start() console = Console() diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py b/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py index e4d127aa0..db43e6ec8 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py @@ -9,7 +9,7 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from chainlit import Message # type: ignore [reportAttributeAccessIssue] from rich.console import Console from rich.markdown import Markdown @@ -36,7 +36,7 @@ async def send_cl_stream(msg: MessageChunk) -> None: async def main(config: AppConfig): set_all_log_levels(logging.ERROR) - ui_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) + ui_agent_runtime = GrpcWorkerAgentRuntime(host_address=config.host.address) ui_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py index 1168dcf0b..a7172d15c 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py @@ -8,15 +8,15 @@ from _utils import get_serializers, load_config, set_all_log_levels from autogen_core import ( TypeSubscription, ) -from autogen_core.application import WorkerAgentRuntime from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime from rich.console import Console from rich.markdown import Markdown async def main(config: AppConfig) -> None: set_all_log_levels(logging.ERROR) - writer_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) + writer_agent_runtime = GrpcWorkerAgentRuntime(host_address=config.host.address) writer_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(3) Console().print(Markdown("Starting **`Writer Agent`**")) diff --git 
a/python/packages/autogen-core/samples/semantic_router/run_host.py b/python/packages/autogen-core/samples/semantic_router/run_host.py index 3349af31f..0efa537ff 100644 --- a/python/packages/autogen-core/samples/semantic_router/run_host.py +++ b/python/packages/autogen-core/samples/semantic_router/run_host.py @@ -2,12 +2,12 @@ import asyncio import logging import platform -from autogen_core.application import WorkerAgentRuntimeHost -from autogen_core.application.logging import TRACE_LOGGER_NAME +from autogen_core import TRACE_LOGGER_NAME +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost async def run_host(): - host = WorkerAgentRuntimeHost(address="localhost:50051") + host = GrpcWorkerAgentRuntimeHost(address="localhost:50051") host.start() # Start a host service in the background. if platform.system() == "Windows": try: diff --git a/python/packages/autogen-core/samples/semantic_router/run_semantic_router.py b/python/packages/autogen-core/samples/semantic_router/run_semantic_router.py index aacae5784..9c1958c13 100644 --- a/python/packages/autogen-core/samples/semantic_router/run_semantic_router.py +++ b/python/packages/autogen-core/samples/semantic_router/run_semantic_router.py @@ -32,7 +32,7 @@ from _semantic_router_components import ( WorkerAgentMessage, ) from autogen_core import ClosureAgent, ClosureContext, DefaultSubscription, DefaultTopicId, MessageContext -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime class MockIntentClassifier(IntentClassifierBase): @@ -78,7 +78,7 @@ async def output_result( async def run_workers(): - agent_runtime = WorkerAgentRuntime(host_address="localhost:50051") + agent_runtime = GrpcWorkerAgentRuntime(host_address="localhost:50051") agent_runtime.start() diff --git a/python/packages/autogen-core/samples/slow_human_in_loop.py b/python/packages/autogen-core/samples/slow_human_in_loop.py index 50f793e2f..2cfcf0a64 100644 --- a/python/packages/autogen-core/samples/slow_human_in_loop.py +++ b/python/packages/autogen-core/samples/slow_human_in_loop.py @@ -37,10 +37,10 @@ from autogen_core import ( FunctionCall, MessageContext, RoutedAgent, + SingleThreadedAgentRuntime, message_handler, type_subscription, ) -from autogen_core.application import SingleThreadedAgentRuntime from autogen_core.base.intervention import DefaultInterventionHandler from autogen_core.components.model_context import BufferedChatCompletionContext from autogen_core.components.models import ( diff --git a/python/packages/autogen-core/samples/worker/run_cascading_publisher.py b/python/packages/autogen-core/samples/worker/run_cascading_publisher.py index 6e6b256c1..d5c5f7de4 100644 --- a/python/packages/autogen-core/samples/worker/run_cascading_publisher.py +++ b/python/packages/autogen-core/samples/worker/run_cascading_publisher.py @@ -1,10 +1,10 @@ from agents import CascadingMessage, ObserverAgent from autogen_core import DefaultTopicId, try_get_known_serializers_for_type -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime async def main() -> None: - runtime = WorkerAgentRuntime(host_address="localhost:50051") + runtime = GrpcWorkerAgentRuntime(host_address="localhost:50051") runtime.add_message_serializer(try_get_known_serializers_for_type(CascadingMessage)) runtime.start() await ObserverAgent.register(runtime, "observer_agent", lambda: ObserverAgent()) diff --git a/python/packages/autogen-core/samples/worker/run_cascading_worker.py 
b/python/packages/autogen-core/samples/worker/run_cascading_worker.py index 052361d19..410c09c84 100644 --- a/python/packages/autogen-core/samples/worker/run_cascading_worker.py +++ b/python/packages/autogen-core/samples/worker/run_cascading_worker.py @@ -2,11 +2,11 @@ import uuid from agents import CascadingAgent, ReceiveMessageEvent from autogen_core import try_get_known_serializers_for_type -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime async def main() -> None: - runtime = WorkerAgentRuntime(host_address="localhost:50051") + runtime = GrpcWorkerAgentRuntime(host_address="localhost:50051") runtime.add_message_serializer(try_get_known_serializers_for_type(ReceiveMessageEvent)) runtime.start() agent_type = f"cascading_agent_{uuid.uuid4()}".replace("-", "_") diff --git a/python/packages/autogen-core/samples/worker/run_host.py b/python/packages/autogen-core/samples/worker/run_host.py index 99db8dc00..3f899987c 100644 --- a/python/packages/autogen-core/samples/worker/run_host.py +++ b/python/packages/autogen-core/samples/worker/run_host.py @@ -1,10 +1,10 @@ import asyncio -from autogen_core.application import WorkerAgentRuntimeHost +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost async def main() -> None: - service = WorkerAgentRuntimeHost(address="localhost:50051") + service = GrpcWorkerAgentRuntimeHost(address="localhost:50051") service.start() await service.stop_when_signal() diff --git a/python/packages/autogen-core/samples/worker/run_worker_pub_sub.py b/python/packages/autogen-core/samples/worker/run_worker_pub_sub.py index 5287e7da8..27eb0a2c8 100644 --- a/python/packages/autogen-core/samples/worker/run_worker_pub_sub.py +++ b/python/packages/autogen-core/samples/worker/run_worker_pub_sub.py @@ -11,7 +11,7 @@ from autogen_core import ( message_handler, try_get_known_serializers_for_type, ) -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime @dataclass @@ -72,7 +72,7 @@ class GreeterAgent(RoutedAgent): async def main() -> None: - runtime = WorkerAgentRuntime(host_address="localhost:50051") + runtime = GrpcWorkerAgentRuntime(host_address="localhost:50051") runtime.start() for t in [AskToGreet, Greeting, ReturnedGreeting, Feedback, ReturnedFeedback]: runtime.add_message_serializer(try_get_known_serializers_for_type(t)) diff --git a/python/packages/autogen-core/samples/worker/run_worker_rpc.py b/python/packages/autogen-core/samples/worker/run_worker_rpc.py index bf5dcffaf..1696a948a 100644 --- a/python/packages/autogen-core/samples/worker/run_worker_rpc.py +++ b/python/packages/autogen-core/samples/worker/run_worker_rpc.py @@ -10,7 +10,7 @@ from autogen_core import ( RoutedAgent, message_handler, ) -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime @dataclass @@ -53,7 +53,7 @@ class GreeterAgent(RoutedAgent): async def main() -> None: - runtime = WorkerAgentRuntime(host_address="localhost:50051") + runtime = GrpcWorkerAgentRuntime(host_address="localhost:50051") runtime.start() await ReceiveAgent.register( diff --git a/python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py b/python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py index df9dd6332..c50aacedd 100644 --- a/python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py +++ 
b/python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py @@ -12,7 +12,7 @@ from autogen_core import ( TypeSubscription, try_get_known_serializers_for_type, ) -from autogen_core.application import WorkerAgentRuntime +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime # Add the local package directory to sys.path thisdir = os.path.dirname(os.path.abspath(__file__)) @@ -34,7 +34,7 @@ async def main() -> None: agentHost = agentHost[8:] agnext_logger.info("0") agnext_logger.info(agentHost) - runtime = WorkerAgentRuntime(host_address=agentHost, payload_serialization_format=PROTOBUF_DATA_CONTENT_TYPE) + runtime = GrpcWorkerAgentRuntime(host_address=agentHost, payload_serialization_format=PROTOBUF_DATA_CONTENT_TYPE) agnext_logger.info("1") runtime.start() diff --git a/python/packages/autogen-core/src/autogen_core/__init__.py b/python/packages/autogen-core/src/autogen_core/__init__.py index 41ac49b95..1eb61324e 100644 --- a/python/packages/autogen-core/src/autogen_core/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/__init__.py @@ -12,6 +12,7 @@ from ._agent_type import AgentType from ._base_agent import BaseAgent from ._cancellation_token import CancellationToken from ._closure_agent import ClosureAgent, ClosureContext +from ._constants import EVENT_LOGGER_NAME, ROOT_LOGGER_NAME, TRACE_LOGGER_NAME from ._default_subscription import DefaultSubscription, default_subscription, type_subscription from ._default_topic import DefaultTopicId from ._image import Image @@ -25,6 +26,7 @@ from ._serialization import ( UnknownPayload, try_get_known_serializers_for_type, ) +from ._single_threaded_agent_runtime import SingleThreadedAgentRuntime from ._subscription import Subscription from ._subscription_context import SubscriptionInstantiationContext from ._topic import TopicId @@ -66,4 +68,8 @@ __all__ = [ "TypePrefixSubscription", "JSON_DATA_CONTENT_TYPE", "PROTOBUF_DATA_CONTENT_TYPE", + "SingleThreadedAgentRuntime", + "ROOT_LOGGER_NAME", + "EVENT_LOGGER_NAME", + "TRACE_LOGGER_NAME", ] diff --git a/python/packages/autogen-core/src/autogen_core/_constants.py b/python/packages/autogen-core/src/autogen_core/_constants.py new file mode 100644 index 000000000..8fc4580c0 --- /dev/null +++ b/python/packages/autogen-core/src/autogen_core/_constants.py @@ -0,0 +1,9 @@ +ROOT_LOGGER_NAME = "autogen_core" +"""str: Logger name used for the root logger of the autogen_core package""" + +EVENT_LOGGER_NAME = "autogen_core.events" +"""str: Logger name used for structured event logging""" + + +TRACE_LOGGER_NAME = "autogen_core.trace" +"""str: Logger name used for developer-intended trace logging.
The content and format of this log should not be depended upon.""" diff --git a/python/packages/autogen-core/src/autogen_core/application/_helpers.py b/python/packages/autogen-core/src/autogen_core/_runtime_impl_helpers.py similarity index 93% rename from python/packages/autogen-core/src/autogen_core/application/_helpers.py rename to python/packages/autogen-core/src/autogen_core/_runtime_impl_helpers.py index fe4e1697f..dcd0bb6c9 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_helpers.py +++ b/python/packages/autogen-core/src/autogen_core/_runtime_impl_helpers.py @@ -1,11 +1,11 @@ from collections import defaultdict from typing import Awaitable, Callable, DefaultDict, List, Set -from .._agent import Agent -from .._agent_id import AgentId -from .._agent_type import AgentType -from .._subscription import Subscription -from .._topic import TopicId +from ._agent import Agent +from ._agent_id import AgentId +from ._agent_type import AgentType +from ._subscription import Subscription +from ._topic import TopicId async def get_impl( diff --git a/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py new file mode 100644 index 000000000..4feab3051 --- /dev/null +++ b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py @@ -0,0 +1,687 @@ +from __future__ import annotations + +import asyncio +import inspect +import logging +import threading +import uuid +import warnings +from asyncio import CancelledError, Future, Task +from collections.abc import Sequence +from dataclasses import dataclass +from enum import Enum +from typing import Any, Awaitable, Callable, Dict, List, Mapping, ParamSpec, Set, Type, TypeVar, cast + +from opentelemetry.trace import TracerProvider +from typing_extensions import deprecated + +from autogen_core._serialization import MessageSerializer, SerializationRegistry + +from ._agent import Agent +from ._agent_id import AgentId +from ._agent_instantiation import AgentInstantiationContext +from ._agent_metadata import AgentMetadata +from ._agent_runtime import AgentRuntime +from ._agent_type import AgentType +from ._cancellation_token import CancellationToken +from ._message_context import MessageContext +from ._message_handler_context import MessageHandlerContext +from ._runtime_impl_helpers import SubscriptionManager, get_impl +from ._subscription import Subscription +from ._subscription_context import SubscriptionInstantiationContext +from ._telemetry import EnvelopeMetadata, MessageRuntimeTracingConfig, TraceHelper, get_telemetry_envelope_metadata +from ._topic import TopicId +from .base.intervention import DropMessage, InterventionHandler +from .exceptions import MessageDroppedException + +logger = logging.getLogger("autogen_core") +event_logger = logging.getLogger("autogen_core.events") + +# Some functions below take a parameter named `type`, which shadows the built-in `type` function. +# Alias the built-in here so those functions can still use it.
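+# For example, register_factory() takes a parameter named `type` (an AgentType) and still +# needs the built-in to check the class of the agent instance it constructs via +# `type_func_alias(agent_instance) != expected_class`.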
+type_func_alias = type + + +@dataclass(kw_only=True) +class PublishMessageEnvelope: + """A message envelope for publishing messages to all agents that can handle + the message of the type T.""" + + message: Any + cancellation_token: CancellationToken + sender: AgentId | None + topic_id: TopicId + metadata: EnvelopeMetadata | None = None + message_id: str + + +@dataclass(kw_only=True) +class SendMessageEnvelope: + """A message envelope for sending a message to a specific agent that can handle + the message of the type T.""" + + message: Any + sender: AgentId | None + recipient: AgentId + future: Future[Any] + cancellation_token: CancellationToken + metadata: EnvelopeMetadata | None = None + + +@dataclass(kw_only=True) +class ResponseMessageEnvelope: + """A message envelope for sending a response to a message.""" + + message: Any + future: Future[Any] + sender: AgentId + recipient: AgentId | None + metadata: EnvelopeMetadata | None = None + + +P = ParamSpec("P") +T = TypeVar("T", bound=Agent) + + +class Counter: + def __init__(self) -> None: + self._count: int = 0 + self.threadLock = threading.Lock() + + def increment(self) -> None: + self.threadLock.acquire() + self._count += 1 + self.threadLock.release() + + def get(self) -> int: + return self._count + + def decrement(self) -> None: + self.threadLock.acquire() + self._count -= 1 + self.threadLock.release() + + +class RunContext: + class RunState(Enum): + RUNNING = 0 + CANCELLED = 1 + UNTIL_IDLE = 2 + + def __init__(self, runtime: SingleThreadedAgentRuntime) -> None: + self._runtime = runtime + self._run_state = RunContext.RunState.RUNNING + self._end_condition: Callable[[], bool] = self._stop_when_cancelled + self._run_task = asyncio.create_task(self._run()) + self._lock = asyncio.Lock() + + async def _run(self) -> None: + while True: + async with self._lock: + if self._end_condition(): + return + + await self._runtime.process_next() + + async def stop(self) -> None: + async with self._lock: + self._run_state = RunContext.RunState.CANCELLED + self._end_condition = self._stop_when_cancelled + await self._run_task + + async def stop_when_idle(self) -> None: + async with self._lock: + self._run_state = RunContext.RunState.UNTIL_IDLE + self._end_condition = self._stop_when_idle + await self._run_task + + async def stop_when(self, condition: Callable[[], bool]) -> None: + async with self._lock: + self._end_condition = condition + await self._run_task + + def _stop_when_cancelled(self) -> bool: + return self._run_state == RunContext.RunState.CANCELLED + + def _stop_when_idle(self) -> bool: + return self._run_state == RunContext.RunState.UNTIL_IDLE and self._runtime.idle + + +def _warn_if_none(value: Any, handler_name: str) -> None: + """ + Utility function to check if the intervention handler returned None and issue a warning. + + Args: + value: The return value to check + handler_name: Name of the intervention handler method for the warning message + """ + if value is None: + warnings.warn( + f"Intervention handler {handler_name} returned None. This might be unintentional. 
" + "Consider returning the original message or DropMessage explicitly.", + RuntimeWarning, + stacklevel=2, + ) + + +class SingleThreadedAgentRuntime(AgentRuntime): + def __init__( + self, + *, + intervention_handlers: List[InterventionHandler] | None = None, + tracer_provider: TracerProvider | None = None, + ) -> None: + self._tracer_helper = TraceHelper(tracer_provider, MessageRuntimeTracingConfig("SingleThreadedAgentRuntime")) + self._message_queue: List[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope] = [] + # (namespace, type) -> List[AgentId] + self._agent_factories: Dict[ + str, Callable[[], Agent | Awaitable[Agent]] | Callable[[AgentRuntime, AgentId], Agent | Awaitable[Agent]] + ] = {} + self._instantiated_agents: Dict[AgentId, Agent] = {} + self._intervention_handlers = intervention_handlers + self._outstanding_tasks = Counter() + self._background_tasks: Set[Task[Any]] = set() + self._subscription_manager = SubscriptionManager() + self._run_context: RunContext | None = None + self._serialization_registry = SerializationRegistry() + + @property + def unprocessed_messages( + self, + ) -> Sequence[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope]: + return self._message_queue + + @property + def outstanding_tasks(self) -> int: + return self._outstanding_tasks.get() + + @property + def _known_agent_names(self) -> Set[str]: + return set(self._agent_factories.keys()) + + # Returns the response of the message + async def send_message( + self, + message: Any, + recipient: AgentId, + *, + sender: AgentId | None = None, + cancellation_token: CancellationToken | None = None, + ) -> Any: + if cancellation_token is None: + cancellation_token = CancellationToken() + + # event_logger.info( + # MessageEvent( + # payload=message, + # sender=sender, + # receiver=recipient, + # kind=MessageKind.DIRECT, + # delivery_stage=DeliveryStage.SEND, + # ) + # ) + + with self._tracer_helper.trace_block( + "create", + recipient, + parent=None, + extraAttributes={"message_type": type(message).__name__}, + ): + future = asyncio.get_event_loop().create_future() + if recipient.type not in self._known_agent_names: + future.set_exception(Exception("Recipient not found")) + + content = message.__dict__ if hasattr(message, "__dict__") else message + logger.info(f"Sending message of type {type(message).__name__} to {recipient.type}: {content}") + + self._message_queue.append( + SendMessageEnvelope( + message=message, + recipient=recipient, + future=future, + cancellation_token=cancellation_token, + sender=sender, + metadata=get_telemetry_envelope_metadata(), + ) + ) + + cancellation_token.link_future(future) + + return await future + + async def publish_message( + self, + message: Any, + topic_id: TopicId, + *, + sender: AgentId | None = None, + cancellation_token: CancellationToken | None = None, + message_id: str | None = None, + ) -> None: + with self._tracer_helper.trace_block( + "create", + topic_id, + parent=None, + extraAttributes={"message_type": type(message).__name__}, + ): + if cancellation_token is None: + cancellation_token = CancellationToken() + content = message.__dict__ if hasattr(message, "__dict__") else message + logger.info(f"Publishing message of type {type(message).__name__} to all subscribers: {content}") + + if message_id is None: + message_id = str(uuid.uuid4()) + + # event_logger.info( + # MessageEvent( + # payload=message, + # sender=sender, + # receiver=None, + # kind=MessageKind.PUBLISH, + # delivery_stage=DeliveryStage.SEND, + # ) + # ) + + 
self._message_queue.append( + PublishMessageEnvelope( + message=message, + cancellation_token=cancellation_token, + sender=sender, + topic_id=topic_id, + metadata=get_telemetry_envelope_metadata(), + message_id=message_id, + ) + ) + + async def save_state(self) -> Mapping[str, Any]: + state: Dict[str, Dict[str, Any]] = {} + for agent_id in self._instantiated_agents: + state[str(agent_id)] = dict(await (await self._get_agent(agent_id)).save_state()) + return state + + async def load_state(self, state: Mapping[str, Any]) -> None: + for agent_id_str in state: + agent_id = AgentId.from_str(agent_id_str) + if agent_id.type in self._known_agent_names: + await (await self._get_agent(agent_id)).load_state(state[str(agent_id)]) + + async def _process_send(self, message_envelope: SendMessageEnvelope) -> None: + with self._tracer_helper.trace_block("send", message_envelope.recipient, parent=message_envelope.metadata): + recipient = message_envelope.recipient + # todo: check if recipient is in the known namespaces + # assert recipient in self._agents + + try: + # TODO use id + sender_name = message_envelope.sender.type if message_envelope.sender is not None else "Unknown" + logger.info( + f"Calling message handler for {recipient} with message type {type(message_envelope.message).__name__} sent by {sender_name}" + ) + # event_logger.info( + # MessageEvent( + # payload=message_envelope.message, + # sender=message_envelope.sender, + # receiver=recipient, + # kind=MessageKind.DIRECT, + # delivery_stage=DeliveryStage.DELIVER, + # ) + # ) + recipient_agent = await self._get_agent(recipient) + message_context = MessageContext( + sender=message_envelope.sender, + topic_id=None, + is_rpc=True, + cancellation_token=message_envelope.cancellation_token, + # Will be fixed when send API removed + message_id="NOT_DEFINED_TODO_FIX", + ) + with MessageHandlerContext.populate_context(recipient_agent.id): + response = await recipient_agent.on_message( + message_envelope.message, + ctx=message_context, + ) + except CancelledError as e: + if not message_envelope.future.cancelled(): + message_envelope.future.set_exception(e) + self._outstanding_tasks.decrement() + return + except BaseException as e: + message_envelope.future.set_exception(e) + self._outstanding_tasks.decrement() + return + + self._message_queue.append( + ResponseMessageEnvelope( + message=response, + future=message_envelope.future, + sender=message_envelope.recipient, + recipient=message_envelope.sender, + metadata=get_telemetry_envelope_metadata(), + ) + ) + self._outstanding_tasks.decrement() + + async def _process_publish(self, message_envelope: PublishMessageEnvelope) -> None: + with self._tracer_helper.trace_block("publish", message_envelope.topic_id, parent=message_envelope.metadata): + try: + responses: List[Awaitable[Any]] = [] + recipients = await self._subscription_manager.get_subscribed_recipients(message_envelope.topic_id) + for agent_id in recipients: + # Avoid sending the message back to the sender + if message_envelope.sender is not None and agent_id == message_envelope.sender: + continue + + sender_agent = ( + await self._get_agent(message_envelope.sender) if message_envelope.sender is not None else None + ) + sender_name = str(sender_agent.id) if sender_agent is not None else "Unknown" + logger.info( + f"Calling message handler for {agent_id.type} with message type {type(message_envelope.message).__name__} published by {sender_name}" + ) + # event_logger.info( + # MessageEvent( + # payload=message_envelope.message, + # 
sender=message_envelope.sender, + # receiver=agent, + # kind=MessageKind.PUBLISH, + # delivery_stage=DeliveryStage.DELIVER, + # ) + # ) + message_context = MessageContext( + sender=message_envelope.sender, + topic_id=message_envelope.topic_id, + is_rpc=False, + cancellation_token=message_envelope.cancellation_token, + message_id=message_envelope.message_id, + ) + agent = await self._get_agent(agent_id) + + async def _on_message(agent: Agent, message_context: MessageContext) -> Any: + with self._tracer_helper.trace_block("process", agent.id, parent=None): + with MessageHandlerContext.populate_context(agent.id): + return await agent.on_message( + message_envelope.message, + ctx=message_context, + ) + + future = _on_message(agent, message_context) + responses.append(future) + + await asyncio.gather(*responses) + except BaseException as e: + # Ignore cancelled errors from logs + if isinstance(e, CancelledError): + return + logger.error("Error processing publish message", exc_info=True) + finally: + self._outstanding_tasks.decrement() + # TODO if responses are given for a publish + + async def _process_response(self, message_envelope: ResponseMessageEnvelope) -> None: + with self._tracer_helper.trace_block("ack", message_envelope.recipient, parent=message_envelope.metadata): + content = ( + message_envelope.message.__dict__ + if hasattr(message_envelope.message, "__dict__") + else message_envelope.message + ) + logger.info( + f"Resolving response with message type {type(message_envelope.message).__name__} for recipient {message_envelope.recipient} from {message_envelope.sender.type}: {content}" + ) + # event_logger.info( + # MessageEvent( + # payload=message_envelope.message, + # sender=message_envelope.sender, + # receiver=message_envelope.recipient, + # kind=MessageKind.RESPOND, + # delivery_stage=DeliveryStage.DELIVER, + # ) + # ) + self._outstanding_tasks.decrement() + if not message_envelope.future.cancelled(): + message_envelope.future.set_result(message_envelope.message) + + async def process_next(self) -> None: + """Process the next message in the queue.""" + + if len(self._message_queue) == 0: + # Yield control to the event loop to allow other tasks to run + await asyncio.sleep(0) + return + message_envelope = self._message_queue.pop(0) + + match message_envelope: + case SendMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future): + if self._intervention_handlers is not None: + for handler in self._intervention_handlers: + with self._tracer_helper.trace_block( + "intercept", handler.__class__.__name__, parent=message_envelope.metadata + ): + try: + temp_message = await handler.on_send(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_send") + except BaseException as e: + future.set_exception(e) + return + if temp_message is DropMessage or isinstance(temp_message, DropMessage): + future.set_exception(MessageDroppedException()) + return + + message_envelope.message = temp_message + self._outstanding_tasks.increment() + task = asyncio.create_task(self._process_send(message_envelope)) + self._background_tasks.add(task) + task.add_done_callback(self._background_tasks.discard) + case PublishMessageEnvelope( + message=message, + sender=sender, + ): + if self._intervention_handlers is not None: + for handler in self._intervention_handlers: + with self._tracer_helper.trace_block( + "intercept", handler.__class__.__name__, parent=message_envelope.metadata + ): + try: + temp_message = await handler.on_publish(message, sender=sender) + 
_warn_if_none(temp_message, "on_publish") + except BaseException as e: + # TODO: we should raise the intervention exception to the publisher. + logger.error(f"Exception raised in intervention handler: {e}", exc_info=True) + return + if temp_message is DropMessage or isinstance(temp_message, DropMessage): + # TODO log message dropped + return + + message_envelope.message = temp_message + self._outstanding_tasks.increment() + task = asyncio.create_task(self._process_publish(message_envelope)) + self._background_tasks.add(task) + task.add_done_callback(self._background_tasks.discard) + case ResponseMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future): + if self._intervention_handlers is not None: + for handler in self._intervention_handlers: + try: + temp_message = await handler.on_response(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_response") + except BaseException as e: + # TODO: should we raise the exception to the sender of the response instead? + future.set_exception(e) + return + if temp_message is DropMessage or isinstance(temp_message, DropMessage): + future.set_exception(MessageDroppedException()) + return + message_envelope.message = temp_message + self._outstanding_tasks.increment() + task = asyncio.create_task(self._process_response(message_envelope)) + self._background_tasks.add(task) + task.add_done_callback(self._background_tasks.discard) + + # Yield control to the message loop to allow other tasks to run + await asyncio.sleep(0) + + @property + def idle(self) -> bool: + return len(self._message_queue) == 0 and self._outstanding_tasks.get() == 0 + + def start(self) -> None: + """Start the runtime message processing loop.""" + if self._run_context is not None: + raise RuntimeError("Runtime is already started") + self._run_context = RunContext(self) + + async def stop(self) -> None: + """Stop the runtime message processing loop.""" + if self._run_context is None: + raise RuntimeError("Runtime is not started") + await self._run_context.stop() + self._run_context = None + + async def stop_when_idle(self) -> None: + """Stop the runtime message processing loop when there is + no outstanding message being processed or queued.""" + if self._run_context is None: + raise RuntimeError("Runtime is not started") + await self._run_context.stop_when_idle() + self._run_context = None + + async def stop_when(self, condition: Callable[[], bool]) -> None: + """Stop the runtime message processing loop when the condition is met.""" + if self._run_context is None: + raise RuntimeError("Runtime is not started") + await self._run_context.stop_when(condition) + self._run_context = None + + async def agent_metadata(self, agent: AgentId) -> AgentMetadata: + return (await self._get_agent(agent)).metadata + + async def agent_save_state(self, agent: AgentId) -> Mapping[str, Any]: + return await (await self._get_agent(agent)).save_state() + + async def agent_load_state(self, agent: AgentId, state: Mapping[str, Any]) -> None: + await (await self._get_agent(agent)).load_state(state) + + @deprecated( + "Use your agent's `register` method directly instead of this method. See documentation for latest usage." 
+ ) + async def register( + self, + type: str, + agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]], + subscriptions: Callable[[], list[Subscription] | Awaitable[list[Subscription]]] + | list[Subscription] + | None = None, + ) -> AgentType: + if type in self._agent_factories: + raise ValueError(f"Agent with type {type} already exists.") + + if subscriptions is not None: + if callable(subscriptions): + with SubscriptionInstantiationContext.populate_context(AgentType(type)): + subscriptions_list_result = subscriptions() + if inspect.isawaitable(subscriptions_list_result): + subscriptions_list = await subscriptions_list_result + else: + subscriptions_list = subscriptions_list_result + else: + subscriptions_list = subscriptions + + for subscription in subscriptions_list: + await self.add_subscription(subscription) + + self._agent_factories[type] = agent_factory + return AgentType(type) + + async def register_factory( + self, + *, + type: AgentType, + agent_factory: Callable[[], T | Awaitable[T]], + expected_class: type[T], + ) -> AgentType: + if type.type in self._agent_factories: + raise ValueError(f"Agent with type {type} already exists.") + + async def factory_wrapper() -> T: + maybe_agent_instance = agent_factory() + if inspect.isawaitable(maybe_agent_instance): + agent_instance = await maybe_agent_instance + else: + agent_instance = maybe_agent_instance + + if type_func_alias(agent_instance) != expected_class: + raise ValueError("Factory registered using the wrong type.") + + return agent_instance + + self._agent_factories[type.type] = factory_wrapper + + return type + + async def _invoke_agent_factory( + self, + agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]], + agent_id: AgentId, + ) -> T: + with AgentInstantiationContext.populate_context((self, agent_id)): + if len(inspect.signature(agent_factory).parameters) == 0: + factory_one = cast(Callable[[], T], agent_factory) + agent = factory_one() + elif len(inspect.signature(agent_factory).parameters) == 2: + warnings.warn( + "Agent factories that take two arguments are deprecated. Use AgentInstantiationContext instead. Two arg factories will be removed in a future version.", + stacklevel=2, + ) + factory_two = cast(Callable[[AgentRuntime, AgentId], T], agent_factory) + agent = factory_two(self, agent_id) + else: + raise ValueError("Agent factory must take 0 or 2 arguments.") + + if inspect.isawaitable(agent): + return cast(T, await agent) + + return agent + + async def _get_agent(self, agent_id: AgentId) -> Agent: + if agent_id in self._instantiated_agents: + return self._instantiated_agents[agent_id] + + if agent_id.type not in self._agent_factories: + raise LookupError(f"Agent with name {agent_id.type} not found.") + + agent_factory = self._agent_factories[agent_id.type] + agent = await self._invoke_agent_factory(agent_factory, agent_id) + self._instantiated_agents[agent_id] = agent + return agent + + # TODO: remove the following type ignore when this is fixed in mypy: https://github.com/python/mypy/issues/3737 + async def try_get_underlying_agent_instance(self, id: AgentId, type: Type[T] = Agent) -> T: # type: ignore[assignment] + if id.type not in self._agent_factories: + raise LookupError(f"Agent with name {id.type} not found.") + + # TODO: check if remote + agent_instance = await self._get_agent(id) + + if not isinstance(agent_instance, type): + raise TypeError( + f"Agent with name {id.type} is not of type {type.__name__}. 
It is of type {type_func_alias(agent_instance).__name__}" + ) + + return agent_instance + + async def add_subscription(self, subscription: Subscription) -> None: + await self._subscription_manager.add_subscription(subscription) + + async def remove_subscription(self, id: str) -> None: + await self._subscription_manager.remove_subscription(id) + + async def get( + self, id_or_type: AgentId | AgentType | str, /, key: str = "default", *, lazy: bool = True + ) -> AgentId: + return await get_impl( + id_or_type=id_or_type, + key=key, + lazy=lazy, + instance_getter=self._get_agent, + ) + + def add_message_serializer(self, serializer: MessageSerializer[Any] | Sequence[MessageSerializer[Any]]) -> None: + self._serialization_registry.add_serializer(serializer) diff --git a/python/packages/autogen-core/src/autogen_core/application/telemetry/__init__.py b/python/packages/autogen-core/src/autogen_core/_telemetry/__init__.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/telemetry/__init__.py rename to python/packages/autogen-core/src/autogen_core/_telemetry/__init__.py diff --git a/python/packages/autogen-core/src/autogen_core/application/telemetry/_constants.py b/python/packages/autogen-core/src/autogen_core/_telemetry/_constants.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/telemetry/_constants.py rename to python/packages/autogen-core/src/autogen_core/_telemetry/_constants.py diff --git a/python/packages/autogen-core/src/autogen_core/application/telemetry/_propagation.py b/python/packages/autogen-core/src/autogen_core/_telemetry/_propagation.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/telemetry/_propagation.py rename to python/packages/autogen-core/src/autogen_core/_telemetry/_propagation.py diff --git a/python/packages/autogen-core/src/autogen_core/application/telemetry/_tracing.py b/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/telemetry/_tracing.py rename to python/packages/autogen-core/src/autogen_core/_telemetry/_tracing.py diff --git a/python/packages/autogen-core/src/autogen_core/application/telemetry/_tracing_config.py b/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py similarity index 99% rename from python/packages/autogen-core/src/autogen_core/application/telemetry/_tracing_config.py rename to python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py index c21e1bfc6..2cb345bcb 100644 --- a/python/packages/autogen-core/src/autogen_core/application/telemetry/_tracing_config.py +++ b/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py @@ -6,7 +6,8 @@ from opentelemetry.trace import SpanKind from opentelemetry.util import types from typing_extensions import NotRequired -from ... 
import AgentId, TopicId +from .._agent_id import AgentId +from .._topic import TopicId from ._constants import NAMESPACE logger = logging.getLogger("autogen_core") diff --git a/python/packages/autogen-core/src/autogen_core/_type_helpers.py b/python/packages/autogen-core/src/autogen_core/_type_helpers.py index 3dab707fe..66e52e4b6 100644 --- a/python/packages/autogen-core/src/autogen_core/_type_helpers.py +++ b/python/packages/autogen-core/src/autogen_core/_type_helpers.py @@ -1,9 +1,6 @@ from collections.abc import Sequence from types import NoneType, UnionType -from typing import Any, Optional, Tuple, Type, Union, get_args, get_origin - -# Had to redefine this from grpc.aio._typing as using that one was causing mypy errors -ChannelArgumentType = Sequence[Tuple[str, Any]] +from typing import Any, Optional, Type, Union, get_args, get_origin def is_union(t: object) -> bool: diff --git a/python/packages/autogen-core/src/autogen_core/application/__init__.py b/python/packages/autogen-core/src/autogen_core/application/__init__.py index 0caa89c6d..e69de29bb 100644 --- a/python/packages/autogen-core/src/autogen_core/application/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/application/__init__.py @@ -1,9 +0,0 @@ -""" -The :mod:`autogen_core.application` module provides implementations of core components that are used to compose an application -""" - -from ._single_threaded_agent_runtime import SingleThreadedAgentRuntime -from ._worker_runtime import WorkerAgentRuntime -from ._worker_runtime_host import WorkerAgentRuntimeHost - -__all__ = ["SingleThreadedAgentRuntime", "WorkerAgentRuntime", "WorkerAgentRuntimeHost"] diff --git a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py index ebd764bc7..5b636a756 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py @@ -1,689 +1,10 @@ -from __future__ import annotations - -import asyncio -import inspect -import logging -import threading -import uuid -import warnings -from asyncio import CancelledError, Future, Task -from collections.abc import Sequence -from dataclasses import dataclass -from enum import Enum -from typing import Any, Awaitable, Callable, Dict, List, Mapping, ParamSpec, Set, Type, TypeVar, cast - -from opentelemetry.trace import TracerProvider from typing_extensions import deprecated -from autogen_core._serialization import MessageSerializer, SerializationRegistry +from .._single_threaded_agent_runtime import SingleThreadedAgentRuntime as SingleThreadedAgentRuntimeAlias -from .. import ( - Agent, - AgentId, - AgentInstantiationContext, - AgentMetadata, - AgentRuntime, - AgentType, - CancellationToken, - MessageContext, - MessageHandlerContext, - Subscription, - SubscriptionInstantiationContext, - TopicId, + +@deprecated( + "autogen_core.application.SingleThreadedAgentRuntime moved to autogen_core.SingleThreadedAgentRuntime. This alias will be removed in 0.4.0." 
) -from ..base.intervention import DropMessage, InterventionHandler -from ..exceptions import MessageDroppedException -from ._helpers import SubscriptionManager, get_impl -from .telemetry import EnvelopeMetadata, MessageRuntimeTracingConfig, TraceHelper, get_telemetry_envelope_metadata - -logger = logging.getLogger("autogen_core") -event_logger = logging.getLogger("autogen_core.events") - -# We use a type parameter in some functions which shadows the built-in `type` function. -# This is a workaround to avoid shadowing the built-in `type` function. -type_func_alias = type - - -@dataclass(kw_only=True) -class PublishMessageEnvelope: - """A message envelope for publishing messages to all agents that can handle - the message of the type T.""" - - message: Any - cancellation_token: CancellationToken - sender: AgentId | None - topic_id: TopicId - metadata: EnvelopeMetadata | None = None - message_id: str - - -@dataclass(kw_only=True) -class SendMessageEnvelope: - """A message envelope for sending a message to a specific agent that can handle - the message of the type T.""" - - message: Any - sender: AgentId | None - recipient: AgentId - future: Future[Any] - cancellation_token: CancellationToken - metadata: EnvelopeMetadata | None = None - - -@dataclass(kw_only=True) -class ResponseMessageEnvelope: - """A message envelope for sending a response to a message.""" - - message: Any - future: Future[Any] - sender: AgentId - recipient: AgentId | None - metadata: EnvelopeMetadata | None = None - - -P = ParamSpec("P") -T = TypeVar("T", bound=Agent) - - -class Counter: - def __init__(self) -> None: - self._count: int = 0 - self.threadLock = threading.Lock() - - def increment(self) -> None: - self.threadLock.acquire() - self._count += 1 - self.threadLock.release() - - def get(self) -> int: - return self._count - - def decrement(self) -> None: - self.threadLock.acquire() - self._count -= 1 - self.threadLock.release() - - -class RunContext: - class RunState(Enum): - RUNNING = 0 - CANCELLED = 1 - UNTIL_IDLE = 2 - - def __init__(self, runtime: SingleThreadedAgentRuntime) -> None: - self._runtime = runtime - self._run_state = RunContext.RunState.RUNNING - self._end_condition: Callable[[], bool] = self._stop_when_cancelled - self._run_task = asyncio.create_task(self._run()) - self._lock = asyncio.Lock() - - async def _run(self) -> None: - while True: - async with self._lock: - if self._end_condition(): - return - - await self._runtime.process_next() - - async def stop(self) -> None: - async with self._lock: - self._run_state = RunContext.RunState.CANCELLED - self._end_condition = self._stop_when_cancelled - await self._run_task - - async def stop_when_idle(self) -> None: - async with self._lock: - self._run_state = RunContext.RunState.UNTIL_IDLE - self._end_condition = self._stop_when_idle - await self._run_task - - async def stop_when(self, condition: Callable[[], bool]) -> None: - async with self._lock: - self._end_condition = condition - await self._run_task - - def _stop_when_cancelled(self) -> bool: - return self._run_state == RunContext.RunState.CANCELLED - - def _stop_when_idle(self) -> bool: - return self._run_state == RunContext.RunState.UNTIL_IDLE and self._runtime.idle - - -def _warn_if_none(value: Any, handler_name: str) -> None: - """ - Utility function to check if the intervention handler returned None and issue a warning. 
- - Args: - value: The return value to check - handler_name: Name of the intervention handler method for the warning message - """ - if value is None: - warnings.warn( - f"Intervention handler {handler_name} returned None. This might be unintentional. " - "Consider returning the original message or DropMessage explicitly.", - RuntimeWarning, - stacklevel=2, - ) - - -class SingleThreadedAgentRuntime(AgentRuntime): - def __init__( - self, - *, - intervention_handlers: List[InterventionHandler] | None = None, - tracer_provider: TracerProvider | None = None, - ) -> None: - self._tracer_helper = TraceHelper(tracer_provider, MessageRuntimeTracingConfig("SingleThreadedAgentRuntime")) - self._message_queue: List[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope] = [] - # (namespace, type) -> List[AgentId] - self._agent_factories: Dict[ - str, Callable[[], Agent | Awaitable[Agent]] | Callable[[AgentRuntime, AgentId], Agent | Awaitable[Agent]] - ] = {} - self._instantiated_agents: Dict[AgentId, Agent] = {} - self._intervention_handlers = intervention_handlers - self._outstanding_tasks = Counter() - self._background_tasks: Set[Task[Any]] = set() - self._subscription_manager = SubscriptionManager() - self._run_context: RunContext | None = None - self._serialization_registry = SerializationRegistry() - - @property - def unprocessed_messages( - self, - ) -> Sequence[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope]: - return self._message_queue - - @property - def outstanding_tasks(self) -> int: - return self._outstanding_tasks.get() - - @property - def _known_agent_names(self) -> Set[str]: - return set(self._agent_factories.keys()) - - # Returns the response of the message - async def send_message( - self, - message: Any, - recipient: AgentId, - *, - sender: AgentId | None = None, - cancellation_token: CancellationToken | None = None, - ) -> Any: - if cancellation_token is None: - cancellation_token = CancellationToken() - - # event_logger.info( - # MessageEvent( - # payload=message, - # sender=sender, - # receiver=recipient, - # kind=MessageKind.DIRECT, - # delivery_stage=DeliveryStage.SEND, - # ) - # ) - - with self._tracer_helper.trace_block( - "create", - recipient, - parent=None, - extraAttributes={"message_type": type(message).__name__}, - ): - future = asyncio.get_event_loop().create_future() - if recipient.type not in self._known_agent_names: - future.set_exception(Exception("Recipient not found")) - - content = message.__dict__ if hasattr(message, "__dict__") else message - logger.info(f"Sending message of type {type(message).__name__} to {recipient.type}: {content}") - - self._message_queue.append( - SendMessageEnvelope( - message=message, - recipient=recipient, - future=future, - cancellation_token=cancellation_token, - sender=sender, - metadata=get_telemetry_envelope_metadata(), - ) - ) - - cancellation_token.link_future(future) - - return await future - - async def publish_message( - self, - message: Any, - topic_id: TopicId, - *, - sender: AgentId | None = None, - cancellation_token: CancellationToken | None = None, - message_id: str | None = None, - ) -> None: - with self._tracer_helper.trace_block( - "create", - topic_id, - parent=None, - extraAttributes={"message_type": type(message).__name__}, - ): - if cancellation_token is None: - cancellation_token = CancellationToken() - content = message.__dict__ if hasattr(message, "__dict__") else message - logger.info(f"Publishing message of type {type(message).__name__} to all subscribers: 
{content}") - - if message_id is None: - message_id = str(uuid.uuid4()) - - # event_logger.info( - # MessageEvent( - # payload=message, - # sender=sender, - # receiver=None, - # kind=MessageKind.PUBLISH, - # delivery_stage=DeliveryStage.SEND, - # ) - # ) - - self._message_queue.append( - PublishMessageEnvelope( - message=message, - cancellation_token=cancellation_token, - sender=sender, - topic_id=topic_id, - metadata=get_telemetry_envelope_metadata(), - message_id=message_id, - ) - ) - - async def save_state(self) -> Mapping[str, Any]: - state: Dict[str, Dict[str, Any]] = {} - for agent_id in self._instantiated_agents: - state[str(agent_id)] = dict(await (await self._get_agent(agent_id)).save_state()) - return state - - async def load_state(self, state: Mapping[str, Any]) -> None: - for agent_id_str in state: - agent_id = AgentId.from_str(agent_id_str) - if agent_id.type in self._known_agent_names: - await (await self._get_agent(agent_id)).load_state(state[str(agent_id)]) - - async def _process_send(self, message_envelope: SendMessageEnvelope) -> None: - with self._tracer_helper.trace_block("send", message_envelope.recipient, parent=message_envelope.metadata): - recipient = message_envelope.recipient - # todo: check if recipient is in the known namespaces - # assert recipient in self._agents - - try: - # TODO use id - sender_name = message_envelope.sender.type if message_envelope.sender is not None else "Unknown" - logger.info( - f"Calling message handler for {recipient} with message type {type(message_envelope.message).__name__} sent by {sender_name}" - ) - # event_logger.info( - # MessageEvent( - # payload=message_envelope.message, - # sender=message_envelope.sender, - # receiver=recipient, - # kind=MessageKind.DIRECT, - # delivery_stage=DeliveryStage.DELIVER, - # ) - # ) - recipient_agent = await self._get_agent(recipient) - message_context = MessageContext( - sender=message_envelope.sender, - topic_id=None, - is_rpc=True, - cancellation_token=message_envelope.cancellation_token, - # Will be fixed when send API removed - message_id="NOT_DEFINED_TODO_FIX", - ) - with MessageHandlerContext.populate_context(recipient_agent.id): - response = await recipient_agent.on_message( - message_envelope.message, - ctx=message_context, - ) - except CancelledError as e: - if not message_envelope.future.cancelled(): - message_envelope.future.set_exception(e) - self._outstanding_tasks.decrement() - return - except BaseException as e: - message_envelope.future.set_exception(e) - self._outstanding_tasks.decrement() - return - - self._message_queue.append( - ResponseMessageEnvelope( - message=response, - future=message_envelope.future, - sender=message_envelope.recipient, - recipient=message_envelope.sender, - metadata=get_telemetry_envelope_metadata(), - ) - ) - self._outstanding_tasks.decrement() - - async def _process_publish(self, message_envelope: PublishMessageEnvelope) -> None: - with self._tracer_helper.trace_block("publish", message_envelope.topic_id, parent=message_envelope.metadata): - try: - responses: List[Awaitable[Any]] = [] - recipients = await self._subscription_manager.get_subscribed_recipients(message_envelope.topic_id) - for agent_id in recipients: - # Avoid sending the message back to the sender - if message_envelope.sender is not None and agent_id == message_envelope.sender: - continue - - sender_agent = ( - await self._get_agent(message_envelope.sender) if message_envelope.sender is not None else None - ) - sender_name = str(sender_agent.id) if sender_agent is not None else "Unknown" 
- logger.info( - f"Calling message handler for {agent_id.type} with message type {type(message_envelope.message).__name__} published by {sender_name}" - ) - # event_logger.info( - # MessageEvent( - # payload=message_envelope.message, - # sender=message_envelope.sender, - # receiver=agent, - # kind=MessageKind.PUBLISH, - # delivery_stage=DeliveryStage.DELIVER, - # ) - # ) - message_context = MessageContext( - sender=message_envelope.sender, - topic_id=message_envelope.topic_id, - is_rpc=False, - cancellation_token=message_envelope.cancellation_token, - message_id=message_envelope.message_id, - ) - agent = await self._get_agent(agent_id) - - async def _on_message(agent: Agent, message_context: MessageContext) -> Any: - with self._tracer_helper.trace_block("process", agent.id, parent=None): - with MessageHandlerContext.populate_context(agent.id): - return await agent.on_message( - message_envelope.message, - ctx=message_context, - ) - - future = _on_message(agent, message_context) - responses.append(future) - - await asyncio.gather(*responses) - except BaseException as e: - # Ignore cancelled errors from logs - if isinstance(e, CancelledError): - return - logger.error("Error processing publish message", exc_info=True) - finally: - self._outstanding_tasks.decrement() - # TODO if responses are given for a publish - - async def _process_response(self, message_envelope: ResponseMessageEnvelope) -> None: - with self._tracer_helper.trace_block("ack", message_envelope.recipient, parent=message_envelope.metadata): - content = ( - message_envelope.message.__dict__ - if hasattr(message_envelope.message, "__dict__") - else message_envelope.message - ) - logger.info( - f"Resolving response with message type {type(message_envelope.message).__name__} for recipient {message_envelope.recipient} from {message_envelope.sender.type}: {content}" - ) - # event_logger.info( - # MessageEvent( - # payload=message_envelope.message, - # sender=message_envelope.sender, - # receiver=message_envelope.recipient, - # kind=MessageKind.RESPOND, - # delivery_stage=DeliveryStage.DELIVER, - # ) - # ) - self._outstanding_tasks.decrement() - if not message_envelope.future.cancelled(): - message_envelope.future.set_result(message_envelope.message) - - async def process_next(self) -> None: - """Process the next message in the queue.""" - - if len(self._message_queue) == 0: - # Yield control to the event loop to allow other tasks to run - await asyncio.sleep(0) - return - message_envelope = self._message_queue.pop(0) - - match message_envelope: - case SendMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future): - if self._intervention_handlers is not None: - for handler in self._intervention_handlers: - with self._tracer_helper.trace_block( - "intercept", handler.__class__.__name__, parent=message_envelope.metadata - ): - try: - temp_message = await handler.on_send(message, sender=sender, recipient=recipient) - _warn_if_none(temp_message, "on_send") - except BaseException as e: - future.set_exception(e) - return - if temp_message is DropMessage or isinstance(temp_message, DropMessage): - future.set_exception(MessageDroppedException()) - return - - message_envelope.message = temp_message - self._outstanding_tasks.increment() - task = asyncio.create_task(self._process_send(message_envelope)) - self._background_tasks.add(task) - task.add_done_callback(self._background_tasks.discard) - case PublishMessageEnvelope( - message=message, - sender=sender, - ): - if self._intervention_handlers is not None: - for 
handler in self._intervention_handlers: - with self._tracer_helper.trace_block( - "intercept", handler.__class__.__name__, parent=message_envelope.metadata - ): - try: - temp_message = await handler.on_publish(message, sender=sender) - _warn_if_none(temp_message, "on_publish") - except BaseException as e: - # TODO: we should raise the intervention exception to the publisher. - logger.error(f"Exception raised in in intervention handler: {e}", exc_info=True) - return - if temp_message is DropMessage or isinstance(temp_message, DropMessage): - # TODO log message dropped - return - - message_envelope.message = temp_message - self._outstanding_tasks.increment() - task = asyncio.create_task(self._process_publish(message_envelope)) - self._background_tasks.add(task) - task.add_done_callback(self._background_tasks.discard) - case ResponseMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future): - if self._intervention_handlers is not None: - for handler in self._intervention_handlers: - try: - temp_message = await handler.on_response(message, sender=sender, recipient=recipient) - _warn_if_none(temp_message, "on_response") - except BaseException as e: - # TODO: should we raise the exception to sender of the response instead? - future.set_exception(e) - return - if temp_message is DropMessage or isinstance(temp_message, DropMessage): - future.set_exception(MessageDroppedException()) - return - message_envelope.message = temp_message - self._outstanding_tasks.increment() - task = asyncio.create_task(self._process_response(message_envelope)) - self._background_tasks.add(task) - task.add_done_callback(self._background_tasks.discard) - - # Yield control to the message loop to allow other tasks to run - await asyncio.sleep(0) - - @property - def idle(self) -> bool: - return len(self._message_queue) == 0 and self._outstanding_tasks.get() == 0 - - def start(self) -> None: - """Start the runtime message processing loop.""" - if self._run_context is not None: - raise RuntimeError("Runtime is already started") - self._run_context = RunContext(self) - - async def stop(self) -> None: - """Stop the runtime message processing loop.""" - if self._run_context is None: - raise RuntimeError("Runtime is not started") - await self._run_context.stop() - self._run_context = None - - async def stop_when_idle(self) -> None: - """Stop the runtime message processing loop when there is - no outstanding message being processed or queued.""" - if self._run_context is None: - raise RuntimeError("Runtime is not started") - await self._run_context.stop_when_idle() - self._run_context = None - - async def stop_when(self, condition: Callable[[], bool]) -> None: - """Stop the runtime message processing loop when the condition is met.""" - if self._run_context is None: - raise RuntimeError("Runtime is not started") - await self._run_context.stop_when(condition) - self._run_context = None - - async def agent_metadata(self, agent: AgentId) -> AgentMetadata: - return (await self._get_agent(agent)).metadata - - async def agent_save_state(self, agent: AgentId) -> Mapping[str, Any]: - return await (await self._get_agent(agent)).save_state() - - async def agent_load_state(self, agent: AgentId, state: Mapping[str, Any]) -> None: - await (await self._get_agent(agent)).load_state(state) - - @deprecated( - "Use your agent's `register` method directly instead of this method. See documentation for latest usage." 
- ) - async def register( - self, - type: str, - agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]], - subscriptions: Callable[[], list[Subscription] | Awaitable[list[Subscription]]] - | list[Subscription] - | None = None, - ) -> AgentType: - if type in self._agent_factories: - raise ValueError(f"Agent with type {type} already exists.") - - if subscriptions is not None: - if callable(subscriptions): - with SubscriptionInstantiationContext.populate_context(AgentType(type)): - subscriptions_list_result = subscriptions() - if inspect.isawaitable(subscriptions_list_result): - subscriptions_list = await subscriptions_list_result - else: - subscriptions_list = subscriptions_list_result - else: - subscriptions_list = subscriptions - - for subscription in subscriptions_list: - await self.add_subscription(subscription) - - self._agent_factories[type] = agent_factory - return AgentType(type) - - async def register_factory( - self, - *, - type: AgentType, - agent_factory: Callable[[], T | Awaitable[T]], - expected_class: type[T], - ) -> AgentType: - if type.type in self._agent_factories: - raise ValueError(f"Agent with type {type} already exists.") - - async def factory_wrapper() -> T: - maybe_agent_instance = agent_factory() - if inspect.isawaitable(maybe_agent_instance): - agent_instance = await maybe_agent_instance - else: - agent_instance = maybe_agent_instance - - if type_func_alias(agent_instance) != expected_class: - raise ValueError("Factory registered using the wrong type.") - - return agent_instance - - self._agent_factories[type.type] = factory_wrapper - - return type - - async def _invoke_agent_factory( - self, - agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]], - agent_id: AgentId, - ) -> T: - with AgentInstantiationContext.populate_context((self, agent_id)): - if len(inspect.signature(agent_factory).parameters) == 0: - factory_one = cast(Callable[[], T], agent_factory) - agent = factory_one() - elif len(inspect.signature(agent_factory).parameters) == 2: - warnings.warn( - "Agent factories that take two arguments are deprecated. Use AgentInstantiationContext instead. Two arg factories will be removed in a future version.", - stacklevel=2, - ) - factory_two = cast(Callable[[AgentRuntime, AgentId], T], agent_factory) - agent = factory_two(self, agent_id) - else: - raise ValueError("Agent factory must take 0 or 2 arguments.") - - if inspect.isawaitable(agent): - return cast(T, await agent) - - return agent - - async def _get_agent(self, agent_id: AgentId) -> Agent: - if agent_id in self._instantiated_agents: - return self._instantiated_agents[agent_id] - - if agent_id.type not in self._agent_factories: - raise LookupError(f"Agent with name {agent_id.type} not found.") - - agent_factory = self._agent_factories[agent_id.type] - agent = await self._invoke_agent_factory(agent_factory, agent_id) - self._instantiated_agents[agent_id] = agent - return agent - - # TODO: uncomment out the following type ignore when this is fixed in mypy: https://github.com/python/mypy/issues/3737 - async def try_get_underlying_agent_instance(self, id: AgentId, type: Type[T] = Agent) -> T: # type: ignore[assignment] - if id.type not in self._agent_factories: - raise LookupError(f"Agent with name {id.type} not found.") - - # TODO: check if remote - agent_instance = await self._get_agent(id) - - if not isinstance(agent_instance, type): - raise TypeError( - f"Agent with name {id.type} is not of type {type.__name__}. 
It is of type {type_func_alias(agent_instance).__name__}" - ) - - return agent_instance - - async def add_subscription(self, subscription: Subscription) -> None: - await self._subscription_manager.add_subscription(subscription) - - async def remove_subscription(self, id: str) -> None: - await self._subscription_manager.remove_subscription(id) - - async def get( - self, id_or_type: AgentId | AgentType | str, /, key: str = "default", *, lazy: bool = True - ) -> AgentId: - return await get_impl( - id_or_type=id_or_type, - key=key, - lazy=lazy, - instance_getter=self._get_agent, - ) - - def add_message_serializer(self, serializer: MessageSerializer[Any] | Sequence[MessageSerializer[Any]]) -> None: - self._serialization_registry.add_serializer(serializer) +class SingleThreadedAgentRuntime(SingleThreadedAgentRuntimeAlias): + pass diff --git a/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py b/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py index f5020fea8..f3002e61a 100644 --- a/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py @@ -1,15 +1,9 @@ ROOT_LOGGER_NAME = "autogen_core" -"""str: Logger name used for structured event logging""" +"""Deprecated alias. Use autogen_core.ROOT_LOGGER_NAME""" EVENT_LOGGER_NAME = "autogen_core.events" -"""str: Logger name used for structured event logging""" +"""Deprecated alias. Use autogen_core.EVENT_LOGGER_NAME""" TRACE_LOGGER_NAME = "autogen_core.trace" -"""str: Logger name used for developer intended trace logging. The content and format of this log should not be depended upon.""" - -__all__ = [ - "ROOT_LOGGER_NAME", - "EVENT_LOGGER_NAME", - "TRACE_LOGGER_NAME", -] +"""Deprecated alias. Use autogen_core.TRACE_LOGGER_NAME""" diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/__init__.py b/python/packages/autogen-core/src/autogen_core/application/protos/__init__.py deleted file mode 100644 index b3ea671c3..000000000 --- a/python/packages/autogen-core/src/autogen_core/application/protos/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -The :mod:`autogen_core.worker.protos` module provides Google Protobuf classes for agent-worker communication -""" - -import os -import sys - -sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py deleted file mode 100644 index 319ee2c63..000000000 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: agent_worker.proto -# Protobuf Python Version: 4.25.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -import cloudevent_pb2 as cloudevent__pb2 -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x61gent_worker.proto\x12\x06\x61gents\x1a\x10\x63loudevent.proto\x1a\x19google/protobuf/any.proto\"\'\n\x07TopicId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\"$\n\x07\x41gentId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"E\n\x07Payload\x12\x11\n\tdata_type\x18\x01 \x01(\t\x12\x19\n\x11\x64\x61ta_content_type\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\x89\x02\n\nRpcRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12$\n\x06source\x18\x02 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12\x1f\n\x06target\x18\x03 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0e\n\x06method\x18\x04 \x01(\t\x12 \n\x07payload\x18\x05 \x01(\x0b\x32\x0f.agents.Payload\x12\x32\n\x08metadata\x18\x06 \x03(\x0b\x32 .agents.RpcRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source\"\xb8\x01\n\x0bRpcResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12 \n\x07payload\x18\x02 \x01(\x0b\x32\x0f.agents.Payload\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x33\n\x08metadata\x18\x04 \x03(\x0b\x32!.agents.RpcResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe4\x01\n\x05\x45vent\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x14\n\x0ctopic_source\x18\x02 \x01(\t\x12$\n\x06source\x18\x03 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12 \n\x07payload\x18\x04 \x01(\x0b\x32\x0f.agents.Payload\x12-\n\x08metadata\x18\x05 \x03(\x0b\x32\x1b.agents.Event.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source\"<\n\x18RegisterAgentTypeRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"^\n\x19RegisterAgentTypeResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\":\n\x10TypeSubscription\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"G\n\x16TypePrefixSubscription\x12\x19\n\x11topic_type_prefix\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"\x96\x01\n\x0cSubscription\x12\x34\n\x10typeSubscription\x18\x01 \x01(\x0b\x32\x18.agents.TypeSubscriptionH\x00\x12@\n\x16typePrefixSubscription\x18\x02 \x01(\x0b\x32\x1e.agents.TypePrefixSubscriptionH\x00\x42\x0e\n\x0csubscription\"X\n\x16\x41\x64\x64SubscriptionRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12*\n\x0csubscription\x18\x02 \x01(\x0b\x32\x14.agents.Subscription\"\\\n\x17\x41\x64\x64SubscriptionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\x9d\x01\n\nAgentState\x12!\n\x08\x61gent_id\x18\x01 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0c\n\x04\x65Tag\x18\x02 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x03 
\x01(\x0cH\x00\x12\x13\n\ttext_data\x18\x04 \x01(\tH\x00\x12*\n\nproto_data\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x06\n\x04\x64\x61ta\"j\n\x10GetStateResponse\x12\'\n\x0b\x61gent_state\x18\x01 \x01(\x0b\x32\x12.agents.AgentState\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"B\n\x11SaveStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\x05\x65rror\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\xa6\x03\n\x07Message\x12%\n\x07request\x18\x01 \x01(\x0b\x32\x12.agents.RpcRequestH\x00\x12\'\n\x08response\x18\x02 \x01(\x0b\x32\x13.agents.RpcResponseH\x00\x12,\n\ncloudEvent\x18\x03 \x01(\x0b\x32\x16.cloudevent.CloudEventH\x00\x12\x44\n\x18registerAgentTypeRequest\x18\x04 \x01(\x0b\x32 .agents.RegisterAgentTypeRequestH\x00\x12\x46\n\x19registerAgentTypeResponse\x18\x05 \x01(\x0b\x32!.agents.RegisterAgentTypeResponseH\x00\x12@\n\x16\x61\x64\x64SubscriptionRequest\x18\x06 \x01(\x0b\x32\x1e.agents.AddSubscriptionRequestH\x00\x12\x42\n\x17\x61\x64\x64SubscriptionResponse\x18\x07 \x01(\x0b\x32\x1f.agents.AddSubscriptionResponseH\x00\x42\t\n\x07message2\xb2\x01\n\x08\x41gentRpc\x12\x33\n\x0bOpenChannel\x12\x0f.agents.Message\x1a\x0f.agents.Message(\x01\x30\x01\x12\x35\n\x08GetState\x12\x0f.agents.AgentId\x1a\x18.agents.GetStateResponse\x12:\n\tSaveState\x12\x12.agents.AgentState\x1a\x19.agents.SaveStateResponseB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agent_worker_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None - _globals['DESCRIPTOR']._serialized_options = b'\252\002\036Microsoft.AutoGen.Abstractions' - _globals['_RPCREQUEST_METADATAENTRY']._options = None - _globals['_RPCREQUEST_METADATAENTRY']._serialized_options = b'8\001' - _globals['_RPCRESPONSE_METADATAENTRY']._options = None - _globals['_RPCRESPONSE_METADATAENTRY']._serialized_options = b'8\001' - _globals['_EVENT_METADATAENTRY']._options = None - _globals['_EVENT_METADATAENTRY']._serialized_options = b'8\001' - _globals['_TOPICID']._serialized_start=75 - _globals['_TOPICID']._serialized_end=114 - _globals['_AGENTID']._serialized_start=116 - _globals['_AGENTID']._serialized_end=152 - _globals['_PAYLOAD']._serialized_start=154 - _globals['_PAYLOAD']._serialized_end=223 - _globals['_RPCREQUEST']._serialized_start=226 - _globals['_RPCREQUEST']._serialized_end=491 - _globals['_RPCREQUEST_METADATAENTRY']._serialized_start=433 - _globals['_RPCREQUEST_METADATAENTRY']._serialized_end=480 - _globals['_RPCRESPONSE']._serialized_start=494 - _globals['_RPCRESPONSE']._serialized_end=678 - _globals['_RPCRESPONSE_METADATAENTRY']._serialized_start=433 - _globals['_RPCRESPONSE_METADATAENTRY']._serialized_end=480 - _globals['_EVENT']._serialized_start=681 - _globals['_EVENT']._serialized_end=909 - _globals['_EVENT_METADATAENTRY']._serialized_start=433 - _globals['_EVENT_METADATAENTRY']._serialized_end=480 - _globals['_REGISTERAGENTTYPEREQUEST']._serialized_start=911 - _globals['_REGISTERAGENTTYPEREQUEST']._serialized_end=971 - _globals['_REGISTERAGENTTYPERESPONSE']._serialized_start=973 - _globals['_REGISTERAGENTTYPERESPONSE']._serialized_end=1067 - _globals['_TYPESUBSCRIPTION']._serialized_start=1069 - _globals['_TYPESUBSCRIPTION']._serialized_end=1127 - _globals['_TYPEPREFIXSUBSCRIPTION']._serialized_start=1129 - 
_globals['_TYPEPREFIXSUBSCRIPTION']._serialized_end=1200 - _globals['_SUBSCRIPTION']._serialized_start=1203 - _globals['_SUBSCRIPTION']._serialized_end=1353 - _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_start=1355 - _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_end=1443 - _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_start=1445 - _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_end=1537 - _globals['_AGENTSTATE']._serialized_start=1540 - _globals['_AGENTSTATE']._serialized_end=1697 - _globals['_GETSTATERESPONSE']._serialized_start=1699 - _globals['_GETSTATERESPONSE']._serialized_end=1805 - _globals['_SAVESTATERESPONSE']._serialized_start=1807 - _globals['_SAVESTATERESPONSE']._serialized_end=1873 - _globals['_MESSAGE']._serialized_start=1876 - _globals['_MESSAGE']._serialized_end=2298 - _globals['_AGENTRPC']._serialized_start=2301 - _globals['_AGENTRPC']._serialized_end=2479 -# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.py b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.py deleted file mode 100644 index fc2702158..000000000 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.py +++ /dev/null @@ -1,132 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import agent_worker_pb2 as agent__worker__pb2 - - -class AgentRpcStub(object): - """Missing associated documentation comment in .proto file.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.OpenChannel = channel.stream_stream( - '/agents.AgentRpc/OpenChannel', - request_serializer=agent__worker__pb2.Message.SerializeToString, - response_deserializer=agent__worker__pb2.Message.FromString, - ) - self.GetState = channel.unary_unary( - '/agents.AgentRpc/GetState', - request_serializer=agent__worker__pb2.AgentId.SerializeToString, - response_deserializer=agent__worker__pb2.GetStateResponse.FromString, - ) - self.SaveState = channel.unary_unary( - '/agents.AgentRpc/SaveState', - request_serializer=agent__worker__pb2.AgentState.SerializeToString, - response_deserializer=agent__worker__pb2.SaveStateResponse.FromString, - ) - - -class AgentRpcServicer(object): - """Missing associated documentation comment in .proto file.""" - - def OpenChannel(self, request_iterator, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetState(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SaveState(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_AgentRpcServicer_to_server(servicer, server): - rpc_method_handlers = { - 'OpenChannel': grpc.stream_stream_rpc_method_handler( - servicer.OpenChannel, - request_deserializer=agent__worker__pb2.Message.FromString, - response_serializer=agent__worker__pb2.Message.SerializeToString, - 
), - 'GetState': grpc.unary_unary_rpc_method_handler( - servicer.GetState, - request_deserializer=agent__worker__pb2.AgentId.FromString, - response_serializer=agent__worker__pb2.GetStateResponse.SerializeToString, - ), - 'SaveState': grpc.unary_unary_rpc_method_handler( - servicer.SaveState, - request_deserializer=agent__worker__pb2.AgentState.FromString, - response_serializer=agent__worker__pb2.SaveStateResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agents.AgentRpc', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. -class AgentRpc(object): - """Missing associated documentation comment in .proto file.""" - - @staticmethod - def OpenChannel(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/agents.AgentRpc/OpenChannel', - agent__worker__pb2.Message.SerializeToString, - agent__worker__pb2.Message.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def GetState(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/agents.AgentRpc/GetState', - agent__worker__pb2.AgentId.SerializeToString, - agent__worker__pb2.GetStateResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def SaveState(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/agents.AgentRpc/SaveState', - agent__worker__pb2.AgentState.SerializeToString, - agent__worker__pb2.SaveStateResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.py b/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.py deleted file mode 100644 index e59848860..000000000 --- a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: cloudevent.proto -# Protobuf Python Version: 4.25.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63loudevent.proto\x12\ncloudevent\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa4\x05\n\nCloudEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x14\n\x0cspec_version\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12:\n\nattributes\x18\x05 \x03(\x0b\x32&.cloudevent.CloudEvent.AttributesEntry\x12\x36\n\x08metadata\x18\x06 \x03(\x0b\x32$.cloudevent.CloudEvent.MetadataEntry\x12\x17\n\x0f\x64\x61tacontenttype\x18\x07 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x08 \x01(\x0cH\x00\x12\x13\n\ttext_data\x18\t \x01(\tH\x00\x12*\n\nproto_data\x18\n \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x1a\x62\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12>\n\x05value\x18\x02 \x01(\x0b\x32/.cloudevent.CloudEvent.CloudEventAttributeValue:\x02\x38\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\xd3\x01\n\x18\x43loudEventAttributeValue\x12\x14\n\nce_boolean\x18\x01 \x01(\x08H\x00\x12\x14\n\nce_integer\x18\x02 \x01(\x05H\x00\x12\x13\n\tce_string\x18\x03 \x01(\tH\x00\x12\x12\n\x08\x63\x65_bytes\x18\x04 \x01(\x0cH\x00\x12\x10\n\x06\x63\x65_uri\x18\x05 \x01(\tH\x00\x12\x14\n\nce_uri_ref\x18\x06 \x01(\tH\x00\x12\x32\n\x0c\x63\x65_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x06\n\x04\x61ttrB\x06\n\x04\x64\x61taB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'cloudevent_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None - _globals['DESCRIPTOR']._serialized_options = b'\252\002\036Microsoft.AutoGen.Abstractions' - _globals['_CLOUDEVENT_ATTRIBUTESENTRY']._options = None - _globals['_CLOUDEVENT_ATTRIBUTESENTRY']._serialized_options = b'8\001' - _globals['_CLOUDEVENT_METADATAENTRY']._options = None - _globals['_CLOUDEVENT_METADATAENTRY']._serialized_options = b'8\001' - _globals['_CLOUDEVENT']._serialized_start=93 - _globals['_CLOUDEVENT']._serialized_end=769 - _globals['_CLOUDEVENT_ATTRIBUTESENTRY']._serialized_start=400 - _globals['_CLOUDEVENT_ATTRIBUTESENTRY']._serialized_end=498 - _globals['_CLOUDEVENT_METADATAENTRY']._serialized_start=500 - _globals['_CLOUDEVENT_METADATAENTRY']._serialized_end=547 - _globals['_CLOUDEVENT_CLOUDEVENTATTRIBUTEVALUE']._serialized_start=550 - _globals['_CLOUDEVENT_CLOUDEVENTATTRIBUTEVALUE']._serialized_end=761 -# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/src/autogen_core/application/logging/events.py b/python/packages/autogen-core/src/autogen_core/logging.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/logging/events.py rename to python/packages/autogen-core/src/autogen_core/logging.py diff --git 
a/python/packages/autogen-core/tests/test_base_agent.py b/python/packages/autogen-core/tests/test_base_agent.py index c7ccfc2d4..64bcf59d1 100644 --- a/python/packages/autogen-core/tests/test_base_agent.py +++ b/python/packages/autogen-core/tests/test_base_agent.py @@ -1,7 +1,7 @@ import pytest from autogen_core import AgentId, AgentInstantiationContext, AgentRuntime +from autogen_test_utils import NoopAgent from pytest_mock import MockerFixture -from test_utils import NoopAgent @pytest.mark.asyncio diff --git a/python/packages/autogen-core/tests/test_cancellation.py b/python/packages/autogen-core/tests/test_cancellation.py index 930f16d9e..34a5d7f96 100644 --- a/python/packages/autogen-core/tests/test_cancellation.py +++ b/python/packages/autogen-core/tests/test_cancellation.py @@ -8,9 +8,9 @@ from autogen_core import ( CancellationToken, MessageContext, RoutedAgent, + SingleThreadedAgentRuntime, message_handler, ) -from autogen_core.application import SingleThreadedAgentRuntime @dataclass diff --git a/python/packages/autogen-core/tests/test_closure_agent.py b/python/packages/autogen-core/tests/test_closure_agent.py index 38f66cc7b..171f51308 100644 --- a/python/packages/autogen-core/tests/test_closure_agent.py +++ b/python/packages/autogen-core/tests/test_closure_agent.py @@ -2,8 +2,14 @@ import asyncio from dataclasses import dataclass import pytest -from autogen_core import ClosureAgent, ClosureContext, DefaultSubscription, DefaultTopicId, MessageContext -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import ( + ClosureAgent, + ClosureContext, + DefaultSubscription, + DefaultTopicId, + MessageContext, + SingleThreadedAgentRuntime, +) @dataclass diff --git a/python/packages/autogen-core/tests/test_intervention.py b/python/packages/autogen-core/tests/test_intervention.py index 66b45cca0..a046201fe 100644 --- a/python/packages/autogen-core/tests/test_intervention.py +++ b/python/packages/autogen-core/tests/test_intervention.py @@ -1,9 +1,8 @@ import pytest -from autogen_core import AgentId -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import AgentId, SingleThreadedAgentRuntime from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage from autogen_core.exceptions import MessageDroppedException -from test_utils import LoopbackAgent, MessageType +from autogen_test_utils import LoopbackAgent, MessageType @pytest.mark.asyncio diff --git a/python/packages/autogen-core/tests/test_routed_agent.py b/python/packages/autogen-core/tests/test_routed_agent.py index 2408c2b1d..440c839fa 100644 --- a/python/packages/autogen-core/tests/test_routed_agent.py +++ b/python/packages/autogen-core/tests/test_routed_agent.py @@ -3,9 +3,18 @@ from dataclasses import dataclass from typing import Callable, cast import pytest -from autogen_core import AgentId, MessageContext, RoutedAgent, TopicId, TypeSubscription, event, message_handler, rpc -from autogen_core.application import SingleThreadedAgentRuntime -from test_utils import LoopbackAgent +from autogen_core import ( + AgentId, + MessageContext, + RoutedAgent, + SingleThreadedAgentRuntime, + TopicId, + TypeSubscription, + event, + message_handler, + rpc, +) +from autogen_test_utils import LoopbackAgent @dataclass diff --git a/python/packages/autogen-core/tests/test_runtime.py b/python/packages/autogen-core/tests/test_runtime.py index 86441b40a..e5b04bd87 100644 --- a/python/packages/autogen-core/tests/test_runtime.py +++ 
b/python/packages/autogen-core/tests/test_runtime.py @@ -6,14 +6,13 @@ from autogen_core import ( AgentInstantiationContext, AgentType, DefaultTopicId, + SingleThreadedAgentRuntime, TopicId, TypeSubscription, try_get_known_serializers_for_type, type_subscription, ) -from autogen_core.application import SingleThreadedAgentRuntime -from opentelemetry.sdk.trace import TracerProvider -from test_utils import ( +from autogen_test_utils import ( CascadingAgent, CascadingMessageType, LoopbackAgent, @@ -21,7 +20,8 @@ from test_utils import ( MessageType, NoopAgent, ) -from test_utils.telemetry_test_utils import TestExporter, get_test_tracer_provider +from autogen_test_utils.telemetry_test_utils import TestExporter, get_test_tracer_provider +from opentelemetry.sdk.trace import TracerProvider test_exporter = TestExporter() diff --git a/python/packages/autogen-core/tests/test_state.py b/python/packages/autogen-core/tests/test_state.py index 99a7b132f..94bea5959 100644 --- a/python/packages/autogen-core/tests/test_state.py +++ b/python/packages/autogen-core/tests/test_state.py @@ -1,8 +1,7 @@ from typing import Any, Mapping import pytest -from autogen_core import AgentId, BaseAgent, MessageContext -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import AgentId, BaseAgent, MessageContext, SingleThreadedAgentRuntime class StatefulAgent(BaseAgent): diff --git a/python/packages/autogen-core/tests/test_subscription.py b/python/packages/autogen-core/tests/test_subscription.py index f2c3bd3ee..2fd0af1f6 100644 --- a/python/packages/autogen-core/tests/test_subscription.py +++ b/python/packages/autogen-core/tests/test_subscription.py @@ -1,8 +1,14 @@ import pytest -from autogen_core import AgentId, DefaultSubscription, DefaultTopicId, TopicId, TypeSubscription -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import ( + AgentId, + DefaultSubscription, + DefaultTopicId, + SingleThreadedAgentRuntime, + TopicId, + TypeSubscription, +) from autogen_core.exceptions import CantHandleException -from test_utils import LoopbackAgent, MessageType +from autogen_test_utils import LoopbackAgent, MessageType def test_type_subscription_match() -> None: diff --git a/python/packages/autogen-core/tests/test_tool_agent.py b/python/packages/autogen-core/tests/test_tool_agent.py index c7f02b260..6f240eabc 100644 --- a/python/packages/autogen-core/tests/test_tool_agent.py +++ b/python/packages/autogen-core/tests/test_tool_agent.py @@ -3,8 +3,7 @@ import json from typing import Any, AsyncGenerator, List, Mapping, Optional, Sequence, Union import pytest -from autogen_core import AgentId, CancellationToken, FunctionCall -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import AgentId, CancellationToken, FunctionCall, SingleThreadedAgentRuntime from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index bf02885e8..2cf5cb046 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -41,21 +41,28 @@ video-surfer = [ "openai-whisper", ] +grpc = [ + "grpcio~=1.62.0", # TODO: update this once we have a stable version. 
+] + [tool.hatch.build.targets.wheel] packages = ["src/autogen_ext"] [tool.uv] -dev-dependencies = [] +dev-dependencies = [ + "autogen_test_utils" +] [tool.ruff] extend = "../../pyproject.toml" include = ["src/**", "tests/*.py"] -exclude = ["src/autogen_ext/agents/web_surfer/*.js"] +exclude = ["src/autogen_ext/agents/web_surfer/*.js", "src/autogen_ext/runtimes/grpc/protos", "tests/protos"] [tool.pyright] extends = "../../pyproject.toml" include = ["src", "tests"] +exclude = ["src/autogen_ext/runtimes/grpc/protos", "tests/protos"] [tool.pytest.ini_options] minversion = "6.0" @@ -66,6 +73,7 @@ include = "../../shared_tasks.toml" [tool.poe.tasks] test = "pytest -n auto" +mypy = "mypy --config-file ../../pyproject.toml --exclude src/autogen_ext/runtimes/grpc/protos --exclude tests/protos src tests" [tool.mypy] [[tool.mypy.overrides]] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index 851c62358..a21c8e0f4 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -22,12 +22,12 @@ from typing import ( import tiktoken from autogen_core import ( + EVENT_LOGGER_NAME, + TRACE_LOGGER_NAME, CancellationToken, FunctionCall, Image, ) -from autogen_core.application.logging import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME -from autogen_core.application.logging.events import LLMCallEvent from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -42,6 +42,7 @@ from autogen_core.components.models import ( UserMessage, ) from autogen_core.components.tools import Tool, ToolSchema +from autogen_core.logging import LLMCallEvent from openai import AsyncAzureOpenAI, AsyncOpenAI from openai.types.chat import ( ChatCompletion, diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/__init__.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/__init__.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/__init__.py new file mode 100644 index 000000000..dacfa6b0b --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/__init__.py @@ -0,0 +1,16 @@ +from ._worker_runtime import GrpcWorkerAgentRuntime +from ._worker_runtime_host import GrpcWorkerAgentRuntimeHost +from ._worker_runtime_host_servicer import GrpcWorkerAgentRuntimeHostServicer + +try: + import grpc # type: ignore +except ImportError as e: + raise ImportError( + "To use the GRPC runtime the grpc extra must be installed. 
Run `pip install autogen-ext[grpc]`" + ) from e + +__all__ = [ + "GrpcWorkerAgentRuntime", + "GrpcWorkerAgentRuntimeHost", + "GrpcWorkerAgentRuntimeHostServicer", +] diff --git a/python/packages/autogen-core/src/autogen_core/application/_constants.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_constants.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/_constants.py rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_constants.py diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_type_helpers.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_type_helpers.py new file mode 100644 index 000000000..be24207ce --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_type_helpers.py @@ -0,0 +1,4 @@ +from typing import Any, Sequence, Tuple + +# Had to redefine this from grpc.aio._typing as using that one was causing mypy errors +ChannelArgumentType = Sequence[Tuple[str, Any]] diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py similarity index 98% rename from python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py index 9d186d91f..703f88a53 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py @@ -28,13 +28,9 @@ from typing import ( cast, ) -from google.protobuf import any_pb2 -from opentelemetry.trace import TracerProvider -from typing_extensions import Self, deprecated - -from autogen_core.application.protos import cloudevent_pb2 - -from .. import ( +from autogen_core import ( + JSON_DATA_CONTENT_TYPE, + PROTOBUF_DATA_CONTENT_TYPE, Agent, AgentId, AgentInstantiationContext, @@ -44,24 +40,26 @@ from .. import ( CancellationToken, MessageContext, MessageHandlerContext, + MessageSerializer, Subscription, SubscriptionInstantiationContext, TopicId, + TypePrefixSubscription, + TypeSubscription, ) -from .._serialization import ( - JSON_DATA_CONTENT_TYPE, - PROTOBUF_DATA_CONTENT_TYPE, - MessageSerializer, +from autogen_core._runtime_impl_helpers import SubscriptionManager, get_impl +from autogen_core._serialization import ( SerializationRegistry, ) -from .._type_helpers import ChannelArgumentType -from .._type_prefix_subscription import TypePrefixSubscription -from .._type_subscription import TypeSubscription +from autogen_core._telemetry import MessageRuntimeTracingConfig, TraceHelper, get_telemetry_grpc_metadata +from google.protobuf import any_pb2 +from opentelemetry.trace import TracerProvider +from typing_extensions import Self, deprecated + from . 
import _constants from ._constants import GRPC_IMPORT_ERROR_STR -from ._helpers import SubscriptionManager, get_impl -from .protos import agent_worker_pb2, agent_worker_pb2_grpc -from .telemetry import MessageRuntimeTracingConfig, TraceHelper, get_telemetry_grpc_metadata +from ._type_helpers import ChannelArgumentType +from .protos import agent_worker_pb2, agent_worker_pb2_grpc, cloudevent_pb2 try: import grpc.aio @@ -181,7 +179,7 @@ class HostConnection: return await self._recv_queue.get() -class WorkerAgentRuntime(AgentRuntime): +class GrpcWorkerAgentRuntime(AgentRuntime): def __init__( self, host_address: str, diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host.py similarity index 91% rename from python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host.py index ab1239a68..04e941215 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host.py @@ -3,9 +3,9 @@ import logging import signal from typing import Optional, Sequence -from .._type_helpers import ChannelArgumentType from ._constants import GRPC_IMPORT_ERROR_STR -from ._worker_runtime_host_servicer import WorkerAgentRuntimeHostServicer +from ._type_helpers import ChannelArgumentType +from ._worker_runtime_host_servicer import GrpcWorkerAgentRuntimeHostServicer try: import grpc @@ -16,10 +16,10 @@ from .protos import agent_worker_pb2_grpc logger = logging.getLogger("autogen_core") -class WorkerAgentRuntimeHost: +class GrpcWorkerAgentRuntimeHost: def __init__(self, address: str, extra_grpc_config: Optional[ChannelArgumentType] = None) -> None: self._server = grpc.aio.server(options=extra_grpc_config) - self._servicer = WorkerAgentRuntimeHostServicer() + self._servicer = GrpcWorkerAgentRuntimeHostServicer() agent_worker_pb2_grpc.add_AgentRpcServicer_to_server(self._servicer, self._server) self._server.add_insecure_port(address) self._address = address diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host_servicer.py similarity index 98% rename from python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host_servicer.py index 574f0a1b1..0bb8ae0a8 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime_host_servicer.py @@ -4,10 +4,10 @@ from _collections_abc import AsyncIterator, Iterator from asyncio import Future, Task from typing import Any, Dict, Set, cast -from .. 
import Subscription, TopicId, TypeSubscription -from .._type_prefix_subscription import TypePrefixSubscription +from autogen_core import Subscription, TopicId, TypePrefixSubscription, TypeSubscription +from autogen_core._runtime_impl_helpers import SubscriptionManager + from ._constants import GRPC_IMPORT_ERROR_STR -from ._helpers import SubscriptionManager try: import grpc @@ -20,7 +20,7 @@ logger = logging.getLogger("autogen_core") event_logger = logging.getLogger("autogen_core.events") -class WorkerAgentRuntimeHostServicer(agent_worker_pb2_grpc.AgentRpcServicer): +class GrpcWorkerAgentRuntimeHostServicer(agent_worker_pb2_grpc.AgentRpcServicer): """A gRPC servicer that hosts message delivery service for agents.""" def __init__(self) -> None: diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/__init__.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/__init__.py new file mode 100644 index 000000000..b8b57d2f8 --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/__init__.py @@ -0,0 +1,8 @@ +""" +The :mod:`autogen_ext.runtimes.grpc.protos` module provides Google Protobuf classes for agent-worker communication +""" + +import os +import sys + +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.py new file mode 100644 index 000000000..b20849e61 --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: agent_worker.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" + +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +import cloudevent_pb2 as cloudevent__pb2 +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x12\x61gent_worker.proto\x12\x06\x61gents\x1a\x10\x63loudevent.proto\x1a\x19google/protobuf/any.proto"\'\n\x07TopicId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t"$\n\x07\x41gentId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t"E\n\x07Payload\x12\x11\n\tdata_type\x18\x01 \x01(\t\x12\x19\n\x11\x64\x61ta_content_type\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c"\x89\x02\n\nRpcRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12$\n\x06source\x18\x02 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12\x1f\n\x06target\x18\x03 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0e\n\x06method\x18\x04 \x01(\t\x12 \n\x07payload\x18\x05 \x01(\x0b\x32\x0f.agents.Payload\x12\x32\n\x08metadata\x18\x06 \x03(\x0b\x32 .agents.RpcRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source"\xb8\x01\n\x0bRpcResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12 \n\x07payload\x18\x02 \x01(\x0b\x32\x0f.agents.Payload\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x33\n\x08metadata\x18\x04 \x03(\x0b\x32!.agents.RpcResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xe4\x01\n\x05\x45vent\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x14\n\x0ctopic_source\x18\x02 \x01(\t\x12$\n\x06source\x18\x03 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12 \n\x07payload\x18\x04 \x01(\x0b\x32\x0f.agents.Payload\x12-\n\x08metadata\x18\x05 \x03(\x0b\x32\x1b.agents.Event.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source"<\n\x18RegisterAgentTypeRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t"^\n\x19RegisterAgentTypeResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error":\n\x10TypeSubscription\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t"G\n\x16TypePrefixSubscription\x12\x19\n\x11topic_type_prefix\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t"\x96\x01\n\x0cSubscription\x12\x34\n\x10typeSubscription\x18\x01 \x01(\x0b\x32\x18.agents.TypeSubscriptionH\x00\x12@\n\x16typePrefixSubscription\x18\x02 \x01(\x0b\x32\x1e.agents.TypePrefixSubscriptionH\x00\x42\x0e\n\x0csubscription"X\n\x16\x41\x64\x64SubscriptionRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12*\n\x0csubscription\x18\x02 \x01(\x0b\x32\x14.agents.Subscription"\\\n\x17\x41\x64\x64SubscriptionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error"\x9d\x01\n\nAgentState\x12!\n\x08\x61gent_id\x18\x01 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0c\n\x04\x65Tag\x18\x02 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x03 \x01(\x0cH\x00\x12\x13\n\ttext_data\x18\x04 \x01(\tH\x00\x12*\n\nproto_data\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x06\n\x04\x64\x61ta"j\n\x10GetStateResponse\x12\'\n\x0b\x61gent_state\x18\x01 \x01(\x0b\x32\x12.agents.AgentState\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error"B\n\x11SaveStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\x05\x65rror\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error"\xa6\x03\n\x07Message\x12%\n\x07request\x18\x01 \x01(\x0b\x32\x12.agents.RpcRequestH\x00\x12\'\n\x08response\x18\x02 \x01(\x0b\x32\x13.agents.RpcResponseH\x00\x12,\n\ncloudEvent\x18\x03 \x01(\x0b\x32\x16.cloudevent.CloudEventH\x00\x12\x44\n\x18registerAgentTypeRequest\x18\x04 \x01(\x0b\x32 .agents.RegisterAgentTypeRequestH\x00\x12\x46\n\x19registerAgentTypeResponse\x18\x05 \x01(\x0b\x32!.agents.RegisterAgentTypeResponseH\x00\x12@\n\x16\x61\x64\x64SubscriptionRequest\x18\x06 \x01(\x0b\x32\x1e.agents.AddSubscriptionRequestH\x00\x12\x42\n\x17\x61\x64\x64SubscriptionResponse\x18\x07 \x01(\x0b\x32\x1f.agents.AddSubscriptionResponseH\x00\x42\t\n\x07message2\xb2\x01\n\x08\x41gentRpc\x12\x33\n\x0bOpenChannel\x12\x0f.agents.Message\x1a\x0f.agents.Message(\x01\x30\x01\x12\x35\n\x08GetState\x12\x0f.agents.AgentId\x1a\x18.agents.GetStateResponse\x12:\n\tSaveState\x12\x12.agents.AgentState\x1a\x19.agents.SaveStateResponseB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3' +) + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "agent_worker_pb2", _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + _globals["DESCRIPTOR"]._options = None + _globals["DESCRIPTOR"]._serialized_options = b"\252\002\036Microsoft.AutoGen.Abstractions" + 
_globals["_RPCREQUEST_METADATAENTRY"]._options = None + _globals["_RPCREQUEST_METADATAENTRY"]._serialized_options = b"8\001" + _globals["_RPCRESPONSE_METADATAENTRY"]._options = None + _globals["_RPCRESPONSE_METADATAENTRY"]._serialized_options = b"8\001" + _globals["_EVENT_METADATAENTRY"]._options = None + _globals["_EVENT_METADATAENTRY"]._serialized_options = b"8\001" + _globals["_TOPICID"]._serialized_start = 75 + _globals["_TOPICID"]._serialized_end = 114 + _globals["_AGENTID"]._serialized_start = 116 + _globals["_AGENTID"]._serialized_end = 152 + _globals["_PAYLOAD"]._serialized_start = 154 + _globals["_PAYLOAD"]._serialized_end = 223 + _globals["_RPCREQUEST"]._serialized_start = 226 + _globals["_RPCREQUEST"]._serialized_end = 491 + _globals["_RPCREQUEST_METADATAENTRY"]._serialized_start = 433 + _globals["_RPCREQUEST_METADATAENTRY"]._serialized_end = 480 + _globals["_RPCRESPONSE"]._serialized_start = 494 + _globals["_RPCRESPONSE"]._serialized_end = 678 + _globals["_RPCRESPONSE_METADATAENTRY"]._serialized_start = 433 + _globals["_RPCRESPONSE_METADATAENTRY"]._serialized_end = 480 + _globals["_EVENT"]._serialized_start = 681 + _globals["_EVENT"]._serialized_end = 909 + _globals["_EVENT_METADATAENTRY"]._serialized_start = 433 + _globals["_EVENT_METADATAENTRY"]._serialized_end = 480 + _globals["_REGISTERAGENTTYPEREQUEST"]._serialized_start = 911 + _globals["_REGISTERAGENTTYPEREQUEST"]._serialized_end = 971 + _globals["_REGISTERAGENTTYPERESPONSE"]._serialized_start = 973 + _globals["_REGISTERAGENTTYPERESPONSE"]._serialized_end = 1067 + _globals["_TYPESUBSCRIPTION"]._serialized_start = 1069 + _globals["_TYPESUBSCRIPTION"]._serialized_end = 1127 + _globals["_TYPEPREFIXSUBSCRIPTION"]._serialized_start = 1129 + _globals["_TYPEPREFIXSUBSCRIPTION"]._serialized_end = 1200 + _globals["_SUBSCRIPTION"]._serialized_start = 1203 + _globals["_SUBSCRIPTION"]._serialized_end = 1353 + _globals["_ADDSUBSCRIPTIONREQUEST"]._serialized_start = 1355 + _globals["_ADDSUBSCRIPTIONREQUEST"]._serialized_end = 1443 + _globals["_ADDSUBSCRIPTIONRESPONSE"]._serialized_start = 1445 + _globals["_ADDSUBSCRIPTIONRESPONSE"]._serialized_end = 1537 + _globals["_AGENTSTATE"]._serialized_start = 1540 + _globals["_AGENTSTATE"]._serialized_end = 1697 + _globals["_GETSTATERESPONSE"]._serialized_start = 1699 + _globals["_GETSTATERESPONSE"]._serialized_end = 1805 + _globals["_SAVESTATERESPONSE"]._serialized_start = 1807 + _globals["_SAVESTATERESPONSE"]._serialized_end = 1873 + _globals["_MESSAGE"]._serialized_start = 1876 + _globals["_MESSAGE"]._serialized_end = 2298 + _globals["_AGENTRPC"]._serialized_start = 2301 + _globals["_AGENTRPC"]._serialized_end = 2479 +# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.pyi similarity index 70% rename from python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.pyi index 79e384ab9..f9f8a7c47 100644 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2.pyi @@ -4,13 +4,14 @@ isort:skip_file """ import builtins -import cloudevent_pb2 import collections.abc +import typing + +import cloudevent_pb2 import google.protobuf.any_pb2 import google.protobuf.descriptor import 
google.protobuf.internal.containers import google.protobuf.message -import typing DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @@ -67,7 +68,12 @@ class Payload(google.protobuf.message.Message): data_content_type: builtins.str = ..., data: builtins.bytes = ..., ) -> None: ... - def ClearField(self, field_name: typing.Literal["data", b"data", "data_content_type", b"data_content_type", "data_type", b"data_type"]) -> None: ... + def ClearField( + self, + field_name: typing.Literal[ + "data", b"data", "data_content_type", b"data_content_type", "data_type", b"data_type" + ], + ) -> None: ... global___Payload = Payload @@ -117,8 +123,31 @@ class RpcRequest(google.protobuf.message.Message): payload: global___Payload | None = ..., metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["_source", b"_source", "payload", b"payload", "source", b"source", "target", b"target"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["_source", b"_source", "metadata", b"metadata", "method", b"method", "payload", b"payload", "request_id", b"request_id", "source", b"source", "target", b"target"]) -> None: ... + def HasField( + self, + field_name: typing.Literal[ + "_source", b"_source", "payload", b"payload", "source", b"source", "target", b"target" + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "_source", + b"_source", + "metadata", + b"metadata", + "method", + b"method", + "payload", + b"payload", + "request_id", + b"request_id", + "source", + b"source", + "target", + b"target", + ], + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_source", b"_source"]) -> typing.Literal["source"] | None: ... global___RpcRequest = RpcRequest @@ -162,7 +191,12 @@ class RpcResponse(google.protobuf.message.Message): metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., ) -> None: ... def HasField(self, field_name: typing.Literal["payload", b"payload"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["error", b"error", "metadata", b"metadata", "payload", b"payload", "request_id", b"request_id"]) -> None: ... + def ClearField( + self, + field_name: typing.Literal[ + "error", b"error", "metadata", b"metadata", "payload", b"payload", "request_id", b"request_id" + ], + ) -> None: ... global___RpcResponse = RpcResponse @@ -208,8 +242,26 @@ class Event(google.protobuf.message.Message): payload: global___Payload | None = ..., metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["_source", b"_source", "payload", b"payload", "source", b"source"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["_source", b"_source", "metadata", b"metadata", "payload", b"payload", "source", b"source", "topic_source", b"topic_source", "topic_type", b"topic_type"]) -> None: ... + def HasField( + self, field_name: typing.Literal["_source", b"_source", "payload", b"payload", "source", b"source"] + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "_source", + b"_source", + "metadata", + b"metadata", + "payload", + b"payload", + "source", + b"source", + "topic_source", + b"topic_source", + "topic_type", + b"topic_type", + ], + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_source", b"_source"]) -> typing.Literal["source"] | None: ... 
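Note that the reflowed `HasField` / `ClearField` / `WhichOneof` overloads above are formatting-only; the proto3 optional-field semantics are unchanged. A minimal sketch of the accessor behavior (illustrative, not part of the patch; assumes the generated `agent_worker_pb2` module is importable):

from agent_worker_pb2 import AgentId, Payload, RpcRequest

# `source` is a proto3 optional message field, backed by the synthetic "_source" oneof.
req = RpcRequest(request_id="r1", method="on_message")
req.target.CopyFrom(AgentId(type="echo", key="default"))
req.payload.CopyFrom(Payload(data_type="str", data_content_type="application/json", data=b'"hi"'))
req.source.CopyFrom(AgentId(type="caller", key="default"))
assert req.HasField("source") and req.WhichOneof("_source") == "source"
req.ClearField("source")  # after clearing, the synthetic oneof reports no arm set
assert req.WhichOneof("_source") is None
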
global___Event = Event @@ -250,7 +302,12 @@ class RegisterAgentTypeResponse(google.protobuf.message.Message): error: builtins.str | None = ..., ) -> None: ... def HasField(self, field_name: typing.Literal["_error", b"_error", "error", b"error"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["_error", b"_error", "error", b"error", "request_id", b"request_id", "success", b"success"]) -> None: ... + def ClearField( + self, + field_name: typing.Literal[ + "_error", b"_error", "error", b"error", "request_id", b"request_id", "success", b"success" + ], + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_error", b"_error"]) -> typing.Literal["error"] | None: ... global___RegisterAgentTypeResponse = RegisterAgentTypeResponse @@ -269,7 +326,9 @@ class TypeSubscription(google.protobuf.message.Message): topic_type: builtins.str = ..., agent_type: builtins.str = ..., ) -> None: ... - def ClearField(self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type", b"topic_type"]) -> None: ... + def ClearField( + self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type", b"topic_type"] + ) -> None: ... global___TypeSubscription = TypeSubscription @@ -287,7 +346,9 @@ class TypePrefixSubscription(google.protobuf.message.Message): topic_type_prefix: builtins.str = ..., agent_type: builtins.str = ..., ) -> None: ... - def ClearField(self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type_prefix", b"topic_type_prefix"]) -> None: ... + def ClearField( + self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type_prefix", b"topic_type_prefix"] + ) -> None: ... global___TypePrefixSubscription = TypePrefixSubscription @@ -307,9 +368,31 @@ class Subscription(google.protobuf.message.Message): typeSubscription: global___TypeSubscription | None = ..., typePrefixSubscription: global___TypePrefixSubscription | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["subscription", b"subscription", "typePrefixSubscription", b"typePrefixSubscription", "typeSubscription", b"typeSubscription"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["subscription", b"subscription", "typePrefixSubscription", b"typePrefixSubscription", "typeSubscription", b"typeSubscription"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["subscription", b"subscription"]) -> typing.Literal["typeSubscription", "typePrefixSubscription"] | None: ... + def HasField( + self, + field_name: typing.Literal[ + "subscription", + b"subscription", + "typePrefixSubscription", + b"typePrefixSubscription", + "typeSubscription", + b"typeSubscription", + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "subscription", + b"subscription", + "typePrefixSubscription", + b"typePrefixSubscription", + "typeSubscription", + b"typeSubscription", + ], + ) -> None: ... + def WhichOneof( + self, oneof_group: typing.Literal["subscription", b"subscription"] + ) -> typing.Literal["typeSubscription", "typePrefixSubscription"] | None: ... global___Subscription = Subscription @@ -329,7 +412,9 @@ class AddSubscriptionRequest(google.protobuf.message.Message): subscription: global___Subscription | None = ..., ) -> None: ... def HasField(self, field_name: typing.Literal["subscription", b"subscription"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["request_id", b"request_id", "subscription", b"subscription"]) -> None: ... 
+ def ClearField( + self, field_name: typing.Literal["request_id", b"request_id", "subscription", b"subscription"] + ) -> None: ... global___AddSubscriptionRequest = AddSubscriptionRequest @@ -351,7 +436,12 @@ class AddSubscriptionResponse(google.protobuf.message.Message): error: builtins.str | None = ..., ) -> None: ... def HasField(self, field_name: typing.Literal["_error", b"_error", "error", b"error"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["_error", b"_error", "error", b"error", "request_id", b"request_id", "success", b"success"]) -> None: ... + def ClearField( + self, + field_name: typing.Literal[ + "_error", b"_error", "error", b"error", "request_id", b"request_id", "success", b"success" + ], + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_error", b"_error"]) -> typing.Literal["error"] | None: ... global___AddSubscriptionResponse = AddSubscriptionResponse @@ -381,9 +471,41 @@ class AgentState(google.protobuf.message.Message): text_data: builtins.str = ..., proto_data: google.protobuf.any_pb2.Any | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["agent_id", b"agent_id", "binary_data", b"binary_data", "data", b"data", "proto_data", b"proto_data", "text_data", b"text_data"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["agent_id", b"agent_id", "binary_data", b"binary_data", "data", b"data", "eTag", b"eTag", "proto_data", b"proto_data", "text_data", b"text_data"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["data", b"data"]) -> typing.Literal["binary_data", "text_data", "proto_data"] | None: ... + def HasField( + self, + field_name: typing.Literal[ + "agent_id", + b"agent_id", + "binary_data", + b"binary_data", + "data", + b"data", + "proto_data", + b"proto_data", + "text_data", + b"text_data", + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "agent_id", + b"agent_id", + "binary_data", + b"binary_data", + "data", + b"data", + "eTag", + b"eTag", + "proto_data", + b"proto_data", + "text_data", + b"text_data", + ], + ) -> None: ... + def WhichOneof( + self, oneof_group: typing.Literal["data", b"data"] + ) -> typing.Literal["binary_data", "text_data", "proto_data"] | None: ... global___AgentState = AgentState @@ -405,8 +527,15 @@ class GetStateResponse(google.protobuf.message.Message): success: builtins.bool = ..., error: builtins.str | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["_error", b"_error", "agent_state", b"agent_state", "error", b"error"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["_error", b"_error", "agent_state", b"agent_state", "error", b"error", "success", b"success"]) -> None: ... + def HasField( + self, field_name: typing.Literal["_error", b"_error", "agent_state", b"agent_state", "error", b"error"] + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "_error", b"_error", "agent_state", b"agent_state", "error", b"error", "success", b"success" + ], + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_error", b"_error"]) -> typing.Literal["error"] | None: ... global___GetStateResponse = GetStateResponse @@ -426,7 +555,9 @@ class SaveStateResponse(google.protobuf.message.Message): error: builtins.str | None = ..., ) -> None: ... def HasField(self, field_name: typing.Literal["_error", b"_error", "error", b"error"]) -> builtins.bool: ... 
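Likewise for `AgentState` above: exactly one arm of the `data` oneof can be set at a time. A short sketch (illustrative only; the payload value is a made-up example):

from agent_worker_pb2 import AgentId, AgentState

state = AgentState(agent_id=AgentId(type="echo", key="default"), eTag="v1")
state.text_data = '{"count": 3}'  # selects the text_data arm; any previously set arm is cleared
assert state.WhichOneof("data") == "text_data"
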
- def ClearField(self, field_name: typing.Literal["_error", b"_error", "error", b"error", "success", b"success"]) -> None: ... + def ClearField( + self, field_name: typing.Literal["_error", b"_error", "error", b"error", "success", b"success"] + ) -> None: ... def WhichOneof(self, oneof_group: typing.Literal["_error", b"_error"]) -> typing.Literal["error"] | None: ... global___SaveStateResponse = SaveStateResponse @@ -467,8 +598,61 @@ class Message(google.protobuf.message.Message): addSubscriptionRequest: global___AddSubscriptionRequest | None = ..., addSubscriptionResponse: global___AddSubscriptionResponse | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["addSubscriptionRequest", b"addSubscriptionRequest", "addSubscriptionResponse", b"addSubscriptionResponse", "cloudEvent", b"cloudEvent", "message", b"message", "registerAgentTypeRequest", b"registerAgentTypeRequest", "registerAgentTypeResponse", b"registerAgentTypeResponse", "request", b"request", "response", b"response"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["addSubscriptionRequest", b"addSubscriptionRequest", "addSubscriptionResponse", b"addSubscriptionResponse", "cloudEvent", b"cloudEvent", "message", b"message", "registerAgentTypeRequest", b"registerAgentTypeRequest", "registerAgentTypeResponse", b"registerAgentTypeResponse", "request", b"request", "response", b"response"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["message", b"message"]) -> typing.Literal["request", "response", "cloudEvent", "registerAgentTypeRequest", "registerAgentTypeResponse", "addSubscriptionRequest", "addSubscriptionResponse"] | None: ... + def HasField( + self, + field_name: typing.Literal[ + "addSubscriptionRequest", + b"addSubscriptionRequest", + "addSubscriptionResponse", + b"addSubscriptionResponse", + "cloudEvent", + b"cloudEvent", + "message", + b"message", + "registerAgentTypeRequest", + b"registerAgentTypeRequest", + "registerAgentTypeResponse", + b"registerAgentTypeResponse", + "request", + b"request", + "response", + b"response", + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "addSubscriptionRequest", + b"addSubscriptionRequest", + "addSubscriptionResponse", + b"addSubscriptionResponse", + "cloudEvent", + b"cloudEvent", + "message", + b"message", + "registerAgentTypeRequest", + b"registerAgentTypeRequest", + "registerAgentTypeResponse", + b"registerAgentTypeResponse", + "request", + b"request", + "response", + b"response", + ], + ) -> None: ... + def WhichOneof( + self, oneof_group: typing.Literal["message", b"message"] + ) -> ( + typing.Literal[ + "request", + "response", + "cloudEvent", + "registerAgentTypeRequest", + "registerAgentTypeResponse", + "addSubscriptionRequest", + "addSubscriptionResponse", + ] + | None + ): ... global___Message = Message diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.py new file mode 100644 index 000000000..c956a5d69 --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.py @@ -0,0 +1,167 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" + +import agent_worker_pb2 as agent__worker__pb2 +import grpc + + +class AgentRpcStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.OpenChannel = channel.stream_stream( + "/agents.AgentRpc/OpenChannel", + request_serializer=agent__worker__pb2.Message.SerializeToString, + response_deserializer=agent__worker__pb2.Message.FromString, + ) + self.GetState = channel.unary_unary( + "/agents.AgentRpc/GetState", + request_serializer=agent__worker__pb2.AgentId.SerializeToString, + response_deserializer=agent__worker__pb2.GetStateResponse.FromString, + ) + self.SaveState = channel.unary_unary( + "/agents.AgentRpc/SaveState", + request_serializer=agent__worker__pb2.AgentState.SerializeToString, + response_deserializer=agent__worker__pb2.SaveStateResponse.FromString, + ) + + +class AgentRpcServicer(object): + """Missing associated documentation comment in .proto file.""" + + def OpenChannel(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetState(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SaveState(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_AgentRpcServicer_to_server(servicer, server): + rpc_method_handlers = { + "OpenChannel": grpc.stream_stream_rpc_method_handler( + servicer.OpenChannel, + request_deserializer=agent__worker__pb2.Message.FromString, + response_serializer=agent__worker__pb2.Message.SerializeToString, + ), + "GetState": grpc.unary_unary_rpc_method_handler( + servicer.GetState, + request_deserializer=agent__worker__pb2.AgentId.FromString, + response_serializer=agent__worker__pb2.GetStateResponse.SerializeToString, + ), + "SaveState": grpc.unary_unary_rpc_method_handler( + servicer.SaveState, + request_deserializer=agent__worker__pb2.AgentState.FromString, + response_serializer=agent__worker__pb2.SaveStateResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler("agents.AgentRpc", rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. 
+class AgentRpc(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def OpenChannel( + request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.stream_stream( + request_iterator, + target, + "/agents.AgentRpc/OpenChannel", + agent__worker__pb2.Message.SerializeToString, + agent__worker__pb2.Message.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetState( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/agents.AgentRpc/GetState", + agent__worker__pb2.AgentId.SerializeToString, + agent__worker__pb2.GetStateResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SaveState( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/agents.AgentRpc/SaveState", + agent__worker__pb2.AgentState.SerializeToString, + agent__worker__pb2.SaveStateResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.pyi b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.pyi similarity index 76% rename from python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.pyi rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.pyi index bf6bc1ba2..a60c5ee78 100644 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2_grpc.pyi +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/agent_worker_pb2_grpc.pyi @@ -4,16 +4,16 @@ isort:skip_file """ import abc -import agent_worker_pb2 import collections.abc +import typing + +import agent_worker_pb2 import grpc import grpc.aio -import typing _T = typing.TypeVar("_T") class _MaybeAsyncIterator(collections.abc.AsyncIterator[_T], collections.abc.Iterator[_T], metaclass=abc.ABCMeta): ... - class _ServicerContext(grpc.ServicerContext, grpc.aio.ServicerContext): # type: ignore[misc, type-arg] ... @@ -56,20 +56,26 @@ class AgentRpcServicer(metaclass=abc.ABCMeta): self, request_iterator: _MaybeAsyncIterator[agent_worker_pb2.Message], context: _ServicerContext, - ) -> typing.Union[collections.abc.Iterator[agent_worker_pb2.Message], collections.abc.AsyncIterator[agent_worker_pb2.Message]]: ... - + ) -> typing.Union[ + collections.abc.Iterator[agent_worker_pb2.Message], collections.abc.AsyncIterator[agent_worker_pb2.Message] + ]: ... @abc.abstractmethod def GetState( self, request: agent_worker_pb2.AgentId, context: _ServicerContext, - ) -> typing.Union[agent_worker_pb2.GetStateResponse, collections.abc.Awaitable[agent_worker_pb2.GetStateResponse]]: ... 
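The service surface is unchanged by the move: implementations still override the three RPCs and register via `add_AgentRpcServicer_to_server`. A minimal asyncio servicer sketch (illustrative; the subclass name and port are hypothetical):

import asyncio

import agent_worker_pb2 as pb2
import agent_worker_pb2_grpc as pb2_grpc
import grpc

class EchoAgentRpc(pb2_grpc.AgentRpcServicer):  # hypothetical implementation
    async def OpenChannel(self, request_iterator, context):
        async for message in request_iterator:  # echo each Message back over the stream
            yield message

    async def GetState(self, request, context):
        return pb2.GetStateResponse(success=True)

    async def SaveState(self, request, context):
        return pb2.SaveStateResponse(success=True)

async def serve() -> None:
    server = grpc.aio.server()
    pb2_grpc.add_AgentRpcServicer_to_server(EchoAgentRpc(), server)
    server.add_insecure_port("localhost:50061")  # hypothetical port
    await server.start()
    await server.wait_for_termination()

if __name__ == "__main__":
    asyncio.run(serve())
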
- + ) -> typing.Union[ + agent_worker_pb2.GetStateResponse, collections.abc.Awaitable[agent_worker_pb2.GetStateResponse] + ]: ... @abc.abstractmethod def SaveState( self, request: agent_worker_pb2.AgentState, context: _ServicerContext, - ) -> typing.Union[agent_worker_pb2.SaveStateResponse, collections.abc.Awaitable[agent_worker_pb2.SaveStateResponse]]: ... + ) -> typing.Union[ + agent_worker_pb2.SaveStateResponse, collections.abc.Awaitable[agent_worker_pb2.SaveStateResponse] + ]: ... -def add_AgentRpcServicer_to_server(servicer: AgentRpcServicer, server: typing.Union[grpc.Server, grpc.aio.Server]) -> None: ... +def add_AgentRpcServicer_to_server( + servicer: AgentRpcServicer, server: typing.Union[grpc.Server, grpc.aio.Server] +) -> None: ... diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.py new file mode 100644 index 000000000..1c157bd7d --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: cloudevent.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" + +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x10\x63loudevent.proto\x12\ncloudevent\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa4\x05\n\nCloudEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x14\n\x0cspec_version\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12:\n\nattributes\x18\x05 \x03(\x0b\x32&.cloudevent.CloudEvent.AttributesEntry\x12\x36\n\x08metadata\x18\x06 \x03(\x0b\x32$.cloudevent.CloudEvent.MetadataEntry\x12\x17\n\x0f\x64\x61tacontenttype\x18\x07 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x08 \x01(\x0cH\x00\x12\x13\n\ttext_data\x18\t \x01(\tH\x00\x12*\n\nproto_data\x18\n \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x1a\x62\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12>\n\x05value\x18\x02 \x01(\x0b\x32/.cloudevent.CloudEvent.CloudEventAttributeValue:\x02\x38\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\xd3\x01\n\x18\x43loudEventAttributeValue\x12\x14\n\nce_boolean\x18\x01 \x01(\x08H\x00\x12\x14\n\nce_integer\x18\x02 \x01(\x05H\x00\x12\x13\n\tce_string\x18\x03 \x01(\tH\x00\x12\x12\n\x08\x63\x65_bytes\x18\x04 \x01(\x0cH\x00\x12\x10\n\x06\x63\x65_uri\x18\x05 \x01(\tH\x00\x12\x14\n\nce_uri_ref\x18\x06 \x01(\tH\x00\x12\x32\n\x0c\x63\x65_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x06\n\x04\x61ttrB\x06\n\x04\x64\x61taB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3' +) + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "cloudevent_pb2", _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + _globals["DESCRIPTOR"]._options = None + 
_globals["DESCRIPTOR"]._serialized_options = b"\252\002\036Microsoft.AutoGen.Abstractions" + _globals["_CLOUDEVENT_ATTRIBUTESENTRY"]._options = None + _globals["_CLOUDEVENT_ATTRIBUTESENTRY"]._serialized_options = b"8\001" + _globals["_CLOUDEVENT_METADATAENTRY"]._options = None + _globals["_CLOUDEVENT_METADATAENTRY"]._serialized_options = b"8\001" + _globals["_CLOUDEVENT"]._serialized_start = 93 + _globals["_CLOUDEVENT"]._serialized_end = 769 + _globals["_CLOUDEVENT_ATTRIBUTESENTRY"]._serialized_start = 400 + _globals["_CLOUDEVENT_ATTRIBUTESENTRY"]._serialized_end = 498 + _globals["_CLOUDEVENT_METADATAENTRY"]._serialized_start = 500 + _globals["_CLOUDEVENT_METADATAENTRY"]._serialized_end = 547 + _globals["_CLOUDEVENT_CLOUDEVENTATTRIBUTEVALUE"]._serialized_start = 550 + _globals["_CLOUDEVENT_CLOUDEVENTATTRIBUTEVALUE"]._serialized_end = 761 +# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.pyi b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.pyi similarity index 63% rename from python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.pyi rename to python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.pyi index c51398893..033a3a509 100644 --- a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2.pyi +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2.pyi @@ -5,12 +5,13 @@ isort:skip_file import builtins import collections.abc +import typing + import google.protobuf.any_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import google.protobuf.timestamp_pb2 -import typing DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @@ -87,9 +88,54 @@ class CloudEvent(google.protobuf.message.Message): ce_uri_ref: builtins.str = ..., ce_timestamp: google.protobuf.timestamp_pb2.Timestamp | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["attr", b"attr", "ce_boolean", b"ce_boolean", "ce_bytes", b"ce_bytes", "ce_integer", b"ce_integer", "ce_string", b"ce_string", "ce_timestamp", b"ce_timestamp", "ce_uri", b"ce_uri", "ce_uri_ref", b"ce_uri_ref"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["attr", b"attr", "ce_boolean", b"ce_boolean", "ce_bytes", b"ce_bytes", "ce_integer", b"ce_integer", "ce_string", b"ce_string", "ce_timestamp", b"ce_timestamp", "ce_uri", b"ce_uri", "ce_uri_ref", b"ce_uri_ref"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["attr", b"attr"]) -> typing.Literal["ce_boolean", "ce_integer", "ce_string", "ce_bytes", "ce_uri", "ce_uri_ref", "ce_timestamp"] | None: ... + def HasField( + self, + field_name: typing.Literal[ + "attr", + b"attr", + "ce_boolean", + b"ce_boolean", + "ce_bytes", + b"ce_bytes", + "ce_integer", + b"ce_integer", + "ce_string", + b"ce_string", + "ce_timestamp", + b"ce_timestamp", + "ce_uri", + b"ce_uri", + "ce_uri_ref", + b"ce_uri_ref", + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "attr", + b"attr", + "ce_boolean", + b"ce_boolean", + "ce_bytes", + b"ce_bytes", + "ce_integer", + b"ce_integer", + "ce_string", + b"ce_string", + "ce_timestamp", + b"ce_timestamp", + "ce_uri", + b"ce_uri", + "ce_uri_ref", + b"ce_uri_ref", + ], + ) -> None: ... 
+ def WhichOneof( + self, oneof_group: typing.Literal["attr", b"attr"] + ) -> ( + typing.Literal["ce_boolean", "ce_integer", "ce_string", "ce_bytes", "ce_uri", "ce_uri_ref", "ce_timestamp"] + | None + ): ... ID_FIELD_NUMBER: builtins.int SOURCE_FIELD_NUMBER: builtins.int @@ -115,7 +161,9 @@ class CloudEvent(google.protobuf.message.Message): binary_data: builtins.bytes text_data: builtins.str @property - def attributes(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___CloudEvent.CloudEventAttributeValue]: + def attributes( + self, + ) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___CloudEvent.CloudEventAttributeValue]: """Optional & Extension Attributes""" @property @@ -136,8 +184,41 @@ class CloudEvent(google.protobuf.message.Message): text_data: builtins.str = ..., proto_data: google.protobuf.any_pb2.Any | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["binary_data", b"binary_data", "data", b"data", "proto_data", b"proto_data", "text_data", b"text_data"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["attributes", b"attributes", "binary_data", b"binary_data", "data", b"data", "datacontenttype", b"datacontenttype", "id", b"id", "metadata", b"metadata", "proto_data", b"proto_data", "source", b"source", "spec_version", b"spec_version", "text_data", b"text_data", "type", b"type"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["data", b"data"]) -> typing.Literal["binary_data", "text_data", "proto_data"] | None: ... + def HasField( + self, + field_name: typing.Literal[ + "binary_data", b"binary_data", "data", b"data", "proto_data", b"proto_data", "text_data", b"text_data" + ], + ) -> builtins.bool: ... + def ClearField( + self, + field_name: typing.Literal[ + "attributes", + b"attributes", + "binary_data", + b"binary_data", + "data", + b"data", + "datacontenttype", + b"datacontenttype", + "id", + b"id", + "metadata", + b"metadata", + "proto_data", + b"proto_data", + "source", + b"source", + "spec_version", + b"spec_version", + "text_data", + b"text_data", + "type", + b"type", + ], + ) -> None: ... + def WhichOneof( + self, oneof_group: typing.Literal["data", b"data"] + ) -> typing.Literal["binary_data", "text_data", "proto_data"] | None: ... global___CloudEvent = CloudEvent diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.py new file mode 100644 index 000000000..bf947056a --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" + +import grpc diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.pyi b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.pyi new file mode 100644 index 000000000..cb7968e33 --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/cloudevent_pb2_grpc.pyi @@ -0,0 +1,17 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" + +import abc +import collections.abc +import typing + +import grpc +import grpc.aio + +_T = typing.TypeVar("_T") + +class _MaybeAsyncIterator(collections.abc.AsyncIterator[_T], collections.abc.Iterator[_T], metaclass=abc.ABCMeta): ... 
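The regenerated `CloudEvent` bindings above behave the same way: the `data` oneof and the `attributes` map keep their usual protobuf semantics. A small sketch (illustrative, assuming `cloudevent_pb2` is importable; the attribute key is a made-up example):

from cloudevent_pb2 import CloudEvent

ev = CloudEvent(id="1", source="agents/demo", spec_version="1.0", type="demo.event")
ev.text_data = "hello"                    # selects the text_data arm of the `data` oneof
ev.attributes["priority"].ce_integer = 5  # map values are CloudEventAttributeValue messages
assert ev.WhichOneof("data") == "text_data"
ev.binary_data = b"\x00"                  # switching arms clears text_data automatically
assert ev.WhichOneof("data") == "binary_data"
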
+class _ServicerContext(grpc.ServicerContext, grpc.aio.ServicerContext): # type: ignore[misc, type-arg] + ... diff --git a/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py b/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py index bd5f913bf..62211463a 100644 --- a/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py +++ b/python/packages/autogen-ext/tests/models/test_reply_chat_completion_client.py @@ -3,8 +3,15 @@ from dataclasses import dataclass from typing import List import pytest -from autogen_core import AgentId, DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import ( + AgentId, + DefaultTopicId, + MessageContext, + RoutedAgent, + SingleThreadedAgentRuntime, + default_subscription, + message_handler, +) from autogen_core.components.models import ChatCompletionClient, CreateResult, SystemMessage, UserMessage from autogen_ext.models import ReplayChatCompletionClient diff --git a/python/packages/autogen-ext/tests/protos/serialization_test_pb2.py b/python/packages/autogen-ext/tests/protos/serialization_test_pb2.py new file mode 100644 index 000000000..55fb572bf --- /dev/null +++ b/python/packages/autogen-ext/tests/protos/serialization_test_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: serialization_test.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18serialization_test.proto\x12\x06\x61gents"\x1f\n\x0cProtoMessage\x12\x0f\n\x07message\x18\x01 \x01(\t"L\n\x13NestingProtoMessage\x12\x0f\n\x07message\x18\x01 \x01(\t\x12$\n\x06nested\x18\x02 \x01(\x0b\x32\x14.agents.ProtoMessageb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "serialization_test_pb2", _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals["_PROTOMESSAGE"]._serialized_start=36 + _globals["_PROTOMESSAGE"]._serialized_end=67 + _globals["_NESTINGPROTOMESSAGE"]._serialized_start=69 + _globals["_NESTINGPROTOMESSAGE"]._serialized_end=145 +# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-ext/tests/protos/serialization_test_pb2.pyi b/python/packages/autogen-ext/tests/protos/serialization_test_pb2.pyi new file mode 100644 index 000000000..b8a284663 --- /dev/null +++ b/python/packages/autogen-ext/tests/protos/serialization_test_pb2.pyi @@ -0,0 +1,46 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" + +import builtins +import google.protobuf.descriptor +import google.protobuf.message +import typing + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +@typing.final +class ProtoMessage(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + MESSAGE_FIELD_NUMBER: builtins.int + message: builtins.str + def __init__( + self, + *, + message: builtins.str = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing.Literal["message", b"message"]) -> None: ... + +global___ProtoMessage = ProtoMessage + +@typing.final +class NestingProtoMessage(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + MESSAGE_FIELD_NUMBER: builtins.int + NESTED_FIELD_NUMBER: builtins.int + message: builtins.str + @property + def nested(self) -> global___ProtoMessage: ... + def __init__( + self, + *, + message: builtins.str = ..., + nested: global___ProtoMessage | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["nested", b"nested"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["message", b"message", "nested", b"nested"]) -> None: ... + +global___NestingProtoMessage = NestingProtoMessage diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.py b/python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.py similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.py rename to python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.py diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.pyi b/python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.pyi similarity index 100% rename from python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.pyi rename to python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.pyi diff --git a/python/packages/autogen-core/tests/test_worker_runtime.py b/python/packages/autogen-ext/tests/test_worker_runtime.py similarity index 87% rename from python/packages/autogen-core/tests/test_worker_runtime.py rename to python/packages/autogen-ext/tests/test_worker_runtime.py index 5f2061fa8..6375e284d 100644 --- a/python/packages/autogen-core/tests/test_worker_runtime.py +++ b/python/packages/autogen-ext/tests/test_worker_runtime.py @@ -19,9 +19,8 @@ from autogen_core import ( try_get_known_serializers_for_type, type_subscription, ) -from autogen_core.application import WorkerAgentRuntime, WorkerAgentRuntimeHost -from protos.serialization_test_pb2 import ProtoMessage -from test_utils import ( +from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost +from autogen_test_utils import ( CascadingAgent, CascadingMessageType, ContentMessage, @@ -30,15 +29,16 @@ from test_utils import ( MessageType, NoopAgent, ) +from protos.serialization_test_pb2 import ProtoMessage @pytest.mark.asyncio async def test_agent_types_must_be_unique_single_worker() -> None: host_address = "localhost:50051" - host = WorkerAgentRuntimeHost(address=host_address) + host = GrpcWorkerAgentRuntimeHost(address=host_address) host.start() - worker = WorkerAgentRuntime(host_address=host_address) + worker = GrpcWorkerAgentRuntime(host_address=host_address) worker.start() await worker.register_factory(type=AgentType("name1"), agent_factory=lambda: NoopAgent(), expected_class=NoopAgent) @@ -57,12 +57,12 @@ async def test_agent_types_must_be_unique_single_worker() -> None: @pytest.mark.asyncio async def test_agent_types_must_be_unique_multiple_workers() -> None: host_address = "localhost:50052" - host = WorkerAgentRuntimeHost(address=host_address) + host = GrpcWorkerAgentRuntimeHost(address=host_address) host.start() - worker1 = WorkerAgentRuntime(host_address=host_address) + worker1 = GrpcWorkerAgentRuntime(host_address=host_address) worker1.start() - worker2 = 
diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.py b/python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.py
similarity index 100%
rename from python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.py
rename to python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.py
diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.pyi b/python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.pyi
similarity index 100%
rename from python/packages/autogen-core/src/autogen_core/application/protos/cloudevent_pb2_grpc.pyi
rename to python/packages/autogen-ext/tests/protos/serialization_test_pb2_grpc.pyi
diff --git a/python/packages/autogen-core/tests/test_worker_runtime.py b/python/packages/autogen-ext/tests/test_worker_runtime.py
similarity index 87%
rename from python/packages/autogen-core/tests/test_worker_runtime.py
rename to python/packages/autogen-ext/tests/test_worker_runtime.py
index 5f2061fa8..6375e284d 100644
--- a/python/packages/autogen-core/tests/test_worker_runtime.py
+++ b/python/packages/autogen-ext/tests/test_worker_runtime.py
@@ -19,9 +19,8 @@ from autogen_core import (
     try_get_known_serializers_for_type,
     type_subscription,
 )
-from autogen_core.application import WorkerAgentRuntime, WorkerAgentRuntimeHost
-from protos.serialization_test_pb2 import ProtoMessage
-from test_utils import (
+from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost
+from autogen_test_utils import (
     CascadingAgent,
     CascadingMessageType,
     ContentMessage,
@@ -30,15 +29,16 @@ from test_utils import (
     MessageType,
     NoopAgent,
 )
+from protos.serialization_test_pb2 import ProtoMessage
 
 
 @pytest.mark.asyncio
 async def test_agent_types_must_be_unique_single_worker() -> None:
     host_address = "localhost:50051"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker = WorkerAgentRuntime(host_address=host_address)
+    worker = GrpcWorkerAgentRuntime(host_address=host_address)
     worker.start()
 
     await worker.register_factory(type=AgentType("name1"), agent_factory=lambda: NoopAgent(), expected_class=NoopAgent)
@@ -57,12 +57,12 @@ async def test_agent_types_must_be_unique_single_worker() -> None:
 @pytest.mark.asyncio
 async def test_agent_types_must_be_unique_multiple_workers() -> None:
     host_address = "localhost:50052"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker1 = WorkerAgentRuntime(host_address=host_address)
+    worker1 = GrpcWorkerAgentRuntime(host_address=host_address)
     worker1.start()
-    worker2 = WorkerAgentRuntime(host_address=host_address)
+    worker2 = GrpcWorkerAgentRuntime(host_address=host_address)
     worker2.start()
 
     await worker1.register_factory(type=AgentType("name1"), agent_factory=lambda: NoopAgent(), expected_class=NoopAgent)
@@ -82,10 +82,10 @@ async def test_agent_types_must_be_unique_multiple_workers() -> None:
 @pytest.mark.asyncio
 async def test_register_receives_publish() -> None:
     host_address = "localhost:50053"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker1 = WorkerAgentRuntime(host_address=host_address)
+    worker1 = GrpcWorkerAgentRuntime(host_address=host_address)
     worker1.start()
     worker1.add_message_serializer(try_get_known_serializers_for_type(MessageType))
     await worker1.register_factory(
@@ -93,7 +93,7 @@ async def test_register_receives_publish() -> None:
     )
     await worker1.add_subscription(TypeSubscription("default", "name1"))
 
-    worker2 = WorkerAgentRuntime(host_address=host_address)
+    worker2 = GrpcWorkerAgentRuntime(host_address=host_address)
     worker2.start()
     worker2.add_message_serializer(try_get_known_serializers_for_type(MessageType))
     await worker2.register_factory(
@@ -127,9 +127,9 @@ async def test_register_receives_publish() -> None:
 @pytest.mark.asyncio
 async def test_register_receives_publish_cascade_single_worker() -> None:
     host_address = "localhost:50054"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    runtime = WorkerAgentRuntime(host_address=host_address)
+    runtime = GrpcWorkerAgentRuntime(host_address=host_address)
     runtime.start()
 
     num_agents = 5
@@ -164,7 +164,7 @@ async def test_register_receives_publish_cascade_single_worker() -> None:
 async def test_register_receives_publish_cascade_multiple_workers() -> None:
     logging.basicConfig(level=logging.DEBUG)
     host_address = "localhost:50055"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
 
     # TODO: Increasing num_initial_messages or max_round to 2 causes the test to fail.
@@ -176,16 +176,16 @@ async def test_register_receives_publish_cascade_multiple_workers() -> None:
         total_num_calls_expected += num_initial_messages * ((num_agents - 1) ** i)
 
     # Run multiple workers one for each agent.
-    workers: List[WorkerAgentRuntime] = []
+    workers: List[GrpcWorkerAgentRuntime] = []
     # Register agents
     for i in range(num_agents):
-        runtime = WorkerAgentRuntime(host_address=host_address)
+        runtime = GrpcWorkerAgentRuntime(host_address=host_address)
         runtime.start()
         await CascadingAgent.register(runtime, f"name{i}", lambda: CascadingAgent(max_rounds))
         workers.append(runtime)
 
     # Publish messages
-    publisher = WorkerAgentRuntime(host_address=host_address)
+    publisher = GrpcWorkerAgentRuntime(host_address=host_address)
     publisher.add_message_serializer(try_get_known_serializers_for_type(CascadingMessageType))
     publisher.start()
     for _ in range(num_initial_messages):
@@ -207,11 +207,11 @@ async def test_register_receives_publish_cascade_multiple_workers() -> None:
 @pytest.mark.asyncio
 async def test_default_subscription() -> None:
     host_address = "localhost:50056"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker = WorkerAgentRuntime(host_address=host_address)
+    worker = GrpcWorkerAgentRuntime(host_address=host_address)
     worker.start()
-    publisher = WorkerAgentRuntime(host_address=host_address)
+    publisher = GrpcWorkerAgentRuntime(host_address=host_address)
     publisher.add_message_serializer(try_get_known_serializers_for_type(MessageType))
     publisher.start()
@@ -241,11 +241,11 @@ async def test_default_subscription() -> None:
 @pytest.mark.asyncio
 async def test_default_subscription_other_source() -> None:
     host_address = "localhost:50057"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    runtime = WorkerAgentRuntime(host_address=host_address)
+    runtime = GrpcWorkerAgentRuntime(host_address=host_address)
     runtime.start()
-    publisher = WorkerAgentRuntime(host_address=host_address)
+    publisher = GrpcWorkerAgentRuntime(host_address=host_address)
     publisher.add_message_serializer(try_get_known_serializers_for_type(MessageType))
     publisher.start()
@@ -275,11 +275,11 @@ async def test_default_subscription_other_source() -> None:
 @pytest.mark.asyncio
 async def test_type_subscription() -> None:
     host_address = "localhost:50058"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker = WorkerAgentRuntime(host_address=host_address)
+    worker = GrpcWorkerAgentRuntime(host_address=host_address)
     worker.start()
-    publisher = WorkerAgentRuntime(host_address=host_address)
+    publisher = GrpcWorkerAgentRuntime(host_address=host_address)
     publisher.add_message_serializer(try_get_known_serializers_for_type(MessageType))
     publisher.start()
@@ -312,9 +312,9 @@ async def test_type_subscription() -> None:
 @pytest.mark.asyncio
 async def test_duplicate_subscription() -> None:
     host_address = "localhost:50059"
-    host = WorkerAgentRuntimeHost(address=host_address)
-    worker1 = WorkerAgentRuntime(host_address=host_address)
-    worker1_2 = WorkerAgentRuntime(host_address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
+    worker1 = GrpcWorkerAgentRuntime(host_address=host_address)
+    worker1_2 = GrpcWorkerAgentRuntime(host_address=host_address)
     host.start()
     try:
         worker1.start()
@@ -343,10 +343,10 @@ async def test_duplicate_subscription() -> None:
 @pytest.mark.asyncio
 async def test_disconnected_agent() -> None:
     host_address = "localhost:50060"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    worker1 = WorkerAgentRuntime(host_address=host_address)
-    worker1_2 = WorkerAgentRuntime(host_address=host_address)
+    worker1 = GrpcWorkerAgentRuntime(host_address=host_address)
+    worker1_2 = GrpcWorkerAgentRuntime(host_address=host_address)
     # TODO: Implementing `get_current_subscriptions` and `get_subscribed_recipients` requires access
     # to some private properties. This needs to be updated once they are available publicly
@@ -421,13 +421,13 @@ class ProtoReceivingAgent(RoutedAgent):
 @pytest.mark.asyncio
 async def test_proto_payloads() -> None:
     host_address = "localhost:50057"
-    host = WorkerAgentRuntimeHost(address=host_address)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address)
     host.start()
-    receiver_runtime = WorkerAgentRuntime(
+    receiver_runtime = GrpcWorkerAgentRuntime(
         host_address=host_address, payload_serialization_format=PROTOBUF_DATA_CONTENT_TYPE
     )
     receiver_runtime.start()
-    publisher_runtime = WorkerAgentRuntime(
+    publisher_runtime = GrpcWorkerAgentRuntime(
         host_address=host_address, payload_serialization_format=PROTOBUF_DATA_CONTENT_TYPE
     )
     publisher_runtime.add_message_serializer(try_get_known_serializers_for_type(ProtoMessage))
@@ -473,10 +473,10 @@ async def test_grpc_max_message_size() -> None:
         ("grpc.max_receive_message_length", new_max_size),
     ]
     host_address = "localhost:50061"
-    host = WorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config)
-    worker1 = WorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
-    worker2 = WorkerAgentRuntime(host_address=host_address)
-    worker3 = WorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
+    host = GrpcWorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config)
+    worker1 = GrpcWorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
+    worker2 = GrpcWorkerAgentRuntime(host_address=host_address)
+    worker3 = GrpcWorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
 
     try:
         host.start()
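The hunks above are a mechanical rename: `WorkerAgentRuntime`/`WorkerAgentRuntimeHost` move out of `autogen_core.application` and become `GrpcWorkerAgentRuntime`/`GrpcWorkerAgentRuntimeHost` in `autogen_ext.runtimes.grpc`. A minimal sketch of the host/worker pattern these tests exercise, assuming `autogen-ext` is installed with its new `grpc` extra (the shutdown calls at the end are an assumption for completeness):

```python
import asyncio

from autogen_core import AgentType
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost
from autogen_test_utils import NoopAgent


async def main() -> None:
    # The host serves worker connections; start() is non-blocking.
    host = GrpcWorkerAgentRuntimeHost(address="localhost:50051")
    host.start()

    # A worker connects to the host and registers agent factories by type.
    worker = GrpcWorkerAgentRuntime(host_address="localhost:50051")
    worker.start()
    await worker.register_factory(
        type=AgentType("name1"), agent_factory=lambda: NoopAgent(), expected_class=NoopAgent
    )

    await worker.stop()  # assumed shutdown calls; exact names may differ by version
    await host.stop()


asyncio.run(main())
```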
diff --git a/python/packages/autogen-magentic-one/examples/example.py b/python/packages/autogen-magentic-one/examples/example.py
index 400e8ddcc..2b96db531 100644
--- a/python/packages/autogen-magentic-one/examples/example.py
+++ b/python/packages/autogen-magentic-one/examples/example.py
@@ -5,8 +5,7 @@ import asyncio
 import logging
 import os
 
-from autogen_core import AgentId, AgentProxy
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 from autogen_core.components.code_executor import CodeBlock
 from autogen_ext.code_executors import DockerCommandLineCodeExecutor
diff --git a/python/packages/autogen-magentic-one/examples/example_coder.py b/python/packages/autogen-magentic-one/examples/example_coder.py
index 5e8b36686..b861c5855 100644
--- a/python/packages/autogen-magentic-one/examples/example_coder.py
+++ b/python/packages/autogen-magentic-one/examples/example_coder.py
@@ -7,8 +7,7 @@ round-robin orchestrator agent. The code snippets are executed inside a docker c
 import asyncio
 import logging
 
-from autogen_core import AgentId, AgentProxy
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 from autogen_core.components.code_executor import CodeBlock
 from autogen_ext.code_executors import DockerCommandLineCodeExecutor
diff --git a/python/packages/autogen-magentic-one/examples/example_file_surfer.py b/python/packages/autogen-magentic-one/examples/example_file_surfer.py
index 4f752896b..4f73fa752 100644
--- a/python/packages/autogen-magentic-one/examples/example_file_surfer.py
+++ b/python/packages/autogen-magentic-one/examples/example_file_surfer.py
@@ -5,8 +5,7 @@ to write input or perform actions, orchestrated by an round-robin orchestrator a
 import asyncio
 import logging
 
-from autogen_core import AgentId, AgentProxy
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 from autogen_magentic_one.agents.file_surfer import FileSurfer
 from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
diff --git a/python/packages/autogen-magentic-one/examples/example_userproxy.py b/python/packages/autogen-magentic-one/examples/example_userproxy.py
index 24e907a4f..fcda46b12 100644
--- a/python/packages/autogen-magentic-one/examples/example_userproxy.py
+++ b/python/packages/autogen-magentic-one/examples/example_userproxy.py
@@ -7,8 +7,7 @@ The code snippets are not executed in this example."""
 import asyncio
 import logging
 
-from autogen_core import AgentId, AgentProxy
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 
 # from typing import Any, Dict, List, Tuple, Union
diff --git a/python/packages/autogen-magentic-one/examples/example_websurfer.py b/python/packages/autogen-magentic-one/examples/example_websurfer.py
index 302949fbf..ed5e283bc 100644
--- a/python/packages/autogen-magentic-one/examples/example_websurfer.py
+++ b/python/packages/autogen-magentic-one/examples/example_websurfer.py
@@ -7,8 +7,7 @@ import asyncio
 import logging
 import os
 
-from autogen_core import AgentId, AgentProxy
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
 from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
diff --git a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py
index 40edc8ace..1b2640757 100644
--- a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py
+++ b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py
@@ -6,7 +6,7 @@ from datetime import datetime
 import json
 from dataclasses import asdict
 
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import SingleThreadedAgentRuntime
 from autogen_core.application.logging import EVENT_LOGGER_NAME
 from autogen_core import AgentId, AgentProxy
 from autogen_core import DefaultTopicId
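Every example above applies the same one-line change: `SingleThreadedAgentRuntime` is now imported from the `autogen_core` top level rather than `autogen_core.application`. A minimal sketch of the updated import in use (`stop_when_idle` is shown as one plausible shutdown path, not taken from these files):

```python
import asyncio

from autogen_core import SingleThreadedAgentRuntime  # previously: autogen_core.application


async def main() -> None:
    runtime = SingleThreadedAgentRuntime()
    runtime.start()  # begin processing messages in the background
    # ... register agents and publish messages here ...
    await runtime.stop_when_idle()  # drain outstanding messages, then stop


asyncio.run(main())
```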
diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py
index 36da9fc0f..219d2eed3 100644
--- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py
+++ b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py
@@ -6,11 +6,11 @@ from datetime import datetime
 from typing import Any, Dict, List, Literal
 
 from autogen_core import Image
-from autogen_core.application.logging.events import LLMCallEvent
 from autogen_core.components.models import (
     ChatCompletionClient,
     ModelCapabilities,
 )
+from autogen_core.logging import LLMCallEvent
 from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient
 
 from .messages import (
diff --git a/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py b/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py
index 9298fd0a6..a1bc6a200 100644
--- a/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py
+++ b/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py
@@ -8,8 +8,7 @@ from math import ceil
 from typing import Mapping
 
 import pytest
-from autogen_core import AgentId, AgentProxy, FunctionCall
-from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core import AgentId, AgentProxy, FunctionCall, SingleThreadedAgentRuntime
 from autogen_core.components.models import (
     UserMessage,
 )
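The `utils.py` hunk relocates `LLMCallEvent` from `autogen_core.application.logging.events` to `autogen_core.logging`. A hedged sketch of one way a log filter might pick out these events under the new import path; the filter itself is illustrative and not part of the diff:

```python
import logging

from autogen_core.logging import LLMCallEvent  # was: autogen_core.application.logging.events


class LLMCallFilter(logging.Filter):
    """Keep only LLM call events on a handler (illustration only)."""

    def filter(self, record: logging.LogRecord) -> bool:
        # Structured events are logged as the record's msg object.
        return isinstance(record.msg, LLMCallEvent)
```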
diff --git a/python/packages/autogen-test-utils/LICENSE-CODE b/python/packages/autogen-test-utils/LICENSE-CODE
new file mode 100644
index 000000000..9e841e7a2
--- /dev/null
+++ b/python/packages/autogen-test-utils/LICENSE-CODE
@@ -0,0 +1,21 @@
+    MIT License
+
+    Copyright (c) Microsoft Corporation.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
diff --git a/python/packages/autogen-test-utils/README.md b/python/packages/autogen-test-utils/README.md
new file mode 100644
index 000000000..1aabf6386
--- /dev/null
+++ b/python/packages/autogen-test-utils/README.md
@@ -0,0 +1 @@
+# test-utils
diff --git a/python/packages/autogen-test-utils/pyproject.toml b/python/packages/autogen-test-utils/pyproject.toml
new file mode 100644
index 000000000..7ba46cb9c
--- /dev/null
+++ b/python/packages/autogen-test-utils/pyproject.toml
@@ -0,0 +1,31 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "autogen-test-utils"
+version = "0.0.0"
+license = {file = "LICENSE-CODE"}
+requires-python = ">=3.10"
+dependencies = ["autogen-core",
+
+]
+
+[tool.uv]
+dev-dependencies = []
+
+
+[tool.ruff]
+extend = "../../pyproject.toml"
+include = ["src/**"]
+
+[tool.pyright]
+extends = "../../pyproject.toml"
+include = ["src"]
+
+[tool.poe]
+include = "../../shared_tasks.toml"
+
+[tool.poe.tasks]
+
+test = "true"
diff --git a/python/packages/autogen-core/tests/test_utils/__init__.py b/python/packages/autogen-test-utils/src/autogen_test_utils/__init__.py
similarity index 100%
rename from python/packages/autogen-core/tests/test_utils/__init__.py
rename to python/packages/autogen-test-utils/src/autogen_test_utils/__init__.py
diff --git a/python/packages/autogen-test-utils/src/autogen_test_utils/py.typed b/python/packages/autogen-test-utils/src/autogen_test_utils/py.typed
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/packages/autogen-core/tests/test_utils/telemetry_test_utils.py b/python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py
similarity index 100%
rename from python/packages/autogen-core/tests/test_utils/telemetry_test_utils.py
rename to python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 9f4d37735..06f42ac69 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -26,6 +26,7 @@ dev-dependencies = [
 autogen-core = { workspace = true }
 autogen-ext = { workspace = true }
 autogen-agentchat = { workspace = true }
+autogen-test-utils = { workspace = true }
 
 [tool.ruff]
 line-length = 120
@@ -66,7 +67,6 @@ include = ["src", "tests", "samples"]
 typeCheckingMode = "strict"
 reportUnnecessaryIsInstance = false
 reportMissingTypeStubs = false
-exclude = ["src/autogen_core/application/protos"]
 
 [tool.poe.tasks]
 fmt = "python run_task_in_pkgs_if_exist.py fmt"
@@ -78,8 +78,15 @@ test = "python run_task_in_pkgs_if_exist.py test"
 
 check = ["fmt", "lint", "pyright", "mypy", "test"]
 
-gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/src/autogen_core/application/protos --grpc_python_out=./packages/autogen-core/src/autogen_core/application/protos --mypy_out=./packages/autogen-core/src/autogen_core/application/protos --mypy_grpc_out=./packages/autogen-core/src/autogen_core/application/protos --proto_path ../protos/ agent_worker.proto --proto_path ../protos/ cloudevent.proto"
+gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --grpc_python_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --mypy_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --mypy_grpc_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --proto_path ../protos/ agent_worker.proto --proto_path ../protos/ cloudevent.proto"
 
-gen-test-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos --mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto"
+gen-proto-samples = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/samples/protos --grpc_python_out=./packages/autogen-core/samples/protos --mypy_out=./packages/autogen-core/samples/protos --mypy_grpc_out=./packages/autogen-core/samples/protos --proto_path ../protos/ agent_events.proto"
 
-gen-proto-samples = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/samples/protos --grpc_python_out=./packages/autogen-core/samples/protos --mypy_out=./packages/autogen-core/samples/protos --mypy_grpc_out=./packages/autogen-core/samples/protos --proto_path ../protos/ agent_events.proto"
\ No newline at end of file
+
+teasdst = { cmd = "sphinx-build docs/src docs/build" }
+
+[[tool.poe.tasks.gen-test-proto.sequence]]
+cmd = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos --mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto"
+
+[[tool.poe.tasks.gen-test-proto.sequence]]
+cmd = "python -m grpc_tools.protoc --python_out=./packages/autogen-ext/tests/protos --grpc_python_out=./packages/autogen-ext/tests/protos --mypy_out=./packages/autogen-ext/tests/protos --mypy_grpc_out=./packages/autogen-ext/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto"
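`gen-test-proto` becomes a two-step poe sequence so the same `serialization_test.proto` is generated into both the autogen-core and autogen-ext test trees. A rough Python equivalent of what that sequence runs; the subprocess invocation is an illustration, not the task runner's own mechanism:

```python
import subprocess

# Output paths mirror the two cmd entries of the gen-test-proto sequence above.
for out_dir in (
    "./packages/autogen-core/tests/protos",
    "./packages/autogen-ext/tests/protos",
):
    subprocess.run(
        [
            "python", "-m", "grpc_tools.protoc",
            f"--python_out={out_dir}",
            f"--grpc_python_out={out_dir}",
            f"--mypy_out={out_dir}",
            f"--mypy_grpc_out={out_dir}",
            "--proto_path", "./packages/autogen-core/tests/protos",
            "serialization_test.proto",
        ],
        check=True,  # surface generation failures immediately
    )
```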
"sha256:1eb89d3d29adaf533588f209768a9c02e44e4baf832b08118749c5fad191781d", size = 391901 }, - { url = "https://files.pythonhosted.org/packages/3e/f2/8fdfc845be1f811c31ceb797968523813f8e1263ee3e9120d61253f6848f/aiohttp-3.10.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3fe407bf93533a6fa82dece0e74dbcaaf5d684e5a51862887f9eaebe6372cd79", size = 387418 }, - { url = "https://files.pythonhosted.org/packages/60/d5/33d2061d36bf07e80286e04b7e0a4de37ce04b5ebfed72dba67659a05250/aiohttp-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aed5155f819873d23520919e16703fc8925e509abbb1a1491b0087d1cd969e", size = 1287073 }, - { url = "https://files.pythonhosted.org/packages/00/52/affb55be16a4747740bd630b4c002dac6c5eac42f9bb64202fc3cf3f1930/aiohttp-3.10.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f05e9727ce409358baa615dbeb9b969db94324a79b5a5cea45d39bdb01d82e6", size = 1323612 }, - { url = "https://files.pythonhosted.org/packages/94/f2/cddb69b975387daa2182a8442566971d6410b8a0179bb4540d81c97b1611/aiohttp-3.10.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dffb610a30d643983aeb185ce134f97f290f8935f0abccdd32c77bed9388b42", size = 1368406 }, - { url = "https://files.pythonhosted.org/packages/c1/e4/afba7327da4d932da8c6e29aecaf855f9d52dace53ac15bfc8030a246f1b/aiohttp-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6658732517ddabe22c9036479eabce6036655ba87a0224c612e1ae6af2087e", size = 1282761 }, - { url = "https://files.pythonhosted.org/packages/9f/6b/364856faa0c9031ea76e24ef0f7fef79cddd9fa8e7dba9a1771c6acc56b5/aiohttp-3.10.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:741a46d58677d8c733175d7e5aa618d277cd9d880301a380fd296975a9cdd7bc", size = 1236518 }, - { url = "https://files.pythonhosted.org/packages/46/af/c382846f8356fe64a7b5908bb9b477457aa23b71be7ed551013b7b7d4d87/aiohttp-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e00e3505cd80440f6c98c6d69269dcc2a119f86ad0a9fd70bccc59504bebd68a", size = 1250344 }, - { url = "https://files.pythonhosted.org/packages/87/53/294f87fc086fd0772d0ab82497beb9df67f0f27a8b3dd5742a2656db2bc6/aiohttp-3.10.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ffe595f10566f8276b76dc3a11ae4bb7eba1aac8ddd75811736a15b0d5311414", size = 1248956 }, - { url = "https://files.pythonhosted.org/packages/86/30/7d746717fe11bdfefb88bb6c09c5fc985d85c4632da8bb6018e273899254/aiohttp-3.10.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdfcf6443637c148c4e1a20c48c566aa694fa5e288d34b20fcdc58507882fed3", size = 1293379 }, - { url = "https://files.pythonhosted.org/packages/48/b9/45d670a834458db67a24258e9139ba61fa3bd7d69b98ecf3650c22806f8f/aiohttp-3.10.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d183cf9c797a5291e8301790ed6d053480ed94070637bfaad914dd38b0981f67", size = 1320108 }, - { url = "https://files.pythonhosted.org/packages/72/8c/804bb2e837a175635d2000a0659eafc15b2e9d92d3d81c8f69e141ecd0b0/aiohttp-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77abf6665ae54000b98b3c742bc6ea1d1fb31c394bcabf8b5d2c1ac3ebfe7f3b", size = 1281546 }, - { url = "https://files.pythonhosted.org/packages/89/c0/862e6a9de3d6eeb126cd9d9ea388243b70df9b871ce1a42b193b7a4a77fc/aiohttp-3.10.10-cp313-cp313-win32.whl", hash = "sha256:4470c73c12cd9109db8277287d11f9dd98f77fc54155fc71a7738a83ffcc8ea8", size = 357516 }, - { url = 
"https://files.pythonhosted.org/packages/ae/63/3e1aee3e554263f3f1011cca50d78a4894ae16ce99bf78101ac3a2f0ef74/aiohttp-3.10.10-cp313-cp313-win_amd64.whl", hash = "sha256:486f7aabfa292719a2753c016cc3a8f8172965cabb3ea2e7f7436c7f5a22a151", size = 376785 }, ] [[package]] @@ -362,6 +348,9 @@ dependencies = [ [package.metadata] requires-dist = [{ name = "autogen-core", editable = "packages/autogen-core" }] +[package.metadata.requires-dev] +dev = [] + [[package]] name = "autogen-core" version = "0.4.0.dev8" @@ -369,7 +358,6 @@ source = { editable = "packages/autogen-core" } dependencies = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio" }, { name = "jsonref" }, { name = "openai" }, { name = "opentelemetry-api" }, @@ -389,6 +377,8 @@ grpc = [ dev = [ { name = "aiofiles" }, { name = "autodoc-pydantic" }, + { name = "autogen-ext" }, + { name = "autogen-test-utils" }, { name = "azure-identity" }, { name = "chess" }, { name = "colorama" }, @@ -431,7 +421,6 @@ dev = [ requires-dist = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio", specifier = "~=1.62.0" }, { name = "grpcio", marker = "extra == 'grpc'", specifier = "~=1.62.0" }, { name = "jsonref", specifier = "~=1.1.0" }, { name = "openai", specifier = ">=1.3" }, @@ -447,6 +436,8 @@ requires-dist = [ dev = [ { name = "aiofiles" }, { name = "autodoc-pydantic", specifier = "~=2.2" }, + { name = "autogen-ext", editable = "packages/autogen-ext" }, + { name = "autogen-test-utils", editable = "packages/autogen-test-utils" }, { name = "azure-identity" }, { name = "chess" }, { name = "colorama" }, @@ -504,6 +495,9 @@ docker = [ file-surfer = [ { name = "markitdown" }, ] +grpc = [ + { name = "grpcio" }, +] langchain = [ { name = "langchain-core" }, ] @@ -527,6 +521,11 @@ web-surfer = [ { name = "playwright" }, ] +[package.dev-dependencies] +dev = [ + { name = "autogen-test-utils" }, +] + [package.metadata] requires-dist = [ { name = "aiofiles", marker = "extra == 'openai'" }, @@ -536,6 +535,7 @@ requires-dist = [ { name = "azure-identity", marker = "extra == 'azure'" }, { name = "docker", marker = "extra == 'docker'", specifier = "~=7.0" }, { name = "ffmpeg-python", marker = "extra == 'video-surfer'" }, + { name = "grpcio", marker = "extra == 'grpc'", specifier = "~=1.62.0" }, { name = "langchain-core", marker = "extra == 'langchain'", specifier = "~=0.3.3" }, { name = "markitdown", marker = "extra == 'file-surfer'", specifier = ">=0.0.1a2" }, { name = "markitdown", marker = "extra == 'magentic-one'", specifier = ">=0.0.1a2" }, @@ -548,6 +548,9 @@ requires-dist = [ { name = "playwright", marker = "extra == 'web-surfer'", specifier = ">=1.48.0" }, ] +[package.metadata.requires-dev] +dev = [{ name = "autogen-test-utils", editable = "packages/autogen-test-utils" }] + [[package]] name = "autogen-magentic-one" version = "0.0.1" @@ -617,6 +620,20 @@ dev = [ { name = "types-requests" }, ] +[[package]] +name = "autogen-test-utils" +version = "0.0.0" +source = { editable = "packages/autogen-test-utils" } +dependencies = [ + { name = "autogen-core" }, +] + +[package.metadata] +requires-dist = [{ name = "autogen-core", editable = "packages/autogen-core" }] + +[package.metadata.requires-dev] +dev = [] + [[package]] name = "autogenstudio" version = "0.4.0.dev38" @@ -803,17 +820,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, - { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, - { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, - { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, - { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, - { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, - { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, - { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, - { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = 
"https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, ] [[package]] @@ -876,21 +882,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 }, { url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 }, { url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 }, - { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 }, - { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, - { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, - { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, - { url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, - { url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, - { url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, - { url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, - { url = 
"https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, - { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, - { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, - { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, - { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, - { url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, - { url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, ] @@ -1046,10 +1037,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/79/8bba39190d2ea17840925d287f1c6c3a7c60b58f5090444e9ecf176c540f/debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2", size = 4170911 }, { url = "https://files.pythonhosted.org/packages/3b/19/5b3d312936db8eb281310fa27903459328ed722d845d594ba5feaeb2f0b3/debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca", size = 5195476 }, { url = "https://files.pythonhosted.org/packages/9f/49/ad20b29f8c921fd5124530d3d39b8f2077efd51b71339a2eff02bba693e9/debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39", size = 5235031 }, - { url = "https://files.pythonhosted.org/packages/41/95/29b247518d0a6afdb5249f5d05743c9c5bfaf4bd13a85b81cb5e1dc65837/debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40", size = 2517557 }, - { url = 
"https://files.pythonhosted.org/packages/4d/93/026e2000a0740e2f54b198f8dc317accf3a70b6524b2b15fa8e6eca74414/debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7", size = 4162703 }, - { url = "https://files.pythonhosted.org/packages/c3/92/a48e653b19a171434290ecdc5935b7a292a65488139c5271d6d0eceeb0f1/debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba", size = 5195220 }, - { url = "https://files.pythonhosted.org/packages/4e/b3/dc3c5527edafcd1a6d0f8c4ecc6c5c9bc431f77340cf4193328e98f0ac38/debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa", size = 5235333 }, { url = "https://files.pythonhosted.org/packages/51/b1/a0866521c71a6ae3d3ca320e74835163a4671b1367ba360a55a0a51e5a91/debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae", size = 5210683 }, ] @@ -1269,21 +1256,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 }, { url = "https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 }, { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 }, - { url = "https://files.pythonhosted.org/packages/da/3b/915f0bca8a7ea04483622e84a9bd90033bab54bdf485479556c74fd5eaf5/frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", size = 91538 }, - { url = "https://files.pythonhosted.org/packages/c7/d1/a7c98aad7e44afe5306a2b068434a5830f1470675f0e715abb86eb15f15b/frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", size = 52849 }, - { url = "https://files.pythonhosted.org/packages/3a/c8/76f23bf9ab15d5f760eb48701909645f686f9c64fbb8982674c241fbef14/frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", size = 50583 }, - { url = "https://files.pythonhosted.org/packages/1f/22/462a3dd093d11df623179d7754a3b3269de3b42de2808cddef50ee0f4f48/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", size = 265636 }, - { url = "https://files.pythonhosted.org/packages/80/cf/e075e407fc2ae7328155a1cd7e22f932773c8073c1fc78016607d19cc3e5/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", size = 270214 }, - { url = "https://files.pythonhosted.org/packages/a1/58/0642d061d5de779f39c50cbb00df49682832923f3d2ebfb0fedf02d05f7f/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", size = 273905 }, - { url = "https://files.pythonhosted.org/packages/ab/66/3fe0f5f8f2add5b4ab7aa4e199f767fd3b55da26e3ca4ce2cc36698e50c4/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", size = 250542 }, - { url = "https://files.pythonhosted.org/packages/f6/b8/260791bde9198c87a465224e0e2bb62c4e716f5d198fc3a1dacc4895dbd1/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", size = 267026 }, - { url = "https://files.pythonhosted.org/packages/2e/a4/3d24f88c527f08f8d44ade24eaee83b2627793fa62fa07cbb7ff7a2f7d42/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", size = 257690 }, - { url = "https://files.pythonhosted.org/packages/de/9a/d311d660420b2beeff3459b6626f2ab4fb236d07afbdac034a4371fe696e/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", size = 253893 }, - { url = "https://files.pythonhosted.org/packages/c6/23/e491aadc25b56eabd0f18c53bb19f3cdc6de30b2129ee0bc39cd387cd560/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", size = 267006 }, - { url = "https://files.pythonhosted.org/packages/08/c4/ab918ce636a35fb974d13d666dcbe03969592aeca6c3ab3835acff01f79c/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", size = 276157 }, - { url = "https://files.pythonhosted.org/packages/c0/29/3b7a0bbbbe5a34833ba26f686aabfe982924adbdcafdc294a7a129c31688/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", size = 264642 }, - { url = "https://files.pythonhosted.org/packages/ab/42/0595b3dbffc2e82d7fe658c12d5a5bafcd7516c6bf2d1d1feb5387caa9c1/frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", size = 44914 }, - { url = "https://files.pythonhosted.org/packages/17/c4/b7db1206a3fea44bf3b838ca61deb6f74424a8a5db1dd53ecb21da669be6/frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", size = 51167 }, { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, ] @@ -1338,22 +1310,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, { url = 
"https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, - { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, - { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, - { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, - { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, - { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, - { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, - { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, - { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, - { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, - { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, - { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, - { url = 
"https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, - { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, - { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, - { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, - { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, ] [[package]] @@ -1656,18 +1612,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5f/e8/e47734280e19cd465832e610e1c69367ee72947de738785c4b6fc4031e25/jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2", size = 496023 }, { url = "https://files.pythonhosted.org/packages/52/01/5f65dd1387d39aa3fd4a98a5be1d8470e929a0cb0dd6cbfebaccd9a20ac5/jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0", size = 197425 }, { url = "https://files.pythonhosted.org/packages/43/b2/bd6665030f7d7cd5d9182c62a869c3d5ceadd7bff9f1b305de9192e7dbf8/jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6", size = 198966 }, - { url = "https://files.pythonhosted.org/packages/23/38/7b48e0149778ff4b893567c9fd997ecfcc013e290375aa7823e1f681b3d3/jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31", size = 288674 }, - { url = "https://files.pythonhosted.org/packages/85/3b/96d15b483d82a637279da53a1d299dd5da6e029b9905bcd1a4e1f89b8e4f/jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701", size = 301531 }, - { url = "https://files.pythonhosted.org/packages/cf/54/9681f112cbec4e197259e9db679bd4bc314f4bd24f74b9aa5e93073990b5/jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3", size = 335954 }, - { url = "https://files.pythonhosted.org/packages/4a/4d/f9c0ba82b154c66278e28348086086264ccf50622ae468ec215e4bbc2873/jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd", size = 353996 }, - { url = "https://files.pythonhosted.org/packages/ee/be/7f26b258ef190f6d582e21c76c7dd1097753a2203bad3e1643f45392720a/jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c", size = 369733 }, - { url = "https://files.pythonhosted.org/packages/5f/85/037ed5261fa622312471ef5520b2135c26b29256c83adc16c8cc55dc4108/jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592", size = 389920 }, - { url = "https://files.pythonhosted.org/packages/a8/f3/2e01294712faa476be9e6ceb49e424c3919e03415ded76d103378a06bb80/jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885", size = 324138 }, - { url = "https://files.pythonhosted.org/packages/00/45/50377814f21b6412c7785be27f2dace225af52e0af20be7af899a7e3f264/jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a", size = 367610 }, - { url = "https://files.pythonhosted.org/packages/af/fc/51ba30875125381bfe21a1572c176de1a7dd64a386a7498355fc100decc4/jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71", size = 512945 }, - { url = "https://files.pythonhosted.org/packages/69/60/af26168bd4916f9199ed433161e9f8a4eeda581a4e5982560d0f22dd146c/jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991", size = 494963 }, - { url = "https://files.pythonhosted.org/packages/f3/2f/4f3cc5c9067a6fd1020d3c4365546535a69ed77da7fba2bec24368f3662c/jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4", size = 196869 }, - { url = "https://files.pythonhosted.org/packages/7a/fc/8709ee90837e94790d8b50db51c7b8a70e86e41b2c81e824c20b0ecfeba7/jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda", size = 198919 }, ] [[package]] @@ -2303,23 +2247,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7d/ed/e6276c8d9668028213df01f598f385b05b55a4e1b4662ee12ef05dab35aa/lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d", size = 5012542 }, { url = "https://files.pythonhosted.org/packages/36/88/684d4e800f5aa28df2a991a6a622783fb73cf0e46235cfa690f9776f032e/lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30", size = 3486454 }, { url = "https://files.pythonhosted.org/packages/fc/82/ace5a5676051e60355bd8fb945df7b1ba4f4fb8447f2010fb816bfd57724/lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f", size = 3816857 }, - { url = "https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, - { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, - { url = 
"https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, - { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, - { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, - { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, - { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", size = 4918597 }, - { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, - { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, - { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, - { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, - { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, - { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, - { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, - { url = "https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, - { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, - { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, { url = "https://files.pythonhosted.org/packages/99/f7/b73a431c8500565aa500e99e60b448d305eaf7c0b4c893c7c5a8a69cc595/lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c", size = 3925431 }, { url = "https://files.pythonhosted.org/packages/db/48/4a206623c0d093d0e3b15f415ffb4345b0bdf661a3d0b15a112948c033c7/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a", size = 4216683 }, { url = "https://files.pythonhosted.org/packages/54/47/577820c45dd954523ae8453b632d91e76da94ca6d9ee40d8c98dd86f916b/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005", size = 4326732 }, @@ -2446,26 +2373,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, - { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, - { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, - { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, - { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, - { url = 
"https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, - { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, - { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, - { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, - { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, - { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, - { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, - { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, - { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, - { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, - { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, - { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, - { url = 
"https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, - { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, - { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, - { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, ] [[package]] @@ -2597,17 +2504,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254 }, { url = "https://files.pythonhosted.org/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085 }, { url = "https://files.pythonhosted.org/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347 }, - { url = "https://files.pythonhosted.org/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142 }, - { url = "https://files.pythonhosted.org/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523 }, - { url = "https://files.pythonhosted.org/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556 }, - { url = "https://files.pythonhosted.org/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105 }, - { url = "https://files.pythonhosted.org/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979 }, - { url = "https://files.pythonhosted.org/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816 }, - { url = "https://files.pythonhosted.org/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973 }, - { url = "https://files.pythonhosted.org/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435 }, - { url = "https://files.pythonhosted.org/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082 }, - { url = "https://files.pythonhosted.org/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037 }, - { url = "https://files.pythonhosted.org/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140 }, ] [[package]] @@ -2664,21 +2560,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 }, { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 }, { url = "https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 }, - { url = "https://files.pythonhosted.org/packages/22/67/1c7c0f39fe069aa4e5d794f323be24bf4d33d62d2a348acdb7991f8f30db/multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", size = 48771 }, - { url = "https://files.pythonhosted.org/packages/3c/25/c186ee7b212bdf0df2519eacfb1981a017bda34392c67542c274651daf23/multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", size = 29533 }, - { url = "https://files.pythonhosted.org/packages/67/5e/04575fd837e0958e324ca035b339cea174554f6f641d3fb2b4f2e7ff44a2/multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", size = 29595 }, - { url = "https://files.pythonhosted.org/packages/d3/b2/e56388f86663810c07cfe4a3c3d87227f3811eeb2d08450b9e5d19d78876/multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", size = 130094 }, - { url = "https://files.pythonhosted.org/packages/6c/ee/30ae9b4186a644d284543d55d491fbd4239b015d36b23fea43b4c94f7052/multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", size = 134876 }, - { url = "https://files.pythonhosted.org/packages/84/c7/70461c13ba8ce3c779503c70ec9d0345ae84de04521c1f45a04d5f48943d/multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", size = 133500 }, - { url = "https://files.pythonhosted.org/packages/4a/9f/002af221253f10f99959561123fae676148dd730e2daa2cd053846a58507/multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", size = 131099 }, - { url = "https://files.pythonhosted.org/packages/82/42/d1c7a7301d52af79d88548a97e297f9d99c961ad76bbe6f67442bb77f097/multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", size = 120403 }, - { url = "https://files.pythonhosted.org/packages/68/f3/471985c2c7ac707547553e8f37cff5158030d36bdec4414cb825fbaa5327/multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", size = 125348 }, - { url = "https://files.pythonhosted.org/packages/67/2c/e6df05c77e0e433c214ec1d21ddd203d9a4770a1f2866a8ca40a545869a0/multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", size = 119673 }, - { url = "https://files.pythonhosted.org/packages/c5/cd/bc8608fff06239c9fb333f9db7743a1b2eafe98c2666c9a196e867a3a0a4/multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", size = 129927 }, - { url = "https://files.pythonhosted.org/packages/44/8e/281b69b7bc84fc963a44dc6e0bbcc7150e517b91df368a27834299a526ac/multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", size = 128711 }, - { url = "https://files.pythonhosted.org/packages/12/a4/63e7cd38ed29dd9f1881d5119f272c898ca92536cdb53ffe0843197f6c85/multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", size = 125519 }, - { url = "https://files.pythonhosted.org/packages/38/e0/4f5855037a72cd8a7a2f60a3952d9aa45feedb37ae7831642102604e8a37/multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", size = 26426 }, - { url = "https://files.pythonhosted.org/packages/7e/a5/17ee3a4db1e310b7405f5d25834460073a8ccd86198ce044dfaf69eac073/multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", size = 28531 }, { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, ] @@ -2708,11 +2589,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ba/07/37d67048786ae84e6612575e173d713c9a05d0ae495dde1e68d972207d98/mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2", size = 12589275 }, { url = 
"https://files.pythonhosted.org/packages/1f/17/b1018c6bb3e9f1ce3956722b3bf91bff86c1cefccca71cec05eae49d6d41/mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0", size = 13037783 }, { url = "https://files.pythonhosted.org/packages/cb/32/cd540755579e54a88099aee0287086d996f5a24281a673f78a0e14dba150/mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2", size = 9726197 }, - { url = "https://files.pythonhosted.org/packages/11/bb/ab4cfdc562cad80418f077d8be9b4491ee4fb257440da951b85cbb0a639e/mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7", size = 11069721 }, - { url = "https://files.pythonhosted.org/packages/59/3b/a393b1607cb749ea2c621def5ba8c58308ff05e30d9dbdc7c15028bca111/mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62", size = 10063996 }, - { url = "https://files.pythonhosted.org/packages/d1/1f/6b76be289a5a521bb1caedc1f08e76ff17ab59061007f201a8a18cc514d1/mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8", size = 12584043 }, - { url = "https://files.pythonhosted.org/packages/a6/83/5a85c9a5976c6f96e3a5a7591aa28b4a6ca3a07e9e5ba0cec090c8b596d6/mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7", size = 13036996 }, - { url = "https://files.pythonhosted.org/packages/b4/59/c39a6f752f1f893fccbcf1bdd2aca67c79c842402b5283563d006a67cf76/mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc", size = 9737709 }, { url = "https://files.pythonhosted.org/packages/3b/86/72ce7f57431d87a7ff17d442f521146a6585019eb8f4f31b7c02801f78ad/mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a", size = 2647043 }, ] @@ -3205,13 +3081,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ad/9b/be8b3d3aec42aa47f6058482ace0d2ca3023477a46643d766e96281d5d31/orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b", size = 170424 }, { url = "https://files.pythonhosted.org/packages/1b/15/a4cc61e23c39b9dec4620cb95817c83c84078be1771d602f6d03f0e5c696/orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f", size = 145132 }, { url = "https://files.pythonhosted.org/packages/9f/8a/ce7c28e4ea337f6d95261345d7c61322f8561c52f57b263a3ad7025984f4/orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f", size = 139389 }, - { url = "https://files.pythonhosted.org/packages/0c/69/f1c4382cd44bdaf10006c4e82cb85d2bcae735369f84031e203c4e5d87de/orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1", size = 270695 }, - { url = "https://files.pythonhosted.org/packages/61/29/aeb5153271d4953872b06ed239eb54993a5f344353727c42d3aabb2046f6/orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1", size = 
141632 }, - { url = "https://files.pythonhosted.org/packages/bc/a2/c8ac38d8fb461a9b717c766fbe1f7d3acf9bde2f12488eb13194960782e4/orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d", size = 144854 }, - { url = "https://files.pythonhosted.org/packages/79/51/e7698fdb28bdec633888cc667edc29fd5376fce9ade0a5b3e22f5ebe0343/orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01", size = 172023 }, - { url = "https://files.pythonhosted.org/packages/02/2d/0d99c20878658c7e33b90e6a4bb75cf2924d6ff29c2365262cff3c26589a/orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4", size = 170429 }, - { url = "https://files.pythonhosted.org/packages/cd/45/6a4a446f4fb29bb4703c3537d5c6a2bf7fed768cb4d7b7dce9d71b72fc93/orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db", size = 145099 }, - { url = "https://files.pythonhosted.org/packages/72/6e/4631fe219a4203aa111e9bb763ad2e2e0cdd1a03805029e4da124d96863f/orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd", size = 139176 }, ] [[package]] @@ -3268,19 +3137,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, - { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, - { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, - { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, - { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, - { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, - { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, - { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, - { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, - { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, - { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, - { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, - { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, - { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, ] [[package]] @@ -3383,25 +3239,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/af/3a/da80224a6eb15bba7a0dcb2346e2b686bb9bf98378c0b4353cd88e62b171/pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6", size = 2249631 }, { url = "https://files.pythonhosted.org/packages/57/97/73f756c338c1d86bb802ee88c3cab015ad7ce4b838f8a24f16b676b1ac7c/pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47", size = 2567533 }, { url = "https://files.pythonhosted.org/packages/0b/30/2b61876e2722374558b871dfbfcbe4e406626d63f4f6ed92e9c8e24cac37/pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25", size = 2254890 }, - { url = "https://files.pythonhosted.org/packages/63/24/e2e15e392d00fcf4215907465d8ec2a2f23bcec1481a8ebe4ae760459995/pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699", size = 3147300 }, - { url = 
"https://files.pythonhosted.org/packages/43/72/92ad4afaa2afc233dc44184adff289c2e77e8cd916b3ddb72ac69495bda3/pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38", size = 2978742 }, - { url = "https://files.pythonhosted.org/packages/9e/da/c8d69c5bc85d72a8523fe862f05ababdc52c0a755cfe3d362656bb86552b/pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2", size = 4194349 }, - { url = "https://files.pythonhosted.org/packages/cd/e8/686d0caeed6b998351d57796496a70185376ed9c8ec7d99e1d19ad591fc6/pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2", size = 4298714 }, - { url = "https://files.pythonhosted.org/packages/ec/da/430015cec620d622f06854be67fd2f6721f52fc17fca8ac34b32e2d60739/pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527", size = 4208514 }, - { url = "https://files.pythonhosted.org/packages/44/ae/7e4f6662a9b1cb5f92b9cc9cab8321c381ffbee309210940e57432a4063a/pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa", size = 4380055 }, - { url = "https://files.pythonhosted.org/packages/74/d5/1a807779ac8a0eeed57f2b92a3c32ea1b696e6140c15bd42eaf908a261cd/pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f", size = 4296751 }, - { url = "https://files.pythonhosted.org/packages/38/8c/5fa3385163ee7080bc13026d59656267daaaaf3c728c233d530e2c2757c8/pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb", size = 4430378 }, - { url = "https://files.pythonhosted.org/packages/ca/1d/ad9c14811133977ff87035bf426875b93097fb50af747793f013979facdb/pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798", size = 2249588 }, - { url = "https://files.pythonhosted.org/packages/fb/01/3755ba287dac715e6afdb333cb1f6d69740a7475220b4637b5ce3d78cec2/pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de", size = 2567509 }, - { url = "https://files.pythonhosted.org/packages/c0/98/2c7d727079b6be1aba82d195767d35fcc2d32204c7a5820f822df5330152/pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84", size = 2254791 }, - { url = "https://files.pythonhosted.org/packages/eb/38/998b04cc6f474e78b563716b20eecf42a2fa16a84589d23c8898e64b0ffd/pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b", size = 3150854 }, - { url = "https://files.pythonhosted.org/packages/13/8e/be23a96292113c6cb26b2aa3c8b3681ec62b44ed5c2bd0b258bd59503d3c/pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003", size = 2982369 }, - { url = "https://files.pythonhosted.org/packages/97/8a/3db4eaabb7a2ae8203cd3a332a005e4aba00067fc514aaaf3e9721be31f1/pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2", size = 4333703 }, - { url = 
"https://files.pythonhosted.org/packages/28/ac/629ffc84ff67b9228fe87a97272ab125bbd4dc462745f35f192d37b822f1/pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a", size = 4412550 }, - { url = "https://files.pythonhosted.org/packages/d6/07/a505921d36bb2df6868806eaf56ef58699c16c388e378b0dcdb6e5b2fb36/pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8", size = 4461038 }, - { url = "https://files.pythonhosted.org/packages/d6/b9/fb620dd47fc7cc9678af8f8bd8c772034ca4977237049287e99dda360b66/pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8", size = 2253197 }, - { url = "https://files.pythonhosted.org/packages/df/86/25dde85c06c89d7fc5db17940f07aae0a56ac69aa9ccb5eb0f09798862a8/pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904", size = 2572169 }, - { url = "https://files.pythonhosted.org/packages/51/85/9c33f2517add612e17f3381aee7c4072779130c634921a756c97bc29fb49/pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3", size = 2256828 }, { url = "https://files.pythonhosted.org/packages/36/57/42a4dd825eab762ba9e690d696d894ba366e06791936056e26e099398cda/pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2", size = 3119239 }, { url = "https://files.pythonhosted.org/packages/98/f7/25f9f9e368226a1d6cf3507081a1a7944eddd3ca7821023377043f5a83c8/pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2", size = 2950803 }, { url = "https://files.pythonhosted.org/packages/59/01/98ead48a6c2e31e6185d4c16c978a67fe3ccb5da5c2ff2ba8475379bb693/pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b", size = 3281098 }, @@ -3561,22 +3398,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fd/bd/8657918a35d50b18a9e4d78a5df7b6c82a637a311ab20851eef4326305c1/propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348", size = 235922 }, { url = "https://files.pythonhosted.org/packages/a8/6f/ec0095e1647b4727db945213a9f395b1103c442ef65e54c62e92a72a3f75/propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5", size = 40177 }, { url = "https://files.pythonhosted.org/packages/20/a2/bd0896fdc4f4c1db46d9bc361c8c79a9bf08ccc08ba054a98e38e7ba1557/propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3", size = 44446 }, - { url = "https://files.pythonhosted.org/packages/a8/a7/5f37b69197d4f558bfef5b4bceaff7c43cc9b51adf5bd75e9081d7ea80e4/propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7", size = 78120 }, - { url = "https://files.pythonhosted.org/packages/c8/cd/48ab2b30a6b353ecb95a244915f85756d74f815862eb2ecc7a518d565b48/propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763", size = 45127 }, - { url = 
"https://files.pythonhosted.org/packages/a5/ba/0a1ef94a3412aab057bd996ed5f0ac7458be5bf469e85c70fa9ceb43290b/propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d", size = 44419 }, - { url = "https://files.pythonhosted.org/packages/b4/6c/ca70bee4f22fa99eacd04f4d2f1699be9d13538ccf22b3169a61c60a27fa/propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a", size = 229611 }, - { url = "https://files.pythonhosted.org/packages/19/70/47b872a263e8511ca33718d96a10c17d3c853aefadeb86dc26e8421184b9/propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b", size = 234005 }, - { url = "https://files.pythonhosted.org/packages/4f/be/3b0ab8c84a22e4a3224719099c1229ddfdd8a6a1558cf75cb55ee1e35c25/propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb", size = 237270 }, - { url = "https://files.pythonhosted.org/packages/04/d8/f071bb000d4b8f851d312c3c75701e586b3f643fe14a2e3409b1b9ab3936/propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf", size = 231877 }, - { url = "https://files.pythonhosted.org/packages/93/e7/57a035a1359e542bbb0a7df95aad6b9871ebee6dce2840cb157a415bd1f3/propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2", size = 217848 }, - { url = "https://files.pythonhosted.org/packages/f0/93/d1dea40f112ec183398fb6c42fde340edd7bab202411c4aa1a8289f461b6/propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f", size = 216987 }, - { url = "https://files.pythonhosted.org/packages/62/4c/877340871251145d3522c2b5d25c16a1690ad655fbab7bb9ece6b117e39f/propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136", size = 212451 }, - { url = "https://files.pythonhosted.org/packages/7c/bb/a91b72efeeb42906ef58ccf0cdb87947b54d7475fee3c93425d732f16a61/propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325", size = 212879 }, - { url = "https://files.pythonhosted.org/packages/9b/7f/ee7fea8faac57b3ec5d91ff47470c6c5d40d7f15d0b1fccac806348fa59e/propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44", size = 222288 }, - { url = "https://files.pythonhosted.org/packages/ff/d7/acd67901c43d2e6b20a7a973d9d5fd543c6e277af29b1eb0e1f7bd7ca7d2/propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83", size = 228257 }, - { url = "https://files.pythonhosted.org/packages/8d/6f/6272ecc7a8daad1d0754cfc6c8846076a8cb13f810005c79b15ce0ef0cf2/propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544", size = 221075 }, - { url = "https://files.pythonhosted.org/packages/7c/bd/c7a6a719a6b3dd8b3aeadb3675b5783983529e4a3185946aa444d3e078f6/propcache-0.2.0-cp313-cp313-win32.whl", hash 
= "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032", size = 39654 }, - { url = "https://files.pythonhosted.org/packages/88/e7/0eef39eff84fa3e001b44de0bd41c7c0e3432e7648ffd3d64955910f002d/propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e", size = 43705 }, { url = "https://files.pythonhosted.org/packages/3d/b6/e6d98278f2d49b22b4d033c9f792eda783b9ab2094b041f013fc69bcde87/propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036", size = 11603 }, ] @@ -3600,8 +3421,6 @@ version = "6.1.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/26/10/2a30b13c61e7cf937f4adf90710776b7918ed0a9c434e2c38224732af310/psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a", size = 508565 } wheels = [ - { url = "https://files.pythonhosted.org/packages/da/2b/f4dea5d993d9cd22ad958eea828a41d5d225556123d372f02547c29c4f97/psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e", size = 246648 }, - { url = "https://files.pythonhosted.org/packages/9f/14/4aa97a7f2e0ac33a050d990ab31686d651ae4ef8c86661fef067f00437b9/psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85", size = 249905 }, { url = "https://files.pythonhosted.org/packages/01/9e/8be43078a171381953cfee33c07c0d628594b5dbfc5157847b85022c2c1b/psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688", size = 247762 }, { url = "https://files.pythonhosted.org/packages/1d/cb/313e80644ea407f04f6602a9e23096540d9dc1878755f3952ea8d3d104be/psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e", size = 248777 }, { url = "https://files.pythonhosted.org/packages/65/8e/bcbe2025c587b5d703369b6a75b65d41d1367553da6e3f788aff91eaf5bd/psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38", size = 284259 }, @@ -3728,18 +3547,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/b9/41f7efe80f6ce2ed3ee3c2dcfe10ab7adc1172f778cc9659509a79518c43/pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24", size = 2116872 }, { url = "https://files.pythonhosted.org/packages/63/08/b59b7a92e03dd25554b0436554bf23e7c29abae7cce4b1c459cd92746811/pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84", size = 1738535 }, { url = "https://files.pythonhosted.org/packages/88/8d/479293e4d39ab409747926eec4329de5b7129beaedc3786eca070605d07f/pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9", size = 1917992 }, - { url = "https://files.pythonhosted.org/packages/ad/ef/16ee2df472bf0e419b6bc68c05bf0145c49247a1095e85cee1463c6a44a1/pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc", size = 1856143 }, - { url = 
"https://files.pythonhosted.org/packages/da/fa/bc3dbb83605669a34a93308e297ab22be82dfb9dcf88c6cf4b4f264e0a42/pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd", size = 1770063 }, - { url = "https://files.pythonhosted.org/packages/4e/48/e813f3bbd257a712303ebdf55c8dc46f9589ec74b384c9f652597df3288d/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05", size = 1790013 }, - { url = "https://files.pythonhosted.org/packages/b4/e0/56eda3a37929a1d297fcab1966db8c339023bcca0b64c5a84896db3fcc5c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d", size = 1801077 }, - { url = "https://files.pythonhosted.org/packages/04/be/5e49376769bfbf82486da6c5c1683b891809365c20d7c7e52792ce4c71f3/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510", size = 1996782 }, - { url = "https://files.pythonhosted.org/packages/bc/24/e3ee6c04f1d58cc15f37bcc62f32c7478ff55142b7b3e6d42ea374ea427c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6", size = 2661375 }, - { url = "https://files.pythonhosted.org/packages/c1/f8/11a9006de4e89d016b8de74ebb1db727dc100608bb1e6bbe9d56a3cbbcce/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b", size = 2071635 }, - { url = "https://files.pythonhosted.org/packages/7c/45/bdce5779b59f468bdf262a5bc9eecbae87f271c51aef628d8c073b4b4b4c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327", size = 1916994 }, - { url = "https://files.pythonhosted.org/packages/d8/fa/c648308fe711ee1f88192cad6026ab4f925396d1293e8356de7e55be89b5/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6", size = 1968877 }, - { url = "https://files.pythonhosted.org/packages/16/16/b805c74b35607d24d37103007f899abc4880923b04929547ae68d478b7f4/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f", size = 2116814 }, - { url = "https://files.pythonhosted.org/packages/d1/58/5305e723d9fcdf1c5a655e6a4cc2a07128bf644ff4b1d98daf7a9dbf57da/pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769", size = 1738360 }, - { url = "https://files.pythonhosted.org/packages/a5/ae/e14b0ff8b3f48e02394d8acd911376b7b66e164535687ef7dc24ea03072f/pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5", size = 1919411 }, { url = "https://files.pythonhosted.org/packages/13/a9/5d582eb3204464284611f636b55c0a7410d748ff338756323cb1ce721b96/pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5", size = 1857135 }, { url = 
"https://files.pythonhosted.org/packages/2c/57/faf36290933fe16717f97829eabfb1868182ac495f99cf0eda9f59687c9d/pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec", size = 1740583 }, { url = "https://files.pythonhosted.org/packages/91/7c/d99e3513dc191c4fec363aef1bf4c8af9125d8fa53af7cb97e8babef4e40/pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480", size = 1793637 }, @@ -3985,9 +3792,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, - { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, - { url = "https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, - { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, ] [[package]] @@ -4023,15 +3827,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, - { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, - { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, ] [[package]] @@ -4079,27 +3874,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/07/18/907134c85c7152f679ed744e73e645b365f3ad571f38bdb62e36f347699a/pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7", size = 575533 }, { url = "https://files.pythonhosted.org/packages/ce/2c/a6f4a20202a4d3c582ad93f95ee78d79bbdc26803495aec2912b17dbbb6c/pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a", size = 637768 }, { url = "https://files.pythonhosted.org/packages/5f/0e/eb16ff731632d30554bf5af4dbba3ffcd04518219d82028aea4ae1b02ca5/pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b", size = 540675 }, - { url = "https://files.pythonhosted.org/packages/04/a7/0f7e2f6c126fe6e62dbae0bc93b1bd3f1099cf7fea47a5468defebe3f39d/pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726", size = 1006564 }, - { url = "https://files.pythonhosted.org/packages/31/b6/a187165c852c5d49f826a690857684333a6a4a065af0a6015572d2284f6a/pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3", size = 1340447 }, - { url = "https://files.pythonhosted.org/packages/68/ba/f4280c58ff71f321602a6e24fd19879b7e79793fb8ab14027027c0fb58ef/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50", size = 
665485 },
- { url = "https://files.pythonhosted.org/packages/77/b5/c987a5c53c7d8704216f29fc3d810b32f156bcea488a940e330e1bcbb88d/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb", size = 903484 },
- { url = "https://files.pythonhosted.org/packages/29/c9/07da157d2db18c72a7eccef8e684cefc155b712a88e3d479d930aa9eceba/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187", size = 859981 },
- { url = "https://files.pythonhosted.org/packages/43/09/e12501bd0b8394b7d02c41efd35c537a1988da67fc9c745cae9c6c776d31/pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b", size = 860334 },
- { url = "https://files.pythonhosted.org/packages/eb/ff/f5ec1d455f8f7385cc0a8b2acd8c807d7fade875c14c44b85c1bddabae21/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18", size = 1196179 },
- { url = "https://files.pythonhosted.org/packages/ec/8a/bb2ac43295b1950fe436a81fc5b298be0b96ac76fb029b514d3ed58f7b27/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115", size = 1507668 },
- { url = "https://files.pythonhosted.org/packages/a9/49/dbc284ebcfd2dca23f6349227ff1616a7ee2c4a35fe0a5d6c3deff2b4fed/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e", size = 1406539 },
- { url = "https://files.pythonhosted.org/packages/00/68/093cdce3fe31e30a341d8e52a1ad86392e13c57970d722c1f62a1d1a54b6/pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5", size = 575567 },
- { url = "https://files.pythonhosted.org/packages/92/ae/6cc4657148143412b5819b05e362ae7dd09fb9fe76e2a539dcff3d0386bc/pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad", size = 637551 },
- { url = "https://files.pythonhosted.org/packages/6c/67/fbff102e201688f97c8092e4c3445d1c1068c2f27bbd45a578df97ed5f94/pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797", size = 540378 },
- { url = "https://files.pythonhosted.org/packages/3f/fe/2d998380b6e0122c6c4bdf9b6caf490831e5f5e2d08a203b5adff060c226/pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a", size = 1007378 },
- { url = "https://files.pythonhosted.org/packages/4a/f4/30d6e7157f12b3a0390bde94d6a8567cdb88846ed068a6e17238a4ccf600/pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc", size = 1329532 },
- { url = "https://files.pythonhosted.org/packages/82/86/3fe917870e15ee1c3ad48229a2a64458e36036e64b4afa9659045d82bfa8/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5", size = 653242 },
- { url = "https://files.pythonhosted.org/packages/50/2d/242e7e6ef6c8c19e6cb52d095834508cd581ffb925699fd3c640cdc758f1/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672", size = 888404 },
- { url = "https://files.pythonhosted.org/packages/ac/11/7270566e1f31e4ea73c81ec821a4b1688fd551009a3d2bab11ec66cb1e8f/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797", size = 845858 },
- { url = "https://files.pythonhosted.org/packages/91/d5/72b38fbc69867795c8711bdd735312f9fef1e3d9204e2f63ab57085434b9/pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386", size = 847375 },
- { url = "https://files.pythonhosted.org/packages/dd/9a/10ed3c7f72b4c24e719c59359fbadd1a27556a28b36cdf1cd9e4fb7845d5/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306", size = 1183489 },
- { url = "https://files.pythonhosted.org/packages/72/2d/8660892543fabf1fe41861efa222455811adac9f3c0818d6c3170a1153e3/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6", size = 1492932 },
- { url = "https://files.pythonhosted.org/packages/7b/d6/32fd69744afb53995619bc5effa2a405ae0d343cd3e747d0fbc43fe894ee/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0", size = 1392485 },
{ url = "https://files.pythonhosted.org/packages/53/fb/36b2b2548286e9444e52fcd198760af99fd89102b5be50f0660fcfe902df/pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072", size = 906955 },
{ url = "https://files.pythonhosted.org/packages/77/8f/6ce54f8979a01656e894946db6299e2273fcee21c8e5fa57c6295ef11f57/pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1", size = 565701 },
{ url = "https://files.pythonhosted.org/packages/ee/1c/bf8cd66730a866b16db8483286078892b7f6536f8c389fb46e4beba0a970/pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d", size = 794312 },
@@ -4172,21 +3946,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ea/75/9753e9dcebfa7c3645563ef5c8a58f3a47e799c872165f37c55737dadd3e/regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a", size = 787333 },
{ url = "https://files.pythonhosted.org/packages/bc/4e/ba1cbca93141f7416624b3ae63573e785d4bc1834c8be44a8f0747919eca/regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776", size = 262058 },
{ url = "https://files.pythonhosted.org/packages/6e/16/efc5f194778bf43e5888209e5cec4b258005d37c613b67ae137df3b89c53/regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009", size = 273526 },
- { url = "https://files.pythonhosted.org/packages/93/0a/d1c6b9af1ff1e36832fe38d74d5c5bab913f2bdcbbd6bc0e7f3ce8b2f577/regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784", size = 483376 },
- { url = "https://files.pythonhosted.org/packages/a4/42/5910a050c105d7f750a72dcb49c30220c3ae4e2654e54aaaa0e9bc0584cb/regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36", size = 288112 },
- { url = "https://files.pythonhosted.org/packages/8d/56/0c262aff0e9224fa7ffce47b5458d373f4d3e3ff84e99b5ff0cb15e0b5b2/regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92", size = 284608 },
- { url = "https://files.pythonhosted.org/packages/b9/54/9fe8f9aec5007bbbbce28ba3d2e3eaca425f95387b7d1e84f0d137d25237/regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86", size = 795337 },
- { url = "https://files.pythonhosted.org/packages/b2/e7/6b2f642c3cded271c4f16cc4daa7231be544d30fe2b168e0223724b49a61/regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85", size = 835848 },
- { url = "https://files.pythonhosted.org/packages/cd/9e/187363bdf5d8c0e4662117b92aa32bf52f8f09620ae93abc7537d96d3311/regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963", size = 823503 },
- { url = "https://files.pythonhosted.org/packages/f8/10/601303b8ee93589f879664b0cfd3127949ff32b17f9b6c490fb201106c4d/regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6", size = 797049 },
- { url = "https://files.pythonhosted.org/packages/ef/1c/ea200f61ce9f341763f2717ab4daebe4422d83e9fd4ac5e33435fd3a148d/regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802", size = 784144 },
- { url = "https://files.pythonhosted.org/packages/d8/5c/d2429be49ef3292def7688401d3deb11702c13dcaecdc71d2b407421275b/regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29", size = 782483 },
- { url = "https://files.pythonhosted.org/packages/12/d9/cbc30f2ff7164f3b26a7760f87c54bf8b2faed286f60efd80350a51c5b99/regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8", size = 790320 },
- { url = "https://files.pythonhosted.org/packages/19/1d/43ed03a236313639da5a45e61bc553c8d41e925bcf29b0f8ecff0c2c3f25/regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84", size = 860435 },
- { url = "https://files.pythonhosted.org/packages/34/4f/5d04da61c7c56e785058a46349f7285ae3ebc0726c6ea7c5c70600a52233/regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554", size = 859571 },
- { url = "https://files.pythonhosted.org/packages/12/7f/8398c8155a3c70703a8e91c29532558186558e1aea44144b382faa2a6f7a/regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8", size = 787398 },
- { url = "https://files.pythonhosted.org/packages/58/3a/f5903977647a9a7e46d5535e9e96c194304aeeca7501240509bde2f9e17f/regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8", size = 262035 },
- { url = "https://files.pythonhosted.org/packages/ff/80/51ba3a4b7482f6011095b3a036e07374f64de180b7d870b704ed22509002/regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f", size = 273510 },
]

[[package]]
@@ -4287,19 +4046,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/70/2d/5536d28c507a4679179ab15aa0049440e4d3dd6752050fa0843ed11e9354/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174", size = 528807 },
{ url = "https://files.pythonhosted.org/packages/e3/62/7ebe6ec0d3dd6130921f8cffb7e34afb7f71b3819aa0446a24c5e81245ec/rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139", size = 200993 },
{ url = "https://files.pythonhosted.org/packages/ec/2f/b938864d66b86a6e4acadefdc56de75ef56f7cafdfd568a6464605457bd5/rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585", size = 214458 },
- { url = "https://files.pythonhosted.org/packages/99/32/43b919a0a423c270a838ac2726b1c7168b946f2563fd99a51aaa9692d00f/rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29", size = 321465 },
- { url = "https://files.pythonhosted.org/packages/58/a9/c4d899cb28e9e47b0ff12462e8f827381f243176036f17bef9c1604667f2/rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91", size = 312900 },
- { url = "https://files.pythonhosted.org/packages/8f/90/9e51670575b5dfaa8c823369ef7d943087bfb73d4f124a99ad6ef19a2b26/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24", size = 370973 },
- { url = "https://files.pythonhosted.org/packages/fc/c1/523f2a03f853fc0d4c1acbef161747e9ab7df0a8abf6236106e333540921/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7", size = 370890 },
- { url = "https://files.pythonhosted.org/packages/51/ca/2458a771f16b0931de4d384decbe43016710bc948036c8f4562d6e063437/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9", size = 397174 },
- { url = "https://files.pythonhosted.org/packages/00/7d/6e06807f6305ea2408b364efb0eef83a6e21b5e7b5267ad6b473b9a7e416/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8", size = 426449 },
- { url = "https://files.pythonhosted.org/packages/8c/d1/6c9e65260a819a1714510a7d69ac1d68aa23ee9ce8a2d9da12187263c8fc/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879", size = 357698 },
- { url = "https://files.pythonhosted.org/packages/5d/fb/ecea8b5286d2f03eec922be7173a03ed17278944f7c124348f535116db15/rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f", size = 378530 },
- { url = "https://files.pythonhosted.org/packages/e3/e3/ac72f858957f52a109c588589b73bd2fad4a0fc82387fb55fb34aeb0f9cd/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c", size = 545753 },
- { url = "https://files.pythonhosted.org/packages/b2/a4/a27683b519d5fc98e4390a3b130117d80fd475c67aeda8aac83c0e8e326a/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2", size = 552443 },
- { url = "https://files.pythonhosted.org/packages/a1/ed/c074d248409b4432b1ccb2056974175fa0af2d1bc1f9c21121f80a358fa3/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57", size = 528380 },
- { url = "https://files.pythonhosted.org/packages/d5/bd/04caf938895d2d78201e89c0c8a94dfd9990c34a19ff52fb01d0912343e3/rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a", size = 200540 },
- { url = "https://files.pythonhosted.org/packages/95/cc/109eb8b9863680411ae703664abacaa035820c7755acc9686d5dd02cdd2e/rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2", size = 214111 },
{ url = "https://files.pythonhosted.org/packages/06/39/bf1f664c347c946ef56cecaa896e3693d91acc741afa78ebb3fdb7aba08b/rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045", size = 319444 },
{ url = "https://files.pythonhosted.org/packages/c1/71/876135d3cb90d62468540b84e8e83ff4dc92052ab309bfdea7ea0b9221ad/rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc", size = 311699 },
{ url = "https://files.pythonhosted.org/packages/f7/da/8ccaeba6a3dda7467aebaf893de9eafd56275e2c90773c83bf15fb0b8374/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02", size = 367825 },
@@ -4371,14 +4117,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/8e/ee/8a26858ca517e9c64f84b4c7734b89bda8e63bec85c3d2f432d225bb1886/scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066", size = 40849331 },
{ url = "https://files.pythonhosted.org/packages/a5/cd/06f72bc9187840f1c99e1a8750aad4216fc7dfdd7df46e6280add14b4822/scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1", size = 42544049 },
{ url = "https://files.pythonhosted.org/packages/aa/7d/43ab67228ef98c6b5dd42ab386eae2d7877036970a0d7e3dd3eb47a0d530/scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f", size = 44521212 },
- { url = "https://files.pythonhosted.org/packages/50/ef/ac98346db016ff18a6ad7626a35808f37074d25796fd0234c2bb0ed1e054/scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79", size = 39091068 },
- { url = "https://files.pythonhosted.org/packages/b9/cc/70948fe9f393b911b4251e96b55bbdeaa8cca41f37c26fd1df0232933b9e/scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e", size = 29875417 },
- { url = "https://files.pythonhosted.org/packages/3b/2e/35f549b7d231c1c9f9639f9ef49b815d816bf54dd050da5da1c11517a218/scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73", size = 23084508 },
- { url = "https://files.pythonhosted.org/packages/3f/d6/b028e3f3e59fae61fb8c0f450db732c43dd1d836223a589a8be9f6377203/scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e", size = 25503364 },
- { url = "https://files.pythonhosted.org/packages/a7/2f/6c142b352ac15967744d62b165537a965e95d557085db4beab2a11f7943b/scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d", size = 35292639 },
- { url = "https://files.pythonhosted.org/packages/56/46/2449e6e51e0d7c3575f289f6acb7f828938eaab8874dbccfeb0cd2b71a27/scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e", size = 40798288 },
- { url = "https://files.pythonhosted.org/packages/32/cd/9d86f7ed7f4497c9fd3e39f8918dd93d9f647ba80d7e34e4946c0c2d1a7c/scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06", size = 42524647 },
- { url = "https://files.pythonhosted.org/packages/f5/1b/6ee032251bf4cdb0cc50059374e86a9f076308c1512b61c4e003e241efb7/scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84", size = 44469524 },
]

[[package]]
@@ -4659,14 +4397,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ea/09/badfc9293bc3ccba6ede05e5f2b44a760aa47d84da1fc5a326e963e3d4d9/SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588", size = 3205147 },
{ url = "https://files.pythonhosted.org/packages/c8/60/70e681de02a13c4b27979b7b78da3058c49bacc9858c89ba672e030f03f2/SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e", size = 2062709 },
{ url = "https://files.pythonhosted.org/packages/b7/ed/f6cd9395e41bfe47dd253d74d2dfc3cab34980d4e20c8878cb1117306085/SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5", size = 2088433 },
- { url = "https://files.pythonhosted.org/packages/78/5c/236398ae3678b3237726819b484f15f5c038a9549da01703a771f05a00d6/SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef", size = 2087651 },
- { url = "https://files.pythonhosted.org/packages/a8/14/55c47420c0d23fb67a35af8be4719199b81c59f3084c28d131a7767b0b0b/SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8", size = 2078132 },
- { url = "https://files.pythonhosted.org/packages/3d/97/1e843b36abff8c4a7aa2e37f9bea364f90d021754c2de94d792c2d91405b/SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b", size = 3164559 },
- { url = "https://files.pythonhosted.org/packages/7b/c5/07f18a897b997f6d6b234fab2bf31dccf66d5d16a79fe329aefc95cd7461/SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2", size = 3177897 },
- { url = "https://files.pythonhosted.org/packages/b3/cd/e16f3cbefd82b5c40b33732da634ec67a5f33b587744c7ab41699789d492/SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf", size = 3111289 },
- { url = "https://files.pythonhosted.org/packages/15/85/5b8a3b0bc29c9928aa62b5c91fcc8335f57c1de0a6343873b5f372e3672b/SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c", size = 3139491 },
- { url = "https://files.pythonhosted.org/packages/a1/95/81babb6089938680dfe2cd3f88cd3fd39cccd1543b7cb603b21ad881bff1/SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436", size = 2060439 },
- { url = "https://files.pythonhosted.org/packages/c1/ce/5f7428df55660d6879d0522adc73a3364970b5ef33ec17fa125c5dbcac1d/SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88", size = 2084574 },
{ url = "https://files.pythonhosted.org/packages/b8/49/21633706dd6feb14cd3f7935fc00b60870ea057686035e1a99ae6d9d9d53/SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e", size = 1883787 },
]
@@ -4866,12 +4596,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/26/32/e0e3a859136e95c85a572e4806dc58bf1ddf651108ae8b97d5f3ebe1a244/tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04", size = 1175432 },
{ url = "https://files.pythonhosted.org/packages/c7/89/926b66e9025b97e9fbabeaa59048a736fe3c3e4530a204109571104f921c/tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc", size = 1236576 },
{ url = "https://files.pythonhosted.org/packages/45/e2/39d4aa02a52bba73b2cd21ba4533c84425ff8786cc63c511d68c8897376e/tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db", size = 883824 },
- { url = "https://files.pythonhosted.org/packages/e3/38/802e79ba0ee5fcbf240cd624143f57744e5d411d2e9d9ad2db70d8395986/tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24", size = 1039648 },
- { url = "https://files.pythonhosted.org/packages/b1/da/24cdbfc302c98663fbea66f5866f7fa1048405c7564ab88483aea97c3b1a/tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a", size = 982763 },
- { url = "https://files.pythonhosted.org/packages/e4/f0/0ecf79a279dfa41fc97d00adccf976ecc2556d3c08ef3e25e45eb31f665b/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5", size = 1144417 },
- { url = "https://files.pythonhosted.org/packages/ab/d3/155d2d4514f3471a25dc1d6d20549ef254e2aa9bb5b1060809b1d3b03d3a/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953", size = 1175108 },
- { url = "https://files.pythonhosted.org/packages/19/eb/5989e16821ee8300ef8ee13c16effc20dfc26c777d05fbb6825e3c037b81/tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7", size = 1236520 },
- { url = "https://files.pythonhosted.org/packages/40/59/14b20465f1d1cb89cfbc96ec27e5617b2d41c79da12b5e04e96d689be2a7/tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69", size = 883849 },
]

[[package]]
@@ -4952,7 +4676,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6d/69/d8ada8b6e0a4257556d5b4ddeb4345ea8eeaaef3c98b60d1cca197c7ad8e/torch-2.5.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", size = 91811673 },
{ url = "https://files.pythonhosted.org/packages/5f/ba/607d013b55b9fd805db2a5c2662ec7551f1910b4eef39653eeaba182c5b2/torch-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", size = 203046841 },
{ url = "https://files.pythonhosted.org/packages/57/6c/bf52ff061da33deb9f94f4121fde7ff3058812cb7d2036c97bc167793bd1/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", size = 63858109 },
- { url = "https://files.pythonhosted.org/packages/69/72/20cb30f3b39a9face296491a86adb6ff8f1a47a897e4d14667e6cf89d5c3/torch-2.5.1-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", size = 906393265 },
]

[[package]]
@@ -5239,18 +4962,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/44/81/1f701323a9f70805bc81c74c990137123344a80ea23ab9504a99492907f8/watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444", size = 264109 },
{ url = "https://files.pythonhosted.org/packages/b4/0b/32cde5bc2ebd9f351be326837c61bdeb05ad652b793f25c91cac0b48a60b/watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896", size = 277055 },
{ url = "https://files.pythonhosted.org/packages/4b/81/daade76ce33d21dbec7a15afd7479de8db786e5f7b7d249263b4ea174e08/watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418", size = 266169 },
- { url = "https://files.pythonhosted.org/packages/30/dc/6e9f5447ae14f645532468a84323a942996d74d5e817837a5c8ce9d16c69/watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48", size = 373764 },
- { url = "https://files.pythonhosted.org/packages/79/c0/c3a9929c372816c7fc87d8149bd722608ea58dc0986d3ef7564c79ad7112/watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90", size = 367873 },
- { url = "https://files.pythonhosted.org/packages/2e/11/ff9a4445a7cfc1c98caf99042df38964af12eed47d496dd5d0d90417349f/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94", size = 438381 },
- { url = "https://files.pythonhosted.org/packages/48/a3/763ba18c98211d7bb6c0f417b2d7946d346cdc359d585cc28a17b48e964b/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e", size = 432809 },
- { url = "https://files.pythonhosted.org/packages/30/4c/616c111b9d40eea2547489abaf4ffc84511e86888a166d3a4522c2ba44b5/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827", size = 451801 },
- { url = "https://files.pythonhosted.org/packages/b6/be/d7da83307863a422abbfeb12903a76e43200c90ebe5d6afd6a59d158edea/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df", size = 468886 },
- { url = "https://files.pythonhosted.org/packages/1d/d3/3dfe131ee59d5e90b932cf56aba5c996309d94dafe3d02d204364c23461c/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab", size = 472973 },
- { url = "https://files.pythonhosted.org/packages/42/6c/279288cc5653a289290d183b60a6d80e05f439d5bfdfaf2d113738d0f932/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f", size = 425282 },
- { url = "https://files.pythonhosted.org/packages/d6/d7/58afe5e85217e845edf26d8780c2d2d2ae77675eeb8d1b8b8121d799ce52/watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b", size = 612540 },
- { url = "https://files.pythonhosted.org/packages/6d/d5/b96eeb9fe3fda137200dd2f31553670cbc731b1e13164fd69b49870b76ec/watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18", size = 593625 },
- { url = "https://files.pythonhosted.org/packages/c1/e5/c326fe52ee0054107267608d8cea275e80be4455b6079491dfd9da29f46f/watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07", size = 263899 },
- { url = "https://files.pythonhosted.org/packages/a6/8b/8a7755c5e7221bb35fe4af2dc44db9174f90ebf0344fd5e9b1e8b42d381e/watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366", size = 276622 },
{ url = "https://files.pythonhosted.org/packages/df/94/1ad200e937ec91b2a9d6b39ae1cf9c2b1a9cc88d5ceb43aa5c6962eb3c11/watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f", size = 376986 },
{ url = "https://files.pythonhosted.org/packages/ee/fd/d9e020d687ccf90fe95efc513fbb39a8049cf5a3ff51f53c59fcf4c47a5d/watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b", size = 369445 },
{ url = "https://files.pythonhosted.org/packages/43/cb/c0279b35053555d10ef03559c5aebfcb0c703d9c70a7b4e532df74b9b0e8/watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4", size = 439383 },
@@ -5314,17 +5025,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/f1/a29dd6046d3a722d26f182b783a7997d25298873a14028c4760347974ea3/websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf", size = 164686 },
{ url = "https://files.pythonhosted.org/packages/0f/99/ab1cdb282f7e595391226f03f9b498f52109d25a2ba03832e21614967dfa/websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c", size = 158712 },
{ url = "https://files.pythonhosted.org/packages/46/93/e19160db48b5581feac8468330aa11b7292880a94a37d7030478596cc14e/websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3", size = 159145 },
- { url = "https://files.pythonhosted.org/packages/51/20/2b99ca918e1cbd33c53db2cace5f0c0cd8296fc77558e1908799c712e1cd/websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6", size = 157828 },
- { url = "https://files.pythonhosted.org/packages/b8/47/0932a71d3d9c0e9483174f60713c84cee58d62839a143f21a2bcdbd2d205/websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708", size = 155487 },
- { url = "https://files.pythonhosted.org/packages/a9/60/f1711eb59ac7a6c5e98e5637fef5302f45b6f76a2c9d64fd83bbb341377a/websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418", size = 155721 },
- { url = "https://files.pythonhosted.org/packages/6a/e6/ba9a8db7f9d9b0e5f829cf626ff32677f39824968317223605a6b419d445/websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a", size = 165609 },
- { url = "https://files.pythonhosted.org/packages/c1/22/4ec80f1b9c27a0aebd84ccd857252eda8418ab9681eb571b37ca4c5e1305/websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f", size = 164556 },
- { url = "https://files.pythonhosted.org/packages/27/ac/35f423cb6bb15600438db80755609d27eda36d4c0b3c9d745ea12766c45e/websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5", size = 164993 },
- { url = "https://files.pythonhosted.org/packages/31/4e/98db4fd267f8be9e52e86b6ee4e9aa7c42b83452ea0ea0672f176224b977/websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135", size = 165360 },
- { url = "https://files.pythonhosted.org/packages/3f/15/3f0de7cda70ffc94b7e7024544072bc5b26e2c1eb36545291abb755d8cdb/websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2", size = 164745 },
- { url = "https://files.pythonhosted.org/packages/a1/6e/66b6b756aebbd680b934c8bdbb6dcb9ce45aad72cde5f8a7208dbb00dd36/websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6", size = 164732 },
- { url = "https://files.pythonhosted.org/packages/35/c6/12e3aab52c11aeb289e3dbbc05929e7a9d90d7a9173958477d3ef4f8ce2d/websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d", size = 158709 },
- { url = "https://files.pythonhosted.org/packages/41/d8/63d6194aae711d7263df4498200c690a9c39fb437ede10f3e157a6343e0d/websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2", size = 159144 },
{ url = "https://files.pythonhosted.org/packages/2d/75/6da22cb3ad5b8c606963f9a5f9f88656256fecc29d420b4b2bf9e0c7d56f/websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238", size = 155499 },
{ url = "https://files.pythonhosted.org/packages/c0/ba/22833d58629088fcb2ccccedfae725ac0bbcd713319629e97125b52ac681/websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5", size = 155737 },
{ url = "https://files.pythonhosted.org/packages/95/54/61684fe22bdb831e9e1843d972adadf359cf04ab8613285282baea6a24bb/websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9", size = 157095 },
@@ -5472,22 +5172,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ad/8d/b7b5d43cf22a020b564ddf7502d83df150d797e34f18f6bf5fe0f12cbd91/yarl-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a5cf32539373ff39d97723e39a9283a7277cbf1224f7aef0c56c9598b6486c3", size = 355746 },
{ url = "https://files.pythonhosted.org/packages/d9/a6/a2098bf3f09d38eb540b2b192e180d9d41c2ff64b692783db2188f0a55e3/yarl-1.16.0-cp312-cp312-win32.whl", hash = "sha256:a5b6c09b9b4253d6a208b0f4a2f9206e511ec68dce9198e0fbec4f160137aa67", size = 82675 },
{ url = "https://files.pythonhosted.org/packages/ed/a6/0a54b382cfc336e772b72681d6816a99222dc2d21876e649474973b8d244/yarl-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:1208ca14eed2fda324042adf8d6c0adf4a31522fa95e0929027cd487875f0240", size = 88986 },
- { url = "https://files.pythonhosted.org/packages/57/56/eef0a7050fcd11d70c536453f014d4b2dfd83fb934c9857fa1a912832405/yarl-1.16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5ace0177520bd4caa99295a9b6fb831d0e9a57d8e0501a22ffaa61b4c024283", size = 139373 },
- { url = "https://files.pythonhosted.org/packages/3f/b2/88eb9e98c5a4549606ebf673cba0d701f13ec855021b781f8e3fd7c04190/yarl-1.16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7118bdb5e3ed81acaa2095cba7ec02a0fe74b52a16ab9f9ac8e28e53ee299732", size = 92759 },
- { url = "https://files.pythonhosted.org/packages/95/1d/c3b794ef82a3b1894a9f8fc1012b073a85464b95c646ac217e8013137ea3/yarl-1.16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38fec8a2a94c58bd47c9a50a45d321ab2285ad133adefbbadf3012c054b7e656", size = 90573 },
- { url = "https://files.pythonhosted.org/packages/7f/35/39a5dcbf7ef320607bcfd1c0498ce348181b97627c3901139b429d806cf1/yarl-1.16.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8791d66d81ee45866a7bb15a517b01a2bcf583a18ebf5d72a84e6064c417e64b", size = 332461 },
- { url = "https://files.pythonhosted.org/packages/36/29/2a468c8b44aa750d0f3416bc24d58464237b402388a8f03091a58537274a/yarl-1.16.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cf936ba67bc6c734f3aa1c01391da74ab7fc046a9f8bbfa230b8393b90cf472", size = 343045 },
- { url = "https://files.pythonhosted.org/packages/91/6a/002300c86ed7ef3cd5ac890a0e17101aee06c64abe2e43f9dad85bc32c70/yarl-1.16.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1aab176dd55b59f77a63b27cffaca67d29987d91a5b615cbead41331e6b7428", size = 344592 },
- { url = "https://files.pythonhosted.org/packages/ea/69/ca4228e0f560f0c5817e0ebd789690c78ab17e6a876b38a5d000889b2f63/yarl-1.16.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:995d0759004c08abd5d1b81300a91d18c8577c6389300bed1c7c11675105a44d", size = 338127 },
- { url = "https://files.pythonhosted.org/packages/81/df/32eea6e5199f7298ec15cf708895f35a7d2899177ed556e6bdf6819462aa/yarl-1.16.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1bc22e00edeb068f71967ab99081e9406cd56dbed864fc3a8259442999d71552", size = 326127 },
- { url = "https://files.pythonhosted.org/packages/9a/11/1a888df53acd3d1d4b8dc803e0c8ed4a4b6cabc2abe19e4de31aa6b86857/yarl-1.16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35b4f7842154176523e0a63c9b871168c69b98065d05a4f637fce342a6a2693a", size = 345219 },
- { url = "https://files.pythonhosted.org/packages/34/88/44fd8b372c4c50c010e66c62bfb34e67d6bd217c973599e0ee03f74e74ec/yarl-1.16.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:7ace71c4b7a0c41f317ae24be62bb61e9d80838d38acb20e70697c625e71f120", size = 339742 },
- { url = "https://files.pythonhosted.org/packages/ee/c8/eaa53bd40db61265cec09d3c432d8bcd8ab9fd3a9fc5b0afdd13ab27b4a8/yarl-1.16.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8f639e3f5795a6568aa4f7d2ac6057c757dcd187593679f035adbf12b892bb00", size = 344695 },
- { url = "https://files.pythonhosted.org/packages/1b/8f/b00aa91bd3bc8ef41781b13ac967c9c5c2e3ca0c516cffdd15ac035a1839/yarl-1.16.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e8be3aff14f0120ad049121322b107f8a759be76a6a62138322d4c8a337a9e2c", size = 353617 },
- { url = "https://files.pythonhosted.org/packages/f1/88/8e86a28a840b8dc30c880fdde127f9610c56e55796a2cc969949b4a60fe7/yarl-1.16.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:122d8e7986043d0549e9eb23c7fd23be078be4b70c9eb42a20052b3d3149c6f2", size = 359911 },
- { url = "https://files.pythonhosted.org/packages/ee/61/9d59f7096fd72d5f68168ed8134773982ee48a8cb4009ecb34344e064999/yarl-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0fd9c227990f609c165f56b46107d0bc34553fe0387818c42c02f77974402c36", size = 358847 },
- { url = "https://files.pythonhosted.org/packages/f7/25/c323097b066a2b5a554f77e27a35bc067aebfcd3a001a0a3a6bc14190460/yarl-1.16.0-cp313-cp313-win32.whl", hash = "sha256:595ca5e943baed31d56b33b34736461a371c6ea0038d3baec399949dd628560b", size = 308302 },
- { url = "https://files.pythonhosted.org/packages/52/76/ca2c3de3511a127fc4124723e7ccc641aef5e0ec56c66d25dbd11f19ab84/yarl-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:921b81b8d78f0e60242fb3db615ea3f368827a76af095d5a69f1c3366db3f596", size = 314035 },
{ url = "https://files.pythonhosted.org/packages/fb/f7/87a32867ddc1a9817018bfd6109ee57646a543acf0d272843d8393e575f9/yarl-1.16.0-py3-none-any.whl", hash = "sha256:e6980a558d8461230c457218bd6c92dfc1d10205548215c2c21d79dc8d0a96f3", size = 43746 },
]