Move grpc runtimes to ext, flatten application (#4553)
* Move grpc runtimes to ext, flatten application
* rename to grpc
* fmt
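The recurring pattern in the hunks below is a two-part rename; a minimal before/after sketch, with import paths taken verbatim from the diff:

```python
# Before this commit (paths as they appear on the removed lines):
# from autogen_core.application import SingleThreadedAgentRuntime
# from autogen_core.application import WorkerAgentRuntime, WorkerAgentRuntimeHost

# After: the single-threaded runtime is flattened into the core package...
from autogen_core import SingleThreadedAgentRuntime

# ...and the gRPC worker runtimes move to autogen_ext under a Grpc prefix.
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost
```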
@@ -25,8 +25,14 @@
"import asyncio\n",
"from dataclasses import dataclass\n",
"\n",
"from autogen_core import ClosureAgent, ClosureContext, DefaultSubscription, DefaultTopicId, MessageContext\n",
"from autogen_core.application import SingleThreadedAgentRuntime"
"from autogen_core import (\n",
" ClosureAgent,\n",
" ClosureContext,\n",
" DefaultSubscription,\n",
" DefaultTopicId,\n",
" MessageContext,\n",
" SingleThreadedAgentRuntime,\n",
")"
]
},
{
@@ -35,15 +35,14 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from dataclasses import dataclass\n",
"from typing import Any, Callable, List, Literal\n",
"\n",
"from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
"from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.tools import tool # pyright: ignore\n",
@@ -33,7 +33,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -41,8 +41,7 @@
"from dataclasses import dataclass\n",
"from typing import List, Optional\n",
"\n",
"from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
"from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
"from llama_index.core import Settings\n",
"from llama_index.core.agent import ReActAgent\n",
@@ -39,8 +39,15 @@
"source": [
"from dataclasses import dataclass\n",
"\n",
"from autogen_core import AgentId, DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import (\n",
" AgentId,\n",
" DefaultTopicId,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" default_subscription,\n",
" message_handler,\n",
")\n",
"from autogen_core.components.model_context import BufferedChatCompletionContext\n",
"from autogen_core.components.models import (\n",
" AssistantMessage,\n",
@@ -386,7 +386,7 @@
"metadata": {},
"outputs": [],
"source": [
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import SingleThreadedAgentRuntime\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
"await OpenAIAssistantAgent.register(\n",
@@ -22,8 +22,15 @@
"from dataclasses import dataclass\n",
"from typing import Any\n",
"\n",
"from autogen_core import AgentId, DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import (\n",
" AgentId,\n",
" DefaultTopicId,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" default_subscription,\n",
" message_handler,\n",
")\n",
"from autogen_core.base.intervention import DefaultInterventionHandler"
]
},
@@ -1,283 +1,290 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# User Approval for Tool Execution using Intervention Handler\n",
"\n",
"This cookbook shows how to intercept the tool execution using\n",
"an intervention handler, and prompt the user for permission to execute the tool."
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [],
"source": [
"from dataclasses import dataclass\n",
"from typing import Any, List\n",
"\n",
"from autogen_core import AgentId, AgentType, FunctionCall, MessageContext, RoutedAgent, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import (\n",
" AgentId,\n",
" AgentType,\n",
" FunctionCall,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" message_handler,\n",
")\n",
"from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage\n",
"from autogen_core.components.models import (\n",
" ChatCompletionClient,\n",
" LLMMessage,\n",
" SystemMessage,\n",
" UserMessage,\n",
")\n",
"from autogen_core.components.tools import PythonCodeExecutionTool, ToolSchema\n",
"from autogen_core.tool_agent import ToolAgent, ToolException, tool_agent_caller_loop\n",
"from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n",
"from autogen_ext.models import OpenAIChatCompletionClient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's define a simple message type that carries a string content."
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"@dataclass\n",
"class Message:\n",
" content: str"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's create a simple tool use agent that is capable of using tools through a\n",
"{py:class}`~autogen_core.components.tool_agent.ToolAgent`."
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [],
"source": [
"class ToolUseAgent(RoutedAgent):\n",
" \"\"\"An agent that uses tools to perform tasks. It executes the tools\n",
" by itself by sending the tool execution task to a ToolAgent.\"\"\"\n",
"\n",
" def __init__(\n",
" self,\n",
" description: str,\n",
" system_messages: List[SystemMessage],\n",
" model_client: ChatCompletionClient,\n",
" tool_schema: List[ToolSchema],\n",
" tool_agent_type: AgentType,\n",
" ) -> None:\n",
" super().__init__(description)\n",
" self._model_client = model_client\n",
" self._system_messages = system_messages\n",
" self._tool_schema = tool_schema\n",
" self._tool_agent_id = AgentId(type=tool_agent_type, key=self.id.key)\n",
"\n",
" @message_handler\n",
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
" \"\"\"Handle a user message, execute the model and tools, and returns the response.\"\"\"\n",
" session: List[LLMMessage] = [UserMessage(content=message.content, source=\"User\")]\n",
" # Use the tool agent to execute the tools, and get the output messages.\n",
" output_messages = await tool_agent_caller_loop(\n",
" self,\n",
" tool_agent_id=self._tool_agent_id,\n",
" model_client=self._model_client,\n",
" input_messages=session,\n",
" tool_schema=self._tool_schema,\n",
" cancellation_token=ctx.cancellation_token,\n",
" )\n",
" # Extract the final response from the output messages.\n",
" final_response = output_messages[-1].content\n",
" assert isinstance(final_response, str)\n",
" return Message(content=final_response)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The tool use agent sends tool call requests to the tool agent to execute tools,\n",
"so we can intercept the messages sent by the tool use agent to the tool agent\n",
"to prompt the user for permission to execute the tool.\n",
"\n",
"Let's create an intervention handler that intercepts the messages and prompts\n",
"user for before allowing the tool execution."
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [],
"source": [
"class ToolInterventionHandler(DefaultInterventionHandler):\n",
" async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]:\n",
" if isinstance(message, FunctionCall):\n",
" # Request user prompt for tool execution.\n",
" user_input = input(\n",
" f\"Function call: {message.name}\\nArguments: {message.arguments}\\nDo you want to execute the tool? (y/n): \"\n",
" )\n",
" if user_input.strip().lower() != \"y\":\n",
" raise ToolException(content=\"User denied tool execution.\", call_id=message.id)\n",
" return message"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, we can create a runtime with the intervention handler registered."
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"# Create the runtime with the intervention handler.\n",
"runtime = SingleThreadedAgentRuntime(intervention_handlers=[ToolInterventionHandler()])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this example, we will use a tool for Python code execution.\n",
"First, we create a Docker-based command-line code executor\n",
"using {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`,\n",
"and then use it to instantiate a built-in Python code execution tool\n",
"{py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`\n",
"that runs code in a Docker container."
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"# Create the docker executor for the Python code execution tool.\n",
"docker_executor = DockerCommandLineCodeExecutor()\n",
"\n",
"# Create the Python code execution tool.\n",
"python_tool = PythonCodeExecutionTool(executor=docker_executor)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Register the agents with tools and tool schema."
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AgentType(type='tool_enabled_agent')"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Register agents.\n",
"tool_agent_type = await ToolAgent.register(\n",
" runtime,\n",
" \"tool_executor_agent\",\n",
" lambda: ToolAgent(\n",
" description=\"Tool Executor Agent\",\n",
" tools=[python_tool],\n",
" ),\n",
")\n",
"await ToolUseAgent.register(\n",
" runtime,\n",
" \"tool_enabled_agent\",\n",
" lambda: ToolUseAgent(\n",
" description=\"Tool Use Agent\",\n",
" system_messages=[SystemMessage(content=\"You are a helpful AI Assistant. Use your tools to solve problems.\")],\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" tool_schema=[python_tool.schema],\n",
" tool_agent_type=tool_agent_type,\n",
" ),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the agents by starting the runtime and sending a message to the tool use agent.\n",
"The intervention handler will prompt you for permission to execute the tool."
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The output of the code is: **Hello, World!**\n"
]
}
],
"source": [
"# Start the runtime and the docker executor.\n",
"await docker_executor.start()\n",
"runtime.start()\n",
"\n",
"# Send a task to the tool user.\n",
"response = await runtime.send_message(\n",
" Message(\"Run the following Python code: print('Hello, World!')\"), AgentId(\"tool_enabled_agent\", \"default\")\n",
")\n",
"print(response.content)\n",
"\n",
"# Stop the runtime and the docker executor.\n",
"await runtime.stop()\n",
"await docker_executor.stop()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
File diff suppressed because it is too large
@@ -34,13 +34,13 @@
" DefaultTopicId,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" TopicId,\n",
" TypeSubscription,\n",
" default_subscription,\n",
" message_handler,\n",
" type_subscription,\n",
")\n",
"from autogen_core.application import SingleThreadedAgentRuntime"
")"
]
},
{
@@ -78,11 +78,11 @@
" Image,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" TopicId,\n",
" TypeSubscription,\n",
" message_handler,\n",
")\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core.components.models import (\n",
" AssistantMessage,\n",
" ChatCompletionClient,\n",
@@ -56,8 +56,15 @@
"import uuid\n",
"from typing import List, Tuple\n",
"\n",
"from autogen_core import FunctionCall, MessageContext, RoutedAgent, TopicId, TypeSubscription, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import (\n",
" FunctionCall,\n",
" MessageContext,\n",
" RoutedAgent,\n",
" SingleThreadedAgentRuntime,\n",
" TopicId,\n",
" TypeSubscription,\n",
" message_handler,\n",
")\n",
"from autogen_core.components.models import (\n",
" AssistantMessage,\n",
" ChatCompletionClient,\n",
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -441,8 +441,7 @@
}
],
"source": [
"from autogen_core import DefaultTopicId\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import DefaultTopicId, SingleThreadedAgentRuntime\n",
"from autogen_ext.models import OpenAIChatCompletionClient\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
@@ -18,7 +18,7 @@ The key can correspond to a user id, a session id, or could just be "default" if
## How do I increase the gRPC message size?

If you need to provide custom gRPC options, such as overriding the `max_send_message_length` and `max_receive_message_length`, you can define an `extra_grpc_config` variable and pass it to both the `WorkerAgentRuntimeHost` and `WorkerAgentRuntime` instances.
If you need to provide custom gRPC options, such as overriding the `max_send_message_length` and `max_receive_message_length`, you can define an `extra_grpc_config` variable and pass it to both the `GrpcWorkerAgentRuntimeHost` and `GrpcWorkerAgentRuntime` instances.

```python
# Define custom gRPC options
@@ -27,10 +27,10 @@ extra_grpc_config = [
    ("grpc.max_receive_message_length", new_max_size),
]

# Create instances of WorkerAgentRuntimeHost and WorkerAgentRuntime with the custom gRPC options
# Create instances of GrpcWorkerAgentRuntimeHost and GrpcWorkerAgentRuntime with the custom gRPC options

host = WorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config)
worker1 = WorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
host = GrpcWorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config)
worker1 = GrpcWorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
```

**Note**: When `WorkerAgentRuntime` creates a host connection for the clients, it uses `DEFAULT_GRPC_CONFIG` from the `HostConnection` class as the default set of values, which can be overridden if you pass parameters with the same name using `extra_grpc_config`.
**Note**: When `GrpcWorkerAgentRuntime` creates a host connection for the clients, it uses `DEFAULT_GRPC_CONFIG` from the `HostConnection` class as the default set of values, which can be overridden if you pass parameters with the same name using `extra_grpc_config`.
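Assembled from the fragments above, a complete post-rename version of this snippet might look like the following sketch; the host address and `new_max_size` value are illustrative placeholders.

```python
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost

# Assumed example limit; pick a size that fits your payloads.
new_max_size = 64 * 1024 * 1024  # 64 MiB

# Define custom gRPC options.
extra_grpc_config = [
    ("grpc.max_send_message_length", new_max_size),
    ("grpc.max_receive_message_length", new_max_size),
]

# Pass the same options to both the host and the worker runtimes.
host_address = "localhost:50051"  # placeholder address
host = GrpcWorkerAgentRuntimeHost(address=host_address, extra_grpc_config=extra_grpc_config)
worker1 = GrpcWorkerAgentRuntime(host_address=host_address, extra_grpc_config=extra_grpc_config)
```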
@@ -117,7 +117,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -132,7 +132,7 @@
}
],
"source": [
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import SingleThreadedAgentRuntime\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
"await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())"
@@ -28,18 +28,18 @@
"```\n",
"````\n",
"\n",
"We can start a host service using {py:class}`~autogen_core.application.WorkerAgentRuntimeHost`."
"We can start a host service using {py:class}`~autogen_core.application.GrpcWorkerAgentRuntimeHost`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from autogen_core.application import WorkerAgentRuntimeHost\n",
"from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n",
"\n",
"host = WorkerAgentRuntimeHost(address=\"localhost:50051\")\n",
"host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n",
"host.start() # Start a host service in the background."
]
},
@@ -94,7 +94,7 @@
"metadata": {},
"source": [
"Now we can set up the worker agent runtimes.\n",
"We use {py:class}`~autogen_core.application.WorkerAgentRuntime`.\n",
"We use {py:class}`~autogen_core.application.GrpcWorkerAgentRuntime`.\n",
"We set up two worker runtimes. Each runtime hosts one agent.\n",
"All agents publish and subscribe to the default topic, so they can see all\n",
"messages being published.\n",
@@ -104,7 +104,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -127,13 +127,13 @@
"source": [
"import asyncio\n",
"\n",
"from autogen_core.application import WorkerAgentRuntime\n",
"from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime\n",
"\n",
"worker1 = WorkerAgentRuntime(host_address=\"localhost:50051\")\n",
"worker1 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n",
"worker1.start()\n",
"await MyAgent.register(worker1, \"worker1\", lambda: MyAgent(\"worker1\"))\n",
"\n",
"worker2 = WorkerAgentRuntime(host_address=\"localhost:50051\")\n",
"worker2 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n",
"worker2.start()\n",
"await MyAgent.register(worker2, \"worker2\", lambda: MyAgent(\"worker2\"))\n",
"\n",
@@ -149,7 +149,7 @@
"source": [
"We can see each agent published exactly 5 messages.\n",
"\n",
"To stop the worker runtimes, we can call {py:meth}`~autogen_core.application.WorkerAgentRuntime.stop`."
"To stop the worker runtimes, we can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntime.stop`."
]
},
{
@@ -169,7 +169,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We can call {py:meth}`~autogen_core.application.WorkerAgentRuntimeHost.stop`\n",
"We can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntimeHost.stop`\n",
"to stop the host service."
]
},
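Read end to end, the hunks above amount to the following lifecycle under the new names; this is a sketch assuming the `stop` methods are awaited, as in the surrounding notebook cells.

```python
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime, GrpcWorkerAgentRuntimeHost

host = GrpcWorkerAgentRuntimeHost(address="localhost:50051")
host.start()  # Start a host service in the background.

worker1 = GrpcWorkerAgentRuntime(host_address="localhost:50051")
worker1.start()
# ... register agents and publish messages here ...

await worker1.stop()  # Stop the worker runtime.
await host.stop()  # Stop the host service.
```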
@@ -90,8 +90,7 @@
"metadata": {},
"outputs": [],
"source": [
"from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
"\n",
"\n",
"class MyAgent(RoutedAgent):\n",
@@ -298,8 +297,7 @@
"source": [
"from dataclasses import dataclass\n",
"\n",
"from autogen_core import MessageContext, RoutedAgent, message_handler\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
"\n",
"\n",
"@dataclass\n",
File diff suppressed because it is too large
@@ -48,7 +48,7 @@ Now you can send the trace_provider when creating your runtime:
# for single threaded runtime
single_threaded_runtime = SingleThreadedAgentRuntime(tracer_provider=tracer_provider)
# or for worker runtime
worker_runtime = WorkerAgentRuntime(tracer_provider=tracer_provider)
worker_runtime = GrpcWorkerAgentRuntime(tracer_provider=tracer_provider)
```

And that's it! Your application is now instrumented with OpenTelemetry. You can view your telemetry data in your telemetry backend.

@@ -65,5 +65,5 @@ tracer_provider = trace.get_tracer_provider()
# for single threaded runtime
single_threaded_runtime = SingleThreadedAgentRuntime(tracer_provider=tracer_provider)
# or for worker runtime
worker_runtime = WorkerAgentRuntime(tracer_provider=tracer_provider)
worker_runtime = GrpcWorkerAgentRuntime(tracer_provider=tracer_provider)
```
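For context, the `tracer_provider` referenced in both snippets can come from a standard OpenTelemetry setup; a minimal sketch, with exporter configuration omitted since it is backend-specific:

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

from autogen_core import SingleThreadedAgentRuntime

# Install a TracerProvider; add span processors/exporters for your backend here.
trace.set_tracer_provider(TracerProvider())
tracer_provider = trace.get_tracer_provider()

single_threaded_runtime = SingleThreadedAgentRuntime(tracer_provider=tracer_provider)
```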
@@ -1,315 +1,321 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tools\n",
"\n",
"Tools are code that can be executed by an agent to perform actions. A tool\n",
"can be a simple function such as a calculator, or an API call to a third-party service\n",
"such as stock price lookup or weather forecast.\n",
"In the context of AI agents, tools are designed to be executed by agents in\n",
"response to model-generated function calls.\n",
"\n",
"AutoGen provides the {py:mod}`autogen_core.components.tools` module with a suite of built-in\n",
"tools and utilities for creating and running custom tools."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Built-in Tools\n",
"\n",
"One of the built-in tools is the {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`,\n",
"which allows agents to execute Python code snippets.\n",
"\n",
"Here is how you create the tool and use it."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello, world!\n",
"\n"
]
}
],
"source": [
"from autogen_core import CancellationToken\n",
"from autogen_core.components.tools import PythonCodeExecutionTool\n",
"from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n",
"\n",
"# Create the tool.\n",
"code_executor = DockerCommandLineCodeExecutor()\n",
"await code_executor.start()\n",
"code_execution_tool = PythonCodeExecutionTool(code_executor)\n",
"cancellation_token = CancellationToken()\n",
"\n",
"# Use the tool directly without an agent.\n",
"code = \"print('Hello, world!')\"\n",
"result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n",
"print(code_execution_tool.return_value_as_string(result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`\n",
"class is a built-in code executor that runs Python code snippets in a subprocess\n",
"in the local command line environment.\n",
"The {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool` class wraps the code executor\n",
"and provides a simple interface to execute Python code snippets.\n",
"\n",
"Other built-in tools will be added in the future."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Custom Function Tools\n",
"\n",
"A tool can also be a simple Python function that performs a specific action.\n",
"To create a custom function tool, you just need to create a Python function\n",
"and use the {py:class}`~autogen_core.components.tools.FunctionTool` class to wrap it.\n",
"\n",
"The {py:class}`~autogen_core.components.tools.FunctionTool` class uses descriptions and type annotations\n",
"to inform the LLM when and how to use a given function. The description provides context\n",
"about the function’s purpose and intended use cases, while type annotations inform the LLM about\n",
"the expected parameters and return type.\n",
"\n",
"For example, a simple tool to obtain the stock price of a company might look like this:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"80.44429939059668\n"
]
}
],
"source": [
"import random\n",
"\n",
"from autogen_core import CancellationToken\n",
"from autogen_core.components.tools import FunctionTool\n",
"from typing_extensions import Annotated\n",
"\n",
"\n",
"async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n",
" # Returns a random stock price for demonstration purposes.\n",
" return random.uniform(10, 200)\n",
"\n",
"\n",
"# Create a function tool.\n",
"stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n",
"\n",
"# Run the tool.\n",
"cancellation_token = CancellationToken()\n",
"result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n",
"\n",
"# Print the result.\n",
"print(stock_price_tool.return_value_as_string(result))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tool-Equipped Agent\n",
"\n",
"To use tools with an agent, you can use {py:class}`~autogen_core.components.tool_agent.ToolAgent`,\n",
"by using it in a composition pattern.\n",
"Here is an example tool-use agent that uses {py:class}`~autogen_core.components.tool_agent.ToolAgent`\n",
"as an inner agent for executing tools."
]
},
{
"cell_type": "code",
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from autogen_core import AgentId, AgentInstantiationContext, MessageContext, RoutedAgent, message_handler\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.components.models import (\n",
|
||||
" ChatCompletionClient,\n",
|
||||
" LLMMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" UserMessage,\n",
|
||||
")\n",
|
||||
"from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n",
|
||||
"from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop\n",
|
||||
"from autogen_ext.models import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ToolUseAgent(RoutedAgent):\n",
|
||||
" def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent_type: str) -> None:\n",
|
||||
" super().__init__(\"An agent with tools\")\n",
|
||||
" self._system_messages: List[LLMMessage] = [SystemMessage(content=\"You are a helpful AI assistant.\")]\n",
|
||||
" self._model_client = model_client\n",
|
||||
" self._tool_schema = tool_schema\n",
|
||||
" self._tool_agent_id = AgentId(tool_agent_type, self.id.key)\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
" # Create a session of messages.\n",
|
||||
" session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n",
|
||||
" # Run the caller loop to handle tool calls.\n",
|
||||
" messages = await tool_agent_caller_loop(\n",
|
||||
" self,\n",
|
||||
" tool_agent_id=self._tool_agent_id,\n",
|
||||
" model_client=self._model_client,\n",
|
||||
" input_messages=session,\n",
|
||||
" tool_schema=self._tool_schema,\n",
|
||||
" cancellation_token=ctx.cancellation_token,\n",
|
||||
" )\n",
|
||||
" # Return the final response.\n",
|
||||
" assert isinstance(messages[-1].content, str)\n",
|
||||
" return Message(content=messages[-1].content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `ToolUseAgent` class uses a convenience function {py:meth}`~autogen_core.components.tool_agent.tool_agent_caller_loop`, \n",
"to handle the interaction between the model and the tool agent.\n",
"The core idea can be described using a simple control flow graph:\n",
"\n",
"\n",
"\n",
"The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n",
"and determines whether the model has generated a tool call.\n",
"If the model has generated tool calls, then the handler sends a function call\n",
"message to the {py:class}`~autogen_core.components.tool_agent.ToolAgent` agent\n",
"to execute the tools,\n",
"and then queries the model again with the results of the tool calls.\n",
"This process continues until the model stops generating tool calls,\n",
"at which point the final response is returned to the user.\n",
"\n",
"By having the tool execution logic in a separate agent,\n",
"we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n",
"can be observed externally and intercepted if necessary.\n",
"\n",
"To run the agent, we need to create a runtime and register the agent."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AgentType(type='tool_use_agent')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create a runtime.\n",
"runtime = SingleThreadedAgentRuntime()\n",
"# Create the tools.\n",
"tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n",
"# Register the agents.\n",
"await ToolAgent.register(runtime, \"tool_executor_agent\", lambda: ToolAgent(\"tool executor agent\", tools))\n",
"await ToolUseAgent.register(\n",
" runtime,\n",
" \"tool_use_agent\",\n",
" lambda: ToolUseAgent(\n",
" OpenAIChatCompletionClient(model=\"gpt-4o-mini\"), [tool.schema for tool in tools], \"tool_executor_agent\"\n",
" ),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This example uses the {py:class}`autogen_core.components.models.OpenAIChatCompletionClient`,\n",
|
||||
"for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n",
"Let's test the agent with a question about stock price."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The stock price of NVDA (NVIDIA Corporation) on June 1, 2024, was approximately $179.46.\n"
]
}
],
"source": [
"# Start processing messages.\n",
"runtime.start()\n",
"# Send a direct message to the tool agent.\n",
"tool_use_agent = AgentId(\"tool_use_agent\", \"default\")\n",
"response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n",
"print(response.content)\n",
"# Stop processing messages.\n",
"await runtime.stop()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "autogen_core",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -310,7 +310,7 @@
"source": [
"import tempfile\n",
"\n",
"from autogen_core.application import SingleThreadedAgentRuntime\n",
"from autogen_core import SingleThreadedAgentRuntime\n",
"from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n",
"from autogen_ext.models import OpenAIChatCompletionClient\n",
"\n",