From ed0890525d34106a855c9d40ba2a26791063f072 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 21 Aug 2024 13:59:59 -0700 Subject: [PATCH] Make RunContext internal (#386) * Make RunContext internal * Mypy --- .../docs/src/cookbook/langgraph-agent.ipynb | 598 +++--- .../docs/src/cookbook/llamaindex-agent.ipynb | 1062 +++++------ .../src/cookbook/openai-assistant-agent.ipynb | 1680 ++++++++--------- .../agent-and-agent-runtime.ipynb | 511 ++--- .../message-and-communication.ipynb | 16 +- .../src/getting-started/model-clients.ipynb | 4 +- .../multi-agent-design-patterns.ipynb | 4 +- python/docs/src/getting-started/tools.ipynb | 646 +++---- python/samples/byoa/langgraph_agent.py | 4 +- python/samples/byoa/llamaindex_agent.py | 4 +- python/samples/core/inner_outer_direct.py | 4 +- python/samples/core/one_agent_direct.py | 4 +- python/samples/core/two_agents_pub_sub.py | 4 +- python/samples/demos/assistant.py | 2 +- python/samples/demos/chat_room.py | 2 +- python/samples/demos/chess_game.py | 4 +- python/samples/demos/illustrator_critics.py | 2 +- python/samples/demos/software_consultancy.py | 2 +- python/samples/marketing-agents/test_usage.py | 4 +- python/samples/patterns/coder_executor.py | 4 +- python/samples/patterns/coder_reviewer.py | 4 +- python/samples/patterns/group_chat.py | 4 +- python/samples/patterns/mixture_of_agents.py | 4 +- python/samples/patterns/multi_agent_debate.py | 4 +- python/samples/tool-use/coding_direct.py | 4 +- .../tool-use/coding_direct_with_intercept.py | 4 +- python/samples/tool-use/coding_pub_sub.py | 4 +- python/samples/tool-use/custom_tool_direct.py | 4 +- .../_single_threaded_agent_runtime.py | 24 +- python/teams/team-one/examples/example.py | 4 +- .../teams/team-one/examples/example_coder.py | 4 +- .../team-one/examples/example_file_surfer.py | 4 +- .../team-one/examples/example_reflexagents.py | 4 +- .../team-one/examples/example_userproxy.py | 4 +- .../team-one/examples/example_websurfer.py | 4 +- .../headless_web_surfer/test_web_surfer.py | 12 +- python/tests/test_closure_agent.py | 4 +- python/tests/test_intervention.py | 20 +- python/tests/test_runtime.py | 8 +- python/tests/test_tool_agent.py | 4 +- 40 files changed, 2360 insertions(+), 2329 deletions(-) diff --git a/python/docs/src/cookbook/langgraph-agent.ipynb b/python/docs/src/cookbook/langgraph-agent.ipynb index 888a10494..9a605ff93 100644 --- a/python/docs/src/cookbook/langgraph-agent.ipynb +++ b/python/docs/src/cookbook/langgraph-agent.ipynb @@ -1,300 +1,300 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using LangGraph-Backed Agent\n", - "\n", - "This example demonstrates how to create an AI agent using LangGraph.\n", - "Based on the example in the LangGraph documentation:\n", - "https://langchain-ai.github.io/langgraph/.\n", - "\n", - "First install the dependencies:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "# pip install langgraph langchain-openai azure-identity" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's import the modules." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from dataclasses import dataclass\n", - "from typing import Any, Callable, List, Literal\n", - "\n", - "from agnext.application import SingleThreadedAgentRuntime\n", - "from agnext.components import TypeRoutedAgent, message_handler\n", - "from agnext.core import AgentId, MessageContext\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "from langchain_core.messages import HumanMessage, SystemMessage\n", - "from langchain_core.tools import tool # pyright: ignore\n", - "from langchain_openai import AzureChatOpenAI, ChatOpenAI\n", - "from langgraph.graph import END, MessagesState, StateGraph\n", - "from langgraph.prebuilt import ToolNode" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define our message type that will be used to communicate with the agent." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class Message:\n", - " content: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define the tools the agent will use." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "@tool # pyright: ignore\n", - "def get_weather(location: str) -> str:\n", - " \"\"\"Call to surf the web.\"\"\"\n", - " # This is a placeholder, but don't tell the LLM that...\n", - " if \"sf\" in location.lower() or \"san francisco\" in location.lower():\n", - " return \"It's 60 degrees and foggy.\"\n", - " return \"It's 90 degrees and sunny.\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define the agent using LangGraph's API." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "class LangGraphToolUseAgent(TypeRoutedAgent):\n", - " def __init__(self, description: str, model: ChatOpenAI, tools: List[Callable[..., Any]]) -> None: # pyright: ignore\n", - " super().__init__(description)\n", - " self._model = model.bind_tools(tools) # pyright: ignore\n", - "\n", - " # Define the function that determines whether to continue or not\n", - " def should_continue(state: MessagesState) -> Literal[\"tools\", END]: # type: ignore\n", - " messages = state[\"messages\"]\n", - " last_message = messages[-1]\n", - " # If the LLM makes a tool call, then we route to the \"tools\" node\n", - " if last_message.tool_calls: # type: ignore\n", - " return \"tools\"\n", - " # Otherwise, we stop (reply to the user)\n", - " return END\n", - "\n", - " # Define the function that calls the model\n", - " async def call_model(state: MessagesState): # type: ignore\n", - " messages = state[\"messages\"]\n", - " response = await self._model.ainvoke(messages)\n", - " # We return a list, because this will get added to the existing list\n", - " return {\"messages\": [response]}\n", - "\n", - " tool_node = ToolNode(tools) # pyright: ignore\n", - "\n", - " # Define a new graph\n", - " self._workflow = StateGraph(MessagesState)\n", - "\n", - " # Define the two nodes we will cycle between\n", - " self._workflow.add_node(\"agent\", call_model) # pyright: ignore\n", - " self._workflow.add_node(\"tools\", tool_node) # pyright: ignore\n", - "\n", - " # Set the entrypoint as `agent`\n", - " # This means that this node is the first one called\n", - " self._workflow.set_entry_point(\"agent\")\n", - "\n", - " # We now add a conditional edge\n", - " self._workflow.add_conditional_edges(\n", - " # First, we define the start node. We use `agent`.\n", - " # This means these are the edges taken after the `agent` node is called.\n", - " \"agent\",\n", - " # Next, we pass in the function that will determine which node is called next.\n", - " should_continue, # type: ignore\n", - " )\n", - "\n", - " # We now add a normal edge from `tools` to `agent`.\n", - " # This means that after `tools` is called, `agent` node is called next.\n", - " self._workflow.add_edge(\"tools\", \"agent\")\n", - "\n", - " # Finally, we compile it!\n", - " # This compiles it into a LangChain Runnable,\n", - " # meaning you can use it as you would any other runnable.\n", - " # Note that we're (optionally) passing the memory when compiling the graph\n", - " self._app = self._workflow.compile()\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # Use the Runnable\n", - " final_state = await self._app.ainvoke(\n", - " {\n", - " \"messages\": [\n", - " SystemMessage(\n", - " content=\"You are a helpful AI assistant. You can use tools to help answer questions.\"\n", - " ),\n", - " HumanMessage(content=message.content),\n", - " ]\n", - " },\n", - " config={\"configurable\": {\"thread_id\": 42}},\n", - " )\n", - " response = Message(content=final_state[\"messages\"][-1].content)\n", - " return response" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's test the agent. First we need to create an agent runtime and\n", - "register the agent, by providing the agent's name and a factory function\n", - "that will create the agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await runtime.register(\n", - " \"langgraph_tool_use_agent\",\n", - " lambda: LangGraphToolUseAgent(\n", - " \"Tool use agent\",\n", - " ChatOpenAI(\n", - " model=\"gpt-4o\",\n", - " # api_key=os.getenv(\"OPENAI_API_KEY\"),\n", - " ),\n", - " # AzureChatOpenAI(\n", - " # azure_deployment=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n", - " # azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", - " # api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", - " # # Using Azure Active Directory authentication.\n", - " # azure_ad_token_provider=get_bearer_token_provider(DefaultAzureCredential()),\n", - " # # Using API key.\n", - " # # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", - " # ),\n", - " [get_weather],\n", - " ),\n", - ")\n", - "agent = AgentId(\"langgraph_tool_use_agent\", key=\"default\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Start the agent runtime." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "run_context = runtime.start()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Send a direct message to the agent, and print the response." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The weather in San Francisco is currently 60 degrees and foggy.\n" - ] - } - ], - "source": [ - "response = await runtime.send_message(Message(\"What's the weather in SF?\"), agent)\n", - "print(response.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Stop the agent runtime." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "await run_context.stop()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using LangGraph-Backed Agent\n", + "\n", + "This example demonstrates how to create an AI agent using LangGraph.\n", + "Based on the example in the LangGraph documentation:\n", + "https://langchain-ai.github.io/langgraph/.\n", + "\n", + "First install the dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "# pip install langgraph langchain-openai azure-identity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's import the modules." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dataclasses import dataclass\n", + "from typing import Any, Callable, List, Literal\n", + "\n", + "from agnext.application import SingleThreadedAgentRuntime\n", + "from agnext.components import TypeRoutedAgent, message_handler\n", + "from agnext.core import AgentId, MessageContext\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "from langchain_core.tools import tool # pyright: ignore\n", + "from langchain_openai import AzureChatOpenAI, ChatOpenAI\n", + "from langgraph.graph import END, MessagesState, StateGraph\n", + "from langgraph.prebuilt import ToolNode" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define our message type that will be used to communicate with the agent." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Message:\n", + " content: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define the tools the agent will use." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "@tool # pyright: ignore\n", + "def get_weather(location: str) -> str:\n", + " \"\"\"Call to surf the web.\"\"\"\n", + " # This is a placeholder, but don't tell the LLM that...\n", + " if \"sf\" in location.lower() or \"san francisco\" in location.lower():\n", + " return \"It's 60 degrees and foggy.\"\n", + " return \"It's 90 degrees and sunny.\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define the agent using LangGraph's API." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "class LangGraphToolUseAgent(TypeRoutedAgent):\n", + " def __init__(self, description: str, model: ChatOpenAI, tools: List[Callable[..., Any]]) -> None: # pyright: ignore\n", + " super().__init__(description)\n", + " self._model = model.bind_tools(tools) # pyright: ignore\n", + "\n", + " # Define the function that determines whether to continue or not\n", + " def should_continue(state: MessagesState) -> Literal[\"tools\", END]: # type: ignore\n", + " messages = state[\"messages\"]\n", + " last_message = messages[-1]\n", + " # If the LLM makes a tool call, then we route to the \"tools\" node\n", + " if last_message.tool_calls: # type: ignore\n", + " return \"tools\"\n", + " # Otherwise, we stop (reply to the user)\n", + " return END\n", + "\n", + " # Define the function that calls the model\n", + " async def call_model(state: MessagesState): # type: ignore\n", + " messages = state[\"messages\"]\n", + " response = await self._model.ainvoke(messages)\n", + " # We return a list, because this will get added to the existing list\n", + " return {\"messages\": [response]}\n", + "\n", + " tool_node = ToolNode(tools) # pyright: ignore\n", + "\n", + " # Define a new graph\n", + " self._workflow = StateGraph(MessagesState)\n", + "\n", + " # Define the two nodes we will cycle between\n", + " self._workflow.add_node(\"agent\", call_model) # pyright: ignore\n", + " self._workflow.add_node(\"tools\", tool_node) # pyright: ignore\n", + "\n", + " # Set the entrypoint as `agent`\n", + " # This means that this node is the first one called\n", + " self._workflow.set_entry_point(\"agent\")\n", + "\n", + " # We now add a conditional edge\n", + " self._workflow.add_conditional_edges(\n", + " # First, we define the start node. We use `agent`.\n", + " # This means these are the edges taken after the `agent` node is called.\n", + " \"agent\",\n", + " # Next, we pass in the function that will determine which node is called next.\n", + " should_continue, # type: ignore\n", + " )\n", + "\n", + " # We now add a normal edge from `tools` to `agent`.\n", + " # This means that after `tools` is called, `agent` node is called next.\n", + " self._workflow.add_edge(\"tools\", \"agent\")\n", + "\n", + " # Finally, we compile it!\n", + " # This compiles it into a LangChain Runnable,\n", + " # meaning you can use it as you would any other runnable.\n", + " # Note that we're (optionally) passing the memory when compiling the graph\n", + " self._app = self._workflow.compile()\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # Use the Runnable\n", + " final_state = await self._app.ainvoke(\n", + " {\n", + " \"messages\": [\n", + " SystemMessage(\n", + " content=\"You are a helpful AI assistant. You can use tools to help answer questions.\"\n", + " ),\n", + " HumanMessage(content=message.content),\n", + " ]\n", + " },\n", + " config={\"configurable\": {\"thread_id\": 42}},\n", + " )\n", + " response = Message(content=final_state[\"messages\"][-1].content)\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's test the agent. First we need to create an agent runtime and\n", + "register the agent, by providing the agent's name and a factory function\n", + "that will create the agent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await runtime.register(\n", + " \"langgraph_tool_use_agent\",\n", + " lambda: LangGraphToolUseAgent(\n", + " \"Tool use agent\",\n", + " ChatOpenAI(\n", + " model=\"gpt-4o\",\n", + " # api_key=os.getenv(\"OPENAI_API_KEY\"),\n", + " ),\n", + " # AzureChatOpenAI(\n", + " # azure_deployment=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n", + " # azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", + " # api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", + " # # Using Azure Active Directory authentication.\n", + " # azure_ad_token_provider=get_bearer_token_provider(DefaultAzureCredential()),\n", + " # # Using API key.\n", + " # # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", + " # ),\n", + " [get_weather],\n", + " ),\n", + ")\n", + "agent = AgentId(\"langgraph_tool_use_agent\", key=\"default\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Start the agent runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Send a direct message to the agent, and print the response." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weather in San Francisco is currently 60 degrees and foggy.\n" + ] + } + ], + "source": [ + "response = await runtime.send_message(Message(\"What's the weather in SF?\"), agent)\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Stop the agent runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "await runtime.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/python/docs/src/cookbook/llamaindex-agent.ipynb b/python/docs/src/cookbook/llamaindex-agent.ipynb index ae8c18e67..65a5b8650 100644 --- a/python/docs/src/cookbook/llamaindex-agent.ipynb +++ b/python/docs/src/cookbook/llamaindex-agent.ipynb @@ -1,533 +1,533 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using LlamaIndex-Backed Agent\n", - "\n", - "This example demonstrates how to create an AI agent using LlamaIndex.\n", - "\n", - "First install the dependencies:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "# pip install \"llama-index-readers-web\" \"llama-index-readers-wikipedia\" \"llama-index-tools-wikipedia\" \"llama-index-embeddings-azure-openai\" \"llama-index-llms-azure-openai\" \"llama-index\" \"azure-identity\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's import the modules." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "import os\n", - "from dataclasses import dataclass\n", - "from typing import List, Optional\n", - "\n", - "from agnext.application import SingleThreadedAgentRuntime\n", - "from agnext.components import TypeRoutedAgent, message_handler\n", - "from agnext.core import AgentId, MessageContext\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "from llama_index.core import Settings\n", - "from llama_index.core.agent import ReActAgent\n", - "from llama_index.core.agent.runner.base import AgentRunner\n", - "from llama_index.core.base.llms.types import (\n", - " ChatMessage,\n", - " MessageRole,\n", - ")\n", - "from llama_index.core.chat_engine.types import AgentChatResponse\n", - "from llama_index.core.memory import ChatSummaryMemoryBuffer\n", - "from llama_index.core.memory.types import BaseMemory\n", - "from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n", - "from llama_index.embeddings.openai import OpenAIEmbedding\n", - "from llama_index.llms.azure_openai import AzureOpenAI\n", - "from llama_index.llms.openai import OpenAI\n", - "from llama_index.tools.wikipedia import WikipediaToolSpec" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define our message type that will be used to communicate with the agent." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class Resource:\n", - " content: str\n", - " node_id: str\n", - " score: Optional[float] = None\n", - "\n", - "\n", - "@dataclass\n", - "class Message:\n", - " content: str\n", - " sources: Optional[List[Resource]] = None" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define the agent using LLamaIndex's API." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "class LlamaIndexAgent(TypeRoutedAgent):\n", - " def __init__(self, description: str, llama_index_agent: AgentRunner, memory: BaseMemory | None = None) -> None:\n", - " super().__init__(description)\n", - "\n", - " self._llama_index_agent = llama_index_agent\n", - " self._memory = memory\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # retriever history messages from memory!\n", - " history_messages: List[ChatMessage] = []\n", - "\n", - " response: AgentChatResponse # pyright: ignore\n", - " if self._memory is not None:\n", - " history_messages = self._memory.get(input=message.content)\n", - "\n", - " response = await self._llama_index_agent.achat(message=message.content, history_messages=history_messages) # pyright: ignore\n", - " else:\n", - " response = await self._llama_index_agent.achat(message=message.content) # pyright: ignore\n", - "\n", - " if isinstance(response, AgentChatResponse):\n", - " if self._memory is not None:\n", - " self._memory.put(ChatMessage(role=MessageRole.USER, content=message.content))\n", - " self._memory.put(ChatMessage(role=MessageRole.ASSISTANT, content=response.response))\n", - "\n", - " assert isinstance(response.response, str)\n", - "\n", - " resources: List[Resource] = [\n", - " Resource(content=source_node.get_text(), score=source_node.score, node_id=source_node.id_)\n", - " for source_node in response.source_nodes\n", - " ]\n", - "\n", - " tools: List[Resource] = [\n", - " Resource(content=source.content, node_id=source.tool_name) for source in response.sources\n", - " ]\n", - "\n", - " resources.extend(tools)\n", - " return Message(content=response.response, sources=resources)\n", - " else:\n", - " return Message(content=\"I'm sorry, I don't have an answer for you.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Setting up LlamaIndex." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "# llm = AzureOpenAI(\n", - "# deployment_name=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n", - "# temperature=0.0,\n", - "# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n", - "# # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", - "# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", - "# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", - "# )\n", - "llm = OpenAI(\n", - " model=\"gpt-4o\",\n", - " temperature=0.0,\n", - " api_key=os.getenv(\"OPENAI_API_KEY\"),\n", - ")\n", - "\n", - "# embed_model = AzureOpenAIEmbedding(\n", - "# deployment_name=os.getenv(\"AZURE_OPENAI_EMBEDDING_MODEL\"),\n", - "# temperature=0.0,\n", - "# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n", - "# api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", - "# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", - "# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", - "# )\n", - "embed_model = OpenAIEmbedding(\n", - " model=\"text-embedding-ada-002\",\n", - " api_key=os.getenv(\"OPENAI_API_KEY\"),\n", - ")\n", - "\n", - "Settings.llm = llm\n", - "Settings.embed_model = embed_model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Create the tools." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "wiki_spec = WikipediaToolSpec()\n", - "wikipedia_tool = wiki_spec.to_tool_list()[1]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's test the agent. First we need to create an agent runtime and\n", - "register the agent, by providing the agent's name and a factory function\n", - "that will create the agent." - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await runtime.register(\n", - " \"chat_agent\",\n", - " lambda: LlamaIndexAgent(\n", - " description=\"Llama Index Agent\",\n", - " llama_index_agent=ReActAgent.from_tools(\n", - " tools=[wikipedia_tool],\n", - " llm=llm,\n", - " max_iterations=8,\n", - " memory=ChatSummaryMemoryBuffer(llm=llm, token_limit=16000),\n", - " verbose=True,\n", - " ),\n", - " ),\n", - ")\n", - "agent = AgentId(\"chat_agent\", \"default\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Start the agent runtime." - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [], - "source": [ - "run_context = runtime.start()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Send a direct message to the agent, and print the response." - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "> Running step 3cbf60cd-9827-4dfe-a3a9-eaff2bed9b75. Step input: What are the best movies from studio Ghibli?\n", - "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", - "Action: search_data\n", - "Action Input: {'query': 'best movies from Studio Ghibli'}\n", - "\u001b[0m\u001b[1;3;34mObservation: This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n", - "\n", - "\n", - "== Works ==\n", - "\n", - "\n", - "=== Feature films ===\n", - "\n", - "\n", - "=== Television ===\n", - "\n", - "\n", - "=== Short films ===\n", - "\n", - "These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n", - "\n", - "\n", - "=== Commercials ===\n", - "\n", - "\n", - "=== Video games ===\n", - "\n", - "\n", - "=== Stage productions ===\n", - "Princess Mononoke (2013)\n", - "NausicaƤ of the Valley of the Wind (2019)\n", - "Spirited Away (2022)\n", - "My Neighbour Totoro (2022)\n", - "\n", - "\n", - "=== Other works ===\n", - "The works listed here consist of works that do not fall into the above categories. 
All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n", - "\n", - "\n", - "=== Exhibitions ===\n", - "A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n", - "\n", - "\n", - "== Related works ==\n", - "These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. The list also includes works created in cooperation with Studio Ghibli.\n", - "\n", - "\n", - "=== Pre-Ghibli ===\n", - "\n", - "\n", - "=== Cooperative works ===\n", - "\n", - "\n", - "=== Distributive works ===\n", - "These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n", - "\n", - "\n", - "=== Contributive works ===\n", - "Studio Ghibli has made contributions to the following anime series and movies:\n", - "\n", - "\n", - "== Significant achievements ==\n", - "The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n", - "The highest-grossing film of 1991 in Japan: Only Yesterday\n", - "The highest-grossing film of 1992 in Japan: Porco Rosso\n", - "The highest-grossing film of 1994 in Japan: Pom Poko\n", - "The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n", - "The highest-grossing film of 2002 in Japan: Spirited Away\n", - "The highest-grossing film of 2008 in Japan: Ponyo\n", - "The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n", - "The highest-grossing film of 2013 in Japan: The Wind Rises\n", - "The first Studio Ghibli film to use computer graphics: Pom Poko\n", - "The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n", - "The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n", - "The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n", - "The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. 
They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n", - "\n", - "\n", - "== Notes ==\n", - "\n", - "\n", - "== References ==\n", - "\u001b[0m> Running step 561e3dd3-d98b-4d37-b612-c99387182ee0. Step input: None\n", - "\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer.\n", - "Answer: Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n", - "\n", - "1. **Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n", - "2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n", - "3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n", - "4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n", - "5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n", - "6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n", - "7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n", - "8. **The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n", - "9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n", - "10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n", - "\n", - "These films are celebrated for their storytelling, animation quality, and emotional depth.\n", - "\u001b[0m" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using LlamaIndex-Backed Agent\n", + "\n", + "This example demonstrates how to create an AI agent using LlamaIndex.\n", + "\n", + "First install the dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "# pip install \"llama-index-readers-web\" \"llama-index-readers-wikipedia\" \"llama-index-tools-wikipedia\" \"llama-index-embeddings-azure-openai\" \"llama-index-llms-azure-openai\" \"llama-index\" \"azure-identity\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's import the modules." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "import os\n",
+ "from dataclasses import dataclass\n",
+ "from typing import List, Optional\n",
+ "\n",
+ "from agnext.application import SingleThreadedAgentRuntime\n",
+ "from agnext.components import TypeRoutedAgent, message_handler\n",
+ "from agnext.core import AgentId, MessageContext\n",
+ "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
+ "from llama_index.core import Settings\n",
+ "from llama_index.core.agent import ReActAgent\n",
+ "from llama_index.core.agent.runner.base import AgentRunner\n",
+ "from llama_index.core.base.llms.types import (\n",
+ "    ChatMessage,\n",
+ "    MessageRole,\n",
+ ")\n",
+ "from llama_index.core.chat_engine.types import AgentChatResponse\n",
+ "from llama_index.core.memory import ChatSummaryMemoryBuffer\n",
+ "from llama_index.core.memory.types import BaseMemory\n",
+ "from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
+ "from llama_index.embeddings.openai import OpenAIEmbedding\n",
+ "from llama_index.llms.azure_openai import AzureOpenAI\n",
+ "from llama_index.llms.openai import OpenAI\n",
+ "from llama_index.tools.wikipedia import WikipediaToolSpec"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Define our message type that will be used to communicate with the agent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@dataclass\n",
+ "class Resource:\n",
+ "    content: str\n",
+ "    node_id: str\n",
+ "    score: Optional[float] = None\n",
+ "\n",
+ "\n",
+ "@dataclass\n",
+ "class Message:\n",
+ "    content: str\n",
+ "    sources: Optional[List[Resource]] = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Define the agent using LlamaIndex's API."
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "class LlamaIndexAgent(TypeRoutedAgent):\n", + " def __init__(self, description: str, llama_index_agent: AgentRunner, memory: BaseMemory | None = None) -> None:\n", + " super().__init__(description)\n", + "\n", + " self._llama_index_agent = llama_index_agent\n", + " self._memory = memory\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # retriever history messages from memory!\n", + " history_messages: List[ChatMessage] = []\n", + "\n", + " response: AgentChatResponse # pyright: ignore\n", + " if self._memory is not None:\n", + " history_messages = self._memory.get(input=message.content)\n", + "\n", + " response = await self._llama_index_agent.achat(message=message.content, history_messages=history_messages) # pyright: ignore\n", + " else:\n", + " response = await self._llama_index_agent.achat(message=message.content) # pyright: ignore\n", + "\n", + " if isinstance(response, AgentChatResponse):\n", + " if self._memory is not None:\n", + " self._memory.put(ChatMessage(role=MessageRole.USER, content=message.content))\n", + " self._memory.put(ChatMessage(role=MessageRole.ASSISTANT, content=response.response))\n", + "\n", + " assert isinstance(response.response, str)\n", + "\n", + " resources: List[Resource] = [\n", + " Resource(content=source_node.get_text(), score=source_node.score, node_id=source_node.id_)\n", + " for source_node in response.source_nodes\n", + " ]\n", + "\n", + " tools: List[Resource] = [\n", + " Resource(content=source.content, node_id=source.tool_name) for source in response.sources\n", + " ]\n", + "\n", + " resources.extend(tools)\n", + " return Message(content=response.response, sources=resources)\n", + " else:\n", + " return Message(content=\"I'm sorry, I don't have an answer for you.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Setting up LlamaIndex." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# llm = AzureOpenAI(\n", + "# deployment_name=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n", + "# temperature=0.0,\n", + "# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n", + "# # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", + "# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", + "# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", + "# )\n", + "llm = OpenAI(\n", + " model=\"gpt-4o\",\n", + " temperature=0.0,\n", + " api_key=os.getenv(\"OPENAI_API_KEY\"),\n", + ")\n", + "\n", + "# embed_model = AzureOpenAIEmbedding(\n", + "# deployment_name=os.getenv(\"AZURE_OPENAI_EMBEDDING_MODEL\"),\n", + "# temperature=0.0,\n", + "# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n", + "# api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n", + "# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", + "# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n", + "# )\n", + "embed_model = OpenAIEmbedding(\n", + " model=\"text-embedding-ada-002\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\"),\n", + ")\n", + "\n", + "Settings.llm = llm\n", + "Settings.embed_model = embed_model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create the tools." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "wiki_spec = WikipediaToolSpec()\n", + "wikipedia_tool = wiki_spec.to_tool_list()[1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's test the agent. First we need to create an agent runtime and\n", + "register the agent, by providing the agent's name and a factory function\n", + "that will create the agent." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await runtime.register(\n", + " \"chat_agent\",\n", + " lambda: LlamaIndexAgent(\n", + " description=\"Llama Index Agent\",\n", + " llama_index_agent=ReActAgent.from_tools(\n", + " tools=[wikipedia_tool],\n", + " llm=llm,\n", + " max_iterations=8,\n", + " memory=ChatSummaryMemoryBuffer(llm=llm, token_limit=16000),\n", + " verbose=True,\n", + " ),\n", + " ),\n", + ")\n", + "agent = AgentId(\"chat_agent\", \"default\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Start the agent runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Send a direct message to the agent, and print the response." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> Running step 3cbf60cd-9827-4dfe-a3a9-eaff2bed9b75. Step input: What are the best movies from studio Ghibli?\n", + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: search_data\n", + "Action Input: {'query': 'best movies from Studio Ghibli'}\n", + "\u001b[0m\u001b[1;3;34mObservation: This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n", + "\n", + "\n", + "== Works ==\n", + "\n", + "\n", + "=== Feature films ===\n", + "\n", + "\n", + "=== Television ===\n", + "\n", + "\n", + "=== Short films ===\n", + "\n", + "These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n", + "\n", + "\n", + "=== Commercials ===\n", + "\n", + "\n", + "=== Video games ===\n", + "\n", + "\n", + "=== Stage productions ===\n", + "Princess Mononoke (2013)\n", + "NausicaƤ of the Valley of the Wind (2019)\n", + "Spirited Away (2022)\n", + "My Neighbour Totoro (2022)\n", + "\n", + "\n", + "=== Other works ===\n", + "The works listed here consist of works that do not fall into the above categories. 
All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n", + "\n", + "\n", + "=== Exhibitions ===\n", + "A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n", + "\n", + "\n", + "== Related works ==\n", + "These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. The list also includes works created in cooperation with Studio Ghibli.\n", + "\n", + "\n", + "=== Pre-Ghibli ===\n", + "\n", + "\n", + "=== Cooperative works ===\n", + "\n", + "\n", + "=== Distributive works ===\n", + "These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n", + "\n", + "\n", + "=== Contributive works ===\n", + "Studio Ghibli has made contributions to the following anime series and movies:\n", + "\n", + "\n", + "== Significant achievements ==\n", + "The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n", + "The highest-grossing film of 1991 in Japan: Only Yesterday\n", + "The highest-grossing film of 1992 in Japan: Porco Rosso\n", + "The highest-grossing film of 1994 in Japan: Pom Poko\n", + "The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n", + "The highest-grossing film of 2002 in Japan: Spirited Away\n", + "The highest-grossing film of 2008 in Japan: Ponyo\n", + "The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n", + "The highest-grossing film of 2013 in Japan: The Wind Rises\n", + "The first Studio Ghibli film to use computer graphics: Pom Poko\n", + "The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n", + "The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n", + "The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n", + "The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. 
They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n", + "\n", + "\n", + "== Notes ==\n", + "\n", + "\n", + "== References ==\n", + "\u001b[0m> Running step 561e3dd3-d98b-4d37-b612-c99387182ee0. Step input: None\n", + "\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer.\n", + "Answer: Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n", + "\n", + "1. **Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n", + "2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n", + "3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n", + "4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n", + "5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n", + "6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n", + "7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n", + "8. **The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n", + "9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n", + "10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n", + "\n", + "These films are celebrated for their storytelling, animation quality, and emotional depth.\n", + "\u001b[0m" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n", + "\n", + "1. **Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n", + "2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n", + "3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n", + "4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n", + "5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n", + "6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n", + "7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n", + "8. 
**The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n", + "9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n", + "10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n", + "\n", + "These films are celebrated for their storytelling, animation quality, and emotional depth.\n" + ] + } + ], + "source": [ + "message = Message(content=\"What are the best movies from studio Ghibli?\")\n", + "response = await runtime.send_message(message, agent)\n", + "assert isinstance(response, Message)\n", + "print(response.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n", + "\n", + "\n", + "== Works ==\n", + "\n", + "\n", + "=== Feature films ===\n", + "\n", + "\n", + "=== Television ===\n", + "\n", + "\n", + "=== Short films ===\n", + "\n", + "These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n", + "\n", + "\n", + "=== Commercials ===\n", + "\n", + "\n", + "=== Video games ===\n", + "\n", + "\n", + "=== Stage productions ===\n", + "Princess Mononoke (2013)\n", + "NausicaƤ of the Valley of the Wind (2019)\n", + "Spirited Away (2022)\n", + "My Neighbour Totoro (2022)\n", + "\n", + "\n", + "=== Other works ===\n", + "The works listed here consist of works that do not fall into the above categories. All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n", + "\n", + "\n", + "=== Exhibitions ===\n", + "A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n", + "\n", + "\n", + "== Related works ==\n", + "These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. 
The list also includes works created in cooperation with Studio Ghibli.\n", + "\n", + "\n", + "=== Pre-Ghibli ===\n", + "\n", + "\n", + "=== Cooperative works ===\n", + "\n", + "\n", + "=== Distributive works ===\n", + "These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n", + "\n", + "\n", + "=== Contributive works ===\n", + "Studio Ghibli has made contributions to the following anime series and movies:\n", + "\n", + "\n", + "== Significant achievements ==\n", + "The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n", + "The highest-grossing film of 1991 in Japan: Only Yesterday\n", + "The highest-grossing film of 1992 in Japan: Porco Rosso\n", + "The highest-grossing film of 1994 in Japan: Pom Poko\n", + "The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n", + "The highest-grossing film of 2002 in Japan: Spirited Away\n", + "The highest-grossing film of 2008 in Japan: Ponyo\n", + "The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n", + "The highest-grossing film of 2013 in Japan: The Wind Rises\n", + "The first Studio Ghibli film to use computer graphics: Pom Poko\n", + "The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n", + "The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n", + "The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n", + "The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n", + "\n", + "\n", + "== Notes ==\n", + "\n", + "\n", + "== References ==\n" + ] + } + ], + "source": [ + "if response.sources is not None:\n", + " for source in response.sources:\n", + " print(source.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Stop the agent runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "await runtime.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n", - "\n", - "1. 
**Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n", - "2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n", - "3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n", - "4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n", - "5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n", - "6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n", - "7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n", - "8. **The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n", - "9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n", - "10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n", - "\n", - "These films are celebrated for their storytelling, animation quality, and emotional depth.\n" - ] - } - ], - "source": [ - "message = Message(content=\"What are the best movies from studio Ghibli?\")\n", - "response = await runtime.send_message(message, agent)\n", - "assert isinstance(response, Message)\n", - "print(response.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n", - "\n", - "\n", - "== Works ==\n", - "\n", - "\n", - "=== Feature films ===\n", - "\n", - "\n", - "=== Television ===\n", - "\n", - "\n", - "=== Short films ===\n", - "\n", - "These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n", - "\n", - "\n", - "=== Commercials ===\n", - "\n", - "\n", - "=== Video games ===\n", - "\n", - "\n", - "=== Stage productions ===\n", - "Princess Mononoke (2013)\n", - "NausicaƤ of the Valley of the Wind (2019)\n", - "Spirited Away (2022)\n", - "My Neighbour Totoro (2022)\n", - "\n", - "\n", - "=== Other works ===\n", - "The works listed here consist of works that do not fall into the above categories. 
All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n", - "\n", - "\n", - "=== Exhibitions ===\n", - "A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n", - "\n", - "\n", - "== Related works ==\n", - "These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. The list also includes works created in cooperation with Studio Ghibli.\n", - "\n", - "\n", - "=== Pre-Ghibli ===\n", - "\n", - "\n", - "=== Cooperative works ===\n", - "\n", - "\n", - "=== Distributive works ===\n", - "These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n", - "\n", - "\n", - "=== Contributive works ===\n", - "Studio Ghibli has made contributions to the following anime series and movies:\n", - "\n", - "\n", - "== Significant achievements ==\n", - "The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n", - "The highest-grossing film of 1991 in Japan: Only Yesterday\n", - "The highest-grossing film of 1992 in Japan: Porco Rosso\n", - "The highest-grossing film of 1994 in Japan: Pom Poko\n", - "The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n", - "The highest-grossing film of 2002 in Japan: Spirited Away\n", - "The highest-grossing film of 2008 in Japan: Ponyo\n", - "The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n", - "The highest-grossing film of 2013 in Japan: The Wind Rises\n", - "The first Studio Ghibli film to use computer graphics: Pom Poko\n", - "The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n", - "The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n", - "The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n", - "The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. 
They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n", - "\n", - "\n", - "== Notes ==\n", - "\n", - "\n", - "== References ==\n" - ] - } - ], - "source": [ - "if response.sources is not None:\n", - " for source in response.sources:\n", - " print(source.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Stop the agent runtime." - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "await run_context.stop()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/python/docs/src/cookbook/openai-assistant-agent.ipynb b/python/docs/src/cookbook/openai-assistant-agent.ipynb index a36c7fd05..43e2b43c4 100644 --- a/python/docs/src/cookbook/openai-assistant-agent.ipynb +++ b/python/docs/src/cookbook/openai-assistant-agent.ipynb @@ -1,842 +1,842 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# OpenAI Assistant Agent\n", - "\n", - "[Open AI Assistant](https://platform.openai.com/docs/assistants/overview) \n", - "and [Azure OpenAI Assistant](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistant)\n", - "are server-side APIs for building\n", - "agents.\n", - "They can be used to build agents in AGNext. This cookbook demonstrates how to\n", - "to use OpenAI Assistant to create an agent that can run code and Q&A over document.\n", - "\n", - "## Message Protocol\n", - "\n", - "First, we need to specify the message protocol for the agent backed by \n", - "OpenAI Assistant. The message protocol defines the structure of messages\n", - "handled and published by the agent. \n", - "For illustration, we define a simple\n", - "message protocol of 4 message types: `Message`, `Reset`, `UploadForCodeInterpreter` and `UploadForFileSearch`." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "\n", - "@dataclass\n", - "class TextMessage:\n", - " content: str\n", - " source: str\n", - "\n", - "\n", - "@dataclass\n", - "class Reset:\n", - " pass\n", - "\n", - "\n", - "@dataclass\n", - "class UploadForCodeInterpreter:\n", - " file_path: str\n", - "\n", - "\n", - "@dataclass\n", - "class UploadForFileSearch:\n", - " file_path: str\n", - " vector_store_id: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `TextMessage` message type is used to communicate with the agent. It has a\n", - "`content` field that contains the message content, and a `source` field\n", - "for the sender. The `Reset` message type is a control message that resets\n", - "the memory of the agent. It has no fields. This is useful when we need to\n", - "start a new conversation with the agent.\n", - "\n", - "The `UploadForCodeInterpreter` message type is used to upload data files\n", - "for the code interpreter and `UploadForFileSearch` message type is used to upload\n", - "documents for file search. 
Both message types have a `file_path` field that contains\n", - "the local path to the file to be uploaded.\n", - "\n", - "## Defining the Agent\n", - "\n", - "Next, we define the agent class.\n", - "The agent class constructor has the following arguments: `description`,\n", - "`client`, `assistant_id`, `thread_id`, and `assistant_event_handler_factory`.\n", - "The `client` argument is the OpenAI async client object, and the\n", - "`assistant_event_handler_factory` is for creating an assistant event handler\n", - "to handle OpenAI Assistant events.\n", - "This can be used to create streaming output from the assistant.\n", - "\n", - "The agent class has the following message handlers:\n", - "- `handle_message`: Handles the `TextMessage` message type, and sends back the\n", - " response from the assistant.\n", - "- `handle_reset`: Handles the `Reset` message type, and resets the memory\n", - " of the assistant agent.\n", - "- `handle_upload_for_code_interpreter`: Handles the `UploadForCodeInterpreter`\n", - " message type, and uploads the file to the code interpreter.\n", - "- `handle_upload_for_file_search`: Handles the `UploadForFileSearch`\n", - " message type, and uploads the document to the file search.\n", - "\n", - "\n", - "The memory of the assistant is stored inside a thread, which is kept in the\n", - "server side. The thread is referenced by the `thread_id` argument." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "import os\n", - "from typing import Any, Callable, List\n", - "\n", - "import aiofiles\n", - "from agnext.components import TypeRoutedAgent, message_handler\n", - "from agnext.core import AgentId, MessageContext\n", - "from openai import AsyncAssistantEventHandler, AsyncClient\n", - "from openai.types.beta.thread import ToolResources, ToolResourcesFileSearch\n", - "\n", - "\n", - "class OpenAIAssistantAgent(TypeRoutedAgent):\n", - " \"\"\"An agent implementation that uses the OpenAI Assistant API to generate\n", - " responses.\n", - "\n", - " Args:\n", - " description (str): The description of the agent.\n", - " client (openai.AsyncClient): The client to use for the OpenAI API.\n", - " assistant_id (str): The assistant ID to use for the OpenAI API.\n", - " thread_id (str): The thread ID to use for the OpenAI API.\n", - " assistant_event_handler_factory (Callable[[], AsyncAssistantEventHandler], optional):\n", - " A factory function to create an async assistant event handler. Defaults to None.\n", - " If provided, the agent will use the streaming mode with the event handler.\n", - " If not provided, the agent will use the blocking mode to generate responses.\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " description: str,\n", - " client: AsyncClient,\n", - " assistant_id: str,\n", - " thread_id: str,\n", - " assistant_event_handler_factory: Callable[[], AsyncAssistantEventHandler],\n", - " ) -> None:\n", - " super().__init__(description)\n", - " self._client = client\n", - " self._assistant_id = assistant_id\n", - " self._thread_id = thread_id\n", - " self._assistant_event_handler_factory = assistant_event_handler_factory\n", - "\n", - " @message_handler\n", - " async def handle_message(self, message: TextMessage, ctx: MessageContext) -> TextMessage:\n", - " \"\"\"Handle a message. 
This method adds the message to the thread and publishes a response.\"\"\"\n", - " # Save the message to the thread.\n", - " await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(\n", - " self._client.beta.threads.messages.create(\n", - " thread_id=self._thread_id,\n", - " content=message.content,\n", - " role=\"user\",\n", - " metadata={\"sender\": message.source},\n", - " )\n", - " )\n", - " )\n", - " # Generate a response.\n", - " async with self._client.beta.threads.runs.stream(\n", - " thread_id=self._thread_id,\n", - " assistant_id=self._assistant_id,\n", - " event_handler=self._assistant_event_handler_factory(),\n", - " ) as stream:\n", - " await ctx.cancellation_token.link_future(asyncio.ensure_future(stream.until_done()))\n", - "\n", - " # Get the last message.\n", - " messages = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, order=\"desc\", limit=1))\n", - " )\n", - " last_message_content = messages.data[0].content\n", - "\n", - " # Get the text content from the last message.\n", - " text_content = [content for content in last_message_content if content.type == \"text\"]\n", - " if not text_content:\n", - " raise ValueError(f\"Expected text content in the last message: {last_message_content}\")\n", - "\n", - " return TextMessage(content=text_content[0].text.value, source=self.metadata[\"type\"])\n", - "\n", - " @message_handler()\n", - " async def on_reset(self, message: Reset, ctx: MessageContext) -> None:\n", - " \"\"\"Handle a reset message. This method deletes all messages in the thread.\"\"\"\n", - " # Get all messages in this thread.\n", - " all_msgs: List[str] = []\n", - " while True:\n", - " if not all_msgs:\n", - " msgs = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id))\n", - " )\n", - " else:\n", - " msgs = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, after=all_msgs[-1]))\n", - " )\n", - " for msg in msgs.data:\n", - " all_msgs.append(msg.id)\n", - " if not msgs.has_next_page():\n", - " break\n", - " # Delete all the messages.\n", - " for msg_id in all_msgs:\n", - " status = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(\n", - " self._client.beta.threads.messages.delete(message_id=msg_id, thread_id=self._thread_id)\n", - " )\n", - " )\n", - " assert status.deleted is True\n", - "\n", - " @message_handler()\n", - " async def on_upload_for_code_interpreter(self, message: UploadForCodeInterpreter, ctx: MessageContext) -> None:\n", - " \"\"\"Handle an upload for code interpreter. 
This method uploads a file and updates the thread with the file.\"\"\"\n", - " # Get the file content.\n", - " async with aiofiles.open(message.file_path, mode=\"rb\") as f:\n", - " file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(f.read()))\n", - " file_name = os.path.basename(message.file_path)\n", - " # Upload the file.\n", - " file = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(self._client.files.create(file=(file_name, file_content), purpose=\"assistants\"))\n", - " )\n", - " # Get existing file ids from tool resources.\n", - " thread = await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(self._client.beta.threads.retrieve(thread_id=self._thread_id))\n", - " )\n", - " tool_resources: ToolResources = thread.tool_resources if thread.tool_resources else ToolResources()\n", - " assert tool_resources.code_interpreter is not None\n", - " if tool_resources.code_interpreter.file_ids:\n", - " file_ids = tool_resources.code_interpreter.file_ids\n", - " else:\n", - " file_ids = [file.id]\n", - " # Update thread with new file.\n", - " await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(\n", - " self._client.beta.threads.update(\n", - " thread_id=self._thread_id,\n", - " tool_resources={\n", - " \"code_interpreter\": {\"file_ids\": file_ids},\n", - " },\n", - " )\n", - " )\n", - " )\n", - "\n", - " @message_handler()\n", - " async def on_upload_for_file_search(self, message: UploadForFileSearch, ctx: MessageContext) -> None:\n", - " \"\"\"Handle an upload for file search. This method uploads a file and updates the vector store.\"\"\"\n", - " # Get the file content.\n", - " async with aiofiles.open(message.file_path, mode=\"rb\") as file:\n", - " file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(file.read()))\n", - " file_name = os.path.basename(message.file_path)\n", - " # Upload the file.\n", - " await ctx.cancellation_token.link_future(\n", - " asyncio.ensure_future(\n", - " self._client.beta.vector_stores.file_batches.upload_and_poll(\n", - " vector_store_id=message.vector_store_id,\n", - " files=[(file_name, file_content)],\n", - " )\n", - " )\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The agent class is a thin wrapper around the OpenAI Assistant API to implement\n", - "the message protocol. More features, such as multi-modal message handling,\n", - "can be added by extending the message protocol." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assistant Event Handler\n", - "\n", - "The assistant event handler provides call-backs for handling Assistant API\n", - "specific events. This is useful for handling streaming output from the assistant\n", - "and further user interface integration." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from openai import AsyncAssistantEventHandler, AsyncClient\n", - "from openai.types.beta.threads import Message, Text, TextDelta\n", - "from openai.types.beta.threads.runs import RunStep, RunStepDelta\n", - "from typing_extensions import override\n", - "\n", - "\n", - "class EventHandler(AsyncAssistantEventHandler):\n", - " @override\n", - " async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:\n", - " print(delta.value, end=\"\", flush=True)\n", - "\n", - " @override\n", - " async def on_run_step_created(self, run_step: RunStep) -> None:\n", - " details = run_step.step_details\n", - " if details.type == \"tool_calls\":\n", - " for tool in details.tool_calls:\n", - " if tool.type == \"code_interpreter\":\n", - " print(\"\\nGenerating code to interpret:\\n\\n```python\")\n", - "\n", - " @override\n", - " async def on_run_step_done(self, run_step: RunStep) -> None:\n", - " details = run_step.step_details\n", - " if details.type == \"tool_calls\":\n", - " for tool in details.tool_calls:\n", - " if tool.type == \"code_interpreter\":\n", - " print(\"\\n```\\nExecuting code...\")\n", - "\n", - " @override\n", - " async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:\n", - " details = delta.step_details\n", - " if details is not None and details.type == \"tool_calls\":\n", - " for tool in details.tool_calls or []:\n", - " if tool.type == \"code_interpreter\" and tool.code_interpreter and tool.code_interpreter.input:\n", - " print(tool.code_interpreter.input, end=\"\", flush=True)\n", - "\n", - " @override\n", - " async def on_message_created(self, message: Message) -> None:\n", - " print(f\"{'-'*80}\\nAssistant:\\n\")\n", - "\n", - " @override\n", - " async def on_message_done(self, message: Message) -> None:\n", - " # print a citation to the file searched\n", - " if not message.content:\n", - " return\n", - " content = message.content[0]\n", - " if not content.type == \"text\":\n", - " return\n", - " text_content = content.text\n", - " annotations = text_content.annotations\n", - " citations: List[str] = []\n", - " for index, annotation in enumerate(annotations):\n", - " text_content.value = text_content.value.replace(annotation.text, f\"[{index}]\")\n", - " if file_citation := getattr(annotation, \"file_citation\", None):\n", - " client = AsyncClient()\n", - " cited_file = await client.files.retrieve(file_citation.file_id)\n", - " citations.append(f\"[{index}] {cited_file.filename}\")\n", - " if citations:\n", - " print(\"\\n\".join(citations))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using the Agent\n", - "\n", - "First we need to use the `openai` client to create the actual assistant,\n", - "thread, and vector store. Our AGNext agent will be using these." 
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import openai\n",
-    "\n",
-    "# Create an assistant with code interpreter and file search tools.\n",
-    "oai_assistant = openai.beta.assistants.create(\n",
-    "    model=\"gpt-4o-mini\",\n",
-    "    description=\"An AI assistant that helps with everyday tasks.\",\n",
-    "    instructions=\"Help the user with their task.\",\n",
-    "    tools=[{\"type\": \"code_interpreter\"}, {\"type\": \"file_search\"}],\n",
-    ")\n",
-    "\n",
-    "# Create a vector store to be used for file search.\n",
-    "vector_store = openai.beta.vector_stores.create()\n",
-    "\n",
-    "# Create a thread which is used as the memory for the assistant.\n",
-    "thread = openai.beta.threads.create(\n",
-    "    tool_resources={\"file_search\": {\"vector_store_ids\": [vector_store.id]}},\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Then, we create a runtime, and register an agent factory function for this \n",
-    "agent with the runtime."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from agnext.application import SingleThreadedAgentRuntime\n",
-    "\n",
-    "runtime = SingleThreadedAgentRuntime()\n",
-    "await runtime.register(\n",
-    "    \"assistant\",\n",
-    "    lambda: OpenAIAssistantAgent(\n",
-    "        description=\"OpenAI Assistant Agent\",\n",
-    "        client=openai.AsyncClient(),\n",
-    "        assistant_id=oai_assistant.id,\n",
-    "        thread_id=thread.id,\n",
-    "        assistant_event_handler_factory=lambda: EventHandler(),\n",
-    "    ),\n",
-    ")\n",
-    "agent = AgentId(\"assistant\", \"default\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Let's turn on logging to see what's happening under the hood."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import logging\n",
-    "\n",
-    "logging.basicConfig(level=logging.WARNING)\n",
-    "logging.getLogger(\"agnext\").setLevel(logging.DEBUG)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Let's send a greeting message to the agent, and see the response streamed back."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'Hello, how are you today!', 'source': 'user'}\n",
-      "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
-     ]
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# OpenAI Assistant Agent\n",
+    "\n",
+    "[OpenAI Assistant](https://platform.openai.com/docs/assistants/overview)\n",
+    "and [Azure OpenAI Assistant](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistant)\n",
+    "are server-side APIs for building agents.\n",
+    "They can be used to build agents in AGNext. This cookbook demonstrates how to\n",
+    "use the OpenAI Assistant API to create an agent that can run code and do Q&A over documents.\n",
+    "\n",
+    "## Message Protocol\n",
+    "\n",
+    "First, we need to specify the message protocol for the agent backed by\n",
+    "OpenAI Assistant. The message protocol defines the structure of messages\n",
+    "handled and published by the agent.\n",
+    "For illustration, we define a simple\n",
+    "message protocol of 4 message types: `TextMessage`, `Reset`, `UploadForCodeInterpreter` and `UploadForFileSearch`."
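Taken together, these four types form the agent's entire public surface: everything later in this cookbook is done by sending one of them to the agent. As a minimal preview sketch, assuming the `runtime` and `agent` handles that are created further down in this notebook:

```python
# Sketch: the assistant agent is driven entirely through its message protocol.
# `runtime`, `agent`, and the dataclasses below are defined later in this notebook.
runtime.start()
await runtime.send_message(TextMessage(content="Hello!", source="user"), agent)
await runtime.send_message(Reset(), agent)  # clear the thread memory afterwards
await runtime.stop_when_idle()
```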
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from dataclasses import dataclass\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class TextMessage:\n",
+    "    content: str\n",
+    "    source: str\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class Reset:\n",
+    "    pass\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class UploadForCodeInterpreter:\n",
+    "    file_path: str\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class UploadForFileSearch:\n",
+    "    file_path: str\n",
+    "    vector_store_id: str"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `TextMessage` message type is used to communicate with the agent. It has a\n",
+    "`content` field that contains the message content, and a `source` field\n",
+    "for the sender. The `Reset` message type is a control message that resets\n",
+    "the memory of the agent. It has no fields. This is useful when we need to\n",
+    "start a new conversation with the agent.\n",
+    "\n",
+    "The `UploadForCodeInterpreter` message type is used to upload data files\n",
+    "for the code interpreter, and the `UploadForFileSearch` message type is used to upload\n",
+    "documents for file search. Both message types have a `file_path` field that contains\n",
+    "the local path to the file to be uploaded.\n",
+    "\n",
+    "## Defining the Agent\n",
+    "\n",
+    "Next, we define the agent class.\n",
+    "The agent class constructor has the following arguments: `description`,\n",
+    "`client`, `assistant_id`, `thread_id`, and `assistant_event_handler_factory`.\n",
+    "The `client` argument is the OpenAI async client object, and the\n",
+    "`assistant_event_handler_factory` is for creating an assistant event handler\n",
+    "to handle OpenAI Assistant events.\n",
+    "This can be used to create streaming output from the assistant.\n",
+    "\n",
+    "The agent class has the following message handlers:\n",
+    "- `handle_message`: Handles the `TextMessage` message type, and sends back the\n",
+    "  response from the assistant.\n",
+    "- `on_reset`: Handles the `Reset` message type, and resets the memory\n",
+    "  of the assistant agent.\n",
+    "- `on_upload_for_code_interpreter`: Handles the `UploadForCodeInterpreter`\n",
+    "  message type, and uploads the file to the code interpreter.\n",
+    "- `on_upload_for_file_search`: Handles the `UploadForFileSearch`\n",
+    "  message type, and uploads the document to the file search.\n",
+    "\n",
+    "\n",
+    "The memory of the assistant is stored inside a thread, which is kept on the\n",
+    "server side. The thread is referenced by the `thread_id` argument."
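Because the agent subclasses `TypeRoutedAgent`, each incoming message is dispatched to the handler whose `message` parameter annotation matches the message's type. A minimal sketch of that pattern, using a hypothetical `Ping` type and `EchoAgent` (illustrative names, not part of this cookbook):

```python
from dataclasses import dataclass

from agnext.components import TypeRoutedAgent, message_handler
from agnext.core import MessageContext


@dataclass
class Ping:
    content: str


class EchoAgent(TypeRoutedAgent):
    def __init__(self) -> None:
        super().__init__("An agent that echoes Ping messages.")

    @message_handler
    async def on_ping(self, message: Ping, ctx: MessageContext) -> Ping:
        # Chosen by the runtime because `message` is annotated with `Ping`.
        return Ping(content=message.content)
```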
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "import os\n", + "from typing import Any, Callable, List\n", + "\n", + "import aiofiles\n", + "from agnext.components import TypeRoutedAgent, message_handler\n", + "from agnext.core import AgentId, MessageContext\n", + "from openai import AsyncAssistantEventHandler, AsyncClient\n", + "from openai.types.beta.thread import ToolResources, ToolResourcesFileSearch\n", + "\n", + "\n", + "class OpenAIAssistantAgent(TypeRoutedAgent):\n", + " \"\"\"An agent implementation that uses the OpenAI Assistant API to generate\n", + " responses.\n", + "\n", + " Args:\n", + " description (str): The description of the agent.\n", + " client (openai.AsyncClient): The client to use for the OpenAI API.\n", + " assistant_id (str): The assistant ID to use for the OpenAI API.\n", + " thread_id (str): The thread ID to use for the OpenAI API.\n", + " assistant_event_handler_factory (Callable[[], AsyncAssistantEventHandler], optional):\n", + " A factory function to create an async assistant event handler. Defaults to None.\n", + " If provided, the agent will use the streaming mode with the event handler.\n", + " If not provided, the agent will use the blocking mode to generate responses.\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " description: str,\n", + " client: AsyncClient,\n", + " assistant_id: str,\n", + " thread_id: str,\n", + " assistant_event_handler_factory: Callable[[], AsyncAssistantEventHandler],\n", + " ) -> None:\n", + " super().__init__(description)\n", + " self._client = client\n", + " self._assistant_id = assistant_id\n", + " self._thread_id = thread_id\n", + " self._assistant_event_handler_factory = assistant_event_handler_factory\n", + "\n", + " @message_handler\n", + " async def handle_message(self, message: TextMessage, ctx: MessageContext) -> TextMessage:\n", + " \"\"\"Handle a message. This method adds the message to the thread and publishes a response.\"\"\"\n", + " # Save the message to the thread.\n", + " await ctx.cancellation_token.link_future(\n", + " asyncio.ensure_future(\n", + " self._client.beta.threads.messages.create(\n", + " thread_id=self._thread_id,\n", + " content=message.content,\n", + " role=\"user\",\n", + " metadata={\"sender\": message.source},\n", + " )\n", + " )\n", + " )\n", + " # Generate a response.\n", + " async with self._client.beta.threads.runs.stream(\n", + " thread_id=self._thread_id,\n", + " assistant_id=self._assistant_id,\n", + " event_handler=self._assistant_event_handler_factory(),\n", + " ) as stream:\n", + " await ctx.cancellation_token.link_future(asyncio.ensure_future(stream.until_done()))\n", + "\n", + " # Get the last message.\n", + " messages = await ctx.cancellation_token.link_future(\n", + " asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, order=\"desc\", limit=1))\n", + " )\n", + " last_message_content = messages.data[0].content\n", + "\n", + " # Get the text content from the last message.\n", + " text_content = [content for content in last_message_content if content.type == \"text\"]\n", + " if not text_content:\n", + " raise ValueError(f\"Expected text content in the last message: {last_message_content}\")\n", + "\n", + " return TextMessage(content=text_content[0].text.value, source=self.metadata[\"type\"])\n", + "\n", + " @message_handler()\n", + " async def on_reset(self, message: Reset, ctx: MessageContext) -> None:\n", + " \"\"\"Handle a reset message. 
This method deletes all messages in the thread.\"\"\"\n",
+    "        # Get all messages in this thread.\n",
+    "        all_msgs: List[str] = []\n",
+    "        while True:\n",
+    "            if not all_msgs:\n",
+    "                msgs = await ctx.cancellation_token.link_future(\n",
+    "                    asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id))\n",
+    "                )\n",
+    "            else:\n",
+    "                msgs = await ctx.cancellation_token.link_future(\n",
+    "                    asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, after=all_msgs[-1]))\n",
+    "                )\n",
+    "            for msg in msgs.data:\n",
+    "                all_msgs.append(msg.id)\n",
+    "            if not msgs.has_next_page():\n",
+    "                break\n",
+    "        # Delete all the messages.\n",
+    "        for msg_id in all_msgs:\n",
+    "            status = await ctx.cancellation_token.link_future(\n",
+    "                asyncio.ensure_future(\n",
+    "                    self._client.beta.threads.messages.delete(message_id=msg_id, thread_id=self._thread_id)\n",
+    "                )\n",
+    "            )\n",
+    "            assert status.deleted is True\n",
+    "\n",
+    "    @message_handler()\n",
+    "    async def on_upload_for_code_interpreter(self, message: UploadForCodeInterpreter, ctx: MessageContext) -> None:\n",
+    "        \"\"\"Handle an upload for code interpreter. This method uploads a file and updates the thread with the file.\"\"\"\n",
+    "        # Get the file content.\n",
+    "        async with aiofiles.open(message.file_path, mode=\"rb\") as f:\n",
+    "            file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(f.read()))\n",
+    "        file_name = os.path.basename(message.file_path)\n",
+    "        # Upload the file.\n",
+    "        file = await ctx.cancellation_token.link_future(\n",
+    "            asyncio.ensure_future(self._client.files.create(file=(file_name, file_content), purpose=\"assistants\"))\n",
+    "        )\n",
+    "        # Get existing file ids from tool resources.\n",
+    "        thread = await ctx.cancellation_token.link_future(\n",
+    "            asyncio.ensure_future(self._client.beta.threads.retrieve(thread_id=self._thread_id))\n",
+    "        )\n",
+    "        tool_resources: ToolResources = thread.tool_resources if thread.tool_resources else ToolResources()\n",
+    "        assert tool_resources.code_interpreter is not None\n",
+    "        if tool_resources.code_interpreter.file_ids:\n",
+    "            # Keep the files uploaded earlier and append the new one; reusing the\n",
+    "            # existing list alone would drop the file we just uploaded.\n",
+    "            file_ids = tool_resources.code_interpreter.file_ids + [file.id]\n",
+    "        else:\n",
+    "            file_ids = [file.id]\n",
+    "        # Update thread with new file.\n",
+    "        await ctx.cancellation_token.link_future(\n",
+    "            asyncio.ensure_future(\n",
+    "                self._client.beta.threads.update(\n",
+    "                    thread_id=self._thread_id,\n",
+    "                    tool_resources={\n",
+    "                        \"code_interpreter\": {\"file_ids\": file_ids},\n",
+    "                    },\n",
+    "                )\n",
+    "            )\n",
+    "        )\n",
+    "\n",
+    "    @message_handler()\n",
+    "    async def on_upload_for_file_search(self, message: UploadForFileSearch, ctx: MessageContext) -> None:\n",
+    "        \"\"\"Handle an upload for file search. This method uploads a file and updates the vector store.\"\"\"\n",
+    "        # Get the file content.\n",
+    "        async with aiofiles.open(message.file_path, mode=\"rb\") as file:\n",
+    "            file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(file.read()))\n",
+    "        file_name = os.path.basename(message.file_path)\n",
+    "        # Upload the file.\n",
+    "        await ctx.cancellation_token.link_future(\n",
+    "            asyncio.ensure_future(\n",
+    "                self._client.beta.vector_stores.file_batches.upload_and_poll(\n",
+    "                    vector_store_id=message.vector_store_id,\n",
+    "                    files=[(file_name, file_content)],\n",
+    "                )\n",
+    "            )\n",
+    "        )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The agent class is a thin wrapper around the OpenAI Assistant API to implement\n",
+    "the message protocol. 
More features, such as multi-modal message handling,\n", + "can be added by extending the message protocol." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assistant Event Handler\n", + "\n", + "The assistant event handler provides call-backs for handling Assistant API\n", + "specific events. This is useful for handling streaming output from the assistant\n", + "and further user interface integration." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from openai import AsyncAssistantEventHandler, AsyncClient\n", + "from openai.types.beta.threads import Message, Text, TextDelta\n", + "from openai.types.beta.threads.runs import RunStep, RunStepDelta\n", + "from typing_extensions import override\n", + "\n", + "\n", + "class EventHandler(AsyncAssistantEventHandler):\n", + " @override\n", + " async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:\n", + " print(delta.value, end=\"\", flush=True)\n", + "\n", + " @override\n", + " async def on_run_step_created(self, run_step: RunStep) -> None:\n", + " details = run_step.step_details\n", + " if details.type == \"tool_calls\":\n", + " for tool in details.tool_calls:\n", + " if tool.type == \"code_interpreter\":\n", + " print(\"\\nGenerating code to interpret:\\n\\n```python\")\n", + "\n", + " @override\n", + " async def on_run_step_done(self, run_step: RunStep) -> None:\n", + " details = run_step.step_details\n", + " if details.type == \"tool_calls\":\n", + " for tool in details.tool_calls:\n", + " if tool.type == \"code_interpreter\":\n", + " print(\"\\n```\\nExecuting code...\")\n", + "\n", + " @override\n", + " async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:\n", + " details = delta.step_details\n", + " if details is not None and details.type == \"tool_calls\":\n", + " for tool in details.tool_calls or []:\n", + " if tool.type == \"code_interpreter\" and tool.code_interpreter and tool.code_interpreter.input:\n", + " print(tool.code_interpreter.input, end=\"\", flush=True)\n", + "\n", + " @override\n", + " async def on_message_created(self, message: Message) -> None:\n", + " print(f\"{'-'*80}\\nAssistant:\\n\")\n", + "\n", + " @override\n", + " async def on_message_done(self, message: Message) -> None:\n", + " # print a citation to the file searched\n", + " if not message.content:\n", + " return\n", + " content = message.content[0]\n", + " if not content.type == \"text\":\n", + " return\n", + " text_content = content.text\n", + " annotations = text_content.annotations\n", + " citations: List[str] = []\n", + " for index, annotation in enumerate(annotations):\n", + " text_content.value = text_content.value.replace(annotation.text, f\"[{index}]\")\n", + " if file_citation := getattr(annotation, \"file_citation\", None):\n", + " client = AsyncClient()\n", + " cited_file = await client.files.retrieve(file_citation.file_id)\n", + " citations.append(f\"[{index}] {cited_file.filename}\")\n", + " if citations:\n", + " print(\"\\n\".join(citations))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using the Agent\n", + "\n", + "First we need to use the `openai` client to create the actual assistant,\n", + "thread, and vector store. Our AGNext agent will be using these." 
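The cells below use the public OpenAI service. To target Azure OpenAI Assistant instead (mentioned at the top of this cookbook), the same agent should work with an Azure client swapped in when registering it; the following is a sketch with placeholder values, assuming the standard `openai` package:

```python
from openai import AsyncAzureOpenAI

# Placeholder configuration; substitute your own Azure OpenAI resource values.
azure_client = AsyncAzureOpenAI(
    azure_endpoint="https://<your-resource>.openai.azure.com",
    api_key="<your-api-key>",
    api_version="<api-version>",
)
```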
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import openai\n", + "\n", + "# Create an assistant with code interpreter and file search tools.\n", + "oai_assistant = openai.beta.assistants.create(\n", + " model=\"gpt-4o-mini\",\n", + " description=\"An AI assistant that helps with everyday tasks.\",\n", + " instructions=\"Help the user with their task.\",\n", + " tools=[{\"type\": \"code_interpreter\"}, {\"type\": \"file_search\"}],\n", + ")\n", + "\n", + "# Create a vector store to be used for file search.\n", + "vector_store = openai.beta.vector_stores.create()\n", + "\n", + "# Create a thread which is used as the memory for the assistant.\n", + "thread = openai.beta.threads.create(\n", + " tool_resources={\"file_search\": {\"vector_store_ids\": [vector_store.id]}},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, we create a runtime, and register an agent factory function for this \n", + "agent with the runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "from agnext.application import SingleThreadedAgentRuntime\n", + "\n", + "runtime = SingleThreadedAgentRuntime()\n", + "await runtime.register(\n", + " \"assistant\",\n", + " lambda: OpenAIAssistantAgent(\n", + " description=\"OpenAI Assistant Agent\",\n", + " client=openai.AsyncClient(),\n", + " assistant_id=oai_assistant.id,\n", + " thread_id=thread.id,\n", + " assistant_event_handler_factory=lambda: EventHandler(),\n", + " ),\n", + ")\n", + "agent = AgentId(\"assistant\", \"default\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's turn on logging to see what's happening under the hood." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "\n", + "logging.basicConfig(level=logging.WARNING)\n", + "logging.getLogger(\"agnext\").setLevel(logging.DEBUG)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's send a greeting message to the agent, and see the response streamed back." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'Hello, how are you today!', 'source': 'user'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Assistant:\n", + "\n", + "Hello! I'm here and ready to assist you. How can I help you today?" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"Hello! I'm here and ready to assist you. How can I help you today?\", 'source': 'assistant'}\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(TextMessage(content=\"Hello, how are you today!\", source=\"user\"), agent)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assistant with Code Interpreter\n", + "\n", + "Let's ask some math question to the agent, and see it uses the code interpreter\n", + "to answer the question." 
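Note the run lifecycle used in the cell above and in every cell that follows: `start()` begins background message processing on the runtime itself, and stopping is likewise requested on the runtime. A minimal sketch of the pattern, reusing the notebook's `runtime` and `agent`:

```python
# Run lifecycle: the runtime processes messages in the background once started.
runtime.start()
response = await runtime.send_message(TextMessage(content="Hi!", source="user"), agent)
await runtime.stop_when_idle()  # return after the message queue drains
# Or, to stop without waiting for pending messages:
# await runtime.stop()
```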
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'What is 1332322 x 123212?', 'source': 'user'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Calculating the product of 1332322 and 123212\n", + "result = 1332322 * 123212\n", + "result\n", + "```\n", + "Executing code...\n", + "--------------------------------------------------------------------------------\n", + "Assistant:\n", + "\n", + "The product of 1,332,322 and 123,212 is 164,158,058,264." + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The product of 1,332,322 and 123,212 is 164,158,058,264.', 'source': 'assistant'}\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(TextMessage(content=\"What is 1332322 x 123212?\", source=\"user\"), agent)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's get some data from Seattle Open Data portal. We will be using the\n", + "[City of Seattle Wage Data](https://data.seattle.gov/City-Business/City-of-Seattle-Wage-Data/2khk-5ukd/).\n", + "Let's download it first." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "import requests\n", + "\n", + "response = requests.get(\"https://data.seattle.gov/resource/2khk-5ukd.csv\")\n", + "with open(\"seattle_city_wages.csv\", \"wb\") as file:\n", + " file.write(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's send the file to the agent using an `UploadForCodeInterpreter` message." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type UploadForCodeInterpreter to assistant: {'file_path': 'seattle_city_wages.csv'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type UploadForCodeInterpreter sent by Unknown\n", + "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(UploadForCodeInterpreter(file_path=\"seattle_city_wages.csv\"), agent)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now ask some questions about the data to the agent." 
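Since the upload handler appends each file to the thread's code interpreter resources, additional datasets could be staged the same way before asking questions; a sketch, where the second file name is a placeholder:

```python
# Hypothetical: stage several local files for the code interpreter in one run.
runtime.start()
for path in ["seattle_city_wages.csv", "another_dataset.csv"]:  # second file is illustrative
    await runtime.send_message(UploadForCodeInterpreter(file_path=path), agent)
await runtime.stop_when_idle()
```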
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'Take a look at the uploaded CSV file.', 'source': 'user'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "import pandas as pd\n", + "\n", + "# Load the uploaded CSV file to examine its contents\n", + "file_path = '/mnt/data/file-oEvRiyGyHc2jZViKyDqL8aoh'\n", + "csv_data = pd.read_csv(file_path)\n", + "\n", + "# Display the first few rows of the dataframe to understand its structure\n", + "csv_data.head()\n", + "```\n", + "Executing code...\n", + "--------------------------------------------------------------------------------\n", + "Assistant:\n", + "\n", + "The uploaded CSV file contains the following columns:\n", + "\n", + "1. **department**: The department in which the individual works.\n", + "2. **last_name**: The last name of the employee.\n", + "3. **first_name**: The first name of the employee.\n", + "4. **job_title**: The job title of the employee.\n", + "5. **hourly_rate**: The hourly rate for the employee's position.\n", + "\n", + "Here are the first few entries from the file:\n", + "\n", + "| department | last_name | first_name | job_title | hourly_rate |\n", + "|--------------------------------|-----------|------------|------------------------------------|-------------|\n", + "| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\n", + "| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\n", + "| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\n", + "| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\n", + "| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\n", + "\n", + "If you need any specific analysis or information from this data, please let me know!" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"The uploaded CSV file contains the following columns:\\n\\n1. **department**: The department in which the individual works.\\n2. **last_name**: The last name of the employee.\\n3. **first_name**: The first name of the employee.\\n4. **job_title**: The job title of the employee.\\n5. 
**hourly_rate**: The hourly rate for the employee's position.\\n\\nHere are the first few entries from the file:\\n\\n| department | last_name | first_name | job_title | hourly_rate |\\n|--------------------------------|-----------|------------|------------------------------------|-------------|\\n| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\\n| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\\n| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\\n| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\\n| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\\n\\nIf you need any specific analysis or information from this data, please let me know!\", 'source': 'assistant'}\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(TextMessage(content=\"Take a look at the uploaded CSV file.\", source=\"user\"), agent)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'What are the top-10 salaries?', 'source': 'user'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Sorting the data by hourly_rate in descending order and selecting the top 10 salaries\n", + "top_10_salaries = csv_data[['first_name', 'last_name', 'job_title', 'hourly_rate']].sort_values(by='hourly_rate', ascending=False).head(10)\n", + "top_10_salaries.reset_index(drop=True, inplace=True)\n", + "top_10_salaries\n", + "```\n", + "Executing code...\n", + "--------------------------------------------------------------------------------\n", + "Assistant:\n", + "\n", + "Here are the top 10 salaries based on the hourly rates from the CSV file:\n", + "\n", + "| First Name | Last Name | Job Title | Hourly Rate |\n", + "|------------|-----------|------------------------------------|-------------|\n", + "| Eric | Barden | Executive4 | 139.61 |\n", + "| Idris | Beauregard| Executive3 | 115.90 |\n", + "| Lori | Aagard | Pol Capt-Precinct | 112.70 |\n", + "| Krista | Bair | Pol Capt-Precinct | 108.74 |\n", + "| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\n", + "| Ginger | Armbruster| Executive2 | 102.42 |\n", + "| William | Andersen | Executive2 | 102.42 |\n", + "| Valarie | Anderson | Executive2 | 102.42 |\n", + "| Paige | Alderete | Executive2 | 102.42 |\n", + "| Kathryn | Aisenberg | Executive2 | 100.65 |\n", + "\n", + "If you need any further details or analysis, let me know!" 
+ ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'Here are the top 10 salaries based on the hourly rates from the CSV file:\\n\\n| First Name | Last Name | Job Title | Hourly Rate |\\n|------------|-----------|------------------------------------|-------------|\\n| Eric | Barden | Executive4 | 139.61 |\\n| Idris | Beauregard| Executive3 | 115.90 |\\n| Lori | Aagard | Pol Capt-Precinct | 112.70 |\\n| Krista | Bair | Pol Capt-Precinct | 108.74 |\\n| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\\n| Ginger | Armbruster| Executive2 | 102.42 |\\n| William | Andersen | Executive2 | 102.42 |\\n| Valarie | Anderson | Executive2 | 102.42 |\\n| Paige | Alderete | Executive2 | 102.42 |\\n| Kathryn | Aisenberg | Executive2 | 100.65 |\\n\\nIf you need any further details or analysis, let me know!', 'source': 'assistant'}\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(TextMessage(content=\"What are the top-10 salaries?\", source=\"user\"), agent)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assistant with File Search\n", + "\n", + "Let's try the Q&A over document feature. We first download Wikipedia page\n", + "on the Third Anglo-Afghan War." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "response = requests.get(\"https://en.wikipedia.org/wiki/Third_Anglo-Afghan_War\")\n", + "with open(\"third_anglo_afghan_war.html\", \"wb\") as file:\n", + " file.write(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Send the file to the agent using an `UploadForFileSearch` message." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type UploadForFileSearch to assistant: {'file_path': 'third_anglo_afghan_war.html', 'vector_store_id': 'vs_h3xxPbJFnd1iZ9WdjsQwNdrp'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type UploadForFileSearch sent by Unknown\n", + "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(\n", + " UploadForFileSearch(file_path=\"third_anglo_afghan_war.html\", vector_store_id=vector_store.id), agent\n", + ")\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's ask some questions about the document to the agent. Before asking,\n", + "we reset the agent memory to start a new conversation." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Sending message of type Reset to assistant: {}\n", + "INFO:agnext:Calling message handler for assistant:default with message type Reset sent by Unknown\n", + "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n", + "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'When and where was the treaty of Rawalpindi signed? 
Answer using the document provided.', 'source': 'user'}\n", + "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Assistant:\n", + "\n", + "The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan怐6:0†source怑." + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan怐6:0†source怑.', 'source': 'assistant'}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0] third_anglo_afghan_war.html\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "await runtime.send_message(Reset(), agent)\n", + "await runtime.send_message(\n", + " TextMessage(\n", + " content=\"When and where was the treaty of Rawalpindi signed? Answer using the document provided.\", source=\"user\"\n", + " ),\n", + " agent,\n", + ")\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's it! We have successfully built an agent backed by OpenAI Assistant." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Assistant:\n", - "\n", - "Hello! I'm here and ready to assist you. How can I help you today?" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"Hello! I'm here and ready to assist you. How can I help you today?\", 'source': 'assistant'}\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(TextMessage(content=\"Hello, how are you today!\", source=\"user\"), agent)\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assistant with Code Interpreter\n", - "\n", - "Let's ask some math question to the agent, and see it uses the code interpreter\n", - "to answer the question." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'What is 1332322 x 123212?', 'source': 'user'}\n", - "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Calculating the product of 1332322 and 123212\n", - "result = 1332322 * 123212\n", - "result\n", - "```\n", - "Executing code...\n", - "--------------------------------------------------------------------------------\n", - "Assistant:\n", - "\n", - "The product of 1,332,322 and 123,212 is 164,158,058,264." - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The product of 1,332,322 and 123,212 is 164,158,058,264.', 'source': 'assistant'}\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(TextMessage(content=\"What is 1332322 x 123212?\", source=\"user\"), agent)\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's get some data from Seattle Open Data portal. We will be using the\n", - "[City of Seattle Wage Data](https://data.seattle.gov/City-Business/City-of-Seattle-Wage-Data/2khk-5ukd/).\n", - "Let's download it first." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "response = requests.get(\"https://data.seattle.gov/resource/2khk-5ukd.csv\")\n", - "with open(\"seattle_city_wages.csv\", \"wb\") as file:\n", - " file.write(response.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's send the file to the agent using an `UploadForCodeInterpreter` message." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type UploadForCodeInterpreter to assistant: {'file_path': 'seattle_city_wages.csv'}\n", - "INFO:agnext:Calling message handler for assistant:default with message type UploadForCodeInterpreter sent by Unknown\n", - "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(UploadForCodeInterpreter(file_path=\"seattle_city_wages.csv\"), agent)\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now ask some questions about the data to the agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'Take a look at the uploaded CSV file.', 'source': 'user'}\n", - "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "import pandas as pd\n", - "\n", - "# Load the uploaded CSV file to examine its contents\n", - "file_path = '/mnt/data/file-oEvRiyGyHc2jZViKyDqL8aoh'\n", - "csv_data = pd.read_csv(file_path)\n", - "\n", - "# Display the first few rows of the dataframe to understand its structure\n", - "csv_data.head()\n", - "```\n", - "Executing code...\n", - "--------------------------------------------------------------------------------\n", - "Assistant:\n", - "\n", - "The uploaded CSV file contains the following columns:\n", - "\n", - "1. **department**: The department in which the individual works.\n", - "2. **last_name**: The last name of the employee.\n", - "3. **first_name**: The first name of the employee.\n", - "4. **job_title**: The job title of the employee.\n", - "5. **hourly_rate**: The hourly rate for the employee's position.\n", - "\n", - "Here are the first few entries from the file:\n", - "\n", - "| department | last_name | first_name | job_title | hourly_rate |\n", - "|--------------------------------|-----------|------------|------------------------------------|-------------|\n", - "| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\n", - "| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\n", - "| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\n", - "| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\n", - "| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\n", - "\n", - "If you need any specific analysis or information from this data, please let me know!" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"The uploaded CSV file contains the following columns:\\n\\n1. **department**: The department in which the individual works.\\n2. **last_name**: The last name of the employee.\\n3. **first_name**: The first name of the employee.\\n4. **job_title**: The job title of the employee.\\n5. 
**hourly_rate**: The hourly rate for the employee's position.\\n\\nHere are the first few entries from the file:\\n\\n| department | last_name | first_name | job_title | hourly_rate |\\n|--------------------------------|-----------|------------|------------------------------------|-------------|\\n| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\\n| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\\n| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\\n| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\\n| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\\n\\nIf you need any specific analysis or information from this data, please let me know!\", 'source': 'assistant'}\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(TextMessage(content=\"Take a look at the uploaded CSV file.\", source=\"user\"), agent)\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'What are the top-10 salaries?', 'source': 'user'}\n", - "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Sorting the data by hourly_rate in descending order and selecting the top 10 salaries\n", - "top_10_salaries = csv_data[['first_name', 'last_name', 'job_title', 'hourly_rate']].sort_values(by='hourly_rate', ascending=False).head(10)\n", - "top_10_salaries.reset_index(drop=True, inplace=True)\n", - "top_10_salaries\n", - "```\n", - "Executing code...\n", - "--------------------------------------------------------------------------------\n", - "Assistant:\n", - "\n", - "Here are the top 10 salaries based on the hourly rates from the CSV file:\n", - "\n", - "| First Name | Last Name | Job Title | Hourly Rate |\n", - "|------------|-----------|------------------------------------|-------------|\n", - "| Eric | Barden | Executive4 | 139.61 |\n", - "| Idris | Beauregard| Executive3 | 115.90 |\n", - "| Lori | Aagard | Pol Capt-Precinct | 112.70 |\n", - "| Krista | Bair | Pol Capt-Precinct | 108.74 |\n", - "| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\n", - "| Ginger | Armbruster| Executive2 | 102.42 |\n", - "| William | Andersen | Executive2 | 102.42 |\n", - "| Valarie | Anderson | Executive2 | 102.42 |\n", - "| Paige | Alderete | Executive2 | 102.42 |\n", - "| Kathryn | Aisenberg | Executive2 | 100.65 |\n", - "\n", - "If you need any further details or analysis, let me know!" 
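The same top-10 query can be reproduced locally with pandas on the CSV downloaded earlier; `nlargest` is an equivalent, slightly cheaper alternative to the sort-then-head code the assistant generated. Column names are taken from the assistant's description above.

```python
import pandas as pd

# Reproduce the assistant's analysis locally on the downloaded file.
df = pd.read_csv("seattle_city_wages.csv")
top10 = df.nlargest(10, "hourly_rate")[["first_name", "last_name", "job_title", "hourly_rate"]]
print(top10.to_string(index=False))
```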
- ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'Here are the top 10 salaries based on the hourly rates from the CSV file:\\n\\n| First Name | Last Name | Job Title | Hourly Rate |\\n|------------|-----------|------------------------------------|-------------|\\n| Eric | Barden | Executive4 | 139.61 |\\n| Idris | Beauregard| Executive3 | 115.90 |\\n| Lori | Aagard | Pol Capt-Precinct | 112.70 |\\n| Krista | Bair | Pol Capt-Precinct | 108.74 |\\n| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\\n| Ginger | Armbruster| Executive2 | 102.42 |\\n| William | Andersen | Executive2 | 102.42 |\\n| Valarie | Anderson | Executive2 | 102.42 |\\n| Paige | Alderete | Executive2 | 102.42 |\\n| Kathryn | Aisenberg | Executive2 | 100.65 |\\n\\nIf you need any further details or analysis, let me know!', 'source': 'assistant'}\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(TextMessage(content=\"What are the top-10 salaries?\", source=\"user\"), agent)\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assistant with File Search\n", - "\n", - "Let's try the Q&A over document feature. We first download Wikipedia page\n", - "on the Third Anglo-Afghan War." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "response = requests.get(\"https://en.wikipedia.org/wiki/Third_Anglo-Afghan_War\")\n", - "with open(\"third_anglo_afghan_war.html\", \"wb\") as file:\n", - " file.write(response.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Send the file to the agent using an `UploadForFileSearch` message." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type UploadForFileSearch to assistant: {'file_path': 'third_anglo_afghan_war.html', 'vector_store_id': 'vs_h3xxPbJFnd1iZ9WdjsQwNdrp'}\n", - "INFO:agnext:Calling message handler for assistant:default with message type UploadForFileSearch sent by Unknown\n", - "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n" - ] - } - ], - "source": [ - "run_context = runtime.start()\n", - "await runtime.send_message(\n", - " UploadForFileSearch(file_path=\"third_anglo_afghan_war.html\", vector_store_id=vector_store.id), agent\n", - ")\n", - "await run_context.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's ask some questions about the document to the agent. Before asking,\n", - "we reset the agent memory to start a new conversation." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:agnext:Sending message of type Reset to assistant: {}\n", - "INFO:agnext:Calling message handler for assistant:default with message type Reset sent by Unknown\n", - "INFO:agnext:Resolving response with message type NoneType for recipient None from assistant: None\n", - "INFO:agnext:Sending message of type TextMessage to assistant: {'content': 'When and where was the treaty of Rawalpindi signed? 
Answer using the document provided.', 'source': 'user'}\n",
-    "INFO:agnext:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
-   ]
-  },
-  {
-   "name": "stdout",
-   "output_type": "stream",
-   "text": [
-    "--------------------------------------------------------------------------------\n",
-    "Assistant:\n",
-    "\n",
-    "The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan【6:0†source】."
-   ]
-  },
-  {
-   "name": "stderr",
-   "output_type": "stream",
-   "text": [
-    "INFO:agnext:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan【6:0†source】.', 'source': 'assistant'}\n"
-   ]
-  },
-  {
-   "name": "stdout",
-   "output_type": "stream",
-   "text": [
-    "[0] third_anglo_afghan_war.html\n"
-   ]
-  }
-   ],
-   "source": [
-    "run_context = runtime.start()\n",
-    "await runtime.send_message(Reset(), agent)\n",
-    "await runtime.send_message(\n",
-    "    TextMessage(\n",
-    "        content=\"When and where was the treaty of Rawalpindi signed? Answer using the document provided.\", source=\"user\"\n",
-    "    ),\n",
-    "    agent,\n",
-    ")\n",
-    "await run_context.stop_when_idle()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "That's it! We have successfully built an agent backed by OpenAI Assistant."
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "agnext",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.12.4"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
+  "nbformat": 4,
+  "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/python/docs/src/getting-started/agent-and-agent-runtime.ipynb b/python/docs/src/getting-started/agent-and-agent-runtime.ipynb
index 165b99690..a0d77ef84 100644
--- a/python/docs/src/getting-started/agent-and-agent-runtime.ipynb
+++ b/python/docs/src/getting-started/agent-and-agent-runtime.ipynb
@@ -1,252 +1,263 @@
 {
-  "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Agent and Agent Runtime\n",
-    "\n",
-    "In this and the following section, we focus on the \n",
-    "[core concepts](../core-concepts/overview.md) of AGNext:\n",
-    "agents, agent runtime, messages, and communication.\n",
-    "You will not find any AI models or tools here, just the foundational\n",
-    "building blocks for building multi-agent applications."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "An agent in AGNext is an entity defined by the base class {py:class}`agnext.core.BaseAgent`.\n", - "It has a unique identifier of the type {py:class}`agnext.core.AgentId`,\n", - "a metadata dictionary of the type {py:class}`agnext.core.AgentMetadata`,\n", - "and method for handling messages {py:meth}`agnext.core.BaseAgent.on_message`.\n", - "\n", - "An agent runtime is the execution environment for agents in AGNext.\n", - "Similar to the runtime environment of a programming language,\n", - "an agent runtime provides the necessary infrastructure to facilitate communication\n", - "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", - "debugging.\n", - "For local development, developers can use {py:class}`~agnext.application.SingleThreadedAgentRuntime`,\n", - "which can be embedded in a Python application.\n", - "\n", - "```{note}\n", - "Agents are not directly instantiated and managed by application code.\n", - "Instead, they are created by the runtime when needed and managed by the runtime.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Implementing an Agent\n", - "\n", - "To implement an agent, developer must subclass the {py:class}`~agnext.core.BaseAgent` class,\n", - "declare the message types it can handle in the {py:attr}`~agnext.core.AgentMetadata.subscriptions` metadata,\n", - "and implement the {py:meth}`~agnext.core.BaseAgent.on_message` method.\n", - "This method is invoked when the agent receives a message. For example,\n", - "the following agent handles a simple message type and simply prints message it receives:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from agnext.core import AgentId, BaseAgent, MessageContext\n", - "\n", - "\n", - "@dataclass\n", - "class MyMessage:\n", - " content: str\n", - "\n", - "\n", - "class MyAgent(BaseAgent):\n", - " def __init__(self) -> None:\n", - " super().__init__(\"MyAgent\")\n", - "\n", - " async def on_message(self, message: MyMessage, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For convenience, developers can subclass the {py:class}`~agnext.components.TypeRoutedAgent` class\n", - "which provides an easy-to use API to implement different message handlers for different message types.\n", - "See the section on message handlers below." 
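Without `TypeRoutedAgent`, a `BaseAgent` that accepts several message types has to branch on the type itself. Here is a minimal sketch of that manual dispatch, reusing `MyMessage` from the cell above; `PingMessage` is hypothetical, added only for illustration.

```python
from dataclasses import dataclass
from typing import Any

from agnext.core import BaseAgent, MessageContext


@dataclass
class PingMessage:
    """Hypothetical second message type, for illustration only."""


class MultiTypeAgent(BaseAgent):
    def __init__(self) -> None:
        super().__init__("MultiTypeAgent")

    async def on_message(self, message: Any, ctx: MessageContext) -> None:
        # Manual dispatch on the message type; TypeRoutedAgent automates this.
        if isinstance(message, MyMessage):
            print(f"Received message: {message.content}")
        elif isinstance(message, PingMessage):
            print("Received ping")
```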
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Registering Agents\n", - "\n", - "To make an agent available to the runtime, developers can use the\n", - "{py:meth}`~agnext.core.AgentRuntime.register` method.\n", - "The process of registration associates a name and a factory function\n", - "that creates an instance of the agent in a given namespace.\n", - "The factory function is used to allow automatic creation of agents when they are needed.\n", - "\n", - "For example, to register an agent with the {py:class}`~agnext.application.SingleThreadedAgentRuntime`,\n", - "the following code can be used:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "from agnext.application import SingleThreadedAgentRuntime\n", - "\n", - "runtime = SingleThreadedAgentRuntime()\n", - "await runtime.register(\"my_agent\", lambda: MyAgent())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once an agent is registered, the agent's unique ID of the type {py:class}`~agnext.core.AgentId` \n", - "can be retrieved by calling {py:meth}`~agnext.core.AgentRuntime.get`.\n", - "Using the agent ID, we can send messages to the agent:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received message: Hello, World!\n" - ] - } - ], - "source": [ - "agent_id = AgentId(\"my_agent\", \"default\")\n", - "run_context = runtime.start() # Start processing messages in the background.\n", - "await runtime.send_message(MyMessage(content=\"Hello, World!\"), agent_id)\n", - "await run_context.stop() # Stop processing messages in the background." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There is a convenience method\n", - "{py:meth}`~agnext.core.AgentRuntime.register_and_get` that both registers an agent\n", - "and gets its ID.\n", - "\n", - "```{note}\n", - "Because the runtime manages the lifecycle of agents, a reference to an agent,\n", - "whether it is {py:class}`~agnext.core.AgentId` or {py:class}`~agnext.core.AgentProxy`,\n", - "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running the Single-Threaded Agent Runtime\n", - "\n", - "The above code snippet uses `runtime.start()` to start a background task\n", - "to process and deliver messages to recepients' message handlers.\n", - "This is a feature of the\n", - "local embedded runtime {py:class}`~agnext.application.SingleThreadedAgentRuntime`.\n", - "\n", - "To stop the background task immediately, use the `stop()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "run_context = runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await run_context.stop() # This will return immediately but will not cancel\n", - "# any in-progress message handling." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can resume the background task by calling `start()` again.\n", - "\n", - "For batch scenarios such as running benchmarks for evaluating agents,\n", - "you may want to wait for the background task to stop automatically when\n", - "there are no unprocessed messages and no agent is handling messages --\n", - "the batch may considered complete.\n", - "You can achieve this by using the `stop_when_idle()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "run_context = runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await run_context.stop_when_idle() # This will block until the runtime is idle." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also directly process messages one-by-one without a background task using:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "await runtime.process_next()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Other runtime implementations will have their own ways of running the runtime." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agent and Agent Runtime\n", + "\n", + "In this and the following section, we focus on the \n", + "[core concepts](../core-concepts/overview.md) of AGNext:\n", + "agents, agent runtime, messages, and communication.\n", + "You will not find any AI models or tools here, just the foundational\n", + "building blocks for building multi-agent applications." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "An agent in AGNext is an entity defined by the base class {py:class}`agnext.core.BaseAgent`.\n",
+    "It has a unique identifier of the type {py:class}`agnext.core.AgentId`,\n",
+    "a metadata dictionary of the type {py:class}`agnext.core.AgentMetadata`,\n",
+    "and a method for handling messages, {py:meth}`agnext.core.BaseAgent.on_message`.\n",
+    "\n",
+    "An agent runtime is the execution environment for agents in AGNext.\n",
+    "Similar to the runtime environment of a programming language,\n",
+    "an agent runtime provides the necessary infrastructure to facilitate communication\n",
+    "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n",
+    "debugging.\n",
+    "For local development, developers can use {py:class}`~agnext.application.SingleThreadedAgentRuntime`,\n",
+    "which can be embedded in a Python application.\n",
+    "\n",
+    "```{note}\n",
+    "Agents are not directly instantiated and managed by application code.\n",
+    "Instead, they are created by the runtime when needed and managed by the runtime.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Implementing an Agent\n",
+    "\n",
+    "To implement an agent, developers must subclass the {py:class}`~agnext.core.BaseAgent` class,\n",
+    "declare the message types it can handle in the {py:attr}`~agnext.core.AgentMetadata.subscriptions` metadata,\n",
+    "and implement the {py:meth}`~agnext.core.BaseAgent.on_message` method.\n",
+    "This method is invoked when the agent receives a message. For example,\n",
+    "the following agent handles a simple message type and simply prints the message it receives:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from dataclasses import dataclass\n",
+    "\n",
+    "from agnext.core import AgentId, BaseAgent, MessageContext\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class MyMessage:\n",
+    "    content: str\n",
+    "\n",
+    "\n",
+    "class MyAgent(BaseAgent):\n",
+    "    def __init__(self) -> None:\n",
+    "        super().__init__(\"MyAgent\")\n",
+    "\n",
+    "    async def on_message(self, message: MyMessage, ctx: MessageContext) -> None:\n",
+    "        print(f\"Received message: {message.content}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For convenience, developers can subclass the {py:class}`~agnext.components.TypeRoutedAgent` class,\n",
+    "which provides an easy-to-use API to implement different message handlers for different message types.\n",
+    "See the section on message handlers below."
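To make the `TypeRoutedAgent` convenience concrete, here is a minimal sketch that follows the `@message_handler` pattern used elsewhere in this patch; the two message types are illustrative, not part of the framework.

```python
from dataclasses import dataclass

from agnext.components import TypeRoutedAgent, message_handler
from agnext.core import MessageContext


@dataclass
class TextItem:
    content: str


@dataclass
class ImageItem:
    url: str


class RoutedAgent(TypeRoutedAgent):
    def __init__(self) -> None:
        super().__init__("An agent that routes messages by their type")

    @message_handler
    async def on_text(self, message: TextItem, ctx: MessageContext) -> None:
        # Invoked only for TextItem messages.
        print(f"Text: {message.content}")

    @message_handler
    async def on_image(self, message: ImageItem, ctx: MessageContext) -> None:
        # Invoked only for ImageItem messages.
        print(f"Image: {message.url}")
```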
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Registering Agents\n",
+    "\n",
+    "To make an agent available to the runtime, developers can use the\n",
+    "{py:meth}`~agnext.core.AgentRuntime.register` method.\n",
+    "The process of registration associates a name and a factory function\n",
+    "that creates an instance of the agent in a given namespace.\n",
+    "The factory function is used to allow automatic creation of agents when they are needed.\n",
+    "\n",
+    "For example, to register an agent with the {py:class}`~agnext.application.SingleThreadedAgentRuntime`,\n",
+    "the following code can be used:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "AgentType(type='my_agent')"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from agnext.application import SingleThreadedAgentRuntime\n",
+    "\n",
+    "runtime = SingleThreadedAgentRuntime()\n",
+    "await runtime.register(\"my_agent\", lambda: MyAgent())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once an agent is registered, the agent's unique ID of the type {py:class}`~agnext.core.AgentId` \n",
+    "can be retrieved by calling {py:meth}`~agnext.core.AgentRuntime.get`.\n",
+    "Using the agent ID, we can send messages to the agent:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Received message: Hello, World!\n"
+     ]
+    }
+   ],
+   "source": [
+    "agent_id = AgentId(\"my_agent\", \"default\")\n",
+    "runtime.start() # Start processing messages in the background.\n",
+    "await runtime.send_message(MyMessage(content=\"Hello, World!\"), agent_id)\n",
+    "await runtime.stop() # Stop processing messages in the background."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "There is a convenience method\n",
+    "{py:meth}`~agnext.core.AgentRuntime.register_and_get` that both registers an agent\n",
+    "and gets its ID.\n",
+    "\n",
+    "```{note}\n",
+    "Because the runtime manages the lifecycle of agents, a reference to an agent,\n",
+    "whether it is {py:class}`~agnext.core.AgentId` or {py:class}`~agnext.core.AgentProxy`,\n",
+    "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Running the Single-Threaded Agent Runtime\n",
+    "\n",
+    "The above code snippet uses `runtime.start()` to start a background task\n",
+    "to process and deliver messages to recipients' message handlers.\n",
+    "This is a feature of the\n",
+    "local embedded runtime {py:class}`~agnext.application.SingleThreadedAgentRuntime`.\n",
+    "\n",
+    "To stop the background task immediately, use the `stop()` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runtime.start()\n",
+    "# ... Send messages, publish messages, etc.\n",
+    "await runtime.stop() # This will return immediately but will not cancel\n",
+    "# any in-progress message handling."
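A short sketch of the `register_and_get` convenience described in the note above, assuming it accepts the same name-plus-factory arguments as `register` and returns the agent's `AgentId` directly (the agent name here is illustrative):

```python
# Assumed signature, mirroring register(): a name plus a factory, returning an AgentId.
agent_id = await runtime.register_and_get("my_agent_2", lambda: MyAgent())
runtime.start()
await runtime.send_message(MyMessage(content="Hello again!"), agent_id)
await runtime.stop()
```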
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can resume the background task by calling `start()` again.\n", + "\n", + "For batch scenarios such as running benchmarks for evaluating agents,\n", + "you may want to wait for the background task to stop automatically when\n", + "there are no unprocessed messages and no agent is handling messages --\n", + "the batch may considered complete.\n", + "You can achieve this by using the `stop_when_idle()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()\n", + "# ... Send messages, publish messages, etc.\n", + "await runtime.stop_when_idle() # This will block until the runtime is idle." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also directly process messages one-by-one without a background task using:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "await runtime.process_next()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Other runtime implementations will have their own ways of running the runtime." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/docs/src/getting-started/message-and-communication.ipynb b/python/docs/src/getting-started/message-and-communication.ipynb index 61e8bcd28..50bf94a4a 100644 --- a/python/docs/src/getting-started/message-and-communication.ipynb +++ b/python/docs/src/getting-started/message-and-communication.ipynb @@ -136,10 +136,10 @@ } ], "source": [ - "run_context = runtime.start()\n", + "runtime.start()\n", "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"User\"), agent)\n", "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"User\"), agent)\n", - "await run_context.stop_when_idle()" + "await runtime.stop_when_idle()" ] }, { @@ -241,10 +241,10 @@ "runtime = SingleThreadedAgentRuntime()\n", "await runtime.register(\"inner_agent\", lambda: InnerAgent(\"InnerAgent\"))\n", "await runtime.register(\"outer_agent\", lambda: OuterAgent(\"OuterAgent\", \"InnerAgent\"))\n", - "run_context = runtime.start()\n", + "runtime.start()\n", "outer = AgentId(\"outer_agent\", \"default\")\n", "await runtime.send_message(Message(content=\"Hello, World!\"), outer)\n", - "await run_context.stop_when_idle()" + "await runtime.stop_when_idle()" ] }, { @@ -344,9 +344,9 @@ "await runtime.register(\"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", "await runtime.add_subscription(TypeSubscription(\"default\", \"broadcasting_agent\"))\n", "await runtime.add_subscription(TypeSubscription(\"default\", \"receiving_agent\"))\n", - "run_context = runtime.start()\n", + "runtime.start()\n", "await runtime.send_message(Message(\"Hello, World!\"), AgentId(\"broadcasting_agent\", \"default\"))\n", - "await run_context.stop_when_idle()" + "await runtime.stop()" ] }, { @@ -380,9 +380,9 @@ "await runtime.register(\"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", "await runtime.add_subscription(TypeSubscription(\"default\", 
\"broadcasting_agent\"))\n", "await runtime.add_subscription(TypeSubscription(\"default\", \"receiving_agent\"))\n", - "run_context = runtime.start()\n", + "runtime.start()\n", "await runtime.publish_message(Message(\"Hello, World! From the runtime!\"), topic_id=TopicId(\"default\", \"default\"))\n", - "await run_context.stop_when_idle()" + "await runtime.stop_when_idle()" ] }, { diff --git a/python/docs/src/getting-started/model-clients.ipynb b/python/docs/src/getting-started/model-clients.ipynb index 41dceb113..d45ef528e 100644 --- a/python/docs/src/getting-started/model-clients.ipynb +++ b/python/docs/src/getting-started/model-clients.ipynb @@ -331,13 +331,13 @@ " ),\n", ")\n", "# Start the runtime processing messages.\n", - "run_context = runtime.start()\n", + "runtime.start()\n", "# Send a message to the agent and get the response.\n", "message = Message(\"Hello, what are some fun things to do in Seattle?\")\n", "response = await runtime.send_message(message, AgentId(\"simple-agent\", \"default\"))\n", "print(response.content)\n", "# Stop the runtime processing messages.\n", - "await run_context.stop()" + "await runtime.stop()" ] } ], diff --git a/python/docs/src/getting-started/multi-agent-design-patterns.ipynb b/python/docs/src/getting-started/multi-agent-design-patterns.ipynb index 720653f5f..b62174b94 100644 --- a/python/docs/src/getting-started/multi-agent-design-patterns.ipynb +++ b/python/docs/src/getting-started/multi-agent-design-patterns.ipynb @@ -509,14 +509,14 @@ " lambda: CoderAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\")),\n", ")\n", "await runtime.add_subscription(TypeSubscription(\"default\", \"ReviewerAgent\"))\n", - "run_context = runtime.start()\n", + "runtime.start()\n", "await runtime.publish_message(\n", " message=CodeWritingTask(task=\"Write a function to find the sum of all even numbers in a list.\"),\n", " topic_id=TopicId(\"default\", \"default\"),\n", ")\n", "\n", "# Keep processing messages until idle.\n", - "await run_context.stop_when_idle()" + "await runtime.stop_when_idle()" ] }, { diff --git a/python/docs/src/getting-started/tools.ipynb b/python/docs/src/getting-started/tools.ipynb index 373a7b9f0..8a413297e 100644 --- a/python/docs/src/getting-started/tools.ipynb +++ b/python/docs/src/getting-started/tools.ipynb @@ -1,324 +1,324 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tools\n", - "\n", - "Tools are code that can be executed by an agent to perform actions. A tool\n", - "can be a simple function such as a calculator, or an API call to a third-party service\n", - "such as stock price lookup and weather forecast.\n", - "In the context of AI agents, tools are designed to be executed by agents in\n", - "response to model-generated function calls.\n", - "\n", - "AGNext provides the {py:mod}`agnext.components.tools` module with a suite of built-in\n", - "tools and utilities for creating and running custom tools." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Built-in Tools\n", - "\n", - "One of the built-in tools is the {py:class}`agnext.components.tools.PythonCodeExecutionTool`,\n", - "which allows agents to execute Python code snippets.\n", - "\n", - "Here is how you create the tool and use it." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from agnext.components.code_executor import LocalCommandLineCodeExecutor\n", - "from agnext.components.tools import PythonCodeExecutionTool\n", - "from agnext.core import CancellationToken\n", - "\n", - "# Create the tool.\n", - "code_executor = LocalCommandLineCodeExecutor()\n", - "code_execution_tool = PythonCodeExecutionTool(code_executor)\n", - "cancellation_token = CancellationToken()\n", - "\n", - "# Use the tool directly without an agent.\n", - "code = \"print('Hello, world!')\"\n", - "result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n", - "print(code_execution_tool.return_value_as_string(result))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:class}`~agnext.components.code_executor.LocalCommandLineCodeExecutor`\n", - "class is a built-in code executor that runs Python code snippets in a subprocess\n", - "in the local command line environment.\n", - "The {py:class}`~agnext.components.tools.PythonCodeExecutionTool` class wraps the code executor\n", - "and provides a simple interface to execute Python code snippets.\n", - "\n", - "Other built-in tools will be added in the future." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Function Tools\n", - "\n", - "A tool can also be a simple Python function that performs a specific action.\n", - "To create a custom function tool, you just need to create a Python function\n", - "and use the {py:class}`agnext.components.tools.FunctionTool` class to wrap it.\n", - "\n", - "For example, a simple tool to obtain the stock price of a company might look like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "138.75280591295171\n" - ] - } - ], - "source": [ - "import random\n", - "\n", - "from agnext.components.tools import FunctionTool\n", - "from agnext.core import CancellationToken\n", - "from typing_extensions import Annotated\n", - "\n", - "\n", - "async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n", - " # Returns a random stock price for demonstration purposes.\n", - " return random.uniform(10, 200)\n", - "\n", - "\n", - "# Create a function tool.\n", - "stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n", - "\n", - "# Run the tool.\n", - "cancellation_token = CancellationToken()\n", - "result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n", - "\n", - "# Print the result.\n", - "print(stock_price_tool.return_value_as_string(result))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Tool-Equipped Agent\n", - "\n", - "To use tools with an agent, you can use {py:class}`agnext.components.tool_agent.ToolAgent`,\n", - "by using it in a composition pattern.\n", - "Here is an example tool-use agent that uses {py:class}`~agnext.components.tool_agent.ToolAgent`\n", - "as an inner agent for executing tools." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "from dataclasses import dataclass\n", - "from typing import List\n", - "\n", - "from agnext.application import SingleThreadedAgentRuntime\n", - "from agnext.components import FunctionCall, TypeRoutedAgent, message_handler\n", - "from agnext.components.models import (\n", - " AssistantMessage,\n", - " ChatCompletionClient,\n", - " FunctionExecutionResult,\n", - " FunctionExecutionResultMessage,\n", - " LLMMessage,\n", - " OpenAIChatCompletionClient,\n", - " SystemMessage,\n", - " UserMessage,\n", - ")\n", - "from agnext.components.tool_agent import ToolAgent, ToolException\n", - "from agnext.components.tools import FunctionTool, Tool, ToolSchema\n", - "from agnext.core import AgentId, AgentInstantiationContext, MessageContext\n", - "\n", - "\n", - "@dataclass\n", - "class Message:\n", - " content: str\n", - "\n", - "\n", - "class ToolUseAgent(TypeRoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent: AgentId) -> None:\n", - " super().__init__(\"An agent with tools\")\n", - " self._system_messages: List[LLMMessage] = [SystemMessage(\"You are a helpful AI assistant.\")]\n", - " self._model_client = model_client\n", - " self._tool_schema = tool_schema\n", - " self._tool_agent = tool_agent\n", - "\n", - " @message_handler\n", - " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " # Create a session of messages.\n", - " session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n", - " # Get a response from the model.\n", - " response = await self._model_client.create(\n", - " self._system_messages + session, tools=self._tool_schema, cancellation_token=cancellation_token\n", - " )\n", - " # Add the response to the session.\n", - " session.append(AssistantMessage(content=response.content, source=\"assistant\"))\n", - "\n", - " # Keep iterating until the model stops generating tool calls.\n", - " while isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content):\n", - " # Execute functions called by the model by sending messages to itself.\n", - " results: List[FunctionExecutionResult | BaseException] = await asyncio.gather(\n", - " *[self.send_message(call, self._tool_agent) for call in response.content],\n", - " return_exceptions=True,\n", - " )\n", - " # Combine the results into a single response and handle exceptions.\n", - " function_results: List[FunctionExecutionResult] = []\n", - " for result in results:\n", - " if isinstance(result, FunctionExecutionResult):\n", - " function_results.append(result)\n", - " elif isinstance(result, ToolException):\n", - " function_results.append(FunctionExecutionResult(content=f\"Error: {result}\", call_id=result.call_id))\n", - " elif isinstance(result, BaseException):\n", - " raise result # Unexpected exception.\n", - " session.append(FunctionExecutionResultMessage(content=function_results))\n", - " # Query the model again with the new response.\n", - " response = await self._model_client.create(\n", - " self._system_messages + session, tools=self._tool_schema, cancellation_token=cancellation_token\n", - " )\n", - " session.append(AssistantMessage(content=response.content, source=self.metadata[\"type\"]))\n", - "\n", - " # Return the final response.\n", - " assert isinstance(response.content, str)\n", - " return Message(content=response.content)" - ] 
- }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `ToolUseAgent` class is a bit involved, however,\n", - "the core idea can be described using a simple control flow graph:\n", - "\n", - "![ToolUseAgent control flow graph](tool-use-agent-cfg.svg)\n", - "\n", - "The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n", - "and determines whether the model has generated a tool call.\n", - "If the model has generated tool calls, then the handler sends a function call\n", - "message to the {py:class}`~agnext.components.tool_agent.ToolAgent` agent\n", - "to execute the tools,\n", - "and then queries the model again with the results of the tool calls.\n", - "This process continues until the model stops generating tool calls,\n", - "at which point the final response is returned to the user.\n", - "\n", - "By having the tool execution logic in a separate agent,\n", - "we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n", - "can be observed externally and intercepted if necessary.\n", - "\n", - "To run the agent, we need to create a runtime and register the agent." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a runtime.\n", - "runtime = SingleThreadedAgentRuntime()\n", - "# Create the tools.\n", - "tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n", - "# Register the agents.\n", - "await runtime.register(\n", - " \"tool-executor-agent\",\n", - " lambda: ToolAgent(\n", - " description=\"Tool Executor Agent\",\n", - " tools=tools,\n", - " ),\n", - ")\n", - "await runtime.register(\n", - " \"tool-use-agent\",\n", - " lambda: ToolUseAgent(\n", - " OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " tool_schema=[tool.schema for tool in tools],\n", - " tool_agent=AgentId(\"tool-executor-agent\", AgentInstantiationContext.current_agent_id().key),\n", - " ),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example uses the {py:class}`agnext.components.models.OpenAIChatCompletionClient`,\n", - "for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n", - "Let's test the agent with a question about stock price." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The stock price of NVDA on June 1, 2024, is approximately $49.28.\n" - ] - } - ], - "source": [ - "# Start processing messages.\n", - "run_context = runtime.start()\n", - "# Send a direct message to the tool agent.\n", - "tool_use_agent = AgentId(\"tool-use-agent\", \"default\")\n", - "response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n", - "print(response.content)\n", - "# Stop processing messages.\n", - "await run_context.stop()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "See [samples](https://github.com/microsoft/agnext/tree/main/python/samples#tool-use-examples)\n", - "for more examples of using tools with agents, including how to use\n", - "broadcast communication model for tool execution, and how to intercept tool\n", - "execution for human-in-the-loop approval." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tools\n", + "\n", + "Tools are code that can be executed by an agent to perform actions. A tool\n", + "can be a simple function such as a calculator, or an API call to a third-party service\n", + "such as stock price lookup and weather forecast.\n", + "In the context of AI agents, tools are designed to be executed by agents in\n", + "response to model-generated function calls.\n", + "\n", + "AGNext provides the {py:mod}`agnext.components.tools` module with a suite of built-in\n", + "tools and utilities for creating and running custom tools." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Built-in Tools\n", + "\n", + "One of the built-in tools is the {py:class}`agnext.components.tools.PythonCodeExecutionTool`,\n", + "which allows agents to execute Python code snippets.\n", + "\n", + "Here is how you create the tool and use it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from agnext.components.code_executor import LocalCommandLineCodeExecutor\n", + "from agnext.components.tools import PythonCodeExecutionTool\n", + "from agnext.core import CancellationToken\n", + "\n", + "# Create the tool.\n", + "code_executor = LocalCommandLineCodeExecutor()\n", + "code_execution_tool = PythonCodeExecutionTool(code_executor)\n", + "cancellation_token = CancellationToken()\n", + "\n", + "# Use the tool directly without an agent.\n", + "code = \"print('Hello, world!')\"\n", + "result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n", + "print(code_execution_tool.return_value_as_string(result))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:class}`~agnext.components.code_executor.LocalCommandLineCodeExecutor`\n", + "class is a built-in code executor that runs Python code snippets in a subprocess\n", + "in the local command line environment.\n", + "The {py:class}`~agnext.components.tools.PythonCodeExecutionTool` class wraps the code executor\n", + "and provides a simple interface to execute Python code snippets.\n", + "\n", + "Other built-in tools will be added in the future." 
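One knob worth knowing about: the `coder_executor.py` sample touched later in this patch constructs the executor with a `work_dir`, which confines files created by executed code to a scratch directory. A variation of the cell above using that parameter:

```python
import tempfile

from agnext.components.code_executor import LocalCommandLineCodeExecutor
from agnext.components.tools import PythonCodeExecutionTool
from agnext.core import CancellationToken

# Keep artifacts produced by executed snippets in a throwaway directory.
with tempfile.TemporaryDirectory() as work_dir:
    tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir=work_dir))
    result = await tool.run_json(
        {"code": "with open('hello.txt', 'w') as f: f.write('hi')"}, CancellationToken()
    )
    print(tool.return_value_as_string(result))
```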
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Custom Function Tools\n",
+    "\n",
+    "A tool can also be a simple Python function that performs a specific action.\n",
+    "To create a custom function tool, you just need to create a Python function\n",
+    "and use the {py:class}`agnext.components.tools.FunctionTool` class to wrap it.\n",
+    "\n",
+    "For example, a simple tool to obtain the stock price of a company might look like this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "138.75280591295171\n"
+     ]
+    }
+   ],
+   "source": [
+    "import random\n",
+    "\n",
+    "from agnext.components.tools import FunctionTool\n",
+    "from agnext.core import CancellationToken\n",
+    "from typing_extensions import Annotated\n",
+    "\n",
+    "\n",
+    "async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n",
+    "    # Returns a random stock price for demonstration purposes.\n",
+    "    return random.uniform(10, 200)\n",
+    "\n",
+    "\n",
+    "# Create a function tool.\n",
+    "stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n",
+    "\n",
+    "# Run the tool.\n",
+    "cancellation_token = CancellationToken()\n",
+    "result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n",
+    "\n",
+    "# Print the result.\n",
+    "print(stock_price_tool.return_value_as_string(result))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Tool-Equipped Agent\n",
+    "\n",
+    "To use tools with an agent, you can use {py:class}`agnext.components.tool_agent.ToolAgent`\n",
+    "in a composition pattern.\n",
+    "Here is an example tool-use agent that uses {py:class}`~agnext.components.tool_agent.ToolAgent`\n",
+    "as an inner agent for executing tools."
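Before wiring the tool into an agent, it can be useful to look at the JSON schema that `FunctionTool` derives from the function signature, since `tool.schema` is exactly what gets handed to the model below; the `Annotated` description of the date parameter is carried into it. Using `stock_price_tool` from the cell above:

```python
# Inspect the schema generated from get_stock_price's signature and annotations.
print(stock_price_tool.schema)
```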
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "from agnext.application import SingleThreadedAgentRuntime\n", + "from agnext.components import FunctionCall, TypeRoutedAgent, message_handler\n", + "from agnext.components.models import (\n", + " AssistantMessage,\n", + " ChatCompletionClient,\n", + " FunctionExecutionResult,\n", + " FunctionExecutionResultMessage,\n", + " LLMMessage,\n", + " OpenAIChatCompletionClient,\n", + " SystemMessage,\n", + " UserMessage,\n", + ")\n", + "from agnext.components.tool_agent import ToolAgent, ToolException\n", + "from agnext.components.tools import FunctionTool, Tool, ToolSchema\n", + "from agnext.core import AgentId, AgentInstantiationContext, MessageContext\n", + "\n", + "\n", + "@dataclass\n", + "class Message:\n", + " content: str\n", + "\n", + "\n", + "class ToolUseAgent(TypeRoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent: AgentId) -> None:\n", + " super().__init__(\"An agent with tools\")\n", + " self._system_messages: List[LLMMessage] = [SystemMessage(\"You are a helpful AI assistant.\")]\n", + " self._model_client = model_client\n", + " self._tool_schema = tool_schema\n", + " self._tool_agent = tool_agent\n", + "\n", + " @message_handler\n", + " async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " # Create a session of messages.\n", + " session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n", + " # Get a response from the model.\n", + " response = await self._model_client.create(\n", + " self._system_messages + session, tools=self._tool_schema, cancellation_token=cancellation_token\n", + " )\n", + " # Add the response to the session.\n", + " session.append(AssistantMessage(content=response.content, source=\"assistant\"))\n", + "\n", + " # Keep iterating until the model stops generating tool calls.\n", + " while isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content):\n", + " # Execute functions called by the model by sending messages to itself.\n", + " results: List[FunctionExecutionResult | BaseException] = await asyncio.gather(\n", + " *[self.send_message(call, self._tool_agent) for call in response.content],\n", + " return_exceptions=True,\n", + " )\n", + " # Combine the results into a single response and handle exceptions.\n", + " function_results: List[FunctionExecutionResult] = []\n", + " for result in results:\n", + " if isinstance(result, FunctionExecutionResult):\n", + " function_results.append(result)\n", + " elif isinstance(result, ToolException):\n", + " function_results.append(FunctionExecutionResult(content=f\"Error: {result}\", call_id=result.call_id))\n", + " elif isinstance(result, BaseException):\n", + " raise result # Unexpected exception.\n", + " session.append(FunctionExecutionResultMessage(content=function_results))\n", + " # Query the model again with the new response.\n", + " response = await self._model_client.create(\n", + " self._system_messages + session, tools=self._tool_schema, cancellation_token=cancellation_token\n", + " )\n", + " session.append(AssistantMessage(content=response.content, source=self.metadata[\"type\"]))\n", + "\n", + " # Return the final response.\n", + " assert isinstance(response.content, str)\n", + " return Message(content=response.content)" + ] 
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `ToolUseAgent` class is a bit involved; however,\n",
+    "the core idea can be described using a simple control flow graph:\n",
+    "\n",
+    "![ToolUseAgent control flow graph](tool-use-agent-cfg.svg)\n",
+    "\n",
+    "The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n",
+    "and determines whether the model has generated a tool call.\n",
+    "If the model has generated tool calls, then the handler sends a function call\n",
+    "message to the {py:class}`~agnext.components.tool_agent.ToolAgent` agent\n",
+    "to execute the tools,\n",
+    "and then queries the model again with the results of the tool calls.\n",
+    "This process continues until the model stops generating tool calls,\n",
+    "at which point the final response is returned to the user.\n",
+    "\n",
+    "By having the tool execution logic in a separate agent,\n",
+    "we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n",
+    "can be observed externally and intercepted if necessary.\n",
+    "\n",
+    "To run the agent, we need to create a runtime and register the agent."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a runtime.\n",
+    "runtime = SingleThreadedAgentRuntime()\n",
+    "# Create the tools.\n",
+    "tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n",
+    "# Register the agents.\n",
+    "await runtime.register(\n",
+    "    \"tool-executor-agent\",\n",
+    "    lambda: ToolAgent(\n",
+    "        description=\"Tool Executor Agent\",\n",
+    "        tools=tools,\n",
+    "    ),\n",
+    ")\n",
+    "await runtime.register(\n",
+    "    \"tool-use-agent\",\n",
+    "    lambda: ToolUseAgent(\n",
+    "        OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
+    "        tool_schema=[tool.schema for tool in tools],\n",
+    "        tool_agent=AgentId(\"tool-executor-agent\", AgentInstantiationContext.current_agent_id().key),\n",
+    "    ),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This example uses the {py:class}`agnext.components.models.OpenAIChatCompletionClient`;\n",
+    "for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n",
+    "Let's test the agent with a question about stock price."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The stock price of NVDA on June 1, 2024, is approximately $49.28.\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start processing messages.\n",
+    "runtime.start()\n",
+    "# Send a direct message to the tool agent.\n",
+    "tool_use_agent = AgentId(\"tool-use-agent\", \"default\")\n",
+    "response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n",
+    "print(response.content)\n",
+    "# Stop processing messages.\n",
+    "await runtime.stop()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "See [samples](https://github.com/microsoft/agnext/tree/main/python/samples#tool-use-examples)\n",
+    "for more examples of using tools with agents, including how to use\n",
+    "the broadcast communication model for tool execution, and how to intercept tool\n",
+    "execution for human-in-the-loop approval."
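For a taste of the human-in-the-loop variant referenced above: the linked samples intercept at the runtime level, but the simplest illustration of the idea is agent-side gating of tool calls before they are dispatched to the `ToolAgent`. A hedged sketch, assuming `FunctionCall` exposes `id`, `name`, and `arguments` as the handler code above suggests:

```python
from typing import List, Tuple

from agnext.components import FunctionCall
from agnext.components.models import FunctionExecutionResult


def gate_tool_calls(
    calls: List[FunctionCall],
) -> Tuple[List[FunctionCall], List[FunctionExecutionResult]]:
    """Ask the user to approve each model-generated tool call on the console."""
    approved: List[FunctionCall] = []
    rejected: List[FunctionExecutionResult] = []
    for call in calls:
        if input(f"Run {call.name} with {call.arguments}? [y/N] ").lower() == "y":
            approved.append(call)
        else:
            # Synthesize an error result so the model learns the call was denied.
            rejected.append(
                FunctionExecutionResult(content="Error: tool call rejected by user", call_id=call.id)
            )
    return approved, rejected
```

Only the approved calls would then be sent to the tool agent; the rejected results are appended to the session alongside the real execution results.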
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/python/samples/byoa/langgraph_agent.py b/python/samples/byoa/langgraph_agent.py index dcf17f921..671464d6a 100644 --- a/python/samples/byoa/langgraph_agent.py +++ b/python/samples/byoa/langgraph_agent.py @@ -120,12 +120,12 @@ async def main() -> None: ) agent = AgentId("langgraph_tool_use_agent", key="default") # Start the runtime. - run_context = runtime.start() + runtime.start() # Send a message to the agent and get a response. response = await runtime.send_message(Message("What's the weather in SF?"), agent) print(response.content) # Stop the runtime. - await run_context.stop() + await runtime.stop() if __name__ == "__main__": diff --git a/python/samples/byoa/llamaindex_agent.py b/python/samples/byoa/llamaindex_agent.py index 16ae42121..ef7895261 100644 --- a/python/samples/byoa/llamaindex_agent.py +++ b/python/samples/byoa/llamaindex_agent.py @@ -125,7 +125,7 @@ async def main() -> None: ) agent = AgentId("chat_agent", key="default") - run_context = runtime.start() + runtime.start() # Send a message to the agent and get the response. message = Message(content="What are the best movies from studio Ghibli?") @@ -137,7 +137,7 @@ async def main() -> None: for source in response.sources: print(source.content) - await run_context.stop() + await runtime.stop() if __name__ == "__main__": diff --git a/python/samples/core/inner_outer_direct.py b/python/samples/core/inner_outer_direct.py index d193773bc..ab99cac97 100644 --- a/python/samples/core/inner_outer_direct.py +++ b/python/samples/core/inner_outer_direct.py @@ -49,11 +49,11 @@ async def main() -> None: await runtime.register("outer", lambda: Outer(AgentId("outer", AgentInstantiationContext.current_agent_id().key))) outer = AgentId("outer", "default") - run_context = runtime.start() + runtime.start() response = await runtime.send_message(MessageType(body="Hello", sender="external"), outer) print(response) - await run_context.stop() + await runtime.stop() if __name__ == "__main__": diff --git a/python/samples/core/one_agent_direct.py b/python/samples/core/one_agent_direct.py index 4775716f5..d1203afd2 100644 --- a/python/samples/core/one_agent_direct.py +++ b/python/samples/core/one_agent_direct.py @@ -52,7 +52,7 @@ async def main() -> None: ) agent = AgentId("chat_agent", "default") - run_context = runtime.start() + runtime.start() # Send a message to the agent and get the response. message = Message(content="Hello, what are some fun things to do in Seattle?") @@ -60,7 +60,7 @@ async def main() -> None: assert isinstance(response, Message) print(response.content) - await run_context.stop() + await runtime.stop() if __name__ == "__main__": diff --git a/python/samples/core/two_agents_pub_sub.py b/python/samples/core/two_agents_pub_sub.py index 30ce6ed1b..f84de6d7e 100644 --- a/python/samples/core/two_agents_pub_sub.py +++ b/python/samples/core/two_agents_pub_sub.py @@ -108,14 +108,14 @@ async def main() -> None: ) await runtime.add_subscription(TypeSubscription("default", "Cathy")) - run_context = runtime.start() + runtime.start() # Send a message to Jack to start the conversation. 
message = Message(content="Can you tell me something fun about SF?", source="User") await runtime.send_message(message, AgentId("jack", "default")) # Process messages. - await run_context.stop_when_idle() + await runtime.stop_when_idle() if __name__ == "__main__": diff --git a/python/samples/demos/assistant.py b/python/samples/demos/assistant.py index 684543fe2..c07eb1917 100644 --- a/python/samples/demos/assistant.py +++ b/python/samples/demos/assistant.py @@ -233,7 +233,7 @@ Type "exit" to exit the chat. """ runtime = SingleThreadedAgentRuntime() user = await assistant_chat(runtime) - _run_context = runtime.start() + runtime.start() print(usage) # Request the user to start the conversation. await runtime.send_message(PublishNow(), AgentId(user, "default")) diff --git a/python/samples/demos/chat_room.py b/python/samples/demos/chat_room.py index bf4ffa638..ccf068b2e 100644 --- a/python/samples/demos/chat_room.py +++ b/python/samples/demos/chat_room.py @@ -147,7 +147,7 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() app = TextualChatApp(runtime, user_name="You") await chat_room(runtime, app) - _run_context = runtime.start() + runtime.start() await app.run_async() diff --git a/python/samples/demos/chess_game.py b/python/samples/demos/chess_game.py index 4c98c1cac..5e4a9a85c 100644 --- a/python/samples/demos/chess_game.py +++ b/python/samples/demos/chess_game.py @@ -212,12 +212,12 @@ async def chess_game(runtime: AgentRuntime) -> None: # type: ignore async def main() -> None: runtime = SingleThreadedAgentRuntime() await chess_game(runtime) - run_context = runtime.start() + runtime.start() # Publish an initial message to trigger the group chat manager to start orchestration. await runtime.publish_message( TextMessage(content="Game started.", source="System"), topic_id=TopicId("default", "default") ) - await run_context.stop_when_idle() + await runtime.stop_when_idle() if __name__ == "__main__": diff --git a/python/samples/demos/illustrator_critics.py b/python/samples/demos/illustrator_critics.py index 89821fd0d..44286465f 100644 --- a/python/samples/demos/illustrator_critics.py +++ b/python/samples/demos/illustrator_critics.py @@ -106,7 +106,7 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() app = TextualChatApp(runtime, user_name="You") await illustrator_critics(runtime, app) - _run_context = runtime.start() + runtime.start() await app.run_async() diff --git a/python/samples/demos/software_consultancy.py b/python/samples/demos/software_consultancy.py index 1b9aaeffb..499a20b33 100644 --- a/python/samples/demos/software_consultancy.py +++ b/python/samples/demos/software_consultancy.py @@ -288,7 +288,7 @@ async def main() -> None: app = TextualChatApp(runtime, user_name="You") await software_consultancy(runtime, app) # Start the runtime. - _run_context = runtime.start() + runtime.start() # Start the app. 
diff --git a/python/samples/marketing-agents/test_usage.py b/python/samples/marketing-agents/test_usage.py
index a74e7855e..8288aa89a 100644
--- a/python/samples/marketing-agents/test_usage.py
+++ b/python/samples/marketing-agents/test_usage.py
@@ -32,7 +32,7 @@ async def main() -> None:
     await build_app(runtime)
     await runtime.register("Printer", lambda: Printer())
 
-    ctx = runtime.start()
+    runtime.start()
 
     topic_id = TopicId("default", "default")
 
@@ -45,7 +45,7 @@ async def main() -> None:
         topic_id=topic_id,
     )
 
-    await ctx.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/patterns/coder_executor.py b/python/samples/patterns/coder_executor.py
index f03379c75..6e7dc2aec 100644
--- a/python/samples/patterns/coder_executor.py
+++ b/python/samples/patterns/coder_executor.py
@@ -200,12 +200,12 @@ async def main(task: str, temp_dir: str) -> None:
     await runtime.register("executor", lambda: Executor(executor=LocalCommandLineCodeExecutor(work_dir=temp_dir)))
     await runtime.add_subscription(TypeSubscription("default", "coder"))
     await runtime.add_subscription(TypeSubscription("default", "executor"))
-    run_context = runtime.start()
+    runtime.start()
 
     # Publish the task message.
     await runtime.publish_message(TaskMessage(content=task), topic_id=TopicId("default", "default"))
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/patterns/coder_reviewer.py b/python/samples/patterns/coder_reviewer.py
index 1a2e0d5f8..77d763df7 100644
--- a/python/samples/patterns/coder_reviewer.py
+++ b/python/samples/patterns/coder_reviewer.py
@@ -281,7 +281,7 @@ async def main() -> None:
         ),
     )
     await runtime.add_subscription(TypeSubscription("default", "CoderAgent"))
-    run_context = runtime.start()
+    runtime.start()
     await runtime.publish_message(
         message=CodeWritingTask(
             task="Write a function to find the directory with the largest number of files using multi-processing."
@@ -290,7 +290,7 @@ async def main() -> None:
     )
 
     # Keep processing messages until idle.
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/patterns/group_chat.py b/python/samples/patterns/group_chat.py
index e39db257f..9e8e902cc 100644
--- a/python/samples/patterns/group_chat.py
+++ b/python/samples/patterns/group_chat.py
@@ -157,14 +157,14 @@ async def main() -> None:
     )
 
     # Start the runtime.
-    run_context = runtime.start()
+    runtime.start()
 
     # Start the conversation.
     await runtime.publish_message(
         Message(content="Hello, everyone!", source="Moderator"), topic_id=TopicId("default", "default")
     )
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/patterns/mixture_of_agents.py b/python/samples/patterns/mixture_of_agents.py
index 58cb32091..28bc8372d 100644
--- a/python/samples/patterns/mixture_of_agents.py
+++ b/python/samples/patterns/mixture_of_agents.py
@@ -158,13 +158,13 @@ async def main() -> None:
         ),
     )
     await runtime.add_subscription(TypeSubscription("default", "AggregatorAgent"))
-    run_context = runtime.start()
+    runtime.start()
     await runtime.publish_message(
         AggregatorTask(task="What are something fun to do in SF?"), topic_id=TopicId("default", "default")
     )
 
     # Keep processing messages.
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
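The publish/subscribe samples end with stop_when_idle() rather than stop(); per the runtime change further below, it returns only once no message is queued or being processed. A sketch of that half of the lifecycle, where task stands in for whichever message type a given sample publishes, and registration and subscriptions are elided:

    runtime = SingleThreadedAgentRuntime()
    # ... await runtime.register(...) and await runtime.add_subscription(...) as above ...
    runtime.start()
    await runtime.publish_message(task, topic_id=TopicId("default", "default"))
    await runtime.stop_when_idle()  # drain the queue, then stop the loop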
diff --git a/python/samples/patterns/multi_agent_debate.py b/python/samples/patterns/multi_agent_debate.py
index 225c52a87..188859068 100644
--- a/python/samples/patterns/multi_agent_debate.py
+++ b/python/samples/patterns/multi_agent_debate.py
@@ -264,12 +264,12 @@ async def main(question: str) -> None:
     # Register the aggregator agent.
     await runtime.register("MathAggregator", lambda: MathAggregator(num_solvers=4))
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Send a math problem to the aggregator agent.
     await runtime.publish_message(Question(content=question), topic_id=TopicId("default", "default"))
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/tool-use/coding_direct.py b/python/samples/tool-use/coding_direct.py
index fcb1799a9..c837ebf83 100644
--- a/python/samples/tool-use/coding_direct.py
+++ b/python/samples/tool-use/coding_direct.py
@@ -125,7 +125,7 @@ async def main() -> None:
         ),
     )
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Send a task to the tool user.
     response = await runtime.send_message(
@@ -134,7 +134,7 @@ async def main() -> None:
     )
     print(response.content)
     # Run the runtime until the task is completed.
-    await run_context.stop()
+    await runtime.stop()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/tool-use/coding_direct_with_intercept.py b/python/samples/tool-use/coding_direct_with_intercept.py
index 044802aba..64475ca56 100644
--- a/python/samples/tool-use/coding_direct_with_intercept.py
+++ b/python/samples/tool-use/coding_direct_with_intercept.py
@@ -66,7 +66,7 @@ async def main() -> None:
         ),
     )
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Send a task to the tool user.
     response = await runtime.send_message(
@@ -75,7 +75,7 @@ async def main() -> None:
     )
     print(response.content)
     # Run the runtime until the task is completed.
-    await run_context.stop()
+    await runtime.stop()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/tool-use/coding_pub_sub.py b/python/samples/tool-use/coding_pub_sub.py
index d7203c05d..61970b9b2 100644
--- a/python/samples/tool-use/coding_pub_sub.py
+++ b/python/samples/tool-use/coding_pub_sub.py
@@ -210,14 +210,14 @@ async def main() -> None:
     )
     await runtime.add_subscription(TypeSubscription("default", "tool_use_agent"))
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Publish a task.
     await runtime.publish_message(
         UserRequest("Run the following Python code: print('Hello, World!')"), topic_id=TopicId("default", "default")
     )
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/samples/tool-use/custom_tool_direct.py b/python/samples/tool-use/custom_tool_direct.py
index 316c20aa8..9b975a6ab 100644
--- a/python/samples/tool-use/custom_tool_direct.py
+++ b/python/samples/tool-use/custom_tool_direct.py
@@ -63,7 +63,7 @@ async def main() -> None:
     )
     tool_use_agent = AgentId("tool_enabled_agent", "default")
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Send a task to the tool user.
     response = await runtime.send_message(Message("What is the stock price of NVDA on 2024/06/01"), tool_use_agent)
@@ -72,7 +72,7 @@ async def main() -> None:
     print(response.content)
 
     # Run the runtime until the task is completed.
-    await run_context.stop()
+    await runtime.stop()
 
 
 if __name__ == "__main__":
diff --git a/python/src/agnext/application/_single_threaded_agent_runtime.py b/python/src/agnext/application/_single_threaded_agent_runtime.py
index a68e5ebe2..c92b9c58d 100644
--- a/python/src/agnext/application/_single_threaded_agent_runtime.py
+++ b/python/src/agnext/application/_single_threaded_agent_runtime.py
@@ -137,6 +137,8 @@ class SingleThreadedAgentRuntime(AgentRuntime):
         self._seen_topics: Set[TopicId] = set()
         self._subscribed_recipients: DefaultDict[TopicId, List[AgentId]] = defaultdict(list)
 
+        self._run_context: RunContext | None = None
+
     @property
     def unprocessed_messages(
         self,
@@ -430,8 +432,26 @@ class SingleThreadedAgentRuntime(AgentRuntime):
     def idle(self) -> bool:
         return len(self._message_queue) == 0 and self._outstanding_tasks.get() == 0
 
-    def start(self) -> RunContext:
-        return RunContext(self)
+    def start(self) -> None:
+        """Start the runtime message processing loop."""
+        if self._run_context is not None:
+            raise RuntimeError("Runtime is already started")
+        self._run_context = RunContext(self)
+
+    async def stop(self) -> None:
+        """Stop the runtime message processing loop."""
+        if self._run_context is None:
+            raise RuntimeError("Runtime is not started")
+        await self._run_context.stop()
+        self._run_context = None
+
+    async def stop_when_idle(self) -> None:
+        """Stop the runtime message processing loop when there is
+        no outstanding message being processed or queued."""
+        if self._run_context is None:
+            raise RuntimeError("Runtime is not started")
+        await self._run_context.stop_when_idle()
+        self._run_context = None
 
     async def agent_metadata(self, agent: AgentId) -> AgentMetadata:
         return (await self._get_agent(agent)).metadata
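This is the hunk the rest of the patch follows from: the runtime now owns its RunContext, start() refuses to run twice, and both stop variants clear the saved context so a stopped runtime can be started again. A rough sketch of the state transitions this enforces, written as it would appear inside an async function (error messages taken verbatim from the hunk above):

    runtime = SingleThreadedAgentRuntime()

    runtime.start()
    try:
        runtime.start()  # a second start while running is rejected
    except RuntimeError as e:
        assert str(e) == "Runtime is already started"

    await runtime.stop()  # clears the internal RunContext...
    runtime.start()       # ...so restarting afterwards is permitted
    await runtime.stop_when_idle()  # the idle variant resets the context the same way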
diff --git a/python/teams/team-one/examples/example.py b/python/teams/team-one/examples/example.py
index 09adffc0c..0d3d0d541 100644
--- a/python/teams/team-one/examples/example.py
+++ b/python/teams/team-one/examples/example.py
@@ -39,9 +39,9 @@ async def main() -> None:
         ),
     )
 
-    run_context = runtime.start()
+    runtime.start()
     await runtime.send_message(RequestReplyMessage(), user_proxy.id)
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/examples/example_coder.py b/python/teams/team-one/examples/example_coder.py
index 9b45539fa..eafac5876 100644
--- a/python/teams/team-one/examples/example_coder.py
+++ b/python/teams/team-one/examples/example_coder.py
@@ -33,9 +33,9 @@ async def main() -> None:
 
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([coder, executor, user_proxy]))
 
-    run_context = runtime.start()
+    runtime.start()
     await runtime.send_message(RequestReplyMessage(), user_proxy.id)
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/examples/example_file_surfer.py b/python/teams/team-one/examples/example_file_surfer.py
index 0318a5654..c1508ed44 100644
--- a/python/teams/team-one/examples/example_file_surfer.py
+++ b/python/teams/team-one/examples/example_file_surfer.py
@@ -33,9 +33,9 @@ async def main() -> None:
 
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([file_surfer, user_proxy]))
 
-    run_context = runtime.start()
+    runtime.start()
     await runtime.send_message(RequestReplyMessage(), user_proxy.id)
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/examples/example_reflexagents.py b/python/teams/team-one/examples/example_reflexagents.py
index 88c05c942..0237a0dd2 100644
--- a/python/teams/team-one/examples/example_reflexagents.py
+++ b/python/teams/team-one/examples/example_reflexagents.py
@@ -25,10 +25,10 @@ async def main() -> None:
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([fake1, fake2, fake3]))
     task_message = UserMessage(content="Test Message", source="User")
 
-    run_context = runtime.start()
+    runtime.start()
     await runtime.publish_message(BroadcastMessage(task_message), topic_id=TopicId("default", "default"))
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/examples/example_userproxy.py b/python/teams/team-one/examples/example_userproxy.py
index dcce89e44..1084a9b74 100644
--- a/python/teams/team-one/examples/example_userproxy.py
+++ b/python/teams/team-one/examples/example_userproxy.py
@@ -34,9 +34,9 @@ async def main() -> None:
 
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([coder, user_proxy]))
 
-    run_context = runtime.start()
+    runtime.start()
     await runtime.send_message(RequestReplyMessage(), user_proxy.id)
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/examples/example_websurfer.py b/python/teams/team-one/examples/example_websurfer.py
index c0bcebefc..0f751fe1a 100644
--- a/python/teams/team-one/examples/example_websurfer.py
+++ b/python/teams/team-one/examples/example_websurfer.py
@@ -36,7 +36,7 @@ async def main() -> None:
 
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([web_surfer, user_proxy]))
 
-    run_context = runtime.start()
+    runtime.start()
 
     actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, type=MultimodalWebSurfer)
     await actual_surfer.init(
@@ -47,7 +47,7 @@ async def main() -> None:
     )
 
     await runtime.send_message(RequestReplyMessage(), user_proxy.id)
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 
 if __name__ == "__main__":
diff --git a/python/teams/team-one/tests/headless_web_surfer/test_web_surfer.py b/python/teams/team-one/tests/headless_web_surfer/test_web_surfer.py
index 3f87670f5..855c7cf53 100644
--- a/python/teams/team-one/tests/headless_web_surfer/test_web_surfer.py
+++ b/python/teams/team-one/tests/headless_web_surfer/test_web_surfer.py
@@ -104,7 +104,7 @@ async def test_web_surfer() -> None:
         lambda: MultimodalWebSurfer(),
     )
     web_surfer = AgentId("WebSurfer", "default")
-    run_context = runtime.start()
+    runtime.start()
 
     actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer, MultimodalWebSurfer)
     await actual_surfer.init(model_client=client, downloads_folder=os.getcwd(), browser_channel="chromium")
@@ -150,7 +150,7 @@ async def test_web_surfer() -> None:
     with pytest.raises(AuthenticationError):
         tool_resp = await make_browser_request(actual_surfer, TOOL_SUMMARIZE_PAGE)
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 @pytest.mark.skipif(
     skip_all or skip_openai,
@@ -176,7 +176,7 @@ async def test_web_surfer_oai() -> None:
     )
     user_proxy = AgentProxy(AgentId("UserProxy", "default"), runtime)
     await runtime.register("orchestrator", lambda: RoundRobinOrchestrator([web_surfer, user_proxy]))
-    run_context = runtime.start()
+    runtime.start()
 
     actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, MultimodalWebSurfer)
     await actual_surfer.init(model_client=client, downloads_folder=os.getcwd(), browser_channel="chromium")
@@ -206,7 +206,7 @@ async def test_web_surfer_oai() -> None:
         recipient=web_surfer.id, sender=user_proxy.id
     )
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 @pytest.mark.skipif(
     skip_bing,
@@ -232,7 +232,7 @@ async def test_web_surfer_bing() -> None:
    )
     web_surfer = AgentProxy(AgentId("WebSurfer", "default"), runtime)
 
-    run_context = runtime.start()
+    runtime.start()
 
     actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, MultimodalWebSurfer)
     await actual_surfer.init(model_client=client, downloads_folder=os.getcwd(), browser_channel="chromium")
@@ -247,7 +247,7 @@ async def test_web_surfer_bing() -> None:
     tool_resp = await make_browser_request(actual_surfer, TOOL_WEB_SEARCH, {"query": BING_QUERY + " Wikipedia"})
     markdown = await actual_surfer._get_page_markdown()  # type: ignore
     assert "https://en.wikipedia.org/wiki/" in markdown
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
 if __name__ == "__main__":
     """Runs this file's tests from the command line."""
diff --git a/python/tests/test_closure_agent.py b/python/tests/test_closure_agent.py
index 256b59f55..2492bc4a3 100644
--- a/python/tests/test_closure_agent.py
+++ b/python/tests/test_closure_agent.py
@@ -35,14 +35,14 @@ async def test_register_receives_publish() -> None:
     await runtime.register("name", lambda: ClosureAgent("my_agent", log_message))
     await runtime.add_subscription(TypeSubscription("default", "name"))
     topic_id = TopicId("default", "default")
-    run_context = runtime.start()
+    runtime.start()
 
     await runtime.publish_message(Message("first message"), topic_id=topic_id)
     await runtime.publish_message(Message("second message"), topic_id=topic_id)
     await runtime.publish_message(Message("third message"), topic_id=topic_id)
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
     assert queue.qsize() == 3
     assert queue.get_nowait() == ("default", "first message")
diff --git a/python/tests/test_intervention.py b/python/tests/test_intervention.py
index c566578e2..f859cc18d 100644
--- a/python/tests/test_intervention.py
+++ b/python/tests/test_intervention.py
@@ -21,11 +21,11 @@ async def test_intervention_count_messages() -> None:
     runtime = SingleThreadedAgentRuntime(intervention_handler=handler)
     await runtime.register("name", LoopbackAgent)
     loopback = AgentId("name", key="default")
-    run_context = runtime.start()
+    runtime.start()
 
     _response = await runtime.send_message(MessageType(), recipient=loopback)
 
-    await run_context.stop()
+    await runtime.stop()
 
     assert handler.num_messages == 1
     loopback_agent = await runtime.try_get_underlying_agent_instance(loopback, type=LoopbackAgent)
@@ -43,12 +43,12 @@ async def test_intervention_drop_send() -> None:
 
     await runtime.register("name", LoopbackAgent)
     loopback = AgentId("name", key="default")
-    run_context = runtime.start()
+    runtime.start()
 
     with pytest.raises(MessageDroppedException):
         _response = await runtime.send_message(MessageType(), recipient=loopback)
 
-    await run_context.stop()
+    await runtime.stop()
 
     loopback_agent = await runtime.try_get_underlying_agent_instance(loopback, type=LoopbackAgent)
     assert loopback_agent.num_calls == 0
@@ -66,12 +66,12 @@ async def test_intervention_drop_response() -> None:
 
     await runtime.register("name", LoopbackAgent)
     loopback = AgentId("name", key="default")
-    run_context = runtime.start()
+    runtime.start()
 
     with pytest.raises(MessageDroppedException):
         _response = await runtime.send_message(MessageType(), recipient=loopback)
 
-    await run_context.stop()
+    await runtime.stop()
 
 
 @pytest.mark.asyncio
@@ -89,12 +89,12 @@ async def test_intervention_raise_exception_on_send() -> None:
 
     await runtime.register("name", LoopbackAgent)
     loopback = AgentId("name", key="default")
-    run_context = runtime.start()
+    runtime.start()
 
     with pytest.raises(InterventionException):
         _response = await runtime.send_message(MessageType(), recipient=loopback)
 
-    await run_context.stop()
+    await runtime.stop()
 
     long_running_agent = await runtime.try_get_underlying_agent_instance(loopback, type=LoopbackAgent)
     assert long_running_agent.num_calls == 0
@@ -114,11 +114,11 @@ async def test_intervention_raise_exception_on_respond() -> None:
 
     await runtime.register("name", LoopbackAgent)
     loopback = AgentId("name", key="default")
-    run_context = runtime.start()
+    runtime.start()
 
     with pytest.raises(InterventionException):
         _response = await runtime.send_message(MessageType(), recipient=loopback)
-    await run_context.stop()
+    await runtime.stop()
 
     long_running_agent = await runtime.try_get_underlying_agent_instance(loopback, type=LoopbackAgent)
     assert long_running_agent.num_calls == 1
diff --git a/python/tests/test_runtime.py b/python/tests/test_runtime.py
index ccdb25d3d..e21dc1125 100644
--- a/python/tests/test_runtime.py
+++ b/python/tests/test_runtime.py
@@ -30,13 +30,13 @@ async def test_register_receives_publish() -> None:
 
     runtime = SingleThreadedAgentRuntime()
     await runtime.register("name", LoopbackAgent)
-    run_context = runtime.start()
+    runtime.start()
     await runtime.add_subscription(TypeSubscription("default", "name"))
     agent_id = AgentId("name", key="default")
     topic_id = TopicId("default", "default")
     await runtime.publish_message(MessageType(), topic_id=topic_id)
 
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
     # Agent in default namespace should have received the message
     long_running_agent = await runtime.try_get_underlying_agent_instance(agent_id, type=LoopbackAgent)
@@ -62,7 +62,7 @@ async def test_register_receives_publish_cascade() -> None:
         await runtime.register(f"name{i}", lambda: CascadingAgent(max_rounds))
         await runtime.add_subscription(TypeSubscription("default", f"name{i}"))
 
-    run_context = runtime.start()
+    runtime.start()
 
     # Publish messages
     topic_id = TopicId("default", "default")
@@ -70,7 +70,7 @@ async def test_register_receives_publish_cascade() -> None:
         await runtime.publish_message(CascadingMessageType(round=1), topic_id)
 
     # Process until idle.
-    await run_context.stop_when_idle()
+    await runtime.stop_when_idle()
 
     # Check that each agent received the correct number of messages.
     for i in range(num_agents):
diff --git a/python/tests/test_tool_agent.py b/python/tests/test_tool_agent.py
index acc66e1fa..03e0a8ebb 100644
--- a/python/tests/test_tool_agent.py
+++ b/python/tests/test_tool_agent.py
@@ -44,7 +44,7 @@ async def test_tool_agent() -> None:
         ),
     )
     agent = AgentId("tool_agent", "default")
-    run = runtime.start()
+    runtime.start()
 
     # Test pass function
     result = await runtime.send_message(
@@ -73,4 +73,4 @@ async def test_tool_agent() -> None:
     with pytest.raises(asyncio.CancelledError):
         await result_future
 
-    await run.stop()
+    await runtime.stop()
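The updated tests pin the happy paths of the new API; the not-started error path added in the runtime hunk is not exercised. A hypothetical extra test, not part of this patch, that would cover it:

    import pytest

    from agnext.application import SingleThreadedAgentRuntime


    @pytest.mark.asyncio
    async def test_stop_before_start_raises() -> None:
        # stop() without a prior start() should hit the new guard clause.
        runtime = SingleThreadedAgentRuntime()
        with pytest.raises(RuntimeError, match="Runtime is not started"):
            await runtime.stop()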