Remove chat layer, move it to examples/common (#125)
@@ -2,6 +2,12 @@

This directory contains examples and demos of how to use AGNext.

- `common`: Contains common implementations and utilities used by the examples.
- `core`: Contains examples that illustrate the core concepts of AGNext.
- `tool-use`: Contains examples that illustrate tool use in AGNext.
- `patterns`: Contains examples that illustrate how multi-agent patterns can be implemented in AGNext.
- `demos`: Contains interactive demos that showcase applications that can be built using AGNext.

## Core examples

We provide examples to illustrate the core concepts of AGNext:
@@ -42,7 +48,7 @@ We provide interactive demos that showcase applications that can be built using

  to implement the reflection pattern for image generation.
- [`software_consultancy.py`](demos/software_consultancy.py): a demonstration of multi-agent interaction using
  the group chat pattern.
- [`chess_game.py`](tool-use/chess_game.py): an example with two chess player agents that execute their own tools to demonstrate tool use and reflection on tool use.
- [`chess_game.py`](demos/chess_game.py): an example with two chess player agents that execute their own tools to demonstrate tool use and reflection on tool use.

## Running the examples and demos
@@ -52,24 +58,15 @@ First, you need a shell with AGNext and the examples dependencies installed. To

hatch shell
```

To run an example, just run the corresponding Python script. For example, to run the `coder_reviewer_pub_sub.py` example, run:
To run an example, just run the corresponding Python script. For example:

```bash
hatch shell
python core/coder_reviewer.py
python core/one_agent_direct.py
```

Or simply:

```bash
hatch run python core/coder_reviewer.py
hatch run python core/one_agent_direct.py
```

To enable logging, turn on verbose mode by setting the `--verbose` flag:

```bash
hatch run python core/coder_reviewer.py --verbose
```

By default the log file is saved in the same directory with the same filename
as the script, e.g., "coder_reviewer.log".
0 python/examples/common/__init__.py Normal file

11 python/examples/common/agents/__init__.py Normal file
@@ -0,0 +1,11 @@
from ._chat_completion_agent import ChatCompletionAgent
from ._image_generation_agent import ImageGenerationAgent
from ._oai_assistant import OpenAIAssistantAgent
from ._user_proxy import UserProxyAgent

__all__ = [
    "ChatCompletionAgent",
    "OpenAIAssistantAgent",
    "UserProxyAgent",
    "ImageGenerationAgent",
]
263 python/examples/common/agents/_chat_completion_agent.py Normal file
@@ -0,0 +1,263 @@
import asyncio
import json
from typing import Any, Coroutine, Dict, List, Mapping, Sequence, Tuple

from agnext.components import (
    FunctionCall,
    TypeRoutedAgent,
    message_handler,
)
from agnext.components.memory import ChatMemory
from agnext.components.models import (
    ChatCompletionClient,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    SystemMessage,
)
from agnext.components.tools import Tool
from agnext.core import AgentId, CancellationToken

from ..types import (
    FunctionCallMessage,
    Message,
    MultiModalMessage,
    PublishNow,
    Reset,
    RespondNow,
    ResponseFormat,
    TextMessage,
    ToolApprovalRequest,
    ToolApprovalResponse,
)
from ..utils import convert_messages_to_llm_messages


class ChatCompletionAgent(TypeRoutedAgent):
    """An agent implementation that uses the ChatCompletion API to generate
    responses and execute tools.

    Args:
        description (str): The description of the agent.
        system_messages (List[SystemMessage]): The system messages to use for
            the ChatCompletion API.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        model_client (ChatCompletionClient): The client to use for the
            ChatCompletion API.
        tools (Sequence[Tool], optional): The tools used by the agent. Defaults
            to []. If no tools are provided, the agent cannot handle tool calls.
            If tools are provided, and the response from the model is a list of
            tool calls, the agent calls itself with the tool calls until it
            gets a response that is not a list of tool calls, and then uses that
            response as the final response.
        tool_approver (AgentId | None, optional): The agent that approves tool
            calls. Defaults to None. If no tool approver is provided, the agent
            will execute the tools without approval. If a tool approver is
            provided, the agent will send a request to the tool approver before
            executing the tools.
    """

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        memory: ChatMemory[Message],
        model_client: ChatCompletionClient,
        tools: Sequence[Tool] = [],
        tool_approver: AgentId | None = None,
    ) -> None:
        super().__init__(description)
        self._description = description
        self._system_messages = system_messages
        self._client = model_client
        self._memory = memory
        self._tools = tools
        self._tool_approver = tool_approver

    @message_handler()
    async def on_text_message(self, message: TextMessage, cancellation_token: CancellationToken) -> None:
        """Handle a text message. This method adds the message to the memory and
        does not generate any message."""
        # Add a user message.
        await self._memory.add_message(message)

    @message_handler()
    async def on_multi_modal_message(self, message: MultiModalMessage, cancellation_token: CancellationToken) -> None:
        """Handle a multimodal message. This method adds the message to the memory
        and does not generate any message."""
        # Add a user message.
        await self._memory.add_message(message)

    @message_handler()
    async def on_reset(self, message: Reset, cancellation_token: CancellationToken) -> None:
        """Handle a reset message. This method clears the memory."""
        # Reset the chat messages.
        await self._memory.clear()

    @message_handler()
    async def on_respond_now(
        self, message: RespondNow, cancellation_token: CancellationToken
    ) -> TextMessage | FunctionCallMessage:
        """Handle a respond now message. This method generates a response and
        returns it to the sender."""
        # Generate a response.
        response = await self._generate_response(message.response_format, cancellation_token)

        # Return the response.
        return response

    @message_handler()
    async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:
        """Handle a publish now message. This method generates a response and
        publishes it."""
        # Generate a response.
        response = await self._generate_response(message.response_format, cancellation_token)

        # Publish the response.
        await self.publish_message(response)

    @message_handler()
    async def on_tool_call_message(
        self, message: FunctionCallMessage, cancellation_token: CancellationToken
    ) -> FunctionExecutionResultMessage:
        """Handle a tool call message. This method executes the tools and
        returns the results."""
        if len(self._tools) == 0:
            raise ValueError("No tools available")

        # Add a tool call message.
        await self._memory.add_message(message)

        # Execute the tool calls.
        results: List[FunctionExecutionResult] = []
        execution_futures: List[Coroutine[Any, Any, Tuple[str, str]]] = []
        for function_call in message.content:
            # Parse the arguments.
            try:
                arguments = json.loads(function_call.arguments)
            except json.JSONDecodeError:
                results.append(
                    FunctionExecutionResult(
                        content=f"Error: Could not parse arguments for function {function_call.name}.",
                        call_id=function_call.id,
                    )
                )
                continue
            # Execute the function.
            future = self._execute_function(
                function_call.name,
                arguments,
                function_call.id,
                cancellation_token=cancellation_token,
            )
            # Append the async result.
            execution_futures.append(future)
        if execution_futures:
            # Wait for all async results.
            execution_results = await asyncio.gather(*execution_futures)
            # Add the results.
            for execution_result, call_id in execution_results:
                results.append(FunctionExecutionResult(content=execution_result, call_id=call_id))

        # Create a tool call result message.
        tool_call_result_msg = FunctionExecutionResultMessage(content=results)

        # Add tool call result message.
        await self._memory.add_message(tool_call_result_msg)

        # Return the results.
        return tool_call_result_msg

    async def _generate_response(
        self,
        response_format: ResponseFormat,
        cancellation_token: CancellationToken,
    ) -> TextMessage | FunctionCallMessage:
        # Get a response from the model.
        historical_messages = await self._memory.get_messages()
        response = await self._client.create(
            self._system_messages + convert_messages_to_llm_messages(historical_messages, self.metadata["name"]),
            tools=self._tools,
            json_output=response_format == ResponseFormat.json_object,
        )

        # If the agent has tools, and the response is a list of tool calls,
        # iterate with itself until we get a response that is not a list of
        # tool calls.
        while (
            len(self._tools) > 0
            and isinstance(response.content, list)
            and all(isinstance(x, FunctionCall) for x in response.content)
        ):
            # Send a function call message to itself.
            response = await self.send_message(
                message=FunctionCallMessage(content=response.content, source=self.metadata["name"]),
                recipient=self.id,
                cancellation_token=cancellation_token,
            )
            # Generate a new response based on the updated memory.
            historical_messages = await self._memory.get_messages()
            response = await self._client.create(
                self._system_messages + convert_messages_to_llm_messages(historical_messages, self.metadata["name"]),
                tools=self._tools,
                json_output=response_format == ResponseFormat.json_object,
            )

        final_response: Message
        if isinstance(response.content, str):
            # If the response is a string, return a text message.
            final_response = TextMessage(content=response.content, source=self.metadata["name"])
        elif isinstance(response.content, list) and all(isinstance(x, FunctionCall) for x in response.content):
            # If the response is a list of function calls, return a function call message.
            final_response = FunctionCallMessage(content=response.content, source=self.metadata["name"])
        else:
            raise ValueError(f"Unexpected response: {response.content}")

        # Add the response to the chat messages.
        await self._memory.add_message(final_response)

        return final_response

    async def _execute_function(
        self,
        name: str,
        args: Dict[str, Any],
        call_id: str,
        cancellation_token: CancellationToken,
    ) -> Tuple[str, str]:
        # Find the tool.
        tool = next((t for t in self._tools if t.name == name), None)
        if tool is None:
            return (f"Error: tool {name} not found.", call_id)

        # Check if the tool needs approval.
        if self._tool_approver is not None:
            # Send a tool approval request.
            approval_request = ToolApprovalRequest(
                tool_call=FunctionCall(id=call_id, arguments=json.dumps(args), name=name)
            )
            approval_response = await self.send_message(
                message=approval_request,
                recipient=self._tool_approver,
                cancellation_token=cancellation_token,
            )
            if not isinstance(approval_response, ToolApprovalResponse):
                raise ValueError(f"Expecting {ToolApprovalResponse.__name__}, received: {type(approval_response)}")
            if not approval_response.approved:
                return (f"Error: tool {name} not approved, reason: {approval_response.reason}", call_id)

        try:
            result = await tool.run_json(args, cancellation_token)
            result_as_str = tool.return_value_as_string(result)
        except Exception as e:
            result_as_str = f"Error: {str(e)}"
        return (result_as_str, call_id)

    def save_state(self) -> Mapping[str, Any]:
        return {
            "memory": self._memory.save_state(),
            "system_messages": self._system_messages,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        self._memory.load_state(state["memory"])
        self._system_messages = state["system_messages"]
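The tool-call loop described in the docstring above is easiest to see from the constructor's point of view. Below is a minimal construction sketch, not part of this commit: `ExampleModelClient` is a hypothetical stand-in for any concrete `ChatCompletionClient` implementation, the `common` package is assumed importable from the `examples` directory, and in a real program the agent would also be registered with an agent runtime, which this diff does not show.

```python
from agnext.components.models import SystemMessage

from common.agents import ChatCompletionAgent
from common.memory import BufferedChatMemory

# ExampleModelClient is a hypothetical ChatCompletionClient implementation.
agent = ChatCompletionAgent(
    description="An assistant that answers questions and can call tools.",
    system_messages=[SystemMessage("You are a helpful assistant.")],
    memory=BufferedChatMemory(buffer_size=10),  # keep the last 10 messages in view
    model_client=ExampleModelClient(),
)
```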
78 python/examples/common/agents/_image_generation_agent.py Normal file
@@ -0,0 +1,78 @@
from typing import Literal

import openai
from agnext.components import (
    Image,
    TypeRoutedAgent,
    message_handler,
)
from agnext.components.memory import ChatMemory
from agnext.core import CancellationToken

from ..types import (
    Message,
    MultiModalMessage,
    PublishNow,
    Reset,
    TextMessage,
)


class ImageGenerationAgent(TypeRoutedAgent):
    """An agent that generates images using DALL-E models. It publishes the
    generated images as MultiModalMessage.

    Args:
        description (str): The description of the agent.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        client (openai.AsyncClient): The client to use for the OpenAI API.
        model (Literal["dall-e-2", "dall-e-3"], optional): The DALL-E model to use. Defaults to "dall-e-2".
    """

    def __init__(
        self,
        description: str,
        memory: ChatMemory[Message],
        client: openai.AsyncClient,
        model: Literal["dall-e-2", "dall-e-3"] = "dall-e-2",
    ):
        super().__init__(description)
        self._client = client
        self._model = model
        self._memory = memory

    @message_handler
    async def on_text_message(self, message: TextMessage, cancellation_token: CancellationToken) -> None:
        """Handle a text message. This method adds the message to the memory."""
        await self._memory.add_message(message)

    @message_handler
    async def on_reset(self, message: Reset, cancellation_token: CancellationToken) -> None:
        """Handle a reset message. This method clears the memory."""
        await self._memory.clear()

    @message_handler
    async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:
        """Handle a publish now message. This method generates an image using a DALL-E model with
        a prompt. The prompt is a concatenation of all TextMessages in the memory. The generated
        image is published as a MultiModalMessage."""
        response = await self._generate_response(cancellation_token)
        await self.publish_message(response)

    async def _generate_response(self, cancellation_token: CancellationToken) -> MultiModalMessage:
        messages = await self._memory.get_messages()
        if len(messages) == 0:
            return MultiModalMessage(
                content=["I need more information to generate an image."], source=self.metadata["name"]
            )
        prompt = ""
        for m in messages:
            assert isinstance(m, TextMessage)
            prompt += m.content + "\n"
        prompt = prompt.strip()
        response = await self._client.images.generate(model=self._model, prompt=prompt, response_format="b64_json")
        assert len(response.data) > 0 and response.data[0].b64_json is not None
        # Create a MultiModalMessage with the image.
        image = Image.from_base64(response.data[0].b64_json)
        multi_modal_message = MultiModalMessage(content=[image], source=self.metadata["name"])
        return multi_modal_message
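For context, here is one way such an agent might be constructed. A sketch, not part of this commit, assuming `OPENAI_API_KEY` is set in the environment and the `common` package is on the import path:

```python
import openai

from common.agents import ImageGenerationAgent
from common.memory import BufferedChatMemory

# The DALL-E prompt is the concatenation of the TextMessages in memory,
# so a small buffer keeps the prompt focused on the recent conversation.
image_agent = ImageGenerationAgent(
    description="Generates images from the conversation so far.",
    memory=BufferedChatMemory(buffer_size=5),
    client=openai.AsyncClient(),
    model="dall-e-3",
)
```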
132 python/examples/common/agents/_oai_assistant.py Normal file
@@ -0,0 +1,132 @@
from typing import Any, Callable, List, Mapping

import openai
from agnext.components import TypeRoutedAgent, message_handler
from agnext.core import CancellationToken
from openai import AsyncAssistantEventHandler
from openai.types.beta import AssistantResponseFormatParam

from ..types import PublishNow, Reset, RespondNow, ResponseFormat, TextMessage


class OpenAIAssistantAgent(TypeRoutedAgent):
    """An agent implementation that uses the OpenAI Assistant API to generate
    responses.

    Args:
        description (str): The description of the agent.
        client (openai.AsyncClient): The client to use for the OpenAI API.
        assistant_id (str): The assistant ID to use for the OpenAI API.
        thread_id (str): The thread ID to use for the OpenAI API.
        assistant_event_handler_factory (Callable[[], AsyncAssistantEventHandler], optional):
            A factory function to create an async assistant event handler. Defaults to None.
            If provided, the agent will use the streaming mode with the event handler.
            If not provided, the agent will use the blocking mode to generate responses.
    """

    def __init__(
        self,
        description: str,
        client: openai.AsyncClient,
        assistant_id: str,
        thread_id: str,
        assistant_event_handler_factory: Callable[[], AsyncAssistantEventHandler] | None = None,
    ) -> None:
        super().__init__(description)
        self._client = client
        self._assistant_id = assistant_id
        self._thread_id = thread_id
        self._assistant_event_handler_factory = assistant_event_handler_factory

    @message_handler()
    async def on_text_message(self, message: TextMessage, cancellation_token: CancellationToken) -> None:
        """Handle a text message. This method adds the message to the thread."""
        # Save the message to the thread.
        _ = await self._client.beta.threads.messages.create(
            thread_id=self._thread_id,
            content=message.content,
            role="user",
            metadata={"sender": message.source},
        )

    @message_handler()
    async def on_reset(self, message: Reset, cancellation_token: CancellationToken) -> None:
        """Handle a reset message. This method deletes all messages in the thread."""
        # Get all messages in this thread.
        all_msgs: List[str] = []
        while True:
            if not all_msgs:
                msgs = await self._client.beta.threads.messages.list(self._thread_id)
            else:
                msgs = await self._client.beta.threads.messages.list(self._thread_id, after=all_msgs[-1])
            for msg in msgs.data:
                all_msgs.append(msg.id)
            if not msgs.has_next_page():
                break
        # Delete all the messages.
        for msg_id in all_msgs:
            status = await self._client.beta.threads.messages.delete(message_id=msg_id, thread_id=self._thread_id)
            assert status.deleted is True

    @message_handler()
    async def on_respond_now(self, message: RespondNow, cancellation_token: CancellationToken) -> TextMessage:
        """Handle a respond now message. This method generates a response and returns it to the sender."""
        return await self._generate_response(message.response_format, cancellation_token)

    @message_handler()
    async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:
        """Handle a publish now message. This method generates a response and publishes it."""
        response = await self._generate_response(message.response_format, cancellation_token)
        await self.publish_message(response)

    async def _generate_response(
        self, requested_response_format: ResponseFormat, cancellation_token: CancellationToken
    ) -> TextMessage:
        # Handle response format.
        if requested_response_format == ResponseFormat.json_object:
            response_format = AssistantResponseFormatParam(type="json_object")
        else:
            response_format = AssistantResponseFormatParam(type="text")

        if self._assistant_event_handler_factory is not None:
            # Use event handler and streaming mode if available.
            async with self._client.beta.threads.runs.stream(
                thread_id=self._thread_id,
                assistant_id=self._assistant_id,
                event_handler=self._assistant_event_handler_factory(),
                response_format=response_format,
            ) as stream:
                run = await stream.get_final_run()
        else:
            # Use blocking mode.
            run = await self._client.beta.threads.runs.create(
                thread_id=self._thread_id,
                assistant_id=self._assistant_id,
                response_format=response_format,
            )

        if run.status != "completed":
            # TODO: handle other statuses.
            raise ValueError(f"Run did not complete successfully: {run}")

        # Get the last message from the run.
        response = await self._client.beta.threads.messages.list(self._thread_id, run_id=run.id, order="desc", limit=1)
        last_message_content = response.data[0].content

        # TODO: handle array of content.
        text_content = [content for content in last_message_content if content.type == "text"]
        if not text_content:
            raise ValueError(f"Expected text content in the last message: {last_message_content}")

        # TODO: handle multiple text content.
        return TextMessage(content=text_content[0].text.value, source=self.metadata["name"])

    def save_state(self) -> Mapping[str, Any]:
        return {
            "assistant_id": self._assistant_id,
            "thread_id": self._thread_id,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        self._assistant_id = state["assistant_id"]
        self._thread_id = state["thread_id"]
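A construction sketch, not part of this commit. The assistant and thread IDs below are hypothetical placeholders; a real assistant and thread must be created through the OpenAI API first:

```python
import openai

from common.agents import OpenAIAssistantAgent

client = openai.AsyncClient()
# "asst_..." and "thread_..." are hypothetical placeholder IDs.
assistant_agent = OpenAIAssistantAgent(
    description="An assistant backed by the OpenAI Assistant API.",
    client=client,
    assistant_id="asst_...",
    thread_id="thread_...",
)
```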
31 python/examples/common/agents/_user_proxy.py Normal file
@@ -0,0 +1,31 @@
import asyncio

from agnext.components import TypeRoutedAgent, message_handler
from agnext.core import CancellationToken

from ..types import PublishNow, TextMessage


class UserProxyAgent(TypeRoutedAgent):
    """An agent that proxies user input from the console. Override the `get_user_input`
    method to customize how user input is retrieved.

    Args:
        description (str): The description of the agent.
        user_input_prompt (str): The console prompt to show to the user when asking for input.
    """

    def __init__(self, description: str, user_input_prompt: str) -> None:
        super().__init__(description)
        self._user_input_prompt = user_input_prompt

    @message_handler()
    async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:
        """Handle a publish now message. This method prompts the user for input, then publishes it."""
        user_input = await self.get_user_input(self._user_input_prompt)
        await self.publish_message(TextMessage(content=user_input, source=self.metadata["name"]))

    async def get_user_input(self, prompt: str) -> str:
        """Get user input from the console. Override this method to customize how user input is retrieved."""
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, input, prompt)
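Because `get_user_input` is designed to be overridden, a scripted proxy for tests or unattended runs is a natural variation. A sketch, not part of this commit:

```python
from common.agents import UserProxyAgent


class ScriptedUserProxyAgent(UserProxyAgent):
    """A proxy that returns a canned reply instead of reading the console."""

    async def get_user_input(self, prompt: str) -> str:
        return "APPROVE"


agent = ScriptedUserProxyAgent(description="A scripted user.", user_input_prompt="> ")
```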
4 python/examples/common/memory/__init__.py Normal file
@@ -0,0 +1,4 @@
from ._buffered import BufferedChatMemory
from ._head_and_tail import HeadAndTailChatMemory

__all__ = ["BufferedChatMemory", "HeadAndTailChatMemory"]
47 python/examples/common/memory/_buffered.py Normal file
@@ -0,0 +1,47 @@
from typing import Any, List, Mapping

from agnext.components.memory import ChatMemory
from agnext.components.models import FunctionExecutionResultMessage

from ..types import Message


class BufferedChatMemory(ChatMemory[Message]):
    """A buffered chat memory that keeps a view of the last n messages,
    where n is the buffer size. The buffer size is set at initialization.

    Args:
        buffer_size (int): The size of the buffer.
    """

    def __init__(self, buffer_size: int) -> None:
        self._messages: List[Message] = []
        self._buffer_size = buffer_size

    async def add_message(self, message: Message) -> None:
        """Add a message to the memory."""
        self._messages.append(message)

    async def get_messages(self) -> List[Message]:
        """Get at most `buffer_size` recent messages."""
        messages = self._messages[-self._buffer_size :]
        # Handle the case where the first message is a function call result message.
        if messages and isinstance(messages[0], FunctionExecutionResultMessage):
            # Remove the first message from the list.
            messages = messages[1:]
        return messages

    async def clear(self) -> None:
        """Clear the message memory."""
        self._messages = []

    def save_state(self) -> Mapping[str, Any]:
        return {
            "messages": [message for message in self._messages],
            "buffer_size": self._buffer_size,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        self._messages = state["messages"]
        self._buffer_size = state["buffer_size"]
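A small, self-contained sketch, not part of this commit, showing the buffer behavior, assuming the `common` package is importable:

```python
import asyncio

from common.memory import BufferedChatMemory
from common.types import TextMessage


async def main() -> None:
    memory = BufferedChatMemory(buffer_size=3)
    for i in range(5):
        await memory.add_message(TextMessage(content=f"message {i}", source="user"))
    # Only the last three messages remain in view: message 2, 3, and 4.
    for message in await memory.get_messages():
        print(message.content)


asyncio.run(main())
```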
67 python/examples/common/memory/_head_and_tail.py Normal file
@@ -0,0 +1,67 @@
from typing import Any, List, Mapping

from agnext.components.memory import ChatMemory
from agnext.components.models import FunctionExecutionResultMessage

from ..types import FunctionCallMessage, Message, TextMessage


class HeadAndTailChatMemory(ChatMemory[Message]):
    """A chat memory that keeps a view of the first n and last m messages,
    where n is the head size and m is the tail size. The head and tail sizes
    are set at initialization.

    Args:
        head_size (int): The size of the head.
        tail_size (int): The size of the tail.
    """

    def __init__(self, head_size: int, tail_size: int) -> None:
        self._messages: List[Message] = []
        self._head_size = head_size
        self._tail_size = tail_size

    async def add_message(self, message: Message) -> None:
        """Add a message to the memory."""
        self._messages.append(message)

    async def get_messages(self) -> List[Message]:
        """Get at most `head_size` oldest messages and `tail_size` most recent
        messages, with a placeholder for any skipped messages in between."""
        head_messages = self._messages[: self._head_size]
        # Handle the case where the last head message is a function call message.
        if head_messages and isinstance(head_messages[-1], FunctionCallMessage):
            # Remove the last message from the head.
            head_messages = head_messages[:-1]

        tail_messages = self._messages[-self._tail_size :]
        # Handle the case where the first tail message is a function call result message.
        if tail_messages and isinstance(tail_messages[0], FunctionExecutionResultMessage):
            # Remove the first message from the tail.
            tail_messages = tail_messages[1:]

        num_skipped = len(self._messages) - self._head_size - self._tail_size
        if num_skipped <= 0:
            # If there are not enough messages to fill the head and tail,
            # return all messages.
            return self._messages

        placeholder_messages = [TextMessage(content=f"Skipped {num_skipped} messages.", source="System")]
        return head_messages + placeholder_messages + tail_messages

    async def clear(self) -> None:
        """Clear the message memory."""
        self._messages = []

    def save_state(self) -> Mapping[str, Any]:
        return {
            "messages": [message for message in self._messages],
            "head_size": self._head_size,
            "tail_size": self._tail_size,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        self._messages = state["messages"]
        self._head_size = state["head_size"]
        self._tail_size = state["tail_size"]
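A sketch, not part of this commit, showing the head/tail truncation and the placeholder message, assuming the `common` package is importable:

```python
import asyncio

from common.memory import HeadAndTailChatMemory
from common.types import TextMessage


async def main() -> None:
    memory = HeadAndTailChatMemory(head_size=2, tail_size=2)
    for i in range(10):
        await memory.add_message(TextMessage(content=f"message {i}", source="user"))
    # Prints messages 0 and 1, then "Skipped 6 messages.", then messages 8 and 9.
    for message in await memory.get_messages():
        print(message.content)


asyncio.run(main())
```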
4 python/examples/common/patterns/__init__.py Normal file
@@ -0,0 +1,4 @@
from ._group_chat_manager import GroupChatManager
from ._orchestrator_chat import OrchestratorChat

__all__ = ["GroupChatManager", "OrchestratorChat"]
155 python/examples/common/patterns/_group_chat_manager.py Normal file
@@ -0,0 +1,155 @@
import logging
from typing import Any, Callable, List, Mapping

from agnext.components import TypeRoutedAgent, message_handler
from agnext.components.memory import ChatMemory
from agnext.components.models import ChatCompletionClient
from agnext.core import AgentId, AgentProxy, CancellationToken

from ..types import (
    Message,
    MultiModalMessage,
    PublishNow,
    Reset,
    TextMessage,
)
from ._group_chat_utils import select_speaker

logger = logging.getLogger("agnext.events")


class GroupChatManager(TypeRoutedAgent):
    """An agent that manages a group chat through event-driven orchestration.

    Args:
        description (str): The description of the agent.
        participants (List[AgentId]): The list of participants in the group chat.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        model_client (ChatCompletionClient, optional): The client to use for the model.
            If provided, the agent will use the model to select the next speaker.
            If not provided, the agent will select the next speaker from the list of participants
            according to the order given.
        termination_word (str, optional): The word that terminates the group chat. Defaults to "TERMINATE".
        transitions (Mapping[AgentId, List[AgentId]], optional): The transitions between agents.
            Keys are the agents, and values are the list of agents that can follow the key agent. Defaults to {}.
            If provided, the group chat manager will use the transitions to select the next speaker.
            If a transition is not provided for an agent, the choices fall back to all participants.
            If no model client is provided, a transition must have a single value.
        on_message_received (Callable[[TextMessage], None], optional): A custom handler to call when a message is received.
            Defaults to None.
    """

    def __init__(
        self,
        description: str,
        participants: List[AgentId],
        memory: ChatMemory[Message],
        model_client: ChatCompletionClient | None = None,
        termination_word: str = "TERMINATE",
        transitions: Mapping[AgentId, List[AgentId]] = {},
        on_message_received: Callable[[TextMessage | MultiModalMessage], None] | None = None,
    ):
        super().__init__(description)
        self._memory = memory
        self._client = model_client
        self._participants = participants
        self._participant_proxies = {p: AgentProxy(p, self.runtime) for p in participants}
        self._termination_word = termination_word
        for key, value in transitions.items():
            if not value:
                # Make sure no empty transitions are provided.
                raise ValueError(f"Empty transition list provided for {key.name}.")
            if key not in participants:
                # Make sure all keys are in the list of participants.
                raise ValueError(f"Transition key {key.name} not found in participants.")
            for v in value:
                if v not in participants:
                    # Make sure all values are in the list of participants.
                    raise ValueError(f"Transition value {v.name} not found in participants.")
            if self._client is None:
                # Make sure there is only one transition for each key if no model client is provided.
                if len(value) > 1:
                    raise ValueError(f"Multiple transitions provided for {key.name} but no model client is provided.")
        self._transitions = transitions
        self._on_message_received = on_message_received

    @message_handler()
    async def on_reset(self, message: Reset, cancellation_token: CancellationToken) -> None:
        """Handle a reset message. This method clears the memory."""
        await self._memory.clear()

    @message_handler()
    async def on_new_message(
        self, message: TextMessage | MultiModalMessage, cancellation_token: CancellationToken
    ) -> None:
        """Handle a message. This method adds the message to the memory, selects the next speaker,
        and sends a message to the selected speaker to publish a response."""
        # Call the custom on_message_received handler if provided.
        if self._on_message_received is not None:
            self._on_message_received(message)

        # Check if the message contains the termination word.
        if isinstance(message, TextMessage) and self._termination_word in message.content:
            # Terminate the group chat by not selecting the next speaker.
            return

        # Save the message to chat memory.
        await self._memory.add_message(message)

        # Get the last speaker.
        last_speaker_name = message.source
        last_speaker_index = next((i for i, p in enumerate(self._participants) if p.name == last_speaker_name), None)

        # Get the candidates for the next speaker.
        if last_speaker_index is not None:
            logger.debug(f"Last speaker: {last_speaker_name}")
            last_speaker = self._participants[last_speaker_index]
            if self._transitions.get(last_speaker) is not None:
                candidates = [c for c in self._participants if c in self._transitions[last_speaker]]
            else:
                candidates = self._participants
        else:
            candidates = self._participants
        logger.debug(f"Group chat manager next speaker candidates: {[c.name for c in candidates]}")

        # Select speaker.
        if len(candidates) == 0:
            speaker = None
        elif len(candidates) == 1:
            speaker = candidates[0]
        else:
            # More than one candidate, select the next speaker.
            if self._client is None:
                # If no model client is provided, candidates must be the list of participants.
                assert candidates == self._participants
                # Select the next speaker from the list of participants in order.
                if last_speaker_index is not None:
                    next_speaker_index = (last_speaker_index + 1) % len(self._participants)
                    speaker = self._participants[next_speaker_index]
                else:
                    # If no last speaker, select the first speaker.
                    speaker = candidates[0]
            else:
                # If a model client is provided, select the speaker based on the transitions and the model.
                speaker_index = await select_speaker(
                    self._memory, self._client, [self._participant_proxies[c] for c in candidates]
                )
                speaker = candidates[speaker_index]

        logger.debug(f"Group chat manager selected speaker: {speaker.name if speaker is not None else None}")

        if speaker is not None:
            # Send the message to the selected speaker to ask it to publish a response.
            await self.send_message(PublishNow(), speaker)

    def save_state(self) -> Mapping[str, Any]:
        return {
            "memory": self._memory.save_state(),
            "termination_word": self._termination_word,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        self._memory.load_state(state["memory"])
        self._termination_word = state["termination_word"]
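A construction sketch, not part of this commit, for the round-robin case: with no model client, the manager cycles through the participants in order. `writer_id` and `critic_id` are hypothetical `AgentId` values obtained when the corresponding agents are registered with the runtime; note the constructor touches `self.runtime`, so in practice this runs inside a registration context, which this diff does not show.

```python
from common.memory import BufferedChatMemory
from common.patterns import GroupChatManager

# writer_id and critic_id are hypothetical AgentIds from agent registration.
manager = GroupChatManager(
    description="Coordinates a writer/critic loop.",
    participants=[writer_id, critic_id],
    memory=BufferedChatMemory(buffer_size=20),
    termination_word="DONE",  # stop when a message contains this word
)
```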
82 python/examples/common/patterns/_group_chat_utils.py Normal file
@@ -0,0 +1,82 @@
"""Credit to the original authors: https://github.com/microsoft/autogen/blob/main/autogen/agentchat/groupchat.py"""

import re
from typing import Dict, List

from agnext.components.memory import ChatMemory
from agnext.components.models import ChatCompletionClient, SystemMessage
from agnext.core import AgentProxy

from ..types import Message, TextMessage


async def select_speaker(memory: ChatMemory[Message], client: ChatCompletionClient, agents: List[AgentProxy]) -> int:
    """Selects the next speaker in a group chat using a ChatCompletion client."""
    # TODO: Handle multi-modal messages.

    # Construct the formatted current message history.
    history_messages: List[str] = []
    for msg in await memory.get_messages():
        assert isinstance(msg, TextMessage)
        history_messages.append(f"{msg.source}: {msg.content}")
    history = "\n".join(history_messages)

    # Construct agent roles.
    roles = "\n".join([f"{agent.metadata['name']}: {agent.metadata['description']}".strip() for agent in agents])

    # Construct agent list.
    participants = str([agent.metadata["name"] for agent in agents])

    # Select the next speaker.
    select_speaker_prompt = f"""You are in a role play game. The following roles are available:
{roles}.
Read the following conversation. Then select the next role from {participants} to play. Only return the role.

{history}

Read the above conversation. Then select the next role from {participants} to play. Only return the role.
"""
    select_speaker_messages = [SystemMessage(select_speaker_prompt)]
    response = await client.create(messages=select_speaker_messages)
    assert isinstance(response.content, str)
    mentions = mentioned_agents(response.content, agents)
    if len(mentions) != 1:
        raise ValueError(f"Expected exactly one agent to be mentioned, but got {mentions}")
    agent_name = list(mentions.keys())[0]
    agent_index = next((i for i, agent in enumerate(agents) if agent.metadata["name"] == agent_name), None)
    assert agent_index is not None
    return agent_index


def mentioned_agents(message_content: str, agents: List[AgentProxy]) -> Dict[str, int]:
    """Counts the number of times each agent is mentioned in the provided message content.
    Agent names will match under any of the following conditions (all case-sensitive):
    - Exact name match
    - If the agent name has underscores it will match with spaces instead (e.g. 'Story_writer' == 'Story writer')
    - If the agent name has underscores it will match with '\\_' instead of '_' (e.g. 'Story_writer' == 'Story\\_writer')

    Args:
        message_content (str): The content of the message in which to search for mentions.
        agents (List[AgentProxy]): A list of agent proxies, each with a 'name' in its metadata to be searched in the message content.

    Returns:
        Dict: a counter for mentioned agents.
    """
    mentions: Dict[str, int] = dict()
    for agent in agents:
        # Finds agent mentions, taking word boundaries into account,
        # accommodates escaping underscores and underscores as spaces.
        name = agent.metadata["name"]
        regex = (
            r"(?<=\W)("
            + re.escape(name)
            + r"|"
            + re.escape(name.replace("_", " "))
            + r"|"
            + re.escape(name.replace("_", r"\_"))
            + r")(?=\W)"
        )
        count = len(re.findall(regex, f" {message_content} "))  # Pad the message to help with matching
        if count > 0:
            mentions[name] = count
    return mentions
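The matching rules of `mentioned_agents` can be exercised without a runtime, since it only reads each proxy's metadata. A sketch, not part of this commit, using a duck-typed stand-in for `AgentProxy`:

```python
from common.patterns._group_chat_utils import mentioned_agents


class FakeProxy:
    """Stand-in for AgentProxy: mentioned_agents only reads the metadata mapping."""

    def __init__(self, name: str) -> None:
        self.metadata = {"name": name}


agents = [FakeProxy("Story_writer"), FakeProxy("Critic")]
# The underscore variant also matches with a space, so this counts one mention.
print(mentioned_agents("Story writer, please revise the draft.", agents))
# -> {'Story_writer': 1}
```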
393
python/examples/common/patterns/_orchestrator_chat.py
Normal file
393
python/examples/common/patterns/_orchestrator_chat.py
Normal file
@@ -0,0 +1,393 @@
|
||||
import json
|
||||
from typing import Any, Sequence, Tuple
|
||||
|
||||
from agnext.components import TypeRoutedAgent, message_handler
|
||||
from agnext.core import AgentId, AgentRuntime, CancellationToken
|
||||
|
||||
from ..types import Reset, RespondNow, ResponseFormat, TextMessage
|
||||
|
||||
__all__ = ["OrchestratorChat"]
|
||||
|
||||
|
||||
class OrchestratorChat(TypeRoutedAgent):
|
||||
def __init__(
|
||||
self,
|
||||
description: str,
|
||||
runtime: AgentRuntime,
|
||||
orchestrator: AgentId,
|
||||
planner: AgentId,
|
||||
specialists: Sequence[AgentId],
|
||||
max_turns: int = 30,
|
||||
max_stalled_turns_before_retry: int = 2,
|
||||
max_retry_attempts: int = 1,
|
||||
) -> None:
|
||||
super().__init__(description)
|
||||
self._orchestrator = orchestrator
|
||||
self._planner = planner
|
||||
self._specialists = specialists
|
||||
self._max_turns = max_turns
|
||||
self._max_stalled_turns_before_retry = max_stalled_turns_before_retry
|
||||
self._max_retry_attempts_before_educated_guess = max_retry_attempts
|
||||
|
||||
@property
|
||||
def children(self) -> Sequence[AgentId]:
|
||||
return list(self._specialists) + [self._orchestrator, self._planner]
|
||||
|
||||
@message_handler()
|
||||
async def on_text_message(
|
||||
self,
|
||||
message: TextMessage,
|
||||
cancellation_token: CancellationToken,
|
||||
) -> TextMessage:
|
||||
# A task is received.
|
||||
task = message.content
|
||||
|
||||
# Prepare the task.
|
||||
team, names, facts, plan = await self._prepare_task(task, message.source)
|
||||
|
||||
# Main loop.
|
||||
total_turns = 0
|
||||
retry_attempts = 0
|
||||
while total_turns < self._max_turns:
|
||||
# Reset all agents.
|
||||
for agent in [*self._specialists, self._orchestrator]:
|
||||
await self.send_message(Reset(), agent)
|
||||
|
||||
# Create the task specs.
|
||||
task_specs = f"""
|
||||
We are working to address the following user request:
|
||||
|
||||
{task}
|
||||
|
||||
|
||||
To answer this request we have assembled the following team:
|
||||
|
||||
{team}
|
||||
|
||||
Some additional points to consider:
|
||||
|
||||
{facts}
|
||||
|
||||
{plan}
|
||||
""".strip()
|
||||
|
||||
# Send the task specs to the orchestrator and specialists.
|
||||
for agent in [*self._specialists, self._orchestrator]:
|
||||
await self.send_message(TextMessage(content=task_specs, source=self.metadata["name"]), agent)
|
||||
|
||||
# Inner loop.
|
||||
stalled_turns = 0
|
||||
while total_turns < self._max_turns:
|
||||
# Reflect on the task.
|
||||
data = await self._reflect_on_task(task, team, names, message.source)
|
||||
|
||||
# Check if the request is satisfied.
|
||||
if data["is_request_satisfied"]["answer"]:
|
||||
return TextMessage(
|
||||
content=f"The task has been successfully addressed. {data['is_request_satisfied']['reason']}",
|
||||
source=self.metadata["name"],
|
||||
)
|
||||
|
||||
# Update stalled turns.
|
||||
if data["is_progress_being_made"]["answer"]:
|
||||
stalled_turns = max(0, stalled_turns - 1)
|
||||
else:
|
||||
stalled_turns += 1
|
||||
|
||||
# Handle retry.
|
||||
if stalled_turns > self._max_stalled_turns_before_retry:
|
||||
# In a retry, we need to rewrite the facts and the plan.
|
||||
|
||||
# Rewrite the facts.
|
||||
facts = await self._rewrite_facts(facts, message.source)
|
||||
|
||||
# Increment the retry attempts.
|
||||
retry_attempts += 1
|
||||
|
||||
# Check if we should just guess.
|
||||
if retry_attempts > self._max_retry_attempts_before_educated_guess:
|
||||
# Make an educated guess.
|
||||
educated_guess = await self._educated_guess(facts, message.source)
|
||||
if educated_guess["has_educated_guesses"]["answer"]:
|
||||
return TextMessage(
|
||||
content=f"The task is addressed with an educated guess. {educated_guess['has_educated_guesses']['reason']}",
|
||||
source=self.metadata["name"],
|
||||
)
|
||||
|
||||
# Come up with a new plan.
|
||||
plan = await self._rewrite_plan(team, message.source)
|
||||
|
||||
# Exit the inner loop.
|
||||
break
|
||||
|
||||
# Get the subtask.
|
||||
subtask = data["instruction_or_question"]["answer"]
|
||||
if subtask is None:
|
||||
subtask = ""
|
||||
|
||||
# Update agents.
|
||||
for agent in [*self._specialists, self._orchestrator]:
|
||||
_ = await self.send_message(
|
||||
TextMessage(content=subtask, source=self.metadata["name"]),
|
||||
agent,
|
||||
)
|
||||
|
||||
# Find the speaker.
|
||||
try:
|
||||
speaker = next(agent for agent in self._specialists if agent.name == data["next_speaker"]["answer"])
|
||||
except StopIteration as e:
|
||||
raise ValueError(f"Invalid next speaker: {data['next_speaker']['answer']}") from e
|
||||
|
||||
# Ask speaker to speak.
|
||||
speaker_response = await self.send_message(RespondNow(), speaker)
|
||||
assert speaker_response is not None
|
||||
|
||||
# Update all other agents with the speaker's response.
|
||||
for agent in [agent for agent in self._specialists if agent != speaker] + [self._orchestrator]:
|
||||
await self.send_message(
|
||||
TextMessage(
|
||||
content=speaker_response.content,
|
||||
source=speaker_response.source,
|
||||
),
|
||||
agent,
|
||||
)
|
||||
|
||||
# Increment the total turns.
|
||||
total_turns += 1
|
||||
|
||||
return TextMessage(
|
||||
content="The task was not addressed. The maximum number of turns was reached.",
|
||||
source=self.metadata["name"],
|
||||
)
|
||||
|
||||
async def _prepare_task(self, task: str, sender: str) -> Tuple[str, str, str, str]:
|
||||
# Reset planner.
|
||||
await self.send_message(Reset(), self._planner)
|
||||
|
||||
# A reusable description of the team.
|
||||
team = "\n".join(
|
||||
[agent.name + ": " + self.runtime.agent_metadata(agent)["description"] for agent in self._specialists]
|
||||
)
|
||||
names = ", ".join([agent.name for agent in self._specialists])
|
||||
|
||||
# A place to store relevant facts.
|
||||
facts = ""
|
||||
|
||||
# A plance to store the plan.
|
||||
plan = ""
|
||||
|
||||
# Start by writing what we know
|
||||
closed_book_prompt = f"""Below I will present you a request. Before we begin addressing the request, please answer the following pre-survey to the best of your ability. Keep in mind that you are Ken Jennings-level with trivia, and Mensa-level with puzzles, so there should be a deep well to draw from.
|
||||
|
||||
Here is the request:
|
||||
|
||||
{task}
|
||||
|
||||
Here is the pre-survey:
|
||||
|
||||
1. Please list any specific facts or figures that are GIVEN in the request itself. It is possible that there are none.
|
||||
2. Please list any facts that may need to be looked up, and WHERE SPECIFICALLY they might be found. In some cases, authoritative sources are mentioned in the request itself.
|
||||
3. Please list any facts that may need to be derived (e.g., via logical deduction, simulation, or computation)
|
||||
4. Please list any facts that are recalled from memory, hunches, well-reasoned guesses, etc.
|
||||
|
||||
When answering this survey, keep in mind that "facts" will typically be specific names, dates, statistics, etc. Your answer should use headings:
|
||||
|
||||
1. GIVEN OR VERIFIED FACTS
|
||||
2. FACTS TO LOOK UP
|
||||
3. FACTS TO DERIVE
|
||||
4. EDUCATED GUESSES
|
||||
""".strip()
|
||||
|
||||
# Ask the planner to obtain prior knowledge about facts.
|
||||
await self.send_message(TextMessage(content=closed_book_prompt, source=sender), self._planner)
|
||||
facts_response = await self.send_message(RespondNow(), self._planner)
|
||||
|
||||
facts = str(facts_response.content)
|
||||
|
||||
# Make an initial plan
|
||||
plan_prompt = f"""Fantastic. To address this request we have assembled the following team:
|
||||
|
||||
{team}
|
||||
|
||||
Based on the team composition, and known and unknown facts, please devise a short bullet-point plan for addressing the original request. Remember, there is no requirement to involve all team members -- a team member's particular expertise may not be needed for this task.""".strip()
|
||||
|
||||
# Send second messag eto the planner.
|
||||
await self.send_message(TextMessage(content=plan_prompt, source=sender), self._planner)
|
||||
plan_response = await self.send_message(RespondNow(), self._planner)
|
||||
plan = str(plan_response.content)
|
||||
|
||||
return team, names, facts, plan
|
||||
|
||||
async def _reflect_on_task(
|
||||
self,
|
||||
task: str,
|
||||
team: str,
|
||||
names: str,
|
||||
sender: str,
|
||||
) -> Any:
|
||||
step_prompt = f"""
|
||||
Recall we are working on the following request:
|
||||
|
||||
{task}
|
||||
|
||||
And we have assembled the following team:
|
||||
|
||||
{team}
|
||||
|
||||
To make progress on the request, please answer the following questions, including necessary reasoning:
|
||||
|
||||
- Is the request fully satisfied? (True if complete, or False if the original request has yet to be SUCCESSFULLY addressed)
|
||||
- Are we making forward progress? (True if just starting, or recent messages are adding value. False if recent messages show evidence of being stuck in a reasoning or action loop, or there is evidence of significant barriers to success such as the inability to read from a required file)
|
||||
- Who should speak next? (select from: {names})
|
||||
- What instruction or question would you give this team member? (Phrase as if speaking directly to them, and include any specific information they may need)
|
||||
|
||||
Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA:
|
||||
|
||||
{{
|
||||
"is_request_satisfied": {{
|
||||
"reason": string,
|
||||
"answer": boolean
|
||||
}},
|
||||
"is_progress_being_made": {{
|
||||
"reason": string,
|
||||
"answer": boolean
|
||||
}},
|
||||
"next_speaker": {{
|
||||
"reason": string,
|
||||
"answer": string (select from: {names})
|
||||
}},
|
||||
"instruction_or_question": {{
|
||||
"reason": string,
|
||||
"answer": string
|
||||
}}
|
||||
}}
|
||||
""".strip()
|
||||
request = step_prompt
|
||||
while True:
|
||||
# Send a message to the orchestrator.
|
||||
await self.send_message(TextMessage(content=request, source=sender), self._orchestrator)
|
||||
# Request a response.
|
||||
step_response = await self.send_message(
|
||||
RespondNow(response_format=ResponseFormat.json_object),
|
||||
self._orchestrator,
|
||||
)
|
||||
# TODO: use typed dictionary.
|
||||
try:
|
||||
result = json.loads(str(step_response.content))
|
||||
except json.JSONDecodeError as e:
|
||||
request = f"Invalid JSON: {str(e)}"
|
||||
continue
|
||||
if "is_request_satisfied" not in result:
|
||||
request = "Missing key: is_request_satisfied"
|
||||
continue
|
||||
elif (
|
||||
not isinstance(result["is_request_satisfied"], dict)
|
||||
or "answer" not in result["is_request_satisfied"]
|
||||
or "reason" not in result["is_request_satisfied"]
|
||||
):
|
||||
request = "Invalid value for key: is_request_satisfied, expected 'answer' and 'reason'"
|
||||
continue
|
||||
if "is_progress_being_made" not in result:
|
||||
request = "Missing key: is_progress_being_made"
|
||||
continue
|
||||
elif (
|
||||
not isinstance(result["is_progress_being_made"], dict)
|
||||
or "answer" not in result["is_progress_being_made"]
|
||||
or "reason" not in result["is_progress_being_made"]
|
||||
):
|
||||
request = "Invalid value for key: is_progress_being_made, expected 'answer' and 'reason'"
|
||||
continue
|
||||
if "next_speaker" not in result:
|
||||
request = "Missing key: next_speaker"
|
||||
continue
|
||||
elif (
|
||||
not isinstance(result["next_speaker"], dict)
|
||||
or "answer" not in result["next_speaker"]
|
||||
or "reason" not in result["next_speaker"]
|
||||
):
|
||||
request = "Invalid value for key: next_speaker, expected 'answer' and 'reason'"
|
||||
continue
|
||||
elif result["next_speaker"]["answer"] not in names:
|
||||
request = f"Invalid value for key: next_speaker, expected 'answer' in {names}"
|
||||
continue
|
||||
if "instruction_or_question" not in result:
|
||||
request = "Missing key: instruction_or_question"
|
||||
continue
|
||||
elif (
|
||||
not isinstance(result["instruction_or_question"], dict)
|
||||
or "answer" not in result["instruction_or_question"]
|
||||
or "reason" not in result["instruction_or_question"]
|
||||
):
|
||||
request = "Invalid value for key: instruction_or_question, expected 'answer' and 'reason'"
|
||||
continue
|
||||
return result
|
||||
|
||||
async def _rewrite_facts(self, facts: str, sender: str) -> str:
|
||||
new_facts_prompt = f"""It's clear we aren't making as much progress as we would like, but we may have learned something new. Please rewrite the following fact sheet, updating it to include anything new we have learned. This is also a good time to update educated guesses (please add or update at least one educated guess or hunch, and explain your reasoning).
|
||||
|
||||
{facts}
|
||||
""".strip()
|
||||
# Send a message to the orchestrator.
|
||||
await self.send_message(TextMessage(content=new_facts_prompt, source=sender), self._orchestrator)
|
||||
# Request a response.
|
||||
new_facts_response = await self.send_message(RespondNow(), self._orchestrator)
|
||||
return str(new_facts_response.content)
|
||||
|
||||
    async def _educated_guess(self, facts: str, sender: str) -> Any:
        # Make an educated guess.
        educated_guess_prompt = f"""Given the following information

{facts}

Please answer the following question, including necessary reasoning:

- Do you have two or more congruent pieces of information that will allow you to make an educated guess for the original request? The educated guess MUST answer the question.

Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA:

{{
    "has_educated_guesses": {{
        "reason": string,
        "answer": boolean
    }}
}}
""".strip()
        request = educated_guess_prompt
        while True:
            # Send a message to the orchestrator.
            await self.send_message(
                TextMessage(content=request, source=sender),
                self._orchestrator,
            )
            # Request a response.
            response = await self.send_message(
                RespondNow(response_format=ResponseFormat.json_object),
                self._orchestrator,
            )
            try:
                result = json.loads(str(response.content))
            except json.JSONDecodeError as e:
                request = f"Invalid JSON: {str(e)}"
                continue
            # TODO: use typed dictionary.
            if "has_educated_guesses" not in result:
                request = "Missing key: has_educated_guesses"
                continue
            if (
                not isinstance(result["has_educated_guesses"], dict)
                or "answer" not in result["has_educated_guesses"]
                or "reason" not in result["has_educated_guesses"]
            ):
                request = "Invalid value for key: has_educated_guesses, expected 'answer' and 'reason'"
                continue
            return result

    async def _rewrite_plan(self, team: str, sender: str) -> str:
        new_plan_prompt = f"""Please come up with a new plan expressed in bullet points. Keep in mind the following team composition, and do not involve any other outside people in the plan -- we cannot contact anyone else.

Team membership:
{team}
""".strip()
        # Send a message to the orchestrator.
        await self.send_message(TextMessage(content=new_plan_prompt, source=sender), self._orchestrator)
        # Request a response.
        new_plan_response = await self.send_message(RespondNow(), self._orchestrator)
        return str(new_plan_response.content)
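The helper methods above share one protocol with the `next_speaker` validation earlier in this file: send a prompt, request a JSON response, and on any parse or schema failure feed the error text back as the next request until the reply validates. Below is a minimal, self-contained sketch of that retry loop outside the AGNext runtime; `request_json`, `ask`, and `fake_ask` are hypothetical names invented for illustration and are not part of this commit.

```python
import asyncio
import json
from typing import Any, Awaitable, Callable


async def request_json(
    ask: Callable[[str], Awaitable[str]],
    prompt: str,
    required_key: str,
) -> Any:
    """Re-prompt until the reply parses as JSON and contains
    required_key with 'answer' and 'reason' sub-keys."""
    request = prompt
    while True:
        reply = await ask(request)
        try:
            result = json.loads(reply)
        except json.JSONDecodeError as e:
            # Feed the parse error back as the next request and retry.
            request = f"Invalid JSON: {e}"
            continue
        if required_key not in result:
            request = f"Missing key: {required_key}"
            continue
        value = result[required_key]
        if not isinstance(value, dict) or "answer" not in value or "reason" not in value:
            request = f"Invalid value for key: {required_key}, expected 'answer' and 'reason'"
            continue
        return result


async def _demo() -> None:
    # Stand-in for a model call: first reply is malformed, second is valid.
    replies = iter(
        ["not json", '{"has_educated_guesses": {"answer": true, "reason": "two sources agree"}}']
    )

    async def fake_ask(request: str) -> str:
        return next(replies)

    print(await request_json(fake_ask, "Do you have educated guesses?", "has_educated_guesses"))


asyncio.run(_demo())
```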
74
python/examples/common/types.py
Normal file
@@ -0,0 +1,74 @@
from __future__ import annotations

from dataclasses import dataclass, field
from enum import Enum
from typing import List, Union

from agnext.components import FunctionCall, Image
from agnext.components.models import FunctionExecutionResultMessage


@dataclass(kw_only=True)
class BaseMessage:
    # Name of the agent that sent this message
    source: str


@dataclass
class TextMessage(BaseMessage):
    content: str


@dataclass
class MultiModalMessage(BaseMessage):
    content: List[Union[str, Image]]


@dataclass
class FunctionCallMessage(BaseMessage):
    content: List[FunctionCall]


Message = Union[TextMessage, MultiModalMessage, FunctionCallMessage, FunctionExecutionResultMessage]


class ResponseFormat(Enum):
    text = "text"
    json_object = "json_object"


@dataclass
class RespondNow:
    """A message to request a response from the addressed agent. The sender
    expects a response upon sending and waits for it synchronously."""

    response_format: ResponseFormat = field(default=ResponseFormat.text)


@dataclass
class PublishNow:
    """A message to request an event to be published to the addressed agent.
    Unlike RespondNow, the sender does not expect a response upon sending."""

    response_format: ResponseFormat = field(default=ResponseFormat.text)


class Reset: ...


@dataclass
class ToolApprovalRequest:
    """A message to request approval for a tool call. The sender expects a
    response upon sending and waits for it synchronously."""

    tool_call: FunctionCall


@dataclass
class ToolApprovalResponse:
    """A message to respond to a tool approval request. The response is sent
    synchronously."""

    tool_call_id: str
    approved: bool
    reason: str
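For orientation, here is a short sketch of how these message and control types combine. It assumes the examples' `common` package is importable (the demo hunks below arrange this with `sys.path.append`), and the variable names are invented for illustration.

```python
# Minimal sketch: constructing the message types defined in common/types.py.
from common.types import PublishNow, RespondNow, ResponseFormat, TextMessage

# A chat turn tagged with the name of the sending agent (source is keyword-only).
msg = TextMessage(content="What is on the board?", source="player_white")

# Ask the addressed agent to reply synchronously, formatted as JSON.
ask = RespondNow(response_format=ResponseFormat.json_object)

# Fire-and-forget: request an event to be published, no reply expected.
notify = PublishNow()

print(msg, ask.response_format, notify.response_format)
```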
98
python/examples/common/utils.py
Normal file
@@ -0,0 +1,98 @@
from typing import List, Optional, Union

from agnext.components.models import (
    AssistantMessage,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    LLMMessage,
    UserMessage,
)
from typing_extensions import Literal

from .types import (
    FunctionCallMessage,
    Message,
    MultiModalMessage,
    TextMessage,
)


def convert_content_message_to_assistant_message(
    message: Union[TextMessage, MultiModalMessage, FunctionCallMessage],
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> Optional[AssistantMessage]:
    match message:
        case TextMessage() | FunctionCallMessage():
            return AssistantMessage(content=message.content, source=message.source)
        case MultiModalMessage():
            if handle_unrepresentable == "error":
                raise ValueError("Cannot represent multimodal message as AssistantMessage")
            elif handle_unrepresentable == "ignore":
                return None
            elif handle_unrepresentable == "try_slice":
                return AssistantMessage(
                    content="".join([x for x in message.content if isinstance(x, str)]),
                    source=message.source,
                )


def convert_content_message_to_user_message(
    message: Union[TextMessage, MultiModalMessage, FunctionCallMessage],
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> Optional[UserMessage]:
    match message:
        case TextMessage() | MultiModalMessage():
            return UserMessage(content=message.content, source=message.source)
        case FunctionCallMessage():
            if handle_unrepresentable == "error":
                raise ValueError("Cannot represent function call message as UserMessage")
            elif handle_unrepresentable == "ignore":
                return None
            elif handle_unrepresentable == "try_slice":
                # TODO: what is a sliced function call?
                raise NotImplementedError("Sliced function calls not yet implemented")


def convert_tool_call_response_message(
    message: FunctionExecutionResultMessage,
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> Optional[FunctionExecutionResultMessage]:
    match message:
        case FunctionExecutionResultMessage():
            return FunctionExecutionResultMessage(
                content=[FunctionExecutionResult(content=x.content, call_id=x.call_id) for x in message.content]
            )


def convert_messages_to_llm_messages(
    messages: List[Message],
    self_name: str,
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> List[LLMMessage]:
    result: List[LLMMessage] = []
    for message in messages:
        match message:
            case (
                TextMessage(content=_, source=source)
                | MultiModalMessage(content=_, source=source)
                | FunctionCallMessage(content=_, source=source)
            ) if source == self_name:
                converted_message_1 = convert_content_message_to_assistant_message(message, handle_unrepresentable)
                if converted_message_1 is not None:
                    result.append(converted_message_1)
            case (
                TextMessage(content=_, source=source)
                | MultiModalMessage(content=_, source=source)
                | FunctionCallMessage(content=_, source=source)
            ) if source != self_name:
                converted_message_2 = convert_content_message_to_user_message(message, handle_unrepresentable)
                if converted_message_2 is not None:
                    result.append(converted_message_2)
            case FunctionExecutionResultMessage(_):
                converted_message_3 = convert_tool_call_response_message(message, handle_unrepresentable)
                if converted_message_3 is not None:
                    result.append(converted_message_3)
            case _:
                raise AssertionError("unreachable")

    return result
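As a usage sketch for the converters above (again assuming `common` is on `sys.path`; the `history` list and source names are invented): from a given agent's perspective, its own messages become `AssistantMessage`s and everyone else's become `UserMessage`s.

```python
from common.types import TextMessage
from common.utils import convert_messages_to_llm_messages

history = [
    TextMessage(content="Review this patch.", source="user"),
    TextMessage(content="The patch looks correct.", source="reviewer"),
]

# From the reviewer's point of view: its own turn is converted to an
# AssistantMessage, the user's turn to a UserMessage.
llm_messages = convert_messages_to_llm_messages(history, self_name="reviewer")
for m in llm_messages:
    print(type(m).__name__, m.content)
```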
@@ -6,15 +6,12 @@ import asyncio
import logging
import os
import re
import sys
from typing import List

import aiofiles
import openai
from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.agents import OpenAIAssistantAgent
from agnext.chat.memory import BufferedChatMemory
from agnext.chat.patterns._group_chat_manager import GroupChatManager
from agnext.chat.types import PublishNow, TextMessage
from agnext.components import TypeRoutedAgent, message_handler
from agnext.core import AgentId, AgentRuntime, CancellationToken
from openai import AsyncAssistantEventHandler
@@ -23,6 +20,13 @@ from openai.types.beta.threads import Message, Text, TextDelta
from openai.types.beta.threads.runs import RunStep, RunStepDelta
from typing_extensions import override

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

from common.agents import OpenAIAssistantAgent
from common.memory import BufferedChatMemory
from common.patterns._group_chat_manager import GroupChatManager
from common.types import PublishNow, TextMessage

sep = "-" * 50

@@ -6,16 +6,17 @@ import os
import sys

from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.memory import BufferedChatMemory
from agnext.chat.types import Message, TextMessage
from agnext.chat.utils import convert_messages_to_llm_messages
from agnext.components import TypeRoutedAgent, message_handler
from agnext.components.memory import ChatMemory
from agnext.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage
from agnext.core import AgentRuntime, CancellationToken

sys.path.append(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from common.memory import BufferedChatMemory
from common.types import Message, TextMessage
from common.utils import convert_messages_to_llm_messages
from utils import TextualChatApp, TextualUserAgent, start_runtime

@@ -5,19 +5,24 @@ and make moves, and using a group chat manager to orchestrate the conversation."
import argparse
import asyncio
import logging
import os
import sys
from typing import Annotated, Literal

from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.agents._chat_completion_agent import ChatCompletionAgent
from agnext.chat.memory import BufferedChatMemory
from agnext.chat.patterns._group_chat_manager import GroupChatManager
from agnext.chat.types import TextMessage
from agnext.components.models import OpenAIChatCompletionClient, SystemMessage
from agnext.components.tools import FunctionTool
from agnext.core import AgentRuntime
from chess import BLACK, SQUARE_NAMES, WHITE, Board, Move
from chess import piece_name as get_piece_name

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

from common.agents._chat_completion_agent import ChatCompletionAgent
from common.memory import BufferedChatMemory
from common.patterns._group_chat_manager import GroupChatManager
from common.types import TextMessage


def validate_turn(board: Board, player: Literal["white", "black"]) -> None:
    """Validate that it is the player's turn to move."""

@@ -6,14 +6,15 @@ import sys

import openai
from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.agents import ChatCompletionAgent, ImageGenerationAgent
from agnext.chat.memory import BufferedChatMemory
from agnext.chat.patterns._group_chat_manager import GroupChatManager
from agnext.components.models import OpenAIChatCompletionClient, SystemMessage
from agnext.core import AgentRuntime

sys.path.append(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from common.agents import ChatCompletionAgent, ImageGenerationAgent
from common.memory import BufferedChatMemory
from common.patterns._group_chat_manager import GroupChatManager
from utils import TextualChatApp, TextualUserAgent, start_runtime

@@ -17,9 +17,6 @@ import aiofiles
import aiohttp
import openai
from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.agents import ChatCompletionAgent
from agnext.chat.memory import HeadAndTailChatMemory
from agnext.chat.patterns._group_chat_manager import GroupChatManager
from agnext.components.models import OpenAIChatCompletionClient, SystemMessage
from agnext.components.tools import FunctionTool
from agnext.core import AgentRuntime
@@ -28,7 +25,11 @@ from tqdm import tqdm
from typing_extensions import Annotated

sys.path.append(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from common.agents import ChatCompletionAgent
from common.memory import HeadAndTailChatMemory
from common.patterns._group_chat_manager import GroupChatManager
from utils import TextualChatApp, TextualUserAgent, start_runtime

@@ -1,9 +1,20 @@
import asyncio
import os
import random
import sys
from asyncio import Future

from agnext.application import SingleThreadedAgentRuntime
from agnext.chat.types import (
from agnext.components import Image, TypeRoutedAgent, message_handler
from agnext.core import AgentRuntime, CancellationToken
from textual.app import App, ComposeResult
from textual.containers import ScrollableContainer
from textual.widgets import Button, Footer, Header, Input, Markdown, Static
from textual_imageview.viewer import ImageViewer

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

from common.types import (
    MultiModalMessage,
    PublishNow,
    RespondNow,
@@ -11,12 +22,6 @@ from agnext.chat.types import (
    ToolApprovalRequest,
    ToolApprovalResponse,
)
from agnext.components import Image, TypeRoutedAgent, message_handler
from agnext.core import AgentRuntime, CancellationToken
from textual.app import App, ComposeResult
from textual.containers import ScrollableContainer
from textual.widgets import Button, Footer, Header, Input, Markdown, Static
from textual_imageview.viewer import ImageViewer


class ChatAppMessage(Static):

@@ -3,23 +3,26 @@ import asyncio
import json
import logging
import os
import sys
from typing import Callable

import openai
from agnext.application import (
    SingleThreadedAgentRuntime,
)
from agnext.chat.agents._chat_completion_agent import ChatCompletionAgent
from agnext.chat.agents._oai_assistant import OpenAIAssistantAgent
from agnext.chat.memory import BufferedChatMemory
from agnext.chat.patterns._orchestrator_chat import OrchestratorChat
from agnext.chat.types import TextMessage
from agnext.components.models import OpenAIChatCompletionClient, SystemMessage
from agnext.components.tools import BaseTool
from agnext.core import AgentRuntime, CancellationToken
from pydantic import BaseModel, Field
from tavily import TavilyClient  # type: ignore

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

from common.agents import ChatCompletionAgent, OpenAIAssistantAgent
from common.memory import BufferedChatMemory
from common.patterns._orchestrator_chat import OrchestratorChat
from common.types import TextMessage

logging.basicConfig(level=logging.WARNING)
logging.getLogger("agnext").setLevel(logging.DEBUG)