mirror of
https://github.com/microsoft/autogen.git
synced 2026-04-20 03:02:16 -04:00
Python: organize packages in package directory (#420)
* Move packages to packages directory * remove screenshot * update some paths
This commit is contained in:
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
This example shows how to use direct messaging to implement
|
||||
a simple interaction between an inner and an outer agent.
|
||||
1. The outer agent receives a message, sends a message to the inner agent.
|
||||
2. The inner agent receives the message, processes it, and sends a response to the outer agent.
|
||||
3. The outer agent receives the response and processes it, and returns the final response.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentInstantiationContext, MessageContext
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
|
||||
|
||||
@dataclass
class MessageType:
    """A chat message exchanged between the inner and outer agents."""

    # Free-form text payload of the message.
    body: str
    # Identifier of the agent (or external caller) that produced the message.
    sender: str
|
||||
|
||||
|
||||
class Inner(RoutedAgent):
    """Agent that answers each direct message by echoing it with an "Inner: " prefix."""

    def __init__(self) -> None:
        super().__init__("The inner agent")

    @message_handler()
    async def on_new_message(self, message: MessageType, ctx: MessageContext) -> MessageType:
        """Return the prefixed message body, attributed to this agent's type."""
        reply_body = f"Inner: {message.body}"
        return MessageType(body=reply_body, sender=self.metadata["type"])
|
||||
|
||||
|
||||
class Outer(RoutedAgent):
    """Agent that delegates each message to an inner agent and wraps the reply."""

    def __init__(self, inner: AgentId) -> None:
        super().__init__("The outer agent")
        # Id of the inner agent that every incoming message is forwarded to.
        self._inner = inner

    @message_handler()
    async def on_new_message(self, message: MessageType, ctx: MessageContext) -> MessageType:
        """Forward the message, await the inner reply, and prefix it with "Outer: "."""
        inner_message = await self.send_message(message, self._inner)
        assert isinstance(inner_message, MessageType)
        wrapped_body = f"Outer: {inner_message.body}"
        return MessageType(body=wrapped_body, sender=self.metadata["type"])
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire up the inner and outer agents, send one message, and print the reply."""
    runtime = SingleThreadedAgentRuntime()
    await runtime.register("inner", Inner)
    # BUG FIX: Outer must be handed the *inner* agent's id to forward messages to.
    # The original passed AgentId("outer", ...), which would make the outer agent
    # send each message back to itself and never reach the inner agent.
    await runtime.register(
        "outer", lambda: Outer(AgentId("inner", AgentInstantiationContext.current_agent_id().key))
    )
    outer = AgentId("outer", "default")

    runtime.start()

    response = await runtime.send_message(MessageType(body="Hello", sender="external"), outer)
    print(response)
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # `logging` is already imported at module level, so no local import is needed.
    # Default to warnings only, but enable full debug output from autogen_core.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,71 @@
|
||||
"""
|
||||
This example shows how to use direct messaging to implement
|
||||
a simple chat completion agent.
|
||||
The agent receives a message from the main function, sends it to the
|
||||
chat completion model, and returns the response to the main function.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
ChatCompletionClient,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A plain-text message exchanged between the caller and the chat agent."""

    # Text content of the message.
    content: str
|
||||
|
||||
|
||||
class ChatCompletionAgent(RoutedAgent):
    """Stateless agent that answers each user message with a single model completion."""

    def __init__(self, description: str, model_client: ChatCompletionClient) -> None:
        super().__init__(description)
        # Fixed system prompt prepended to every model call.
        self._system_messages = [SystemMessage("You are a helpful AI assistant.")]
        self._model_client = model_client

    @message_handler
    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:
        """Send the user's text to the chat completion model and return its reply."""
        prompt = [*self._system_messages, UserMessage(content=message.content, source="User")]
        response = await self._model_client.create(prompt)
        assert isinstance(response.content, str)
        return Message(content=response.content)
|
||||
|
||||
|
||||
async def main() -> None:
    """Run one request/response round trip against the chat completion agent."""
    runtime = SingleThreadedAgentRuntime()
    await runtime.register(
        "chat_agent",
        lambda: ChatCompletionAgent("Chat agent", get_chat_completion_client_from_envs(model="gpt-4o-mini")),
    )
    agent = AgentId("chat_agent", "default")

    runtime.start()

    # Send a message to the agent and get the response.
    request = Message(content="Hello, what are some fun things to do in Seattle?")
    reply = await runtime.send_message(request, agent)
    assert isinstance(reply, Message)
    print(reply.content)

    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Warnings only by default, but full debug output from the autogen core.
    logging.basicConfig(level=logging.WARNING)
    core_logger = logging.getLogger("autogen_core")
    core_logger.setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
125
python/packages/autogen-core/samples/core/two_agents_pub_sub.py
Normal file
125
python/packages/autogen-core/samples/core/two_agents_pub_sub.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""
|
||||
This example shows how to use publish-subscribe to implement a simple
|
||||
interaction between two agents that use a chat completion model to respond to messages.
|
||||
|
||||
1. The main function sends a message to Jack to start the conversation.
|
||||
2. The Jack agent receives the message, generates a response using a chat completion model,
|
||||
and publishes the response.
|
||||
3. The Cathy agent receives the message, generates a response using a chat completion model,
|
||||
and publishes the response.
|
||||
4. The conversation continues until a message with termination word is received by any agent.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A chat message published between the two conversing agents."""

    # Type name of the agent (or "User") that produced the message.
    source: str
    # Text content of the message.
    content: str
|
||||
|
||||
|
||||
class ChatCompletionAgent(RoutedAgent):
    """An agent that uses a chat completion model to respond to messages.

    It keeps a memory of the conversation and uses it to generate responses.
    It stops responding once a message containing the termination word arrives."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
        termination_word: str,
    ) -> None:
        super().__init__(description)
        self._system_messages = system_messages
        self._model_client = model_client
        # Full conversation history; only the last 10 entries feed the model.
        self._memory: List[Message] = []
        self._termination_word = termination_word

    @message_handler
    async def handle_message(self, message: Message, ctx: MessageContext) -> None:
        """Record the message, generate a model reply, and publish it (unless terminated)."""
        self._memory.append(message)
        if self._termination_word in message.content:
            return
        own_type = self.metadata["type"]
        # Map recent memory onto LLM messages: own turns become assistant
        # messages, everything else becomes a user message.
        llm_messages: List[LLMMessage] = [
            AssistantMessage(content=m.content, source=own_type)
            if m.source == own_type
            else UserMessage(content=m.content, source=m.source)
            for m in self._memory[-10:]
        ]
        response = await self._model_client.create(self._system_messages + llm_messages)
        assert isinstance(response.content, str)

        if ctx.topic_id is None:
            return
        await self.publish_message(
            Message(content=response.content, source=own_type), topic_id=DefaultTopicId()
        )
|
||||
|
||||
|
||||
async def main() -> None:
    """Register Jack and Cathy, start their conversation, and run until idle."""
    # Create the runtime.
    runtime = SingleThreadedAgentRuntime()

    # Register the agents.
    await runtime.register(
        "Jack",
        lambda: ChatCompletionAgent(
            description="Jack a comedian",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            system_messages=[
                SystemMessage("You are a comedian likes to make jokes. " "When you are done talking, say 'TERMINATE'.")
            ],
            termination_word="TERMINATE",
        ),
        lambda: [DefaultSubscription()],
    )
    await runtime.register(
        "Cathy",
        lambda: ChatCompletionAgent(
            description="Cathy a poet",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            system_messages=[
                SystemMessage("You are a poet likes to write poems. " "When you are done talking, say 'TERMINATE'.")
            ],
            termination_word="TERMINATE",
        ),
        lambda: [DefaultSubscription()],
    )

    runtime.start()

    # Send a message to Jack to start the conversation.
    # BUG FIX: the agent type was registered as "Jack" (capitalized); the
    # original sent to AgentId("jack", ...), which does not match the
    # registered type and would fail to reach the agent.
    message = Message(content="Can you tell me something fun about SF?", source="User")
    await runtime.send_message(message, AgentId("Jack", "default"))

    # Process messages.
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Warnings only by default, but full debug output from the autogen core.
    logging.basicConfig(level=logging.WARNING)
    core_logger = logging.getLogger("autogen_core")
    core_logger.setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
Reference in New Issue
Block a user