From e902e94b14e66fdfe1668bb3632125f8944cc2f6 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 18 Dec 2024 14:09:19 -0800 Subject: [PATCH] Define AgentEvent, rename tool call messages to events. (#4750) * Define AgentEvent, rename tool call messages to events. * update doc * Use AgentEvent | ChatMessage to replace AgentMessage * Update docs * update deprecation notice * remove unused * fix doc * format --- .../agents/_assistant_agent.py | 14 +- .../agents/_base_chat_agent.py | 10 +- .../agents/_society_of_mind_agent.py | 6 +- .../src/autogen_agentchat/base/_chat_agent.py | 6 +- .../src/autogen_agentchat/base/_task.py | 6 +- .../autogen_agentchat/base/_termination.py | 8 +- .../conditions/_terminations.py | 18 +- .../src/autogen_agentchat/messages.py | 35 +- .../src/autogen_agentchat/state/_states.py | 4 +- .../src/autogen_agentchat/task/__init__.py | 20 +- .../teams/_group_chat/_base_group_chat.py | 8 +- .../_group_chat/_base_group_chat_manager.py | 8 +- .../teams/_group_chat/_events.py | 4 +- .../_magentic_one_orchestrator.py | 12 +- .../_group_chat/_round_robin_group_chat.py | 4 +- .../teams/_group_chat/_selector_group_chat.py | 20 +- .../teams/_group_chat/_swarm_group_chat.py | 4 +- .../src/autogen_agentchat/ui/_console.py | 10 +- .../tests/test_assistant_agent.py | 16 +- .../tests/test_group_chat.py | 16 +- .../examples/company-research.ipynb | 826 ++++----- .../examples/literature-review.ipynb | 662 +++---- .../tutorial/agents.ipynb | 4 +- .../tutorial/custom-agents.ipynb | 622 +++---- .../tutorial/messages.ipynb | 10 +- .../tutorial/selector-group-chat.ipynb | 1002 +++++------ .../agentchat-user-guide/tutorial/swarm.ipynb | 1058 ++++++------ .../agentchat-user-guide/tutorial/teams.ipynb | 1524 ++++++++--------- .../tutorial/termination.ipynb | 602 +++---- .../agents/openai/_openai_assistant_agent.py | 14 +- .../web_surfer/_multimodal_web_surfer.py | 6 +- .../autogenstudio/teammanager.py | 4 +- .../autogenstudio/web/managers/connection.py | 14 +- .../autogen-studio/notebooks/tutorial.ipynb | 680 ++++---- 34 files changed, 3642 insertions(+), 3615 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 74a80ac7b..7afa7f48a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -21,13 +21,13 @@ from .. import EVENT_LOGGER_NAME from ..base import Handoff as HandoffBase from ..base import Response from ..messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallExecutionEvent, + ToolCallRequestEvent, ) from ..state import AssistantAgentState from ._base_chat_agent import BaseChatAgent @@ -292,7 +292,7 @@ class AssistantAgent(BaseChatAgent): async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: # Add messages to the model context. for msg in messages: if isinstance(msg, MultiModalMessage) and self._model_client.capabilities["vision"] is False: @@ -300,7 +300,7 @@ class AssistantAgent(BaseChatAgent): self._model_context.append(UserMessage(content=msg.content, source=msg.source)) # Inner messages. - inner_messages: List[AgentMessage] = [] + inner_messages: List[AgentEvent | ChatMessage] = [] # Generate an inference result based on the current model context. llm_messages = self._system_messages + self._model_context @@ -321,7 +321,7 @@ class AssistantAgent(BaseChatAgent): # Process tool calls. assert isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content) - tool_call_msg = ToolCallMessage(content=result.content, source=self.name, models_usage=result.usage) + tool_call_msg = ToolCallRequestEvent(content=result.content, source=self.name, models_usage=result.usage) event_logger.debug(tool_call_msg) # Add the tool call message to the output. inner_messages.append(tool_call_msg) @@ -329,7 +329,7 @@ class AssistantAgent(BaseChatAgent): # Execute the tool calls. results = await asyncio.gather(*[self._execute_tool_call(call, cancellation_token) for call in result.content]) - tool_call_result_msg = ToolCallResultMessage(content=results, source=self.name) + tool_call_result_msg = ToolCallExecutionEvent(content=results, source=self.name) event_logger.debug(tool_call_result_msg) self._model_context.append(FunctionExecutionResultMessage(content=results)) inner_messages.append(tool_call_result_msg) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index c6a4fad1a..a91e8f8fc 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -5,7 +5,7 @@ from autogen_core import CancellationToken from ..base import ChatAgent, Response, TaskResult from ..messages import ( - AgentMessage, + AgentEvent, ChatMessage, TextMessage, ) @@ -58,7 +58,7 @@ class BaseChatAgent(ChatAgent, ABC): async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """Handles incoming messages and returns a stream of messages and and the final item is the response. The base implementation in :class:`BaseChatAgent` simply calls :meth:`on_messages` and yields @@ -89,7 +89,7 @@ class BaseChatAgent(ChatAgent, ABC): if cancellation_token is None: cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] - output_messages: List[AgentMessage] = [] + output_messages: List[AgentEvent | ChatMessage] = [] if task is None: pass elif isinstance(task, str): @@ -119,13 +119,13 @@ class BaseChatAgent(ChatAgent, ABC): *, task: str | ChatMessage | List[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[AgentMessage | TaskResult, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: """Run the agent with the given task and return a stream of messages and the final task result as the last item in the stream.""" if cancellation_token is None: cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] - output_messages: List[AgentMessage] = [] + output_messages: List[AgentEvent | ChatMessage] = [] if task is None: pass elif isinstance(task, str): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index 4c13c10bc..52e594ad7 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -8,7 +8,7 @@ from autogen_agentchat.state import SocietyOfMindAgentState from ..base import TaskResult, Team from ..messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, @@ -119,13 +119,13 @@ class SocietyOfMindAgent(BaseChatAgent): async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: # Prepare the task for the team of agents. task = list(messages) # Run the team of agents. result: TaskResult | None = None - inner_messages: List[AgentMessage] = [] + inner_messages: List[AgentEvent | ChatMessage] = [] count = 0 async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token): if isinstance(inner_msg, TaskResult): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py index c27dc232a..d41d1f6be 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py @@ -3,7 +3,7 @@ from typing import Any, AsyncGenerator, List, Mapping, Protocol, Sequence, runti from autogen_core import CancellationToken -from ..messages import AgentMessage, ChatMessage +from ..messages import AgentEvent, ChatMessage from ._task import TaskRunner @@ -14,7 +14,7 @@ class Response: chat_message: ChatMessage """A chat message produced by the agent as the response.""" - inner_messages: List[AgentMessage] | None = None + inner_messages: List[AgentEvent | ChatMessage] | None = None """Inner messages produced by the agent.""" @@ -46,7 +46,7 @@ class ChatAgent(TaskRunner, Protocol): def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """Handles incoming messages and returns a stream of inner messages and and the final item is the response.""" ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py index d0d9aefe7..ecf05b170 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py @@ -3,14 +3,14 @@ from typing import AsyncGenerator, List, Protocol, Sequence from autogen_core import CancellationToken -from ..messages import AgentMessage, ChatMessage +from ..messages import AgentEvent, ChatMessage @dataclass class TaskResult: """Result of running a task.""" - messages: Sequence[AgentMessage] + messages: Sequence[AgentEvent | ChatMessage] """Messages produced by the task.""" stop_reason: str | None = None @@ -38,7 +38,7 @@ class TaskRunner(Protocol): *, task: str | ChatMessage | List[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[AgentMessage | TaskResult, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: """Run the task and produces a stream of messages and the final result :class:`TaskResult` as the last item in the stream. diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index 211b912c1..8975c75aa 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -2,7 +2,7 @@ import asyncio from abc import ABC, abstractmethod from typing import List, Sequence -from ..messages import AgentMessage, StopMessage +from ..messages import AgentEvent, ChatMessage, StopMessage class TerminatedException(BaseException): ... @@ -50,7 +50,7 @@ class TerminationCondition(ABC): ... @abstractmethod - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: """Check if the conversation should be terminated based on the messages received since the last time the condition was called. Return a StopMessage if the conversation should be terminated, or None otherwise. @@ -88,7 +88,7 @@ class _AndTerminationCondition(TerminationCondition): def terminated(self) -> bool: return all(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached.") # Check all remaining conditions. @@ -120,7 +120,7 @@ class _OrTerminationCondition(TerminationCondition): def terminated(self) -> bool: return any(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise RuntimeError("Termination condition has already been reached") stop_messages = await asyncio.gather(*[condition(messages) for condition in self._conditions]) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index bab3e03ff..c554f477a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -2,7 +2,7 @@ import time from typing import List, Sequence from ..base import TerminatedException, TerminationCondition -from ..messages import AgentMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage +from ..messages import AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage class StopMessageTermination(TerminationCondition): @@ -15,7 +15,7 @@ class StopMessageTermination(TerminationCondition): def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -43,7 +43,7 @@ class MaxMessageTermination(TerminationCondition): def terminated(self) -> bool: return self._message_count >= self._max_messages - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") self._message_count += len(messages) @@ -73,7 +73,7 @@ class TextMentionTermination(TerminationCondition): def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -128,7 +128,7 @@ class TokenUsageTermination(TerminationCondition): or (self._max_completion_token is not None and self._completion_token_count >= self._max_completion_token) ) - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -163,7 +163,7 @@ class HandoffTermination(TerminationCondition): def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -194,7 +194,7 @@ class TimeoutTermination(TerminationCondition): def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") @@ -242,7 +242,7 @@ class ExternalTermination(TerminationCondition): """Set the termination condition to terminated.""" self._setted = True - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if self._setted: @@ -273,7 +273,7 @@ class SourceMatchTermination(TerminationCondition): def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if not messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index fddd6bea5..547b4a7fa 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -9,7 +9,7 @@ from typing import List, Literal from autogen_core import FunctionCall, Image from autogen_core.models import FunctionExecutionResult, RequestUsage from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Annotated +from typing_extensions import Annotated, deprecated class BaseMessage(BaseModel): @@ -63,6 +63,7 @@ class HandoffMessage(BaseMessage): type: Literal["HandoffMessage"] = "HandoffMessage" +@deprecated("Will be removed in 0.4.0, use ToolCallRequestEvent instead.") class ToolCallMessage(BaseMessage): """A message signaling the use of tools.""" @@ -72,6 +73,7 @@ class ToolCallMessage(BaseMessage): type: Literal["ToolCallMessage"] = "ToolCallMessage" +@deprecated("Will be removed in 0.4.0, use ToolCallExecutionEvent instead.") class ToolCallResultMessage(BaseMessage): """A message signaling the results of tool calls.""" @@ -81,15 +83,37 @@ class ToolCallResultMessage(BaseMessage): type: Literal["ToolCallResultMessage"] = "ToolCallResultMessage" +class ToolCallRequestEvent(BaseMessage): + """An event signaling a request to use tools.""" + + content: List[FunctionCall] + """The tool calls.""" + + type: Literal["ToolCallRequestEvent"] = "ToolCallRequestEvent" + + +class ToolCallExecutionEvent(BaseMessage): + """An event signaling the execution of tool calls.""" + + content: List[FunctionExecutionResult] + """The tool call results.""" + + type: Literal["ToolCallExecutionEvent"] = "ToolCallExecutionEvent" + + ChatMessage = Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage, Field(discriminator="type")] -"""Messages for agent-to-agent communication.""" +"""Messages for agent-to-agent communication only.""" + + +AgentEvent = Annotated[ToolCallRequestEvent | ToolCallExecutionEvent, Field(discriminator="type")] +"""Events emitted by agents and teams when they work, not used for agent-to-agent communication.""" AgentMessage = Annotated[ - TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallMessage | ToolCallResultMessage, + TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallRequestEvent | ToolCallExecutionEvent, Field(discriminator="type"), ] -"""All message types.""" +"""(Deprecated, will be removed in 0.4.0) All message and event types.""" __all__ = [ @@ -98,8 +122,11 @@ __all__ = [ "MultiModalMessage", "StopMessage", "HandoffMessage", + "ToolCallRequestEvent", + "ToolCallExecutionEvent", "ToolCallMessage", "ToolCallResultMessage", "ChatMessage", + "AgentEvent", "AgentMessage", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py index 4bf3d4709..ddc57e23d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py @@ -6,7 +6,7 @@ from autogen_core.models import ( from pydantic import BaseModel, Field from ..messages import ( - AgentMessage, + AgentEvent, ChatMessage, ) @@ -36,7 +36,7 @@ class TeamState(BaseState): class BaseGroupChatManagerState(BaseState): """Base state for all group chat managers.""" - message_thread: List[AgentMessage] = Field(default_factory=list) + message_thread: List[AgentEvent | ChatMessage] = Field(default_factory=list) current_turn: int = Field(default=0) type: str = Field(default="BaseGroupChatManagerState") diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py index 6ff207788..a45983d83 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py @@ -27,39 +27,39 @@ from ..conditions import ( from ..conditions import ( TokenUsageTermination as TokenUsageTerminationAlias, ) -from ..messages import AgentMessage +from ..messages import AgentEvent, ChatMessage from ..ui import Console as ConsoleAlias -@deprecated("Moved to autogen_agentchat.terminations.ExternalTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.ExternalTermination. Will remove this in 0.4.0.", stacklevel=2) class ExternalTermination(ExternalTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.HandoffTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.HandoffTermination. Will remove this in 0.4.0.", stacklevel=2) class HandoffTermination(HandoffTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.MaxMessageTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.MaxMessageTermination. Will remove this in 0.4.0.", stacklevel=2) class MaxMessageTermination(MaxMessageTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.SourceMatchTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.SourceMatchTermination. Will remove this in 0.4.0.", stacklevel=2) class SourceMatchTermination(SourceMatchTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.StopMessageTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.StopMessageTermination. Will remove this in 0.4.0.", stacklevel=2) class StopMessageTermination(StopMessageTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.TextMentionTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.TextMentionTermination. Will remove this in 0.4.0.", stacklevel=2) class TextMentionTermination(TextMentionTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.TimeoutTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.TimeoutTermination. Will remove this in 0.4.0.", stacklevel=2) class TimeoutTermination(TimeoutTerminationAlias): ... -@deprecated("Moved to autogen_agentchat.terminations.TokenUsageTermination. Will remove this in 0.4.0.", stacklevel=2) +@deprecated("Moved to autogen_agentchat.conditions.TokenUsageTermination. Will remove this in 0.4.0.", stacklevel=2) class TokenUsageTermination(TokenUsageTerminationAlias): ... @@ -68,7 +68,7 @@ T = TypeVar("T", bound=TaskResult | Response) @deprecated("Moved to autogen_agentchat.ui.Console. Will remove this in 0.4.0.", stacklevel=2) async def Console( - stream: AsyncGenerator[AgentMessage | T, None], + stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, ) -> T: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index 7a6496acb..1d8d30d58 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -19,7 +19,7 @@ from autogen_core._closure_agent import ClosureContext from ... import EVENT_LOGGER_NAME from ...base import ChatAgent, TaskResult, Team, TerminationCondition -from ...messages import AgentMessage, ChatMessage, TextMessage +from ...messages import AgentEvent, ChatMessage, TextMessage from ...state import TeamState from ._chat_agent_container import ChatAgentContainer from ._events import GroupChatMessage, GroupChatReset, GroupChatStart, GroupChatTermination @@ -62,7 +62,7 @@ class BaseGroupChat(Team, ABC): # Constants for the closure agent to collect the output messages. self._stop_reason: str | None = None - self._output_message_queue: asyncio.Queue[AgentMessage | None] = asyncio.Queue() + self._output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | None] = asyncio.Queue() # Create a runtime for the team. # TODO: The runtime should be created by a managed context. @@ -273,7 +273,7 @@ class BaseGroupChat(Team, ABC): *, task: str | ChatMessage | List[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[AgentMessage | TaskResult, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: """Run the team and produces a stream of messages and the final result of the type :class:`TaskResult` as the last item in the stream. Once the team is stopped, the termination condition is reset. @@ -405,7 +405,7 @@ class BaseGroupChat(Team, ABC): cancellation_token=cancellation_token, ) # Collect the output messages in order. - output_messages: List[AgentMessage] = [] + output_messages: List[AgentEvent | ChatMessage] = [] # Yield the messsages until the queue is empty. while True: message_future = asyncio.ensure_future(self._output_message_queue.get()) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py index 84725cecd..45469292d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py @@ -5,7 +5,7 @@ from typing import Any, List from autogen_core import DefaultTopicId, MessageContext, event, rpc from ...base import TerminationCondition -from ...messages import AgentMessage, ChatMessage, StopMessage +from ...messages import AgentEvent, ChatMessage, StopMessage from ._events import ( GroupChatAgentResponse, GroupChatRequestPublish, @@ -48,7 +48,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC): raise ValueError("The group topic type must not be in the participant topic types.") self._participant_topic_types = participant_topic_types self._participant_descriptions = participant_descriptions - self._message_thread: List[AgentMessage] = [] + self._message_thread: List[AgentEvent | ChatMessage] = [] self._termination_condition = termination_condition if max_turns is not None and max_turns <= 0: raise ValueError("The maximum number of turns must be greater than 0.") @@ -115,7 +115,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC): @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # Append the message to the message thread and construct the delta. - delta: List[AgentMessage] = [] + delta: List[AgentEvent | ChatMessage] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: self._message_thread.append(inner_message) @@ -180,7 +180,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC): ... @abstractmethod - async def select_speaker(self, thread: List[AgentMessage]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants and return the topic type of the selected speaker.""" ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py index ed325fcb5..853b854a4 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py @@ -3,7 +3,7 @@ from typing import List from pydantic import BaseModel from ...base import Response -from ...messages import AgentMessage, ChatMessage, StopMessage +from ...messages import AgentEvent, ChatMessage, StopMessage class GroupChatStart(BaseModel): @@ -29,7 +29,7 @@ class GroupChatRequestPublish(BaseModel): class GroupChatMessage(BaseModel): """A message from a group chat.""" - message: AgentMessage + message: AgentEvent | ChatMessage """The message that was published.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index dcdf8b918..6e0d12f7f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -13,14 +13,14 @@ from autogen_core.models import ( from .... import TRACE_LOGGER_NAME from ....base import Response, TerminationCondition from ....messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallExecutionEvent, + ToolCallRequestEvent, ) from ....state import MagenticOneOrchestratorState from .._base_group_chat_manager import BaseGroupChatManager @@ -167,7 +167,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager): @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # type: ignore - delta: List[AgentMessage] = [] + delta: List[AgentEvent | ChatMessage] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: delta.append(inner_message) @@ -210,7 +210,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager): self._n_rounds = orchestrator_state.n_rounds self._n_stalls = orchestrator_state.n_stalls - async def select_speaker(self, thread: List[AgentMessage]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Not used in this orchestrator, we select next speaker in _orchestrate_step.""" return "" @@ -427,7 +427,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager): """Convert the message thread to a context for the model.""" context: List[LLMMessage] = [] for m in self._message_thread: - if isinstance(m, ToolCallMessage | ToolCallResultMessage): + if isinstance(m, ToolCallRequestEvent | ToolCallExecutionEvent): # Ignore tool call messages. continue elif isinstance(m, StopMessage | HandoffMessage): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 3e17943b9..d6901f04c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -1,7 +1,7 @@ from typing import Any, Callable, List, Mapping from ...base import ChatAgent, TerminationCondition -from ...messages import AgentMessage, ChatMessage +from ...messages import AgentEvent, ChatMessage from ...state import RoundRobinManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -53,7 +53,7 @@ class RoundRobinGroupChatManager(BaseGroupChatManager): self._current_turn = round_robin_state.current_turn self._next_speaker_index = round_robin_state.next_speaker_index - async def select_speaker(self, thread: List[AgentMessage]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants in a round-robin fashion.""" current_speaker_index = self._next_speaker_index self._next_speaker_index = (current_speaker_index + 1) % len(self._participant_topic_types) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 5f161d0c6..735e533c9 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -7,14 +7,14 @@ from autogen_core.models import ChatCompletionClient, SystemMessage from ... import TRACE_LOGGER_NAME from ...base import ChatAgent, TerminationCondition from ...messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallExecutionEvent, + ToolCallRequestEvent, ) from ...state import SelectorManagerState from ._base_group_chat import BaseGroupChat @@ -38,7 +38,7 @@ class SelectorGroupChatManager(BaseGroupChatManager): model_client: ChatCompletionClient, selector_prompt: str, allow_repeated_speaker: bool, - selector_func: Callable[[Sequence[AgentMessage]], str | None] | None, + selector_func: Callable[[Sequence[AgentEvent | ChatMessage]], str | None] | None, ) -> None: super().__init__( group_topic_type, @@ -78,7 +78,7 @@ class SelectorGroupChatManager(BaseGroupChatManager): self._current_turn = selector_state.current_turn self._previous_speaker = selector_state.previous_speaker - async def select_speaker(self, thread: List[AgentMessage]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Selects the next speaker in a group chat using a ChatCompletion client, with the selector function as override if it returns a speaker name. @@ -95,7 +95,7 @@ class SelectorGroupChatManager(BaseGroupChatManager): # Construct the history of the conversation. history_messages: List[str] = [] for msg in thread: - if isinstance(msg, ToolCallMessage | ToolCallResultMessage): + if isinstance(msg, ToolCallRequestEvent | ToolCallExecutionEvent): # Ignore tool call messages. continue # The agent type must be the same as the topic type, which we use as the agent name. @@ -204,7 +204,7 @@ class SelectorGroupChat(BaseGroupChat): Must contain '{roles}', '{participants}', and '{history}' to be filled in. allow_repeated_speaker (bool, optional): Whether to allow the same speaker to be selected consecutively. Defaults to False. - selector_func (Callable[[Sequence[AgentMessage]], str | None], optional): A custom selector + selector_func (Callable[[Sequence[AgentEvent | ChatMessage]], str | None], optional): A custom selector function that takes the conversation history and returns the name of the next speaker. If provided, this function will be used to override the model to select the next speaker. If the function returns None, the model will be used to select the next speaker. @@ -278,7 +278,7 @@ class SelectorGroupChat(BaseGroupChat): from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.ui import Console - from autogen_agentchat.messages import AgentMessage + from autogen_agentchat.messages import AgentEvent, ChatMessage async def main() -> None: @@ -304,7 +304,7 @@ class SelectorGroupChat(BaseGroupChat): system_message="Check the answer and respond with 'Correct!' or 'Incorrect!'", ) - def selector_func(messages: Sequence[AgentMessage]) -> str | None: + def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: if len(messages) == 1 or messages[-1].content == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": @@ -341,7 +341,7 @@ Read the following conversation. Then select the next role from {participants} t Read the above conversation. Then select the next role from {participants} to play. Only return the role. """, allow_repeated_speaker: bool = False, - selector_func: Callable[[Sequence[AgentMessage]], str | None] | None = None, + selector_func: Callable[[Sequence[AgentEvent | ChatMessage]], str | None] | None = None, ): super().__init__( participants, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index 436fe8e4c..a31a693e0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -1,7 +1,7 @@ from typing import Any, Callable, List, Mapping from ...base import ChatAgent, TerminationCondition -from ...messages import AgentMessage, ChatMessage, HandoffMessage +from ...messages import AgentEvent, ChatMessage, HandoffMessage from ...state import SwarmManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -64,7 +64,7 @@ class SwarmGroupChatManager(BaseGroupChatManager): await self._termination_condition.reset() self._current_speaker = self._participant_topic_types[0] - async def select_speaker(self, thread: List[AgentMessage]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants based on handoff message. Looks for the last handoff message in the thread to determine the next speaker.""" if len(thread) == 0: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 364526386..6315b5049 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -7,7 +7,7 @@ from autogen_core import Image from autogen_core.models import RequestUsage from autogen_agentchat.base import Response, TaskResult -from autogen_agentchat.messages import AgentMessage, MultiModalMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage def _is_running_in_iterm() -> bool: @@ -22,7 +22,7 @@ T = TypeVar("T", bound=TaskResult | Response) async def Console( - stream: AsyncGenerator[AgentMessage | T, None], + stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, ) -> T: @@ -32,7 +32,7 @@ async def Console( Returns the last processed TaskResult or Response. Args: - stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Message stream to render. + stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. @@ -93,7 +93,7 @@ async def Console( else: # Cast required for mypy to be happy - message = cast(AgentMessage, message) # type: ignore + message = cast(AgentEvent | ChatMessage, message) # type: ignore output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n" if message.models_usage: output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" @@ -114,7 +114,7 @@ def _image_to_iterm(image: Image) -> str: return f"\033]1337;File=inline=1:{image_data}\a\n" -def _message_to_str(message: AgentMessage, *, render_image_iterm: bool = False) -> str: +def _message_to_str(message: AgentEvent | ChatMessage, *, render_image_iterm: bool = False) -> str: if isinstance(message, MultiModalMessage): result: List[str] = [] for c in message.content: diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index c132e3a48..67969cfce 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -12,8 +12,8 @@ from autogen_agentchat.messages import ( HandoffMessage, MultiModalMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallExecutionEvent, + ToolCallRequestEvent, ) from autogen_core import Image from autogen_core.tools import FunctionTool @@ -136,11 +136,11 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: assert len(result.messages) == 4 assert isinstance(result.messages[0], TextMessage) assert result.messages[0].models_usage is None - assert isinstance(result.messages[1], ToolCallMessage) + assert isinstance(result.messages[1], ToolCallRequestEvent) assert result.messages[1].models_usage is not None assert result.messages[1].models_usage.completion_tokens == 5 assert result.messages[1].models_usage.prompt_tokens == 10 - assert isinstance(result.messages[2], ToolCallResultMessage) + assert isinstance(result.messages[2], ToolCallExecutionEvent) assert result.messages[2].models_usage is None assert isinstance(result.messages[3], TextMessage) assert result.messages[3].content == "pass" @@ -235,11 +235,11 @@ async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> assert len(result.messages) == 4 assert isinstance(result.messages[0], TextMessage) assert result.messages[0].models_usage is None - assert isinstance(result.messages[1], ToolCallMessage) + assert isinstance(result.messages[1], ToolCallRequestEvent) assert result.messages[1].models_usage is not None assert result.messages[1].models_usage.completion_tokens == 5 assert result.messages[1].models_usage.prompt_tokens == 10 - assert isinstance(result.messages[2], ToolCallResultMessage) + assert isinstance(result.messages[2], ToolCallExecutionEvent) assert result.messages[2].models_usage is None assert isinstance(result.messages[3], TextMessage) assert result.messages[3].content == "Hello" @@ -323,11 +323,11 @@ async def test_handoffs(monkeypatch: pytest.MonkeyPatch) -> None: assert len(result.messages) == 4 assert isinstance(result.messages[0], TextMessage) assert result.messages[0].models_usage is None - assert isinstance(result.messages[1], ToolCallMessage) + assert isinstance(result.messages[1], ToolCallRequestEvent) assert result.messages[1].models_usage is not None assert result.messages[1].models_usage.completion_tokens == 43 assert result.messages[1].models_usage.prompt_tokens == 42 - assert isinstance(result.messages[2], ToolCallResultMessage) + assert isinstance(result.messages[2], ToolCallExecutionEvent) assert result.messages[2].models_usage is None assert isinstance(result.messages[3], HandoffMessage) assert result.messages[3].content == handoff.message diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 6a3c91b80..d3a0f2e56 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -14,14 +14,14 @@ from autogen_agentchat.agents import ( from autogen_agentchat.base import Handoff, Response, TaskResult from autogen_agentchat.conditions import HandoffTermination, MaxMessageTermination, TextMentionTermination from autogen_agentchat.messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallExecutionEvent, + ToolCallRequestEvent, ) from autogen_agentchat.teams import ( RoundRobinGroupChat, @@ -323,8 +323,8 @@ async def test_round_robin_group_chat_with_tools(monkeypatch: pytest.MonkeyPatch ) assert len(result.messages) == 8 assert isinstance(result.messages[0], TextMessage) # task - assert isinstance(result.messages[1], ToolCallMessage) # tool call - assert isinstance(result.messages[2], ToolCallResultMessage) # tool call result + assert isinstance(result.messages[1], ToolCallRequestEvent) # tool call + assert isinstance(result.messages[2], ToolCallExecutionEvent) # tool call result assert isinstance(result.messages[3], TextMessage) # tool use agent response assert isinstance(result.messages[4], TextMessage) # echo agent response assert isinstance(result.messages[5], TextMessage) # tool use agent response @@ -747,7 +747,7 @@ async def test_selector_group_chat_custom_selector(monkeypatch: pytest.MonkeyPat agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _select_agent(messages: Sequence[AgentMessage]) -> str | None: + def _select_agent(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: if len(messages) == 0: return "agent1" elif messages[-1].source == "agent1": @@ -920,8 +920,8 @@ async def test_swarm_handoff_using_tool_calls(monkeypatch: pytest.MonkeyPatch) - result = await team.run(task="task") assert len(result.messages) == 7 assert result.messages[0].content == "task" - assert isinstance(result.messages[1], ToolCallMessage) - assert isinstance(result.messages[2], ToolCallResultMessage) + assert isinstance(result.messages[1], ToolCallRequestEvent) + assert isinstance(result.messages[2], ToolCallExecutionEvent) assert result.messages[3].content == "handoff to agent2" assert result.messages[4].content == "Transferred to agent1." assert result.messages[5].content == "Hello" diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb index a9959ccc1..2111dd098 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/company-research.ipynb @@ -1,416 +1,416 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Company Research \n", - "\n", - "\n", - "Conducting company research, or competitive analysis, is a critical part of any business strategy. In this notebook, we will demonstrate how to create a team of agents to address this task. While there are many ways to translate a task into an agentic implementation, we will explore a sequential approach. We will create agents corresponding to steps in the research process and give them tools to perform their tasks.\n", - "\n", - "- **Search Agent**: Searches the web for information about a company. Will have access to a search engine API tool to retrieve search results.\n", - "- **Stock Analysis Agent**: Retrieves the company's stock information from a financial data API, computes basic statistics (current price, 52-week high, 52-week low, etc.), and generates a plot of the stock price year-to-date, saving it to a file. Will have access to a financial data API tool to retrieve stock information.\n", - "- **Report Agent**: Generates a report based on the information collected by the search and stock analysis agents. \n", - "\n", - "First, let's import the necessary modules." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core.tools import FunctionTool\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining Tools \n", - "\n", - "Next, we will define the tools that the agents will use to perform their tasks. We will create a `google_search` that uses the Google Search API to search the web for information about a company. We will also create a `analyze_stock` function that uses the `yfinance` library to retrieve stock information for a company. \n", - "\n", - "Finally, we will wrap these functions into a `FunctionTool` class that will allow us to use them as tools in our agents. \n", - "\n", - "Note: The `google_search` function requires an API key to work. You can create a `.env` file in the same directory as this notebook and add your API key as \n", - "\n", - "```\n", - "GOOGLE_SEARCH_ENGINE_ID =xxx\n", - "GOOGLE_API_KEY=xxx \n", - "``` \n", - "\n", - "Also install required libraries \n", - "\n", - "```\n", - "pip install yfinance matplotlib pytz numpy pandas python-dotenv requests bs4\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "#!pip install yfinance matplotlib pytz numpy pandas python-dotenv requests bs4\n", - "\n", - "\n", - "def google_search(query: str, num_results: int = 2, max_chars: int = 500) -> list: # type: ignore[type-arg]\n", - " import os\n", - " import time\n", - "\n", - " import requests\n", - " from bs4 import BeautifulSoup\n", - " from dotenv import load_dotenv\n", - "\n", - " load_dotenv()\n", - "\n", - " api_key = os.getenv(\"GOOGLE_API_KEY\")\n", - " search_engine_id = os.getenv(\"GOOGLE_SEARCH_ENGINE_ID\")\n", - "\n", - " if not api_key or not search_engine_id:\n", - " raise ValueError(\"API key or Search Engine ID not found in environment variables\")\n", - "\n", - " url = \"https://customsearch.googleapis.com/customsearch/v1\"\n", - " params = {\"key\": str(api_key), \"cx\": str(search_engine_id), \"q\": str(query), \"num\": str(num_results)}\n", - "\n", - " response = requests.get(url, params=params)\n", - "\n", - " if response.status_code != 200:\n", - " print(response.json())\n", - " raise Exception(f\"Error in API request: {response.status_code}\")\n", - "\n", - " results = response.json().get(\"items\", [])\n", - "\n", - " def get_page_content(url: str) -> str:\n", - " try:\n", - " response = requests.get(url, timeout=10)\n", - " soup = BeautifulSoup(response.content, \"html.parser\")\n", - " text = soup.get_text(separator=\" \", strip=True)\n", - " words = text.split()\n", - " content = \"\"\n", - " for word in words:\n", - " if len(content) + len(word) + 1 > max_chars:\n", - " break\n", - " content += \" \" + word\n", - " return content.strip()\n", - " except Exception as e:\n", - " print(f\"Error fetching {url}: {str(e)}\")\n", - " return \"\"\n", - "\n", - " enriched_results = []\n", - " for item in results:\n", - " body = get_page_content(item[\"link\"])\n", - " enriched_results.append(\n", - " {\"title\": item[\"title\"], \"link\": item[\"link\"], \"snippet\": item[\"snippet\"], \"body\": body}\n", - " )\n", - " time.sleep(1) # Be respectful to the servers\n", - "\n", - " return enriched_results\n", - "\n", - "\n", - "def analyze_stock(ticker: str) -> dict: # type: ignore[type-arg]\n", - " import os\n", - " from datetime import datetime, timedelta\n", - "\n", - " import matplotlib.pyplot as plt\n", - " import numpy as np\n", - " import pandas as pd\n", - " import yfinance as yf\n", - " from pytz import timezone # type: ignore\n", - "\n", - " stock = yf.Ticker(ticker)\n", - "\n", - " # Get historical data (1 year of data to ensure we have enough for 200-day MA)\n", - " end_date = datetime.now(timezone(\"UTC\"))\n", - " start_date = end_date - timedelta(days=365)\n", - " hist = stock.history(start=start_date, end=end_date)\n", - "\n", - " # Ensure we have data\n", - " if hist.empty:\n", - " return {\"error\": \"No historical data available for the specified ticker.\"}\n", - "\n", - " # Compute basic statistics and additional metrics\n", - " current_price = stock.info.get(\"currentPrice\", hist[\"Close\"].iloc[-1])\n", - " year_high = stock.info.get(\"fiftyTwoWeekHigh\", hist[\"High\"].max())\n", - " year_low = stock.info.get(\"fiftyTwoWeekLow\", hist[\"Low\"].min())\n", - "\n", - " # Calculate 50-day and 200-day moving averages\n", - " ma_50 = hist[\"Close\"].rolling(window=50).mean().iloc[-1]\n", - " ma_200 = hist[\"Close\"].rolling(window=200).mean().iloc[-1]\n", - "\n", - " # Calculate YTD price change and percent change\n", - " ytd_start = datetime(end_date.year, 1, 1, tzinfo=timezone(\"UTC\"))\n", - " ytd_data = hist.loc[ytd_start:] # type: ignore[misc]\n", - " if not ytd_data.empty:\n", - " price_change = ytd_data[\"Close\"].iloc[-1] - ytd_data[\"Close\"].iloc[0]\n", - " percent_change = (price_change / ytd_data[\"Close\"].iloc[0]) * 100\n", - " else:\n", - " price_change = percent_change = np.nan\n", - "\n", - " # Determine trend\n", - " if pd.notna(ma_50) and pd.notna(ma_200):\n", - " if ma_50 > ma_200:\n", - " trend = \"Upward\"\n", - " elif ma_50 < ma_200:\n", - " trend = \"Downward\"\n", - " else:\n", - " trend = \"Neutral\"\n", - " else:\n", - " trend = \"Insufficient data for trend analysis\"\n", - "\n", - " # Calculate volatility (standard deviation of daily returns)\n", - " daily_returns = hist[\"Close\"].pct_change().dropna()\n", - " volatility = daily_returns.std() * np.sqrt(252) # Annualized volatility\n", - "\n", - " # Create result dictionary\n", - " result = {\n", - " \"ticker\": ticker,\n", - " \"current_price\": current_price,\n", - " \"52_week_high\": year_high,\n", - " \"52_week_low\": year_low,\n", - " \"50_day_ma\": ma_50,\n", - " \"200_day_ma\": ma_200,\n", - " \"ytd_price_change\": price_change,\n", - " \"ytd_percent_change\": percent_change,\n", - " \"trend\": trend,\n", - " \"volatility\": volatility,\n", - " }\n", - "\n", - " # Convert numpy types to Python native types for better JSON serialization\n", - " for key, value in result.items():\n", - " if isinstance(value, np.generic):\n", - " result[key] = value.item()\n", - "\n", - " # Generate plot\n", - " plt.figure(figsize=(12, 6))\n", - " plt.plot(hist.index, hist[\"Close\"], label=\"Close Price\")\n", - " plt.plot(hist.index, hist[\"Close\"].rolling(window=50).mean(), label=\"50-day MA\")\n", - " plt.plot(hist.index, hist[\"Close\"].rolling(window=200).mean(), label=\"200-day MA\")\n", - " plt.title(f\"{ticker} Stock Price (Past Year)\")\n", - " plt.xlabel(\"Date\")\n", - " plt.ylabel(\"Price ($)\")\n", - " plt.legend()\n", - " plt.grid(True)\n", - "\n", - " # Save plot to file\n", - " os.makedirs(\"coding\", exist_ok=True)\n", - " plot_file_path = f\"coding/{ticker}_stockprice.png\"\n", - " plt.savefig(plot_file_path)\n", - " print(f\"Plot saved as {plot_file_path}\")\n", - " result[\"plot_file_path\"] = plot_file_path\n", - "\n", - " return result" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "google_search_tool = FunctionTool(\n", - " google_search, description=\"Search Google for information, returns results with a snippet and body content\"\n", - ")\n", - "stock_analysis_tool = FunctionTool(analyze_stock, description=\"Analyze stock data and generate a plot\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining Agents\n", - "\n", - "Next, we will define the agents that will perform the tasks. We will create a `search_agent` that searches the web for information about a company, a `stock_analysis_agent` that retrieves stock information for a company, and a `report_agent` that generates a report based on the information collected by the other agents. " - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "search_agent = AssistantAgent(\n", - " name=\"Google_Search_Agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " tools=[google_search_tool],\n", - " description=\"Search Google for information, returns top 2 results with a snippet and body content\",\n", - " system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n", - ")\n", - "\n", - "stock_analysis_agent = AssistantAgent(\n", - " name=\"Stock_Analysis_Agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " tools=[stock_analysis_tool],\n", - " description=\"Analyze stock data and generate a plot\",\n", - " system_message=\"Perform data analysis.\",\n", - ")\n", - "\n", - "report_agent = AssistantAgent(\n", - " name=\"Report_Agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " description=\"Generate a report based the search and results of stock analysis\",\n", - " system_message=\"You are a helpful assistant that can generate a comprehensive report on a given topic based on search and stock analysis. When you done with generating the report, reply with TERMINATE.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating the Team\n", - "\n", - "Finally, let's create a team of the three agents and set them to work on researching a company." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "team = RoundRobinGroupChat([stock_analysis_agent, search_agent, report_agent], max_turns=3)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We use `max_turns=3` to limit the number of turns to exactly the same number of agents in the team. This effectively makes the agents work in a sequential manner." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a financial report on American airlines\n", - "---------- Stock_Analysis_Agent ----------\n", - "[FunctionCall(id='call_tPh9gSfGrDu1nC2Ck5RlfbFY', arguments='{\"ticker\":\"AAL\"}', name='analyze_stock')]\n", - "[Prompt tokens: 64, Completion tokens: 16]\n", - "Plot saved as coding/AAL_stockprice.png\n", - "---------- Stock_Analysis_Agent ----------\n", - "[FunctionExecutionResult(content=\"{'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\", call_id='call_tPh9gSfGrDu1nC2Ck5RlfbFY')]\n", - "---------- Stock_Analysis_Agent ----------\n", - "Tool calls:\n", - "analyze_stock({\"ticker\":\"AAL\"}) = {'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\n", - "---------- Google_Search_Agent ----------\n", - "[FunctionCall(id='call_wSHc5Kw1ix3aQDXXT23opVnU', arguments='{\"query\":\"American Airlines financial report 2023\",\"num_results\":1}', name='google_search')]\n", - "[Prompt tokens: 268, Completion tokens: 25]\n", - "---------- Google_Search_Agent ----------\n", - "[FunctionExecutionResult(content=\"[{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\", call_id='call_wSHc5Kw1ix3aQDXXT23opVnU')]\n", - "---------- Google_Search_Agent ----------\n", - "Tool calls:\n", - "google_search({\"query\":\"American Airlines financial report 2023\",\"num_results\":1}) = [{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\n", - "---------- Report_Agent ----------\n", - "### American Airlines Financial Report\n", - "\n", - "#### Overview\n", - "American Airlines Group Inc. (NASDAQ: AAL) is a major American airline headquartered in Fort Worth, Texas. It is known as one of the largest airlines in the world by fleet size, revenue, and passenger kilometers flown. As of the current quarter in 2023, American Airlines has shown significant financial activities and stock performance noteworthy for investors and analysts.\n", - "\n", - "#### Stock Performance\n", - "- **Current Stock Price**: $17.40\n", - "- **52-Week Range**: The stock price has ranged from $9.07 to $18.09 over the past year, indicating considerable volatility and fluctuation in market interest.\n", - "- **Moving Averages**: \n", - " - 50-Day MA: $13.38\n", - " - 200-Day MA: $12.60\n", - " These moving averages suggest a strong upward trend in recent months as the 50-day moving average is positioned above the 200-day moving average, indicating bullish momentum.\n", - "\n", - "- **YTD Price Change**: $3.96\n", - "- **YTD Percent Change**: 29.46%\n", - " The year-to-date figures demonstrate a robust upward momentum, with the stock appreciating by nearly 29.5% since the beginning of the year.\n", - "\n", - "- **Trend**: The current stock trend for American Airlines is upward, reflecting positive market sentiment and performance improvements.\n", - "\n", - "- **Volatility**: 0.446, indicating moderate volatility in the stock, which may attract risk-tolerant investors seeking dynamic movements for potential profit.\n", - "\n", - "#### Recent Financial Performance\n", - "According to the latest financial reports of 2023 (accessed through a reliable source), American Airlines reported remarkable figures for both the fourth quarter and the full year 2023. Key highlights from the report include:\n", - "\n", - "- **Revenue Growth**: American Airlines experienced substantial revenue increases, driven by high demand for travel as pandemic-related restrictions eased globally.\n", - "- **Profit Margins**: The company managed to enhance its profitability, largely attributed to cost management strategies and increased operational efficiency.\n", - "- **Challenges**: Despite positive momentum, the airline industry faces ongoing challenges including fluctuating fuel prices, geopolitical tensions, and competition pressures.\n", - "\n", - "#### Strategic Initiatives\n", - "American Airlines has been focusing on several strategic initiatives to maintain its market leadership and improve its financial metrics:\n", - "1. **Fleet Modernization**: Continuation of investment in more fuel-efficient aircraft to reduce operating costs and environmental impact.\n", - "2. **Enhanced Customer Experience**: Introduction of new services and technology enhancements aimed at improving customer satisfaction.\n", - "3. **Operational Efficiency**: Streamlining processes to cut costs and increase overall effectiveness, which includes leveraging data analytics for better decision-making.\n", - "\n", - "#### Conclusion\n", - "American Airlines is demonstrating strong market performance and financial growth amid an evolving industry landscape. The company's stock has been on an upward trend, reflecting its solid operational strategies and recovery efforts post-COVID pandemic. Investors should remain mindful of external risks while considering American Airlines as a potential investment, supported by its current upward trajectory and strategic initiatives.\n", - "\n", - "For further details, investors are encouraged to review the full financial reports from American Airlines and assess ongoing market conditions.\n", - "\n", - "_TERMINATE_\n", - "[Prompt tokens: 360, Completion tokens: 633]\n", - "---------- Summary ----------\n", - "Number of messages: 8\n", - "Finish reason: Maximum number of turns 3 reached.\n", - "Total prompt tokens: 692\n", - "Total completion tokens: 674\n", - "Duration: 19.38 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Company Research \n", + "\n", + "\n", + "Conducting company research, or competitive analysis, is a critical part of any business strategy. In this notebook, we will demonstrate how to create a team of agents to address this task. While there are many ways to translate a task into an agentic implementation, we will explore a sequential approach. We will create agents corresponding to steps in the research process and give them tools to perform their tasks.\n", + "\n", + "- **Search Agent**: Searches the web for information about a company. Will have access to a search engine API tool to retrieve search results.\n", + "- **Stock Analysis Agent**: Retrieves the company's stock information from a financial data API, computes basic statistics (current price, 52-week high, 52-week low, etc.), and generates a plot of the stock price year-to-date, saving it to a file. Will have access to a financial data API tool to retrieve stock information.\n", + "- **Report Agent**: Generates a report based on the information collected by the search and stock analysis agents. \n", + "\n", + "First, let's import the necessary modules." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core.tools import FunctionTool\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining Tools \n", + "\n", + "Next, we will define the tools that the agents will use to perform their tasks. We will create a `google_search` that uses the Google Search API to search the web for information about a company. We will also create a `analyze_stock` function that uses the `yfinance` library to retrieve stock information for a company. \n", + "\n", + "Finally, we will wrap these functions into a `FunctionTool` class that will allow us to use them as tools in our agents. \n", + "\n", + "Note: The `google_search` function requires an API key to work. You can create a `.env` file in the same directory as this notebook and add your API key as \n", + "\n", + "```\n", + "GOOGLE_SEARCH_ENGINE_ID =xxx\n", + "GOOGLE_API_KEY=xxx \n", + "``` \n", + "\n", + "Also install required libraries \n", + "\n", + "```\n", + "pip install yfinance matplotlib pytz numpy pandas python-dotenv requests bs4\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "#!pip install yfinance matplotlib pytz numpy pandas python-dotenv requests bs4\n", + "\n", + "\n", + "def google_search(query: str, num_results: int = 2, max_chars: int = 500) -> list: # type: ignore[type-arg]\n", + " import os\n", + " import time\n", + "\n", + " import requests\n", + " from bs4 import BeautifulSoup\n", + " from dotenv import load_dotenv\n", + "\n", + " load_dotenv()\n", + "\n", + " api_key = os.getenv(\"GOOGLE_API_KEY\")\n", + " search_engine_id = os.getenv(\"GOOGLE_SEARCH_ENGINE_ID\")\n", + "\n", + " if not api_key or not search_engine_id:\n", + " raise ValueError(\"API key or Search Engine ID not found in environment variables\")\n", + "\n", + " url = \"https://customsearch.googleapis.com/customsearch/v1\"\n", + " params = {\"key\": str(api_key), \"cx\": str(search_engine_id), \"q\": str(query), \"num\": str(num_results)}\n", + "\n", + " response = requests.get(url, params=params)\n", + "\n", + " if response.status_code != 200:\n", + " print(response.json())\n", + " raise Exception(f\"Error in API request: {response.status_code}\")\n", + "\n", + " results = response.json().get(\"items\", [])\n", + "\n", + " def get_page_content(url: str) -> str:\n", + " try:\n", + " response = requests.get(url, timeout=10)\n", + " soup = BeautifulSoup(response.content, \"html.parser\")\n", + " text = soup.get_text(separator=\" \", strip=True)\n", + " words = text.split()\n", + " content = \"\"\n", + " for word in words:\n", + " if len(content) + len(word) + 1 > max_chars:\n", + " break\n", + " content += \" \" + word\n", + " return content.strip()\n", + " except Exception as e:\n", + " print(f\"Error fetching {url}: {str(e)}\")\n", + " return \"\"\n", + "\n", + " enriched_results = []\n", + " for item in results:\n", + " body = get_page_content(item[\"link\"])\n", + " enriched_results.append(\n", + " {\"title\": item[\"title\"], \"link\": item[\"link\"], \"snippet\": item[\"snippet\"], \"body\": body}\n", + " )\n", + " time.sleep(1) # Be respectful to the servers\n", + "\n", + " return enriched_results\n", + "\n", + "\n", + "def analyze_stock(ticker: str) -> dict: # type: ignore[type-arg]\n", + " import os\n", + " from datetime import datetime, timedelta\n", + "\n", + " import matplotlib.pyplot as plt\n", + " import numpy as np\n", + " import pandas as pd\n", + " import yfinance as yf\n", + " from pytz import timezone # type: ignore\n", + "\n", + " stock = yf.Ticker(ticker)\n", + "\n", + " # Get historical data (1 year of data to ensure we have enough for 200-day MA)\n", + " end_date = datetime.now(timezone(\"UTC\"))\n", + " start_date = end_date - timedelta(days=365)\n", + " hist = stock.history(start=start_date, end=end_date)\n", + "\n", + " # Ensure we have data\n", + " if hist.empty:\n", + " return {\"error\": \"No historical data available for the specified ticker.\"}\n", + "\n", + " # Compute basic statistics and additional metrics\n", + " current_price = stock.info.get(\"currentPrice\", hist[\"Close\"].iloc[-1])\n", + " year_high = stock.info.get(\"fiftyTwoWeekHigh\", hist[\"High\"].max())\n", + " year_low = stock.info.get(\"fiftyTwoWeekLow\", hist[\"Low\"].min())\n", + "\n", + " # Calculate 50-day and 200-day moving averages\n", + " ma_50 = hist[\"Close\"].rolling(window=50).mean().iloc[-1]\n", + " ma_200 = hist[\"Close\"].rolling(window=200).mean().iloc[-1]\n", + "\n", + " # Calculate YTD price change and percent change\n", + " ytd_start = datetime(end_date.year, 1, 1, tzinfo=timezone(\"UTC\"))\n", + " ytd_data = hist.loc[ytd_start:] # type: ignore[misc]\n", + " if not ytd_data.empty:\n", + " price_change = ytd_data[\"Close\"].iloc[-1] - ytd_data[\"Close\"].iloc[0]\n", + " percent_change = (price_change / ytd_data[\"Close\"].iloc[0]) * 100\n", + " else:\n", + " price_change = percent_change = np.nan\n", + "\n", + " # Determine trend\n", + " if pd.notna(ma_50) and pd.notna(ma_200):\n", + " if ma_50 > ma_200:\n", + " trend = \"Upward\"\n", + " elif ma_50 < ma_200:\n", + " trend = \"Downward\"\n", + " else:\n", + " trend = \"Neutral\"\n", + " else:\n", + " trend = \"Insufficient data for trend analysis\"\n", + "\n", + " # Calculate volatility (standard deviation of daily returns)\n", + " daily_returns = hist[\"Close\"].pct_change().dropna()\n", + " volatility = daily_returns.std() * np.sqrt(252) # Annualized volatility\n", + "\n", + " # Create result dictionary\n", + " result = {\n", + " \"ticker\": ticker,\n", + " \"current_price\": current_price,\n", + " \"52_week_high\": year_high,\n", + " \"52_week_low\": year_low,\n", + " \"50_day_ma\": ma_50,\n", + " \"200_day_ma\": ma_200,\n", + " \"ytd_price_change\": price_change,\n", + " \"ytd_percent_change\": percent_change,\n", + " \"trend\": trend,\n", + " \"volatility\": volatility,\n", + " }\n", + "\n", + " # Convert numpy types to Python native types for better JSON serialization\n", + " for key, value in result.items():\n", + " if isinstance(value, np.generic):\n", + " result[key] = value.item()\n", + "\n", + " # Generate plot\n", + " plt.figure(figsize=(12, 6))\n", + " plt.plot(hist.index, hist[\"Close\"], label=\"Close Price\")\n", + " plt.plot(hist.index, hist[\"Close\"].rolling(window=50).mean(), label=\"50-day MA\")\n", + " plt.plot(hist.index, hist[\"Close\"].rolling(window=200).mean(), label=\"200-day MA\")\n", + " plt.title(f\"{ticker} Stock Price (Past Year)\")\n", + " plt.xlabel(\"Date\")\n", + " plt.ylabel(\"Price ($)\")\n", + " plt.legend()\n", + " plt.grid(True)\n", + "\n", + " # Save plot to file\n", + " os.makedirs(\"coding\", exist_ok=True)\n", + " plot_file_path = f\"coding/{ticker}_stockprice.png\"\n", + " plt.savefig(plot_file_path)\n", + " print(f\"Plot saved as {plot_file_path}\")\n", + " result[\"plot_file_path\"] = plot_file_path\n", + "\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "google_search_tool = FunctionTool(\n", + " google_search, description=\"Search Google for information, returns results with a snippet and body content\"\n", + ")\n", + "stock_analysis_tool = FunctionTool(analyze_stock, description=\"Analyze stock data and generate a plot\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining Agents\n", + "\n", + "Next, we will define the agents that will perform the tasks. We will create a `search_agent` that searches the web for information about a company, a `stock_analysis_agent` that retrieves stock information for a company, and a `report_agent` that generates a report based on the information collected by the other agents. " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "search_agent = AssistantAgent(\n", + " name=\"Google_Search_Agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " tools=[google_search_tool],\n", + " description=\"Search Google for information, returns top 2 results with a snippet and body content\",\n", + " system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n", + ")\n", + "\n", + "stock_analysis_agent = AssistantAgent(\n", + " name=\"Stock_Analysis_Agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " tools=[stock_analysis_tool],\n", + " description=\"Analyze stock data and generate a plot\",\n", + " system_message=\"Perform data analysis.\",\n", + ")\n", + "\n", + "report_agent = AssistantAgent(\n", + " name=\"Report_Agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " description=\"Generate a report based the search and results of stock analysis\",\n", + " system_message=\"You are a helpful assistant that can generate a comprehensive report on a given topic based on search and stock analysis. When you done with generating the report, reply with TERMINATE.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating the Team\n", + "\n", + "Finally, let's create a team of the three agents and set them to work on researching a company." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "team = RoundRobinGroupChat([stock_analysis_agent, search_agent, report_agent], max_turns=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We use `max_turns=3` to limit the number of turns to exactly the same number of agents in the team. This effectively makes the agents work in a sequential manner." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a financial report on American airlines\n", + "---------- Stock_Analysis_Agent ----------\n", + "[FunctionCall(id='call_tPh9gSfGrDu1nC2Ck5RlfbFY', arguments='{\"ticker\":\"AAL\"}', name='analyze_stock')]\n", + "[Prompt tokens: 64, Completion tokens: 16]\n", + "Plot saved as coding/AAL_stockprice.png\n", + "---------- Stock_Analysis_Agent ----------\n", + "[FunctionExecutionResult(content=\"{'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\", call_id='call_tPh9gSfGrDu1nC2Ck5RlfbFY')]\n", + "---------- Stock_Analysis_Agent ----------\n", + "Tool calls:\n", + "analyze_stock({\"ticker\":\"AAL\"}) = {'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\n", + "---------- Google_Search_Agent ----------\n", + "[FunctionCall(id='call_wSHc5Kw1ix3aQDXXT23opVnU', arguments='{\"query\":\"American Airlines financial report 2023\",\"num_results\":1}', name='google_search')]\n", + "[Prompt tokens: 268, Completion tokens: 25]\n", + "---------- Google_Search_Agent ----------\n", + "[FunctionExecutionResult(content=\"[{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\", call_id='call_wSHc5Kw1ix3aQDXXT23opVnU')]\n", + "---------- Google_Search_Agent ----------\n", + "Tool calls:\n", + "google_search({\"query\":\"American Airlines financial report 2023\",\"num_results\":1}) = [{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\n", + "---------- Report_Agent ----------\n", + "### American Airlines Financial Report\n", + "\n", + "#### Overview\n", + "American Airlines Group Inc. (NASDAQ: AAL) is a major American airline headquartered in Fort Worth, Texas. It is known as one of the largest airlines in the world by fleet size, revenue, and passenger kilometers flown. As of the current quarter in 2023, American Airlines has shown significant financial activities and stock performance noteworthy for investors and analysts.\n", + "\n", + "#### Stock Performance\n", + "- **Current Stock Price**: $17.40\n", + "- **52-Week Range**: The stock price has ranged from $9.07 to $18.09 over the past year, indicating considerable volatility and fluctuation in market interest.\n", + "- **Moving Averages**: \n", + " - 50-Day MA: $13.38\n", + " - 200-Day MA: $12.60\n", + " These moving averages suggest a strong upward trend in recent months as the 50-day moving average is positioned above the 200-day moving average, indicating bullish momentum.\n", + "\n", + "- **YTD Price Change**: $3.96\n", + "- **YTD Percent Change**: 29.46%\n", + " The year-to-date figures demonstrate a robust upward momentum, with the stock appreciating by nearly 29.5% since the beginning of the year.\n", + "\n", + "- **Trend**: The current stock trend for American Airlines is upward, reflecting positive market sentiment and performance improvements.\n", + "\n", + "- **Volatility**: 0.446, indicating moderate volatility in the stock, which may attract risk-tolerant investors seeking dynamic movements for potential profit.\n", + "\n", + "#### Recent Financial Performance\n", + "According to the latest financial reports of 2023 (accessed through a reliable source), American Airlines reported remarkable figures for both the fourth quarter and the full year 2023. Key highlights from the report include:\n", + "\n", + "- **Revenue Growth**: American Airlines experienced substantial revenue increases, driven by high demand for travel as pandemic-related restrictions eased globally.\n", + "- **Profit Margins**: The company managed to enhance its profitability, largely attributed to cost management strategies and increased operational efficiency.\n", + "- **Challenges**: Despite positive momentum, the airline industry faces ongoing challenges including fluctuating fuel prices, geopolitical tensions, and competition pressures.\n", + "\n", + "#### Strategic Initiatives\n", + "American Airlines has been focusing on several strategic initiatives to maintain its market leadership and improve its financial metrics:\n", + "1. **Fleet Modernization**: Continuation of investment in more fuel-efficient aircraft to reduce operating costs and environmental impact.\n", + "2. **Enhanced Customer Experience**: Introduction of new services and technology enhancements aimed at improving customer satisfaction.\n", + "3. **Operational Efficiency**: Streamlining processes to cut costs and increase overall effectiveness, which includes leveraging data analytics for better decision-making.\n", + "\n", + "#### Conclusion\n", + "American Airlines is demonstrating strong market performance and financial growth amid an evolving industry landscape. The company's stock has been on an upward trend, reflecting its solid operational strategies and recovery efforts post-COVID pandemic. Investors should remain mindful of external risks while considering American Airlines as a potential investment, supported by its current upward trajectory and strategic initiatives.\n", + "\n", + "For further details, investors are encouraged to review the full financial reports from American Airlines and assess ongoing market conditions.\n", + "\n", + "_TERMINATE_\n", + "[Prompt tokens: 360, Completion tokens: 633]\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Maximum number of turns 3 reached.\n", + "Total prompt tokens: 692\n", + "Total completion tokens: 674\n", + "Duration: 19.38 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a financial report on American airlines', type='TextMessage'), ToolCallRequestEvent(source='Stock_Analysis_Agent', models_usage=RequestUsage(prompt_tokens=64, completion_tokens=16), content=[FunctionCall(id='call_tPh9gSfGrDu1nC2Ck5RlfbFY', arguments='{\"ticker\":\"AAL\"}', name='analyze_stock')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='Stock_Analysis_Agent', models_usage=None, content=[FunctionExecutionResult(content=\"{'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\", call_id='call_tPh9gSfGrDu1nC2Ck5RlfbFY')], type='ToolCallExecutionEvent'), TextMessage(source='Stock_Analysis_Agent', models_usage=None, content='Tool calls:\\nanalyze_stock({\"ticker\":\"AAL\"}) = {\\'ticker\\': \\'AAL\\', \\'current_price\\': 17.4, \\'52_week_high\\': 18.09, \\'52_week_low\\': 9.07, \\'50_day_ma\\': 13.376799983978271, \\'200_day_ma\\': 12.604399962425232, \\'ytd_price_change\\': 3.9600000381469727, \\'ytd_percent_change\\': 29.46428691803602, \\'trend\\': \\'Upward\\', \\'volatility\\': 0.4461582174242901, \\'plot_file_path\\': \\'coding/AAL_stockprice.png\\'}', type='TextMessage'), ToolCallRequestEvent(source='Google_Search_Agent', models_usage=RequestUsage(prompt_tokens=268, completion_tokens=25), content=[FunctionCall(id='call_wSHc5Kw1ix3aQDXXT23opVnU', arguments='{\"query\":\"American Airlines financial report 2023\",\"num_results\":1}', name='google_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='Google_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content=\"[{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\", call_id='call_wSHc5Kw1ix3aQDXXT23opVnU')], type='ToolCallExecutionEvent'), TextMessage(source='Google_Search_Agent', models_usage=None, content='Tool calls:\\ngoogle_search({\"query\":\"American Airlines financial report 2023\",\"num_results\":1}) = [{\\'title\\': \\'American Airlines reports fourth-quarter and full-year 2023 financial ...\\', \\'link\\': \\'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx\\', \\'snippet\\': \\'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...\\', \\'body\\': \\'Just a moment... Enable JavaScript and cookies to continue\\'}]', type='TextMessage'), TextMessage(source='Report_Agent', models_usage=RequestUsage(prompt_tokens=360, completion_tokens=633), content=\"### American Airlines Financial Report\\n\\n#### Overview\\nAmerican Airlines Group Inc. (NASDAQ: AAL) is a major American airline headquartered in Fort Worth, Texas. It is known as one of the largest airlines in the world by fleet size, revenue, and passenger kilometers flown. As of the current quarter in 2023, American Airlines has shown significant financial activities and stock performance noteworthy for investors and analysts.\\n\\n#### Stock Performance\\n- **Current Stock Price**: $17.40\\n- **52-Week Range**: The stock price has ranged from $9.07 to $18.09 over the past year, indicating considerable volatility and fluctuation in market interest.\\n- **Moving Averages**: \\n - 50-Day MA: $13.38\\n - 200-Day MA: $12.60\\n These moving averages suggest a strong upward trend in recent months as the 50-day moving average is positioned above the 200-day moving average, indicating bullish momentum.\\n\\n- **YTD Price Change**: $3.96\\n- **YTD Percent Change**: 29.46%\\n The year-to-date figures demonstrate a robust upward momentum, with the stock appreciating by nearly 29.5% since the beginning of the year.\\n\\n- **Trend**: The current stock trend for American Airlines is upward, reflecting positive market sentiment and performance improvements.\\n\\n- **Volatility**: 0.446, indicating moderate volatility in the stock, which may attract risk-tolerant investors seeking dynamic movements for potential profit.\\n\\n#### Recent Financial Performance\\nAccording to the latest financial reports of 2023 (accessed through a reliable source), American Airlines reported remarkable figures for both the fourth quarter and the full year 2023. Key highlights from the report include:\\n\\n- **Revenue Growth**: American Airlines experienced substantial revenue increases, driven by high demand for travel as pandemic-related restrictions eased globally.\\n- **Profit Margins**: The company managed to enhance its profitability, largely attributed to cost management strategies and increased operational efficiency.\\n- **Challenges**: Despite positive momentum, the airline industry faces ongoing challenges including fluctuating fuel prices, geopolitical tensions, and competition pressures.\\n\\n#### Strategic Initiatives\\nAmerican Airlines has been focusing on several strategic initiatives to maintain its market leadership and improve its financial metrics:\\n1. **Fleet Modernization**: Continuation of investment in more fuel-efficient aircraft to reduce operating costs and environmental impact.\\n2. **Enhanced Customer Experience**: Introduction of new services and technology enhancements aimed at improving customer satisfaction.\\n3. **Operational Efficiency**: Streamlining processes to cut costs and increase overall effectiveness, which includes leveraging data analytics for better decision-making.\\n\\n#### Conclusion\\nAmerican Airlines is demonstrating strong market performance and financial growth amid an evolving industry landscape. The company's stock has been on an upward trend, reflecting its solid operational strategies and recovery efforts post-COVID pandemic. Investors should remain mindful of external risks while considering American Airlines as a potential investment, supported by its current upward trajectory and strategic initiatives.\\n\\nFor further details, investors are encouraged to review the full financial reports from American Airlines and assess ongoing market conditions.\\n\\n_TERMINATE_\", type='TextMessage')], stop_reason='Maximum number of turns 3 reached.')" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+UAAAIjCAYAAABlBbqXAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/GU6VOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3hUZfbA8e+09EYaSSAFQgm9S5XeEURxUWwgdrHruro/G5Z11bWLurpSVBAriIoUlQ7SlA4BQgglvfdkyv39cWcmCSmkT8r5PM88SWbuzH1ncjOZc895z6tRFEVBCCGEEEIIIYQQjU7r6AEIIYQQQgghhBCtlQTlQgghhBBCCCGEg0hQLoQQQgghhBBCOIgE5UIIIYQQQgghhINIUC6EEEIIIYQQQjiIBOVCCCGEEEIIIYSDSFAuhBBCCCGEEEI4iATlQgghhBBCCCGEg0hQLoQQQgghhBBCOIgE5UIIIUQT8vzzz6PRaEhNTXXI/ufNm0dERIRD9v3aa68RFRWFxWJxyP6bM6PRSGhoKB988IGjhyKEEKKGJCgXQgjR6D744AM0Gg2DBw+u1vZPPPEEGo2G66+/vsLbz549i0aj4T//+U+txvPjjz8yatQoAgMDcXNzo2PHjsyePZt169bZt4mPj+f555/nwIEDtdpHY1i6dCkajcZ+cXFxoUuXLtx///0kJSU5enhVys7O5tVXX+Uf//gHWm3Jx5PSz0er1RISEsLEiRPZvHlzg4zjgw8+YOnSpZfdbuXKlWg0Gv773/9WePu9996LwWDg4MGD9TzCihkMBh599FFefvllCgsLG2WfQggh6ocE5UIIIRrd8uXLiYiIYM+ePZw+fbrKbRVF4csvvyQiIoIff/yRnJyceh3Lf/7zH2bMmIFGo+Gpp57irbfeYtasWZw6dYqVK1fat4uPj2fhwoVNOii3eeGFF/j88895//33GTZsGB9++CFDhw4lPz//svf95JNPiI6OboRRlrV48WJMJhNz5swpd9uECRP4/PPPWbZsGffccw+HDh1i7Nix/PLLL/U+juoG5TfccAOTJ0/mySefLHfCY8+ePXz88cc88sgj9OnTp97HWJnbbruN1NRUVqxY0Wj7FEIIUXd6Rw9ACCFE6xIbG8vOnTv5/vvvufvuu1m+fDnPPfdcpdtv3ryZCxcu8PvvvzNp0iS+//575s6dWy9jMZlMvPjii0yYMIENGzaUuz05Oble9tPYpkyZwsCBAwG444478PPz48033+SHH36oMOgFyMvLw93dHYPB0JhDtVuyZAkzZszAxcWl3G1dunTh5ptvtv98zTXX0Lt3b95++22mTJnSmMMs48MPP6RHjx488sgj9kDYbDZz9913ExYWxvPPP9/gY1AUhcLCQlxdXfHx8WHixIksXbqU+fPnN/i+hRBC1A/JlAshhGhUy5cvp02bNkybNo3rrruO5cuXX3b77t27M2bMGMaPH3/Z7WsiNTWV7Oxshg8fXuHtgYGBgHpiYNCgQYCajbSVU5fOqH7zzTcMGDAAV1dX/P39ufnmm7l48WK5xzxx4gSzZ88mICAAV1dXunbtyv/93/9VOc64uDg6depEz549a1WGPnbsWEA9IQLqvHEPDw9iYmKYOnUqnp6e3HTTTfbbLp1TbrFYeOedd+jVqxcuLi4EBAQwefJk9u3bV2a7L774wv4a+Pr6csMNN3D+/PnLji82NpZDhw4xfvz4aj2fXr164e/vb38+27Zt429/+xthYWE4OzsTGhrKI488QkFBQZn7JSYmctttt9G+fXucnZ0JDg7m6quv5uzZswBERERw9OhRtmzZYv8djx49utJxRERE8Pzzz/Pll1+yceNGAN59910OHDjAhx9+iJubG0VFRTz33HN06tTJPrYnnniCoqKiMo+1ZMkSxo4dS2BgIM7OznTv3p0PP/ywwn1eddVVrF+/noEDB+Lq6lqmhH7ChAls376d9PT0ar2WQgghHE+CciGEEI1q+fLlXHvttTg5OTFnzhxOnTrF3r17K9y2qKiI7777zp7dnTNnDr///juJiYn1MpbAwEBcXV358ccfqwxiunXrxgsvvADAXXfdxeeff87nn3/OyJEjAXUu9+zZs9HpdLzyyivceeedfP/994wYMYLMzEz74xw6dIjBgwfz+++/c+edd/LOO+8wc+ZMfvzxx0r3HRMTw8iRI/H09GTz5s20bdu2xs8zJiYGAD8/P/t1JpOJSZMmERgYyH/+8x9mzZpV6f1vv/12Hn74YUJDQ3n11Vd58skncXFx4Y8//rBv8/LLL3PrrbfSuXNn3nzzTR5++GF+++03Ro4cWeY1qMjOnTsB6N+/f7WeT0ZGBhkZGfbn880335Cfn8+9997Le++9x6RJk3jvvfe49dZby9xv1qxZrFq1ittuu40PPviABx98kJycHM6dOwfA22+/Tfv27YmKirL/ji93wsRWon7vvfdy+vRpnn32WXtpu8ViYcaMGfznP/9h+vTpvPfee8ycOZO33nqrXH+EDz/8kPDwcP75z3/yxhtvEBoayn333ceiRYvK7TM6Opo5c+YwYcIE3nnnHfr27Wu/bcCAASiKYn9NhRBCNAOKEEII0Uj27dunAMrGjRsVRVEUi8WitG/fXnnooYcq3P7bb79VAOXUqVOKoihKdna24uLiorz11ltltouNjVUA5fXXX6/xmJ599lkFUNzd3ZUpU6YoL7/8srJ///5y2+3du1cBlCVLlpS5vri4WAkMDFR69uypFBQU2K//6aefFEB59tln7deNHDlS8fT0VOLi4so8hsVisX//3HPPKYCSkpKiHD9+XAkJCVEGDRqkpKenX/a5LFmyRAGUX3/9VUlJSVHOnz+vrFy5UvHz81NcXV2VCxcuKIqiKHPnzlUA5cknnyz3GHPnzlXCw8PtP//+++8KoDz44IPltrWN++zZs4pOp1NefvnlMrcfPnxY0ev15a6/1NNPP60ASk5OTrnbAOX2229XUlJSlOTkZGX37t3KuHHjFEB54403FEVRlPz8/HL3e+WVVxSNRmN/rTMyMqp1jPTo0UMZNWpUldtcavfu3YpWq1V8fX0VHx8fJTExUVEURfn8888VrVarbNu2rcz2H330kQIoO3bssF9X0XOYNGmS0rFjxzLXhYeHK4Cybt26CscSHx+vAMqrr75ao+cghBDCcSRTLoQQotEsX76ctm3bMmbMGAB7R/WVK1diNpsr3H7gwIF06tQJAE9PT6ZNm1avJewLFy5kxYoV9OvXj/Xr1/N///d/DBgwgP79+3P8+PHL3n/fvn0kJydz3333lZkPPW3aNKKiovj5558BSElJYevWrcyfP5+wsLAyj6HRaMo97pEjRxg1ahQRERH8+uuvtGnTptrPafz48QQEBBAaGsoNN9yAh4cHq1atol27dmW2u/feey/7WN999x0ajabCef+2cX///fdYLBZmz55Namqq/RIUFETnzp3ZtGlTlftIS0tDr9fj4eFR4e2ffvopAQEBBAYGMnjwYHbs2MGjjz7Kww8/DICrq6t927y8PFJTUxk2bBiKovDXX3/Zt3FycmLz5s1kZGRc9nnXxBVXXME999xDeno6r7zyir2a4ZtvvqFbt25ERUWVeV1s0wlKvy6ln0NWVhapqamMGjWKM2fOkJWVVWZ/HTp0YNKkSRWOxXacOGpJPSGEEDUnjd6EEEI0CrPZzMqVKxkzZox9LjDA4MGDeeONN/jtt9+YOHGi/frMzEzWrl3L/fffX6ZD+/Dhw/nuu+84efIkXbp0qZexzZkzhzlz5pCdnc3u3btZunQpK1asYPr06Rw5cqTC5mM2cXFxAHTt2rXcbVFRUWzfvh2AM2fOANCzZ89qjWn69Om0bduW9evXVxqsVmbRokV06dIFvV5P27Zt6dq1a5llxgD0ej3t27e/7GPFxMQQEhKCr69vpducOnUKRVHo3LlzhbfXtXnc1Vdfzf33349Go8HT05MePXrg7u5uv/3cuXM8++yzrFmzplzAbQtonZ2defXVV3nsscdo27YtQ4YM4aqrruLWW28lKCioTuMD7D0HbA32QH1djh8/TkBAQIX3Kd1IcMeOHTz33HPs2rWrXJf8rKwsvL297T936NCh0nEoigJUfKJHCCFE0yRBuRBCiEbx+++/k5CQwMqVK8ssNWazfPnyMkH5N998Q1FREW+88QZvvPFGhdsvXLiwXsfo5eXFhAkTmDBhAgaDgWXLlrF7925GjRpVr/upjlmzZrFs2TKWL1/O3XffXaP7XnHFFWWCw4o4OzuXC9Rry2KxoNFo+OWXX9DpdOVuv9xJBT8/P0wmEzk5OXh6epa7vX379pU2gTObzUyYMIH09HT+8Y9/EBUVhbu7OxcvXmTevHlYLBb7tg8//DDTp09n9erVrF+/nmeeeYZXXnmF33//nX79+tXwWV+exWKhV69evPnmmxXeHhoaCqgnPsaNG0dUVBRvvvkmoaGhODk5sXbtWt56660yzwHKZtUvZTsp4e/vX0/PQgghREOToFwIIUSjWL58OYGBgRU2rvr+++9ZtWoVH330kT3gWL58OT179qywbPq///0vK1asqPegvLSBAweybNkyEhISgMozj+Hh4YDafMtWlmwTHR1tv71jx46AWpZeHa+//jp6vZ777rsPT09Pbrzxxlo9j7qKjIxk/fr1pKenV5otj4yMRFEUOnToUKvqhaioKEDtwt67d+8a3ffw4cOcPHmSZcuWlWnsZuuGXtFYH3vsMR577DFOnTpF3759eeONN/jiiy+A+s0wR0ZGcvDgQcaNG1fl4/74448UFRWxZs2aMlMbLlf2XxFbFUq3bt1qPmAhhBAOIXPKhRBCNLiCggK+//57rrrqKq677rpyl/vvv5+cnBzWrFkDwPnz59m6dSuzZ8+ucPvbbruN06dPs3v37jqNKz8/n127dlV42y+//AKUlKXbyqUv7SQ+cOBAAgMD+eijj8osc/XLL79w/Phxpk2bBkBAQAAjR45k8eLF9m7fNraS49I0Gg0ff/wx1113HXPnzrW/No1t1qxZKIpS4QkQ27ivvfZadDodCxcuLPdcFEUhLS2tyn0MHToUoNwSa9Vhy8yX3q+iKLzzzjtltsvPz6ewsLDMdZGRkXh6epb5vbm7u1+2W3x1zZ49m4sXL/LJJ5+Uu62goIC8vLxKn0NWVhZLliyp8T7379+PRqOxv6ZCCCGaPsmUCyGEaHBr1qwhJyeHGTNmVHj7kCFDCAgIYPny5Vx//fWsWLECRVEq3X7q1Kno9XqWL1/O4MGD7df/9ttv5QIvgJkzZ1Y4lzs/P59hw4YxZMgQJk+eTGhoKJmZmaxevZpt27Yxc+ZMe1lzZGQkPj4+fPTRR3h6euLu7s7gwYPp0KEDr776KrfddhujRo1izpw5JCUl8c477xAREcEjjzxi39+7777LiBEj6N+/P3fddRcdOnTg7Nmz/Pzzzxw4cKDc+LRaLV988QUzZ85k9uzZrF27tlw2vqGNGTOGW265hXfffZdTp07Zl/ratm0bY8aM4f777ycyMpKXXnqJp556irNnzzJz5kw8PT2JjY1l1apV3HXXXTz++OOV7qNjx4707NmTX3/9lfnz59dofFFRUURGRvL4449z8eJFvLy8+O6778rNLT958iTjxo1j9uzZdO/eHb1ez6pVq0hKSuKGG26wbzdgwAA+/PBDXnrpJTp16kRgYGCtX/NbbrmFr7/+mnvuuYdNmzYxfPhwzGYzJ06c4Ouvv7avNT5x4kScnJyYPn06d999N7m5uXzyyScEBgbaKzWqa+PGjQwfPrzM8ndCCCGaOIf0fBdCCNGqTJ8+XXFxcVHy8vIq3WbevHmKwWBQUlNTlV69eilhYWFVPubo0aOVwMBAxWg02pdEq+zy+eefV/gYRqNR+eSTT5SZM2cq4eHhirOzs+Lm5qb069dPef3115WioqIy2//www9K9+7dFb1eX255tK+++krp16+f4uzsrPj6+io33XSTfQmy0o4cOaJcc801io+Pj+Li4qJ07dpVeeaZZ+y3l14SzSY/P18ZNWqU4uHhofzxxx+Vvia2JdH27t1b5Ws3d+5cxd3dvdLbSi+JpiiKYjKZlNdff12JiopSnJyclICAAGXKlCnllo777rvvlBEjRiju7u6Ku7u7EhUVpSxYsECJjo6ucjyKoihvvvmm4uHhUW5pMEBZsGBBlfc9duyYMn78eMXDw0Px9/dX7rzzTuXgwYNlfkepqanKggULlKioKMXd3V3x9vZWBg8erHz99ddlHisxMVGZNm2a4unpqQDVXh6tste+uLhYefXVV5UePXoozs7OSps2bZQBAwYoCxcuVLKysuzbrVmzRundu7fi4uKiREREKK+++qqyePFiBVBiY2Pt24WHhyvTpk2rcAyZmZmKk5OT8r///a9aYxZCCNE0aBSlgpo5IYQQQohGlJWVRceOHXnttde4/fbbHT2cZuntt9/mtddeIyYmpspmcEIIIZoWmVMuhBBCCIfz9vbmiSee4PXXXy/XbVxcntFo5M033+Tpp5+WgFwIIZoZyZQLIYQQQgghhBAOIplyIYQQQgghhBDCQSQoF0IIIYQQQgghHESCciGEEEIIIYQQwkEkKBdCCCGEEEIIIRxE7+gBNDSLxUJ8fDyenp5oNBpHD0cIIYQQQgghRAunKAo5OTmEhISg1VadC2/xQXl8fDyhoaGOHoYQQgghhBBCiFbm/PnztG/fvsptWnxQ7unpCagvhpeXl4NH07IZjUY2bNjAxIkTMRgMjh6OaAbkmBG1JceOqC05dkRdyTEkakuOndYlOzub0NBQezxalRYflNtK1r28vCQob2BGoxE3Nze8vLzkjUZUixwzorbk2BG1JceOqCs5hkRtybHTOlVnCrU0ehNCCCGEEEIIIRxEgnIhhBBCCCGEEMJBJCgXQgghhBBCCCEcpMXPKa8ORVEwmUyYzWZHD6VZMxqN6PV6CgsLm+VrqdPp0Ov1snSeEEIIIYQQotG0+qC8uLiYhIQE8vPzHT2UZk9RFIKCgjh//nyzDWzd3NwIDg7GycnJ0UMRQgghhBBCtAKtOii3WCzExsai0+kICQnBycmp2QaTTYHFYiE3NxcPDw+02uY1M0JRFIqLi0lJSSE2NpbOnTs3u+cghBBCCCGEaH5adVBeXFyMxWIhNDQUNzc3Rw+n2bNYLBQXF+Pi4tIsA1pXV1cMBgNxcXH25yGEEEIIIYQQDan5RU4NoDkGkKJhyLEghBBCCCGEaEwSgQghhBBCCCGEEA4iQbkQQgghhBBCCOEgEpS3YBqNhtWrVzt6GDU2evRoHn74YUcPQwghhBBCCCEanATlzVRiYiIPPPAAHTt2xNnZmdDQUKZPn85vv/3m6KHZPf/882g0GjQaDXq9noiICB555BFyc3OrvN/333/Piy++2EijFEIIIYQQQgjHadXd15urs2fPMnz4cHx8fHj99dfp1asXRqOR9evXs2DBAk6cOOHoIdr16NGDX3/9FZPJxI4dO5g/fz75+fn897//LbdtcXExTk5O+Pr6OmCkQgghhBBCCNH4JFN+CUVRyC82NfpFUZRqj/G+++5Do9GwZ88eZs2aRZcuXejRowePPvoof/zxR6X3O3z4MGPHjsXV1RU/Pz/uuuuuMlnrzZs3c8UVV+Du7o6Pjw/Dhw8nLi7OfvsPP/xA//79cXFxoWPHjixcuBCTyVTlWPV6PUFBQbRv357rr7+em266iTVr1gBqJr1v377873//o0OHDvYlyC4tXy8qKuIf//gHoaGhODs706lTJz799FP77UeOHGHKlCl4eHjQtm1bbrnlFlJTU6v9egohhBBCCCGEo0im/BIFRjPdn13f6Ps99sIk3Jwu/+tIT09n3bp1vPzyy7i7u5e73cfHp8L75eXlMWnSJIYOHcrevXtJTk7mjjvu4P7772fp0qWYTCZmzpzJnXfeyZdffklxcTF79uxBo9EAsG3bNm699VbeffddrrzySmJiYrjrrrsAeO6556r9PF1dXSkuLrb/fPr0ab777ju+//57dDpdhfe59dZb2bVrF++++y59+vQhNjbWHnRnZmYyduxY7rjjDt566y0KCgr4xz/+wezZs/n999+rPS4hhBBCCCGEcAQJypuZ06dPoygKUVFRNbrfihUrKCws5LPPPrMH8++//z7Tp0/n1VdfxWAwkJWVxVVXXUVkZCQA3bp1s99/4cKFPPnkk8ydOxeAjh078uKLL/LEE09UOyjfv38/K1asYOzYsfbriouL+eyzzwgICKjwPidPnuTrr79m48aNjB8/3r5vm/fff59+/frxr3/9y37d4sWLCQ0N5eTJk3Tp0qVaYxNCCCGEEEIIR5Cg/BKuBh3HXpjkkP1WR03K3Es7fvw4ffr0KZNdHz58OBaLhejoaEaOHMm8efOYNGkSEyZMYPz48cyePZvg4GAADh48yI4dO3j55Zft9zebzRQWFpKfn4+bm1uF+z18+DAeHh6YzWaKi4uZNm0a77//vv328PDwSgNygAMHDqDT6Rg1alSFtx88eJBNmzbh4eFR7raYmBgJyoUQQgghhGghzBaF344n4eakZ0Rnf0cPp95IUH4JjUZTrTJyR+ncuTMajaZBmrktWbKEBx98kHXr1vHVV1/x9NNPs3HjRoYMGUJubi4LFy7k2muvLXc/21zwinTt2pU1a9ag1+sJCQnBycmpzO0VleCX5urqWuXtubm59mz/pWwnFIQQQgghhBDNV5HJzJe7z7Fk51ni0vLp1c6b4Z2G26faNndNN/oUFfL19WXSpEksWrSIBx98sFxQm5mZWeG88m7durF06VLy8vLs99mxYwdarZauXbvat+vXrx/9+vXjqaeeYujQoaxYsYIhQ4bQv39/oqOj6dSpU43G6+TkVOP7lNarVy8sFgtbtmyxl6+X1r9/f7777jsiIiLQ6+VwFkIIIYQQoqV5c+NJ/rvlDADergaGd/LHaFZw0reMoFy6rzdDixYtwmw2c8UVV/Ddd99x6tQpjh8/zrvvvsvQoUMrvM9NN92Ei4sLc+fO5ciRI2zatIkHHniAW265hbZt2xIbG8tTTz3Frl27iIuLY8OGDZw6dco+r/zZZ5/ls88+Y+HChRw9epTjx4+zcuVKnn766QZ9rhEREcydO5f58+ezevVqYmNj2bx5M19//TUACxYsID09nTlz5rB3715iYmJYv349t912G2azuUHHJoQQQgghhGh4iVmFAMwe2J5dT43lySlROOlbTijbcp5JK9KxY0f+/PNPxowZw2OPPUbPnj2ZMGECv/32Gx9++GGF93Fzc2P9+vWkp6czaNAgrrvuOsaNG2ef3+3m5saJEyfsS6zdddddLFiwgLvvvhuASZMm8dNPP7FhwwYGDRrEkCFDeOuttwgPD2/w5/vhhx9y3XXXcd999xEVFcWdd95JXl4eACEhIezYsQOz2czEiRPp1asXDz/8MD4+Pmi1cngLIYQQQgjR3JnMal+tHiHeTXqqcW1plNp2DmsmsrOz8fb2JisrCy8vrzK3FRYWEhsbW2aNbFF7FouF7OxsvLy8mm1ALMdE4zIajaxdu5apU6diMBgcPRzRjMixI2pLjh1RV3IMidqSY6f27vpsHxuOJfHyNT25aXDDJwXrQ1Vx6KWaZ+QkhBBCCCGEEKJVMFvUPLJe2zLmkF9KgnIhhBBCCCGEEE2W0R6Ut8zwtWU+KyGEEEIIIYQQLYLJbAFAr5NMuRBCCCGEEEII0ahMkikXQgghhBBCCCEcQzLlQgghhBBCCCGEg9gy5QYJyoUQQgghhBBCiMZlW6dcJ+XrQgghhBBCCCFE4zJZ1PJ1gyyJJoQQQgghhBBCNC57ozddywxfW+azEvVm6dKl+Pj4OHoYQgghhBBCiFaqpHxdMuWiiXj++efRaDRlLlFRUWW2KSwsZMGCBfj5+eHh4cGsWbNISkpy0IirdvbsWTQaDTqdjosXL5a5LSEhAb1ej0aj4ezZs+XuO2nSJHQ6HXv37m2k0QohhBBCCCEak637ujR6E01Kjx49SEhIsF+2b99e5vZHHnmEH3/8kW+++YYtW7YQHx/Ptdde66DRVk+7du347LPPyly3bNky2rVrV+H2586dY+fOndx///0sXry4MYYohBBCCCGEaGS28nXJlDeArVu3Mn36dEJCQtBoNKxevbrM7Zdmg22X119/veEGpShQnNf4F0Wp0TD1ej1BQUH2i7+/v/22rKwsPv30U958803Gjh3LgAEDWLJkCTt37uSPP/6o8nGXLl1KWFgYbm5uXHPNNaSlpZW5PSYmhquvvpq2bdvi4eHBoEGD+PXXX+23v/baa/Tu3bvc4/bt25dnnnmmyn3PnTuXJUuWlLluyZIlzJ07t8LtlyxZwlVXXcW9997Ll19+SUFBQZWPL4QQQgghhGh+SpZEa5k5Zb0jd56Xl0efPn2YP39+hVnchISEMj//8ssv3H777cyaNavhBmXMh3+FNNzjV+af8eDkXu3NT506RUhICC4uLgwdOpRXXnmFsLAwAPbv34/RaGT8+PH27aOioggLC2PXrl0MGTKkwsfcvXs3t99+O6+88gozZ85k3bp1PPfcc2W2yc3NZerUqbz88ss4Ozvz2WefMX36dKKjo2nfvj033XQTr776Knv37mXQoEEA/PXXXxw6dIjvv/++yuc0Y8YMPvroI7Zv386IESPYvn07GRkZTJ8+nRdffLHMtoqisGTJEhYtWkRUVBSdOnXi22+/5ZZbbqn2ayiEEEIIIYRo+ozW8nV9C82UOzQonzJlClOmTKn09qCgoDI///DDD4wZM4aOHTs29NCatMGDB7N06VK6du1KQkICCxcu5Morr+TIkSN4enqSmJiIk5NTuQZtbdu2JTExsdLHfeedd5g8eTJPPPEEAF26dGHnzp2sW7fOvk2fPn3o06eP/ecXX3yRVatWsWbNGu677z7atWvHxIkTWbJkiT0oX7JkCaNGjbrs781gMHDzzTezePFiRowYweLFi7n55psxGAzltv3111/Jz89n0qRJANx88818+umnEpQLIYQQQgjRwpht3ddb6DrlDg3KayIpKYmff/6ZZcuWVbldUVERRUVF9p+zs7MBMBqNGI3GMtsajUYURcFisWCxrn2HzgWevFC/g68OnQvYxnAZtkAUoGfPngwaNIgOHTqwcuVKbr/9dvtzsVTweLbn26tXL+Li4gAYMWIEa9eu5fjx48ycObPM/YYMGcK6devs1+Xm5rJw4ULWrl1LQkICJpOJgoIC4uLiUKwl+HfccQd33HEH//nPf9BqtaxYsYI33nijwvGUHqfFYmHevHmMGDGCl156iW+++YYdO3ZgMpnst9u2/fTTT5k9ezZarRaLxcL111/P3//+d06dOkVkZGS1XsfKxqIoCkajEZ1OV+vHEdVj+5u89G9TiMuRY0fUlhw7oq7kGBK1JcdO7dky5SjmZvP61WSczSYoX7ZsGZ6enpdtVvbKK6+wcOHCctdv2LABNze3MtfZ5mXn5uZSXFxcr+OtscKcWt9Vq9USGRnJsWPHyM7OxsvLi+LiYs6fP4+3t7d9u4SEBHx8fMjOzubLL7+0B7suLi5kZ2djNpspKiqyn8gAtYu7oij26x555BE2b97Miy++SIcOHXB1dWXu3Lnk5uaSk6M+h1GjRuHk5MSKFStwcnKiuLiYiRMnlnnc0nJzcwF1OkOvXr3o3Lkz119/PV26dCEsLIzDhw/bt8vOziYjI4PVq1djNBr56KOP7I9jNpv56KOPLjt3vSrFxcUUFBSwdetW++sjGt7GjRsdPQTRTMmxI2pLjh1RV3IMidqSY6fmTGYdoGHLpt/xcnL0aKonPz+/2ts2m6B88eLF3HTTTbi4uFS53VNPPcWjjz5q/zk7O5vQ0FAmTpyIl5dXmW0LCws5f/48Hh4el33cpiw3N5ezZ88yd+5cvLy8uPLKKzEYDOzZs8c+/z46OpoLFy4wevRovLy86NmzZ7nH6dGjBwcOHCjzOh04cACNRmO/bt++fdx2223ceOON9n2fP38eJycnPD09ycnJoU2bNsydO5evvvoKJycnbrjhBtq2bVvp+D08PABwd3fHy8uL22+/nfvvv59Fixbh5eWFu7u7fTsvLy8+++wz2rdvX26O+saNG3nzzTf597//Xessd2FhIa6urowcObJZHxPNhdFoZOPGjUyYMKHCaQpCVEaOHVFbcuyIupJjSNSWHDu1Y7EoKLvUExkTJ4zH1715ROWVJSQr0iyC8m3bthEdHc1XX3112W2dnZ1xdnYud73BYCh38JvNZjQaDVqtFm0zmp/w+OOPM336dMLDw4mPj+e5555Dp9Nx4403otVqadOmDbfffjuPP/44/v7+eHl58cADDzB06FCGDRtW6eM+9NBDDB8+nDfffJOrr76a9evXs379egD769O5c2dWrVrFjBkz0Gg0PPPMM1gsFntnfFC75t95551069YNgB07dlT5+tpus/0e7r77bq6//np8fHzK/G5s3y9evJjrrruuXJf38PBw/vnPf7JhwwamTZtWq9dWq9Wi0WgqPF5Ew5HXW9SWHDuituTYEXUlx5CoLTl2aqbIZLZ/7+ri1Gxeu5qMs1lEop9++ikDBgwo02CsNbtw4QJz5syha9euzJ49Gz8/P/744w8CAgLs27z11ltcddVVzJo1i5EjRxIUFHTZ7udDhgzhk08+4Z133qFPnz5s2LCBp59+usw2b775Jm3atGHYsGFMnz6dSZMm0b9//3KP1blzZ4YNG0ZUVBSDBw+u0fPT6/X4+/uj15c/Z7R//34OHjxYYQd+b29vxo0bx6efflqj/QkhhBBCCCGaJpO5ZOloQzNKpNaEQzPlubm5nD592v5zbGwsBw4cwNfX1768V3Z2Nt988w1vvPGGo4bZ5KxcufKy27i4uLBo0SIWLVpUo8eeP38+8+fPL3PdY489Zv8+IiKC33//vcztCxYsAMo2llMUhfj4eO67777L7jMiIsLeJK4iffv2td9+uW3Xrl172f0JIYQQQgghmgfbGuUAOlkSrf7t27ePMWPG2H+2zQWfO3cuS5cuBdQAVFEU5syZ44ghilpISUnh66+/JjExkdtuu83RwxFCCCGEEEI0UyZzSeLPoJOgvN6NHj26yqwnwF133cVdd93VSCMS9SEoKAh/f38+/vhj2rRp4+jhCCGEEEIIIZopW6Zcpy3pYdXSNItGb6J5MZvNzapxnhBCCCGEEKJpKh2Ut1QSOQkhhBBCCCGEaJJs5esGCcqFEEIIIYQQQojGZcuU63UtN3Rtuc9MCCGEEEIIIUSzZlsSTS+ZciGEEEIIIYQQonEZreXr+hbaeR0kKBdCCCGEEEII0USZbeXrLbiRdMt9ZkIIIYQQQgghmjWTRTLlQlzWvHnzmDlzpqOHIYQQQgghhGhhjDKnXDRFr7zyCoMGDcLT05PAwEBmzpxJdHR0mW0KCwtZsGABfn5+eHh4MGvWLJKSkspsc+7cOaZNm4abmxuBgYH8/e9/x2QyNeZTqbalS5ei0Wjo1q1budu++eYbNBoNERER5W4rKCjA19cXf39/ioqKGmGkQgghhBBCiPoi5euiSdqyZQsLFizgjz/+YOPGjRiNRiZOnEheXp59m0ceeYQff/yRb775hi1bthAfH8+1115rv91sNjNt2jSKi4vZuXMny5YtY+nSpTz77LOOeErV4u7uTnJyMrt27Spz/aeffkpYWFiF9/nuu+/o0aMHUVFRrF69uhFGKYQQQgghhKgv0uitFVIUhXxjfqNfFEWp9hjXrVvHvHnz6NGjB3369GHp0qWcO3eO/fv3A5CVlcWnn37Km2++ydixYxkwYABLlixh586d/PHHHwBs2LCBY8eO8cUXX9C3b1+mTJnCiy++yKJFiyguLq5032azmUcffRQfHx/8/Px44oknyo193bp1jBgxwr7NVVddRUxMjP32sWPHcv/995e5T0pKCk5OTvz222+V7luv13PjjTeyePFi+3UXLlxg8+bN3HjjjRXe59NPP+Xmm2/m5ptv5tNPP630sYUQQgghhBBNj31JtBa8Trne0QNoagpMBQxeMbjR97v7xt24Gdxqdd+srCwAfH19Adi/fz9Go5Hx48fbt4mKiiIsLIxdu3YxZMgQdu3aRa9evWjbtq19m0mTJnHvvfdy9OhR+vXrV+G+3njjDZYuXcrixYvp1q0bb7zxBqtWrWLs2LH2bfLy8nj00Ufp3bs3ubm5PPvss1xzzTUcOHAArVbLHXfcwf33388bb7yBs7MzAF988QXt2rUr8zgVmT9/PqNHj+add97Bzc2NpUuXMnny5DLPwyYmJoZdu3bx/fffoygKjzzyCHFxcYSHh1fzlRVCCCGEEEI4kskic8pFE2exWHj44YcZPnw4PXv2BCAxMREnJyd8fHzKbNu2bVsSExPt21wayNp+tm1TkbfffpunnnqKa6+9lm7duvHRRx/h7e1dZptZs2Zx7bXX0qlTJ/r27cvixYs5fPgwx44dA7CX0f/www/2+yxdupR58+ah0VT9x9avXz86duzIt99+i6IoLF26lPnz51e47eLFi5kyZQpt2rTB19eXSZMmsWTJkiofXwghhBBCCNF02Luvt+CgXDLll3DVu7L7xt0O2W9tLFiwgCNHjrB9+/Z6Hc+5c+fo3r27/ed//vOfLFiwgISEBAYPLqkk0Ov1DBw4sEwJ+6lTp3j++efZvXs3qampWKx/SOfOnaNnz564uLhwyy23sHjxYmbPns2ff/7JkSNHWLNmTbXGNn/+fJYsWUJYWBh5eXlMnTqV999/v8w2ZrOZZcuW8c4779ivu/nmm3n88cd59tln0bbgRhFCCCGEEEK0FLbydYOUr7ceGo2m1mXkje3+++/np59+YuvWrbRv395+fVBQEMXFxWRmZpbJliclJREUFGTfZs+ePWUez9adPSgoiJCQEA4cOGC/zVYaXx1XX3014eHhfPLJJ4SEhGCxWOjZs2eZuep33HEHffv25cKFCyxZsoSxY8dWu6z8pptu4oknnuD555/nlltuQa8vfxivX7+eixcvcv3115e53mw289tvvzFhwoRqPx8hhBBCCCGEY9jK13UtOFPeck83tGCKonD//fezatUqfv/9dzp06FDm9gEDBmAwGMo0TYuOjubcuXMMHToUgKFDh3L48GGSk5Pt22zcuBEvLy+6d++OXq+nU6dO9ouvry/e3t4EBweze3dJJYHJZLI3mANIT08nOjqap59+mnHjxtGtWzcyMjLKPYdevXoxcOBAPvnkE1asWFFpCXpFfH19mTFjBlu2bKn0fp9++ik33HADBw4cKHO54YYbpOGbEEIIIYQQzYTJ2n3d0IK7r0umvBlasGABK1as4IcffsDT09M+B9zb2xtXV1e8vb25/fbbefTRR/H19cXLy4sHHniAoUOHMmTIEAAmTpxI9+7dueWWW3jttddITEzk6aefZsGCBfbmaxV56KGH+Pe//03nzp2JiorizTffJDMz0367reP6xx9/THBwMOfOnePJJ5+s8LFsDd/c3d255ppravQaLF26lA8++AA/P79yt6WkpPDjjz+yZs0a+zx7m1tvvZVrrrmG9PT0GmX/hRBCCCGEEI3PJOuUi6boww8/JCsri9GjRxMcHGy/fPXVV/Zt3nrrLa666ipmzZrFyJEjCQoK4vvvv7ffrtPp+Omnn9DpdAwdOpSbb76ZW2+9lRdeeKHKfT/22GPccsstzJ07l6FDh+Lp6VkmoNZqtaxYsYL9+/fTs2dPHnnkEV5//fUKH2vOnDno9XrmzJmDi4tLjV4DV1fXCgNygM8++wx3d3fGjRtX7rZx48bh6urKF198UaP9CSGEEEIIIRqfLVOuk0y5aEqqs6a5i4sLixYtYtGiRZVuEx4eztq1a2u0b71ez9tvv83bb79d7jZbQ7fx48fbO61XNebU1FQKCwu5/fbbL7vfefPmMW/evEpvf/jhh3n44YcB9cTBY489VuF2Tk5OFZbTCyGEEEIIIZoeW6bc0ILnlEtQLhqd0WgkLS2Np59+miFDhtC/f39HD0kIIYQQQgjRBJU0emu5Rd4t95mJJmvHjh0EBwezd+9ePvroI0cPRwghhBBCCNFESaM3IRrA6NGjq1WCL4QQQgghhGjdjNZ1yvUtOCiXTLkQQgghhBBCiCbJLN3XWwfJ2gobORaEEEIIIYRoOozWZtL6FtzorVUH5QaDAYD8/HwHj0Q0FbZjwXZsCCGEEEIIIRzHZC9fb7mha6ueU67T6fDx8SE5ORkANzc3NJqWewamoVksFoqLiyksLETbzMpLFEUhPz+f5ORkfHx80Ol0jh6SEEIIIYQQrV5J+XrLjdNadVAOEBQUBGAPzEXtKYpCQUEBrq6uzfbkho+Pj/2YEEIIIYQQQjiW0dp9vSU3emv1QblGoyE4OJjAwECMRqOjh9OsGY1Gtm7dysiRI5tl+bfBYJAMuRBCCCGEEE2IrXzdIOXrLZ9Op5OArI50Oh0mkwkXF5dmGZQLIYQQQgghmhaTtXxd14LL11vu6QYhhBBCCCGEEM2aSbqvCyGEEEIIIYQQjtEaytdb7jMTQgghhBBCCNGs2TLlUr4uhBBCCCGEEEI0spJMuQTlQgghhBBCCCFEoypp9NZyQ9eW+8yEEEIIIYQQQjRrtvJ1yZQLIYQQQgghhBCNzGgtX9dLplwIIYQQQgghhGhcZlmnXAghhBBCCCGEcAyTWcrXhRBCCCGEEEIIh7CXr8s65UIIIYQQQgghROOyla/rpXxdCCGEEEIIIYRoXEZr93UJyhvI1q1bmT59OiEhIWg0GlavXl1um+PHjzNjxgy8vb1xd3dn0KBBnDt3rvEHK4QQQgghhBCiUZmkfL1h5eXl0adPHxYtWlTh7TExMYwYMYKoqCg2b97MoUOHeOaZZ3BxcWnkkQohhBBCCCGEaGytoXxd78idT5kyhSlTplR6+//93/8xdepUXnvtNft1kZGRjTE0IYQQQgghhBAOZrR2X9e34O7rDg3Kq2KxWPj555954oknmDRpEn/99RcdOnTgqaeeYubMmZXer6ioiKKiIvvP2dnZABiNRoxGY0MPu1Wzvb7yOovqkmNG1JYcO6K25NgRdSXHkKgtOXZqxxaUayyWZvXa1WSsGkVRlAYcS7VpNBpWrVplD7gTExMJDg7Gzc2Nl156iTFjxrBu3Tr++c9/smnTJkaNGlXh4zz//PMsXLiw3PUrVqzAzc2tIZ+CEEIIIYQQQoh69OQeHQVmDf/sa6Ktq6NHU335+fnceOONZGVl4eXlVeW2TTYoj4+Pp127dsyZM4cVK1bYt5sxYwbu7u58+eWXFT5ORZny0NBQUlNTL/tiiLoxGo1s3LiRCRMmYDAYHD0c0QzIMSNqS44dUVty7Ii6kmNI1JYcO7XT58XfyC8289sjIwjzbT5J1uzsbPz9/asVlDfZ8nV/f3/0ej3du3cvc323bt3Yvn17pfdzdnbG2dm53PUGg0EO/kYir7WoKTlmRG3JsSNqS44dUVdyDInakmOnZkzWRm+uzk7N6nWryVibbF95JycnBg0aRHR0dJnrT548SXh4uINGJYQQQgghhBCisZjMLX+dcodmynNzczl9+rT959jYWA4cOICvry9hYWH8/e9/5/rrr2fkyJH2OeU//vgjmzdvdtyghRBCCCGEEEI0OItFwZoob9HrlDs0KN+3bx9jxoyx//zoo48CMHfuXJYuXco111zDRx99xCuvvMKDDz5I165d+e677xgxYoSjhiyEEEIIIYQQohHYStcBdJIpbxijR4/mcn3m5s+fz/z58xtpREIIIYQQQgghmgKTxWL/3tCC1ylvuTUAQgghhBBCCCGaLaO5JIGr17bc0LXlPjMhhBBCCCGEEM2W2VI6KJdMuRBCCCGEEEII0Whsnde1GtBKUC6EEEIIIYQQQjQeozVT3pI7r4ME5UIIIYQQQgghmiCzdU55Sy5dBwnKhRBCCCGEEEI0QUZr93UJyoUQQgghhBBCiEZmsmbKDVK+LoQQQgghhBBCNC7bOuU6yZQLIUT9MZotfLL1DL8eS8JSapkLIYQQQgghSmstmXK9owcghGhdvt1/gZfXHgcg3NeNkb4apjp4TEIIIYQQoumxZcr1OsmUCyFEvdkfl2H/Pi49ny9Oa8kpNDpwREIIIYQQoimyZcqlfF0IIerRoQuZALxzQ1/8PZxQ0HA6Jc+xgxJCCCGEEE2OyTrV0aBt2WFry352QogmJa/IxOnkXACGdvSjc6AHADESlAshhBBCiEvYgnLJlAshRD05Gp+NRYEgLxcCvVyIDHAHJCgXQgghhBDlmczqnHKDzCkXQoj6YStd793eG6BUUJ7rqCEJIYQQQogmymidU65v4d3XW/azE0I4XExKLnFpaib84IUsAPqE+gBIplwIIYQQQlTK3ErK12VJNCFEg8ktMjHz/R0owK+PjqogU67OKb+QUUCh0YyLQeegkQohhBBCiKbGtiSalK8LIUQtRSdmk1NkIrfIxNOrDxOXlg9A73Y+AAR4OOGqU7AoEJsq2XIhhBBCCFHCXr4u3deFEKJ2TiTm2L//9XgyAOF+bni7GQDQaDS0dVVvt3VlF0IIIYQQAsBszZTrW3j5ugTlQogGc9IalDuVas7Ru71PmW3auqpnQCUoF0IIIYQQpZU0epOgXAghasWWKX9ofGdcDOrbTR/rfHIbe1AuHdiFEEIIIUQptiXRWnr3dWn0JoRoEIqiEJ2kBuWjugQQ7O3CV3vPM6NvSJnt2rqpX2MkUy6EEEIIIUoxWWxzylt2plyCciFEg0jJKSIz34hWA50CPejZzptr+7cvt12QNVN+JjUPs0Vp8UteVFdWgZFfjyVxMimH24Z3IMjbxdFDEkIIIYRoVCVBuWTKhRCixmyl6xH+7lUudebrDE56LcUmC+fT84nwd2+sITY5mfnFbDiWxC+HE9h+OtU+j0qn1fDE5CgHj04IIYQQonHZytdb+pJoEpQLUQOSya2+aGtQHhXkWeV2Wg109HPjRFIup5NzW11QnpFXzIZjiaw9nMiO06n2M8IAbk468ovNZOQbHThCIYQQQgjHsH0uaumfvyUoF6KaTiXlcO0HO7ltRAcendDF0cNp8mzzybu29brstpEBHmpQnpLLeNo29NCahK0nU/hk2xl2xqRhLhWIRwV5MrVXMFN7BbE5OoWXfj5OQbHJgSMVQgghhHAMk7Vq0CCN3oQQAAcvZJFTZOLHg/ESlFeDLVPeNcjjsttGBqrZ8dayLNrvJ5K487P99mC8e7AXU3sFMaVXMJEBJa/XntgMAPKKzQ4ZpxBCCCGEI0mmXAhRRpFJDYxiU/PILjTi5WJw8IiaLrNF4aQtUx50+Ux5p4DWE5Tvj0vnvuV/YrYoXNU7mMcmdqVDJSX7bk7qXPwCCcqFEEII0QqVLInWsoPyll0HIEQ9KjJa7N8fi8924EiavnPp+RSZLLgYtIT5ul12+0hrUB6TnIuiKJfZuvnKyCtm/tJ9FBotjOkawFvX9600IIeSoDxPyteFEEII0QrZMuWGFt59vWU/OyHqUZGpJCg/cjHLgSNp+v6MU8uuOwd6VqvcKNzPHa0GcopMJOcUNfTwHGbv2XSyCoyE+bqx6Kb+l50f5eakFjNJplwIIYQQrZHJon7+bunl6xKUC1FNhcaSwOiwBOWVOnIxi+fWHAVgWCe/at3HWa8l3K/ll7DHZxYA0CPEyx5wV8XNWTLlQgghhGi9Shq9SVAuhEAy5Tbf7r/ArYv3kJpbPqMdl5bHvCV7yS0yMaSjL4+Mr35DPFuDs5YclF+0BuUhPq7V2l7mlAshhBCiNTNag3J9C+++3rKfnRD1yNboDeBMah65Ra0ze/m/bWfYejKFxdtjy1yfklNkD9a7BXvx8a0DcTHoqv24nQJbflAen1kIQLvqBuUGNZueVyRBuRBCCCFaH7O1fF0v5etCCCibKVcUOJ7QOpu92eZ8f7X3vP1ERU6hkXlL9hCXlk+oryvL5g+qcXf61hCUX7Bmytu1qWZQbi1fLzCasVhabgO85iSrwMi5tHxHD0MIIYRoFYzWzz8SlAshgLLd1wEOX2h9JezFJgvpecUApOUVs+5IIkUmM3d/vp+j8dn4uTvx+fzBBHq61Pix7UF5SssNyi9mWIPyGpavgxqYC8d78Mu/GPfm5hZ98kgIIYRoKkqWRGvZYWvLfnZC1CNbVriNm5oBPhLf+oLylEvmkX+2K45HvzrIzpg03J10LL3tCiKqWOKrKrZl0VJyisgqMNZ5rE1NodFsn4df3aDcRa9DYz0xnC/zyh3OZLaw60waRrPC7tg0Rw9HCCGEaPHMkikXQpRmK1/vH9YGaJ3N3pKy1TnRXi569FoN++My+PlwAgadho9vHUiv9t61fmxPFwNBXmqGvSVmIROy1NfO1aDDx616pf1arQZX67z8fOnA7nBn0/Iptr4PtNbpK0IIIURjkkZvQogybEuiDYhQg/LTybmcT29dc0uTrUF5p0APJvUIAkCjgbeu78vwTv51fnxbCXtMCwzK40vNJ9doqn+217Z0mmTKHS86Mcf+/YmEnCq2FEIIIUR9sK1TLkuiCSGAkkx5mK8bV0T4YlHg4a8O2Oe6tAZJ2Wr5daCnCw+O60yPEC9evbY3V/UOqZfHb8nzym3zyau7HJqNbV65ZModLzqpVFCemCPN94QQQogGZlunXCfl60IIKAnKXfQ63pjdB09nPfvjMnh/02kHj6zx2MrX23o50zXIk58fvJLZg0Lr7fEjW3AHdnvn9VoH5ZIpd7ToxJKS9dwik33deSGEEEI0DJN9TnnLDltb9rMToh4VWcvXnQ1aQn3deOmangC8+9sp9p1Nd+TQGo1tObRAr5p3V6+OTgEtNyi3l6/71Oy1swXlsla549nK121n64/JvHIhhBCiQdkqUqV8vQFt3bqV6dOnExISgkajYfXq1WVunzdvHhqNpsxl8uTJjhmsaPVsDZ6c9WqQdHXfdlzTrx0WBR5aeYDswpbXMfxSJZnyBgrKrZny8xn59jn8LYV9ObRqrlFu4+6szikvMEr5uiPlF5uIs/aQGGHtnyDzyoUQQoiGZcuUS/l6A8rLy6NPnz4sWrSo0m0mT55MQkKC/fLll1824giFKFFkD8pL/mxeuLoHob6uXMws4OlVR1CUlj3HNNk6p7ytl3ODPL6/hxPergYUBc6k5DXIPhwlPsuWKXer0f1s3dclU+5Yp5NzURTwc3fiys5qUC4d2IUQQoiGZZtTbmjh3df1jtz5lClTmDJlSpXbODs7ExQU1EgjEqJytnXKnQ0lbwqeLgbeuaEff/toF2sOxjO6awDX9m/vqCE2uKSchs2UazQaOgV6sD8ug9MpuXQP8WqQ/TQ2i0UhIVN97UJqWL5uz5TLnHKHOmEtXe8a5Em3YC/rdRKUCyGEEA3J1n29pWfKHRqUV8fmzZsJDAykTZs2jB07lpdeegk/P79Kty8qKqKoqMj+c3a2+qHJaDRiNLb88mJHsr2+LfV1LjRa3xSwlHmOvYI9eGBMJG//dppnfjhC73aehPvWLBvaHBQZzWTmq8+7jYuuXn7PFR0zHf3d2B+XwcmELIzdA+q8j6YgKbuQYrMFrQb8XGv22jnr1X9COQXFLfZvqzYa+/3meHwWAJ0C3In0V6cgxKXnk5lbYD9xIpqHlv6/SjQ8OYZEbcmxU3NG65xyjWJpdq9bTcbbpD9JTJ48mWuvvZYOHToQExPDP//5T6ZMmcKuXbvQ6XQV3ueVV15h4cKF5a7fsGEDbm4tL1BqijZu3OjoITSIgmIdoGHH1i0cu6R6O1yBSE8dMTlmbv9kGw/1MNPSqmzSCgH06DUKOzZtpAZLbV9W6WOmOFUD6Nh++DSdi07W304c6GwOgB4vg8KG9etqdN+kC1pAy5HoU6wtjG6I4TVrjfV+s/OY+nsoTo5l95YzeBl0ZBs1LFu9gQjPRhmCqGct9X+VaDxyDInakmOn+nJy1c/fu//YSeIRR4+mZvLz86u9bZMOym+44Qb797169aJ3795ERkayefNmxo0bV+F9nnrqKR599FH7z9nZ2YSGhjJx4kS8vFpGKWxTZTQa2bhxIxMmTMBgMDh6OPXKbFEw71LfQKdMHI+vu1O5bfoPL+CqRbuIyzVx2qULj4zvVO/j+Gb/BVbsucA/JnVhSEffen/8quyPy4C/9hLk48a0aVfWy2NWdMy4nUzhh8//Il/nxdSpw+plP4728+FEOHKITsFtmDr1ihrd99Rvp9mUcIag9mFMndq9gUbY/DT2+81LhzcDxcwaP5S+oT58m7KfbafTaNOxF1PrcVlA0fBa8v8q0TjkGBK1JcdOzf3ryBYoLmLkiBH0aGbTGm0V29XRpIPyS3Xs2BF/f39Onz5daVDu7OyMs3P5JlQGg0EO/kbSEl9rU6n5vB6uzhgM5f90wgMMvHJtL+5f8Rf/3RbLXaMi8XErH7zXhqIovLHhpH1N9Me+PczGR0fh7Vr2dU7KLuSHAxfZeCyJcd3acs+oyHrZP0B6gfoaBHm51Pvvt/QxExXsA8DZtHw0Wh36ZlxycDY1j7/OZ7DlZCoA7dq41fi183RVj6FCk9Li/q7qQ2O836TnFZOSWwxAt3ZtMBj0dG/nzbbTaRxLyJXfSzPVEv9XicYlx5CorZZ67MSm5qEBIvzd6+0xbd3XXZ2dmt1rVpPxNqug/MKFC6SlpREcHOzooYhWxtbkDcp2X7/UVb1DWPjjMVJyijifXlDnoHzf2XS2nExhZ0yamqkGvF0NJOcU8cra4/x7Vm8Kis1sOJbId39eZPupFKzvXRxPyOHukR3RXFJnfjwhG193pxo3a2vo5dBs2vm44mLQUmi0cD6jgA4VvLHHpOSi12oI96u/N/36pCgKS3ee5ZW1Jyi2zoUCaF/D5dCgZJ3yfOm+7jB/nVP/9jr4u+NhnT8+tKMf/91yhk3RySiKUu7vTAghhGhNcotMzFy0A51Ww84nx+JiqHiqcU3Z5pTrW/g65Q4NynNzczl9+rT959jYWA4cOICvry++vr4sXLiQWbNmERQURExMDE888QSdOnVi0qRJDhy1aI1sy6HptJrLZm5DvF1IySkiIauAXu29a73Pk0k5XPfRLvvPOq2Gf13Tkw7+Hsz+7y5W7j1PVoGRrSdTyCuVye8f5sNf5zPJLTKRnleMn4daOaIoCh9sjuH19dF0DvRg46OjajSeJOtyaIENtByajVaroaO/B8cSsjmdnFsmKN90IpmPt55h15k0PJ31bP/HWLzdmtZZ04y8Yv7+7UF+PZ4MQK923vi4GfB00XPDoLAaP56bk/o2nd/C1m1vTnbGpAEwpGNJk9EhHf1wc9KRlF3EkYvZdfpbF0IIIZq7vbHpZBWojc3OpOTV2wo6Zmu2SS/d1xvOvn37GDNmjP1n21zwuXPn8uGHH3Lo0CGWLVtGZmYmISEhTJw4kRdffLHC8nQhGlKRsfwa5ZUJ8nbh4IUsErIK67TPs6nqOt1tvZx5YGxnRnTyt5cD3TIknM//iOOXI4kAhPq6ck2/9lzbrx0R/u4M//fvXMws4GxaHn4ezpjMFp5bc5Tlu88BcCo5l6x8Y40C2mRrpjzQs2Ez5QCdAkuC8gnd2wKw/VQqty3da98mp8jEgQuZjOrSdDq07z6TxsNfHSAhqxAnnZanr+rGLUPC65RFLcmUm+prmKKGdlmD8qGRJUG5i0HHyM4BrDuayK/HkyQoF0II0ar9cSbN/n19LWurKApG6zrlzXk6Y3U4NCgfPXo0iqJUevv69esbcTRCVM6+Rnk1gvJgb7VEua5Beab1bGO3YC9uHhJe5rZ/TIkip9CIq5OOa/u3Z2B4mzKBX4S/mxqUp+YzINyXL/ecY/nuc2g06nMoNFo4kZjN4I6VLy94qZI1yhv+pFinQA8ATifn2q/bGaPOyx7a0Q8nvZYtJ1M4dL5pBOVmi8L7v5/mnd9OYlGgo787793Yjx4hdQ/U3Kzl0vmyTrlDZOQVcyxBbdQy9JK/l3HdAu1B+SMTujhieEIIIUSTsKt0UJ6UUy+PmV1osk8D9K2nPk1NVbOaUy6EoxTaM+WXnx8T7K1mkhOzCuq0zyzbmuAVvAl5OOt5+4Z+ld433M+dHafTiEtTs+1/nEkH4IExnTgSn83vJ5I5mZRTs6DcWr7e0HPKoVRQnlISlB84nwnA9D4h5Beb2HIyhYMXshp8LJeTmFXIQyv/Ynes+hrP6t+eF67uUW9rV9sz5cWSKXcE25n/Lm09CPAse0JqbFQgGg0cjc8mIavAfkJOCCGEaE2yC40cuVjymaz057e6SLQmuNq4GXB1qp856k1Vy64DEKKe2DPlhuqVrwPE1zFTnpGvdnu+tMN6dUT4uQEQm6auj3g8Uc30DYzwpUtbdVHlE4k1O4uZnN34mfKY5FwURcFsUThkDcD7hfnQJ9QHgEMXMht8LFU5dCGTq97bzu7YdNycdLw5uw9vzO5TbwE5lA7KJVPuCLb55MMi/cvd5ufhTP+wNgD2HgJCCCFEa7M3Nt3eaBjKVjrWRbw1wRXUCk56S1AuRDXYGr25VCNTHuKjvnEk1lP5uk8tGplFWLuSx6XlUVBsts9P7xbsRVSQGpRH1yAoLyg2k12oZmoDGyFTHuHnjpNOS26RiZNJucSk5JJbZMLNSUeXtp70CPFCq4HknCJ7V/jGtulEMjd8/AepuUVEBXny0wMjuLZ/+3rfj73RmwTlDmErxxtSSVXJ+G5qz4NfjyU12piEEEKIpsTWe+XKzuoJ7NjUPEylVp+pLdtnaVsVaksmQbkQ1VCjTLmXrXy9sMqeCZdjK1/3qU2m3NoQLjY1j+ikHCwK+Hs4EeDpTFdbUJ6UU+3xbT+tzuf2ctHjWY9Z4Mo46bWMsL6xrzuSaF+Sqlc7b3RaDW5OenvG/6C1rL0xnUrK4Y7P9pFfbObKzv58c89QOgZ4NMi+3EuVr9fleBI1l5xdyOnkXDQaGNLRt8JtbI0It5xMYdMJyZYLIYRoff6IVYPy6wa0x9Wgw2hWiEvPr/PjJkhQLoQorSbd19t6uaDRQLHZQlpeca33mVmg3rc2a52H+arl6zmFJvvZy27BahfMyAAP9FoNOYWmajWjUxSFt389CcBNdewkXhOTewYB8MuRBPt88n7WUmGA3tZu14ccMK98U3QyZovCFRG+LJ43CE+XhluWzTaHSq8YKY7dCdvfgrV/h03/gj8+gkNfw+lfIf0MSNBer2xZ8h4hXpX+HXYK9ODWoWojxke+PsDFzLr1khBCCCGak6x8I0fjSxqi2qYgnkqqewl7gvV/amsIyqXRmxDVYCtfr06jNye9Fn8PZ1JyikjMKsTfo3ZzsDOtmfLarMPtYtAR7O1CQlYh644kANjL1p30Wjr4u3MqOZfoxBx7uX1lNhxL4mh8Nu5OOu66smONx1JbE7q1RafVcCIxh9RctclcX+tccoDe7X34et8FDjpgXrntJMHYboEYGmqJDkWBrPO4n9/L+4aPGaP9C+fPiqq+j6svtBsA7QdCu4HQrj+4VZzhFZe383Tl88lL+79p3fjrXCaHL2Zx/4o/+equoThV4wSeEEII0dztOZuOokDHAHcCvVzoFOjB4YtZxNRDs7dE6xTF1jCnXIJyIaqhJkuigXpGLyWniISsQnq2q92yWJl1KF8HdV52QlahvUN5VFDJepFdgzzVoDwphzFRgZU+hsWi8PavpwCYNzyCNu6NtxxFG3cnhnb0Y/vpVFJz1aqBfmE+9tttmfLDF7NQFKXRMvgAf53LBMqeJKiT/HRIPgZJxyD5qPXrcSjOQQtcZT0XZHb1Qxc+FPy7QGEWFKRDfhrkpUHaKfXn0xvVi41vR+g6FYY/BB6V/65FebZM+aVLoV3KWa/jg5v6M/Xdbfx1LpP/bonhgXGdG2OIQgghhEPZKjJt/ytLMuV1XxbNVtEZIplyIQSUypRXY045qEH5oQtZJNRhWbTM/NqXr4O6VnnpNSNt5eugZs1/OpRQabO3zPxiNhxL4seD8RxPyMbDWc+djZglt5ncM8g+nz3E26XMcmxRQV446bRk5hs5n15AmLXjfENLyi4kIasQrUad415rh7+FAyvUYDwnoeJttAYI6MrS5Ei+LxzIf+6ZR5dSJ1fKMBVB0hG4sB8u7oML+yA9Ri1r3/U+7FsMQ+6FKx8Hp8Z5rZqz8+n5nEvPR6fVMKjD5asNQn3deGlmTx5aeYD3fj/N5J5BdLb2PRBCCCFaKtvSoUMjywbldV0WTVEUe/l6kATlQgiAQqMtU169NRJt6xVXZ852RYpNFvKs3bbb1KJ8HdS1ym30Wg2RgSU/d7UGdqWD8oy8YjYcS+Tnw4nsPJ2KqdTaFo9O6FLrkwN1MalHEM/8cARFgb6lsuSgluF3C/bk4IUsFqz4k2Gd/Ogf1ob+YW3KrSddn2xZ8q5BXrVf+mz/MvjxwbLX+YRBYA8I7AZte0Bgd/DrBHonPvn371wsKLAfExXSO6ul6+0GAHep1+WnQ9xO2PYGxP+pfj26GmZ+CGGDazf2VsJ2QqtPe288qvl7ntEnhB8OxPP7iWT+8d0hvrlnGDpt41VwCCGEEI0pM7/Yvuzu4A5qUN7ZFpQn52KxKGhr+X8wp8hk/9wTLOXrQgioWaM3KDmjV9tl0bKsy6FpNNS6iVhEqaC8U6BHmRMKXa0ZvNPJuazYfY5fjiSwMyYNc6lAPCrIk6m9gpnaK9h+1rOxBXg6c0WEL7tj0+3rQZc2qWcQBy9kcfiierEJ83Wjf5gPwzr5c22/dujrcd73X+fVTvC1Ll0/8j38+JD6/cD50GcOBESBSyUZcMDdWf3dFdR0WTQ3X+h2FURNgxM/wdon1Oz54knQYyYMuA0irgRty57/nFNoZMvJFMZGBdqXmLucXVWsT14ZjUbDSzN7MvGtrfx5LpPPdp3ltuEdajVmIYQQoqn744w6n7xzoIc9IRLm64aTTkuh0cLFzAJCfWtXnWf7DO3jZrA3vW3JJCgXohpKGr1Vv3wdIL6WnZizrJ3XvVwMtc60RfiXvAnamrzZtG/jipuTjvxiM/9cddh+ffdgL6b1DmZKz6AGW+Krpv51bS9++OsiNw0OL3fbfaM7MalHEPvjMvjrXAb74zI4lZzLOWvp8eoD8fi4GpjYI6jexnPAminvV5ug/Ogq+P4uQIEB82Dam+qZl8twtQaSVWbKq6LRQLfpagC+7ik4uEIdy9FV4N8VRjwCva4DXcN1kXekDzbH8OHmGJ6cEsU9oyIvu72iKCVz5CKrnk9+qRAfV56cEsXTq4/w2rpoxndrW+sPJEIIIURTZitdH1Kq94pepzYUjk7K4ar3tuPmpOO+0ZHcMjSiRo9t+wwd5NXyS9dBgnIhqsXW6M3FULPydVvXyJqyN3mrZek6QLhvSaa89HxyAK1Ww5ioQH4+lECvdt5M6RXE1J7B9vXNm5LIAA8endi1ytsjAzyYPTAUgOxCIwfOZfL6+mgOX8yq9YmRipjMFntGvt8l5fSXtX8p/PgwoECvv1U7IIeya5XXiasPXPOhOrd8/1J1ObXUaFh9D/z6PIQNUTu2h/SH4D5VZu+bE9ta9qeTqze/LTY1j8TsQpx0WgaEl6/QuJwbrwhjzcF49sSm889Vh/ls/hWN2ohQCCGEaAyXzie3GdHZn+ikHLIKjGQVGHl2zVHC/dwZ2SWgysfLyjcSnZTDoIg29kx5a1gODSQobxZyCo2cTc2nV/s6NJVqAaITc/By1TtkXkltM+UJWYW16gxe187roK5vbVsW7dKgHOCd6/vy8syeDpkr3pC8XAyM7BLAL0cSOXwxi6yCOgaypZxMyiW/2Iyns57I6lYSWMzqmuLb/qP+POA2mPYGaKtfiuVmD8prmSm/VHBvuOpNGP887PsUdi2C3EQ4tlq9AKAB/84Q0g/ChkLfG9V5683QSWsH2Oo2XtxpzZL3D/ep9om40rRaDf++thdT3tnGtlOpfLv/An+znjQSQgghWoL0vGJOWHsTDb6kIerT07px4+AwTGaFxdtj+WrfeR7+6gBrH7yy0qZtaw8n8MzqI6TlFfPqrF7E24Lyyyzd21K07ImELcRLPx1n+vvb+eHARUcPpUFZLApPfX+Yxdtjy92WnFPI9Pe2c9Mnux0wslJzyqv5Ad3WJbzYZCE9r7jG+8sssK1RXreA+f+mdePWoeEMq6AEV6/TtriAvDQvV/Wco21+fn2wrU/eO9S78sYlpiK1wVrmOYg/AJ9dXRKQj3gErnqrRgE5YJ8HXW9BuY2Llzqmhw/DLath3HNqmbt3KKBA6kk49BX89DB8PEZ9Ps1MWm6RfUm9hMzqVa7UZj75pToGePDIhC4AvPjTMZJzalc1I4QQQjRFu61Z8q5tPfHzKHvSXqPREBngQdcgTxZe3YPuwV6k5xXzwJd/YjJbymybmlvEfcv3c9/yP0mzfmb+/s+LJFpPpAdL+bpoKmxdDT/YFMOMPiEttgzy4IVMvtxzDoNOw42Dw8pkqA6cy6TYbOFCPZYi10RN1yl30mvx93AmNVddq/zSN6u/zmXw3y1neGpqVJku6Tb25dDqkCkHuKp3CFf1DqnTYzRX3tbXLruw/oLywxczMWBiom8S/Pk5JB6ChEOQfRGKcqA4DywV7M/JA6a/o87brgV7pryo/rL+ZRhcIXKMerHJTVaD8Iv7Ye//1PXT/zdOzZoHdIXQwWoDOaemN+WhtJNJJSXr8VkFl61csVgUezleRSezauKOER346VA8Ry5m89wPR/nw5gF1ejwhhBCiqSiZT171sqEuBh0f3NSfq97bzt6zGfxnw0menBKFoij8eCiB5344Qka+EZ1Ww82Dw1i2K469Z9PJKVQ/87SG5dBAgvJmISWnCIDopBy2nExhdNdAB4+oYcSl5QNgNCscuZjFwIiSP/Kj8eqJiWKTpVbl4HVV0/J1UEvYU3OLSMwqpOcl61kv2hTDr8eTUFD47y0Dy93XVr5e2+XQRElQXqNMuakYMs5C2ulSlxj1a0EGCy0KLzqb0B+ywKHLPJbeRQ1YA7ur2XH/zrV+LvZMubGeM+VV8QiELhPVyxV3ws+PwrEf4Ow29bL3f+rJhu5Xqx3kw4dX2sU9PrMAPw+nai8pWJ9OJZcs+1dotJCZb6SNe+UVIieTc0jLK8bVoKN3e5867Vuv0/LarD7MeH87vxxJ5JfDCUzpFVynxxRCCCGagl2VzCevSIS/O69d15v7lv/JR1ti6Bjgzq/HkthwLAlQex+9fl1verbz5s9zmRy+mMWxBPWzf2tYDg0kKG/yFEUhLbek/PmTbWdabFB+Ni3P/v3+uIwyQbntDxPUALk28zzroqbrlIMalB++mEV8BfNYD13IBGDDsSRiU/PocEmDtUxr9/W6lq+3ZtUKylNPwV+fQ/JxNfDOiAOl8sDXCUADRoMXhnZ91GZoQb3BLxKcPdUg1cldvdRjJ/MGz5Rfjrs//G0ZJB2BxMOQdFRdYi3jLBxYrl68Q9UTEC7e1osXuHgTk6PnzW2JdOx9JY9dP7HRhx6dmFPm54uZBVUG5TtPqx8yBnXwxakGJ+Eq0z3Ei3tGRfL+ptM888NRhnf2x6uWyxwKIYQQTUFqbpG9Eu2KDtWrKpvaK5h5wyJYuvMsT3yrZjb0Wg33j+3EfaM72f/nTu4ZVGaZ22AfyZSLJiC7wESxde6FTqthx+k0jlzMKpd5bQnOWTPloAblpR2LLxWUGxs/KLdnyg3V/5AeZl0G6WxqfpnrE7MKSbZWPygKfLr9DC/N7FVmm/po9Nba2cvXLw3KM89B3C44+j2cXFf+jk4eapDt16nUJRLcA5nzyS5i0op595bJDKnDfOOacnOu50ZvtaHRQFAv9QIw8SU494d1ebXVkHVevVwiElhkAI6/C+91VtdNH3QHeLdvlGGfSirbcT2hgsqV0nbG1E/pemn3j+3E6gMXuZBRwM7TaUzuWX9L9AkhhBCNbfeZdEBdcte3ihPdl/rn1G78dT6Tg+cz6RHixevX9aF7SNlmxFN6BvH6+mj7z7IkmmgSUnLV4M3TRc/YqEB+OBDPh1tiWHRjfwePrP6VzpT/eS7DXqaemV/MxVJzydX53Y0brNamfL1DgJr9PpNaNig4aM2SuzvpyCs2882+CzwyvkuZeee27G5dlkRrNYpyICcRchIgJ0ntIp6TSM/kcyw1xOGUrYMv/CA3CbIuQEF6qTtroMtk6DKpJAD3DKp0qbJTRSdIpRifGvwDqg9uhiYQlF9Ko4HwoeplymsQu1V9jQuzoDAbCrMwF2Sy9fBp2lgy6amJRZ92Cra/BTvehe4zoOMYaDdAzbBXUvpeF4qiEG3tvB7q68r59IJyHdjPpubx6fZY7ryyI+3auLI7tv6DcheDjh4hXlzIKCBFGr4JIYRo5nadSQXKrk9eHU56LSvuGMyf5zIY0tEPg678//6OAR50betJdFIOXi563J1bR7jaOp5lM2abTx7g4cw9oyJZczCenw8lcPfIzDrPd2xqzqWXZJRTc4s5l55PuJ97mdJ1UOeFNjZ7o7caZOg7+qtLZsWm5pW5/vAFtSRnaq9gopNyOHQhiy/+OMdD40vmHNfHOuUtVnY8nN1hndu8HdJjKtysDTBaB1iA06Vu0OggpC9EjIB+t4J/p2rtVlGUUhUMjRyUO9u6rzuofP1yDK7qiY1LbI1O5ra9ewHwIo/fZpoJOLFc/d0dXaVeAHw7whV3Qe/rwa3qhjE1kZJTRFaBEa0GRnQK4Ms954gv1YE9t8jE/KV7OZOax4nEbJ6e1p2cQhOeLnp6hNRvNVKAp7N9TEIIIURzcPhCFpuik7lvdCT6UgH0H9ZMeXXmk1/K3VnPlZ2rXq98cs8gopNyCGkly6GBBOVNXqo1U+7v4Uy3YC+u6duO7/+6yCtrT7DizsEtphN7TqHRvmxRl7YenEzKZd/ZDDUojy8blNsC5MZkXxKtBpnySGum/Hx6PkUms30+ui1T3jvUh5FdAnjgy7/4bNdZ7h7V0V6Wb59T3sjBX5OUmwwxmyBuuzUIP1N+GydPNcPtGQQebcEziHwnf579NQGAf1/bE71HoFoy3SYCnKu5xnjpYRSZMFkUoPFPltT7OuWNZN3hRPv32bhzpM0gxsybo3Z1P7Za7ex+Yb/6O133pHpx84M2HcC3g/q7amP92rY7uLap0f5tWfIIf3ci/NTpJLZMuaKoSzCesZ4023s2gxd+OgbA4A5+6Cpb8q6W/K2VMCm5NV8iUQghhGhsiqLw0Mq/OGPtfTS9j7qaT3JOIaeTc9Foyq9PXl/mXBHG5uhkZg1onKluTYEE5U2cLSi3ZVkendiFnw4lsOtMGptPpjCmhTR9s3Ve93N3YnTXQE4m5bL/XAazBrQvF5Q7JlNuC8qrnykP8HTGw1lPbpGJc2n5dG7riaIo9uYVfdp70z3Yi3Y+rlzMLOC7Py9w0+BwADLzWkmm3GLG2ZipNlozZkN+mrrGd0G6+vX8HriwF1BK7qPRqs3VIkaol7AhFQZrzhaFb9evBeCpruPLLUtXU7YsubNe2+g9Ddwbap3yBmQyW9hwTA3KAzydSckpIjYljzFdUSsVQvqqGxbnwcGVsOcTSDluPQbS4OK+sg+od4EB82D4w+BVvQ7mtiZvXQI9CbaebbetVb589zl+PBiPXqthfLe2rDuaaO9lUZ+l6zaSKRdCCNGcHLmYbT9xfSwh2x6U2+aTdwvywqeBGhIHebvww/0jGuSxmyoJypu4kky5etC3b+PGvOERfLz1DP9ee4KRnQPqPaPjCLbS9TA/N/qHqQHWn9YPyEebQqa8huuUA2g0Gjr4u3P4YhYxKXl0buvJufR8MvONOOm0RAV5oddpuX1EB1746Rj/2xbLnEFhmBWFHGuX7TYtufv6ibXof3mCyVnn4chltg3uAx1GlQThLpcvLdZpNXg668kpMpFVYKxzUG6b5++I34mrPVPeRMvXK7AnNp2MfCNt3Axc268d/916ptxUDkDtVD/odvVSmKV2dM84C+mx1u9jIe0MZJ2D3R/BviUwYC4Mvv+yY7A1eesS5EmIdZ3T+KwCLBaFtzaeBOCJyV25eUg4f/0ng6Rs9f12WKcGCMrtmXIJyoUQQjR9Pxy4aP++9EomNVkKTVSfBOVNXGqOWuroXyqguG90JCv3nCM6KYfv/7zA3waGOmp49cbW5C3Cz53+4T6AWnqanFPI6RT1g7W3q4GsAqM9a92YbNl5lxp0XwfoGKAG5bZg5KB1Pnm3YE/70g/XDwrl7V9PEpuax6/HkxgQXpL19XJpgX+iaTGw8Vk48RMaQEEDrm3QuPmp84ldfa1f26iN17pMAq+QWu3Ky9VgD8rrypHz/D2sc8ptY2gOfjmiZskndg+iU6A6XeDSpofluHirJ2CC+5S9XlHgzGbY8iqc2wV7Pka/fyl9vYegiQ+G8CsqfDhb+XqXth72THlSdiFH4rNIyyvGw1nPbcM7YNBp+cfkKB79+iD+Hs50CfSs/ROvhC1TniqZciGEEA6y72w6D3z5FxO7t+Wpqd0qrfwzWxR+PBRv/7l0UP6HNSivaZM3UbUW+Im/ZbFlVfw9S4JyHzcn7h/biX+tPcGbG08yvU9Io5fT1jfbcmjhfm4EerrQwd+d2NQ8Zry3A7NFoY2bgTBfNw5eyLKvGd6YSjLlNXudbc3ezlhPLBw6nwlQpkmfu7Oem4eE88HmGD7ZdobIwN6A2nFfX0FXymYrPRZ2vKOuC24xgVaPecgCfsnryaSrrsFgqP9g18vVwMXMgvoJyu3z/Bs/KI8M8ECrgbS8YpJzCgn0bNrLg1gsCuuPqkH55F5B9nW5Y1MqyJRXh0YDkWOg42i1SdzmV9HEbSc8fSssmQDtr4Apr0K7klUpFEXhlDUo79rWk7aezmg1YDQrrP5L/aAxpKOvvfPrNf3aYTIrRAa6o22A6iP/Uply28oSQgghRGP6aEsMCVmFLNsVx+7YdN6/sR+dKjgRvSc2naTsIvtKQRczC8guNFJQbOZMSh4aDVzRQPPJW6sW9Im/ZbLPKb+k9PbWoRG083ElIauQJTvOVvkYiVmFjH9zC8+vOYrR3PhZ5uqwZcrDrc2Y/nVNL/w9nEjMVud/9gjxtnc+b+xMuaIotVoSDUovi6Y+v0PWTHmv9mXLr+cNi8Cg07D3bAabTiQDLWQ+uaLAsR9g6VXwbl/Yv0QNyDtPhLu3YRnzDGZt3crKq+Ltqp53bO6ZclcnHR381WPp0h4LTdGf5zJIzinC00XP8Eh/OlrHHp9VSEFd5sVrNNBhJNz2M6a5aznfZhiKzgku7IFPxsKPD8PBr+DkBlKObyfQeIFAXQ4Rvs7odVr7yYzV1pK84Z38Sz20htmDQhkQ3jAfMmyZ8mKThezC5jMNQQghRMuQmV/MlpMpgPpZ5kRiDtPf28HX+86jKEqZbdccVE9eT+sdbF8n/GRijj1L3iPEyyFJipZMgvImzlbqWDpTDuq6t49N7ALAB5tPk5FXeUffrSdTOJ2cy9KdZ7lj2T7yipreB8KSTLn64X1opB8bHxnFzL5q2fLYqEB7QNzYmXKjWcH2XlXzTLk1KE/JJafQyKGLmQD0uWQ5u0AvF2b2bQfAB5vVJb4ae9mtepcRB1/Mgq9vVbObaCByLNz2C9z0jdpNu4HZ/mFk10NQbl873kG/l+7WJbouXSKwKbKVro/v1hYnvZY27k72kxm2E3B1pbS/gj8j7sF0/1/qUmoo6kmfVXfBir8R+PVVbHJ+jD2GuzG8HACvduAZ3RLaa5JJt75fjigVlDc0F4MOT+t0FGn2JoQQorH9ciQRo1khKsiTDY+MZEQnfwqMZp749hAPrTxATqH6OSchq4C1h9XVa67u246uQWomPTqpJCgfKqXr9U6C8iZMURT7MmG2Rm+lzezbjm7BXuQUmnh/U8lCzGdScsvM/biQWWD/fsvJFG74+A+yC5vO3NRCo5kEa0Y83NfNfn0bdyfevqEfh5+fyPwRHewl+o2dKS/dWM65FnPKATLyjSzdcZZCo4WOAe50aVt+Sa47R3YEsAcMzSpTXpgFcbvUDto/PgSfjIP3B0HMb6Bzhisfh4cPwy2rIHxYow3LHpTXQ2Yyw8G/lx4hXkD5xodNjaIorLMG5ZN7Btmvt2X6K2z2VhcebeHaj2Huj9BrNnQcA8F9yXZpR7ZS8n5CQTrTCn5ki9MjvGt4j5EeF+1z3RuLfV65NHsTQgjRyNYcULPfV/dtR6CnC5/Nv4InJndFp9Ww5mA8V723nR8OXOSaRTvJKjAS4efGkI5+JUF5Yg67YmQ+eUOROeVNWHaBiWJrubl/BZ2jtVoNT02J4tbFe/h8VxzzhkWQW2Ri5qId6LUa/vjnODxdDFzMUIPyqb2C2H0mncMXs7hz2T6Wzb+iScxFv5CRj6KAp7MeX/fyJx88rfNRHZUpL30SoKbl625OeoK9XUjIKuTDLWoG/JYh4RXOJ+3S1pPRXQPYHK2WFjX5sqD4v2D7W+rXzHMVbxM+HKa/C/6dGndsVrbXsH7mlKuP4e2goLx7sBqUH2/iQfnhi1lczCzAzUnHqC4B9us7+Lvz17nM+g/K7TsYqV6snv/6AN//eZHHx3fk/qGBkHiQmB/+TWT2bmbodjHDtAs++wmGP6RWcDTCHG9/D2fOpORJplwIIUSjSswq5I9YNaCe3kddVlSr1XDf6E4M7uDLg18eIC4tn4dWHgCgU6AHS28bhE6roWtbNSjfejKFs2n5aDUwSOaT1zvJlDdhtiZvns76SoPnkV0CGNHJn2KzhRd/OsZ9y/+kyGQhr9hs//B7IUMtDZ/UI4jPbr8CT2c9u2PTeXjlAcwWpcLHbUxnU0uWQ6uq+ZGtdLzxM+Xq/pz02lo1Z7JlCPOLzbgadFzbv32l295lzZZDE14OrTAbfvmHOof32A8lAblXe+g8Ca58DK5bAvfvg3k/Oywgh1JBeT10LbfNKXfU76W7NVMem5bXJKeg2NhK18d0DSzzvlUylaOBgvJLnLQ2eesc3Abc/SByLFuu+C9Til5hlXk4Fo0OYrfAF9fCR1dC9C+gNOz7oaxVLoQQwhF+OhSPosDA8Da0b+NW5rYB4b6sffBKplir267o4Mt39wyzb2fLlJ+1TjXt2c7b3sBV1B/JlDdh9iZvnlU3wnpyShRXvbedDceSylx/Ni2f3u19uGgtX2/n40qPEG8+vnUgcxfvYd3RRJ5efYR/XdPToZ2ASy+HVhXbcmRFxkYOyo01X6O8tI4B7uy0lvvM7BdSZQZ8aEc/erbz4sjFbNo0xfJ1YwF8OFxdMxqg53UwYB607aEuY9bEeNVjpjzL2n3dx0EVDP4ezrT1ciYpu4gTidkN1pCsLiorXQfoYF2JIPZyy6LVA7NFKVmjvG1JV9kQHxeOK+E8YlzAiLvfIeDIYti/DJIOw5c3qJn2gfPVE0xeIWppvO6Sf5OKAgUZkHUBchJBqwODq3rRu5Z8b/u51P1lrXIhhBCOYPvfPKNvxUvMersZ+OCm/sSl5RPq64au1CoknQLVFWBseTyZT94wJChvwmxBeUWl66X1bOfNzL4hrD4Qj16rISrYkyMXszmbmofJbCEhS52v3a6Nuk7v0Eg/3rmhL/et+JMv95wjwNOZRyd0adgnU4Vz6SWZ8qrYMuWFpsYtX7etUV7TJm82tmXRAG4aHF7lthqNhn9f25sPNp9umuvPG1yh5zVw/EeY9oZa9tuE1Wv5er5jy9dBLWFPyk7haHzTDMqjk3KITc3DSa9lTFRgmdsabE55Bc6n51NksuCs1xJWqk9FjxBvnHRa+oR6E9C+M7R/BUb+XV2q748PIXarerHRaNXA3DNYDdLz0yHluBqUV5fWYA/SH1A8CNd3QnNxJJw8p/Zi0GrB1Rdc26gntlx9wcm9UcrphRBCtHxmi8KReHX1n+FVNDjVaDRE+JdPkLkYdET4u9sr3WQ+ecOQoLwJK+m8fvly2aemdiOzwMiMPiEkZBWqQXlaHkk5RZgtCgadpszaxlN6BfPi1T15evUR3v3tFAEeTtwyNKKhnkqVbOUwEZcJyh2WKTfVLVM+ILwNoK6J3LOd92W2Vk+yfHDTgFrtq1GMfgpG/xMMTXutbKjfTHmmg7uvgxpUbopOabLLov1yWD0TP7KzPx7OZf+9RPirf98Z+UYy8oppU0H/iPoSbS1d7xToUeZsf6ivG78+OqpstYqbL0xYCANvU3skJB2DnAT1YjGVfB//Z9mduAeowbqigDEfTIXqV2MhmEqaa2IxQpERirLxI4nb9DFwfj2sqOIJaHTg7AnOXuDipX519rR+b72+w0joNK4eXi0hhBAtWVxaHoVGCy4G7WWrUisTFeTJmZQ8dFoNAyPa1PMIBUhQ3qSlVDNTDtDWy4Wlt10BqPNGAM6m5tmbvAV7u5b5cApw85BwUnOLePvXUzy75ih+Hs5M7RVcn0+hWs7Z1yiv+o3CUZly+xrlNey8btMn1IefHhhx2UqAZsPg6ugRVFtJ9/W6BeWKopCZ7/iu+N2beAf2ktL18u8jpZsenknNY0ADBuWnrEF511Kl6zaV/h22iYDp75T8bLFAXgpkX1SD8ux4NRgOjAK/zuBUxd+zoliD9IKSi6mAw0cOcnDz9wxzjqFjoI8aZFssUJCuZt/z08FcBIoZCjPVS1Yl+9jxNsz8EPreePkXRAghRKt1PKHkf+KlsUB1dWnrydrDifRs521vwCzqlwTlTVhqjhoEBFQjKC/NdhYsLi3f3uStfZuKA6mHxnUmJaeI5bvP8fDKA/i4GhjWiGv3mswWLlhPHIQ32Ux53crXgWplyEX9q6/y9fxiM0azOpnKkUG5bVm06KQcjGYLBl3T6dV5JiWX6KQc9FoNE7q1rXCbjgHuJGQVEpuaZ68gaQjR1vnknSsIyqtNqwXPtuqlpjSaknnlpa82h/H0r20IdHVmz93jy9/PlnUvzIaiHCjKVi+F1q9FOer3SUfgxE/ww/1quXvXybV8kkIIIVoKk9mCyaKUaw59IlE9kd/NuopLbfxtYCg7Y9K4u1RDYlG/JChvwuxzyi/T6O1StuA2La+YE9b1ytv5VByUazQaXri6J+l5xfxyJJG7Pt/PyruGNFoQGZ9ZiMmi4KzX0taz6nJoW/l4UWNnyq2N3lxqmSkXjmMLynMKTZgtSq3PENtK1530WlwduIxgaBs3PJz15BaZiEnJJSqo9v9g65ut6/rQSL9K59138Hdnx+m0Bm/2Zs+UBzXuOuSXY2vamZZXjMWioL30eNRo1PnkTu5AFVVLFgv8sAAOroBv5sGtqyFsSEMNWwghRDPwwJd/sfVkChsfHUVIqc/9xxPUoDwqqPYnqtv5uPL13UPrPEZROYkymrDqNnq7lKeLAX8PtTR0x+lUoKTJW0V0Wg1vXd+XIR19yS0yMW/JXuLSGmfZIlvn9TBft/IfUC/hbA2GCh2WKZc/l+am9JIdOXUoYbeXrrsaHLpSgVarITLQ2sW8kZYWqy5b6fqUCkrXbUo6sKtjN1sUPtl6xv6BoT4YzRZiUqyZ8sA6ZMobgK+1ZN9sUciwHlO1otXCjHfVJQhNBbBitjoXXgghRKsUk5LLL0cSySs221f8sbGVr9clUy4ankQZTZhtLVtbgF0TthJ229zTS9ckvJSLQcfHtw6kW7AXqblF3PLpHvtJgYYUZ+28frn55FCqfN1Rc8rrUL4uHKN0ZrumJewFxWZ7wGhb59yRpes2toaItr+dpuB8ej6HL2ah1cDEHpWXe1+6VvmPB+N5ee1xnll9pN7GEpeWh9Gs4O6kq7RCyFEMOq09ME/JLUKpy7roOgP8bSmEDla7uH9xLWTE1c9AhRBCNCtf7z1v/770ie6sAqN9aeSmVF0nypOgvIlSFIXUXOuc8hqWr0P5ILc6H069XAwsmz+IUF9XzqXn8+HmmBrvt6biUm1N3i7fBM0WFDf2nPLCOq5TLhyrNvPKjWYL96/4k5fXHufJ7w83ic7rNuHWJb7i0ppOUP7lHnXd+kERvlVW9tiWRTublofForAvLh1QTx5aLHUIUEuJTlSz5J3ael62+sYRbD1CTiXlMvGtrdzy6e7aP5iTG8xZCQHd1GZ0K29Uu78LIYRoNYxmC9/9ecH+s20OOUB0qWmsjlzSVVyeRBlN1Od/xFFstmDQaWpcvg7llxerrNHbpQI9XXjuqh4ArP7rIkZzwwbA1V0ODZpAptyBc4lF7dU0KFcUhae+P8xvJ5IBOHwhk3PWrHRT+IcWZm/k2DTK19Pzilm28ywAt4/oUOW27du4otdqKDRaSMwu5MD5TAAKjOZ6y/xH2zuvN6355Da2JS5f+OkYp5Jz2XYqlbS6VCW5+cLN34Gbv9oAbsP/1dNIhRBCNAe/HU8mNbcYg049EX08IcdeiVUf88lF43BoUL5161amT59OSEgIGo2G1atXV7rtPffcg0aj4e2332608TnKT4fieW7NUUDtjn5pF8XqiPAvyZRrNRDkXf01pUd3DcDfw5m0vGI2WQOThnIu3TqnvBrl6/Yl0ZrZOuXCsezLohWYqrX9a+uj+Xb/BbQa8HLRY1Fgw1F1vrSPq+ODcltVSVPJlH+89Qx5xWZ6tvNiQveqO5XrdVr7kmTH4rM5YZ3nBnCinuaV25q8dalL5/UGZMuU26YnAZxOrmPjO+92cM1/1e/3/g+O/VC3xxNCCNFsfLVXrVa7aXA4Wo16stz2P6Y+Oq+LxuHQKCMvL48+ffqwaNGiKrdbtWoVf/zxByEhIY00Msc5npDNI18dQFHg1qHhLBjTqVaPE1EqyA3ycqnR0kl6nZZr+7cD4Jv9Fy6zde1ZLIo9sGjSmXKjNHprzrxqkCn/dHusfdrGv6/tzbTe6nvOn+cygaYxp9wWlCdkFVBsatwTVJdKyy3is11nAXh4XJdqNcGzzSv/8VA8plIl6/XV7C26qQflpaYj2arrT9U1KAfoPB6GP6x+/+3tsOZBSD9T98cVQgjRZCVkFbDlZAoAc4dF2KeJHbP+Tz1mPfkdFdw0/yeKEg6NMqZMmcJLL73ENddcU+k2Fy9e5IEHHmD58uUYDI7/QNzQfj+RjNGsMLSjH89N71HrTs/h/iVB7uWavFXkbwPaA7DpRHKDNXxLzimiyGRBr9VUa867fU55Iwci0uitefNyVVd+vFxQvvqvi7z4k9rB+u+TujJ7UChDI/3KbOPj5vg55QEezrg56bAocCHDsdnyT7bFkl9spnd7b8Z1C6zWfWwfGNZbqw9sb3HHE3Mqu0u1FRrN9hN9XZtoqV6wt/pe176NKzcODgPqIVNuM/Zp6DoVLEb4cxm8NwC+u0M6swshRAv17b4LWBS4ooMvHfzd7RnxE4k5mC0KJxOl83pz0aTXKbdYLNxyyy38/e9/p0ePHtW6T1FREUVFJUFkdrZ6pshoNGI01n5JpMYSa13KZ1CEDxazCUstk8KuOvB1N5CeZyTY27nGzz3C14Xe7b04dCGbT7fFcO/Ijrg6VR2U2vZR3X2dTsoCIMTHBcVixniZJ6tFDY4LjeYG/10qisLTPxyjwGi2L6vlpKv+cxPVU9NjpjY8ndXj9mxqDvOX7CEpp5A+7b0ZGN6GyT3aYtBp2XYqlce/OQjA3KFh3Dk8DKPRyIBQr0seS9skjoHQNq5EJ+VyJjmbUJ+a95yoDyazhW/3q91e7x3ZAZOpetMDwqz9LWzTUEZE+rHtdBrH47Nq9NpWdOycTFA/hHi56Gnj0jR+V5ea0bst8Zn5XNs3hIMX1ffAk0nZ9TfW6z5Dc24X2p3voI35FQ5/A4e/wdJlCpZhj6C0618/+2nGGuN9R7RscgyJ2qrJsbM/LoOX1kbzzLQo+of5lLvdYlHspet/6x+C0WikS6B19aWLmfwVl0aB0YyLQUs7Lyc5Xh2gJq95kw7KX331VfR6PQ8++GC17/PKK6+wcOHCctdv2LABN7eaZ4wb21+ndICGjHMnWbs2uk6P5aXRkY6GgtSLrF17/vJ3uERXg4ZD6PhwSyz/3XKGrj4Kd3a1cLlK+I0bN1br8f9I1gA63Mx5rF279rLbpxUC6MkrLK7W9nWRY4Sv96t/Hs46BdAQd+Y0a9eeatD9tlbVPWZqI+m8epx9te+i/bqj8Tms2HOBN9ceYnSwha/PaDFZNPT3s9BXOcMvv5SU/bZ11ZFUoKZzz5w4wtqUww021upyKtYCWn7Zto+80/XTtbymTmRqSM3V4a5XKIjZx9rY6t0vOQtK/+vprE1mGzouZBby3Zq1uF7yXynPCKvjtBgtcGtnC5c2VC997OxLUX/X/gYjv/zyS62eV2PoBZzaf5rkHAA9R8+l1f97mteteHcdSeekHwnJ3If25C9oT/5CgvcA9kYsQNE26X//jaIh33dE6yDHkKit6hw738VqOZKo5d01fzC7Y/kq0egsDRcydbjoFDh/gLXxB8jNUP8P7juVwMWL8YCWHt4m1q9ruv8TW7L8/OpXNDbZ/8r79+/nnXfe4c8//6xRCfdTTz3Fo48+av85Ozub0NBQJk6ciJdX0y/dePnIFqCIq8cNo0977zo91mHdSf63/SzXjRnAmK4BNb7/mGIzph+PsTMmnaScIo5nagjpNZgB4W0q3N5oNLJx40YmTJhQrakGxzeegphYBnQNZ+rUbpfdPjmniBf+2oJJ0TBlypRal/ZXx6ELWbBPXaqoyKzup2f3KKZeWXV3aVEzNT1maiNlVxzrLqgnuIK9XXhobCSnknP57s94LuQZ+eK0mkkfHunHxzf3w+mS3gG7zcdYsUftrTBm2BUMu6Sk3REOaaM5vCMOj+AOTJ0aVa375BSaOJ+RT/d6KmHbuuoIEM+MfqFMv6p7te+XnFPEe8e2AOqc6gdmT2DtuztIzC4ivM9QBpZ6f9kXl8Gj3xwmIUtd5uuVm64kMkDNAlR07BzfeApOx3JFVChTp1Z/TI6SU2jkrSObyDJquHLsBDxdGuJv4D5MqafQ7XoXzZFvCM7azzTPY1hGPdkA+2oeGuN9R7RscgyJ2qrJsbP5+yOQGI/eO5CpU8tXOW34+hCQyLUDQpk5Xf2f1y+rkI9PbCW5SEtyEYDCM38bRo+Qph8DtUS2iu3qaLJB+bZt20hOTiYsLMx+ndls5rHHHuPtt9/m7NmzFd7P2dkZZ+fy5ZwGg6HJv3HmF5tItnZL7NzWu87jfWpqd+YO60Cob+0qBAwGA2/d0B9FUZi/dC+bolOITs5nSKeq545W97U+n6F+0O4Q4FGt7T2s084tCmh0+ho1r6up5Nzy5SZuzk3/GGquGvLvs3uID6AuB7L0tivsKxHcPboTz6w+wvqjSfRp781/bx2Iu3P5t8RhnQLsQbmfp2uTOAY6BKjzpS9kFFZrPMk5hcz6cBfn0wv45aEr6zy3rNBoZsNRdWWGa/qH1ug1CWmjx91JR16xmS5tPWnj4Uq3YC8Ss1M4nZLP0E6BmC0KH2w6zVu/nqT08uXpBSaiLtlX6WPndIp6RrpbcN3fPxuDr8FAWy9nkrKLOJtRRP+wBqrmCu4O134EXSbAt/PR7XgLXffpENK3YfbXTDSHzwWiaZNjSFRGURTe3HgSnVbDw+O7lLu9OsdOfrE6rTMxq6jcthl5xWw8pv4fvnFwhP32UD893q4Gex+d4Z386Bvu+GRCa1WT94cm2076lltu4dChQxw4cMB+CQkJ4e9//zvr16939PAahK1BkY+boV7WQ9ZpNbUOyEvTaDT0bKdm7Y/F10+HZIA463Jo4dVYDg3Kdj8vNDZsB/aLmQUADAxvg7t1Lr2rrFPeLA3r5M9vj41izf0jyiwNGOjpwkc3D+DXR0fy9T1D8aggIAcY0rHkn1kbd8c3eoNSy6JVY23v7EIjcxfv5Xy6ekyfTKp7Q7XN0SnkFJkI9nYpk9muDo1GQwdrtrufdY6c7STBsYQckrILufl/u3ljoxqQX9OvHX1C1e1KLyNWEdtz69xE1yivSKdAdaz11uytKj1nQfeZoJhh1T1gapgmnkII0dodupDFe7+f5u1fT5GZX1yrx8gpVHu1XMwssK87brP6wEWKzRa6B3vZP6OD+j+29Jrkd42MrNW+ReNzaKY8NzeX06dP23+OjY3lwIED+Pr6EhYWhp9f2TM7BoOBoKAgunbt2thDbRRnU2sWpDYmW9nL0YSsenk8RVGIS63+cmhQNigvMlloyN7KtqB8QEQb7hkVycq95xnXreo1mEXTFRlQcZCm0WjoFFj1keTv4cyD4zqTmltESKmg3pHCfdX3iHPp+VgsCtpLJ1pbFRrN3PXZvjLLjaXl1u7DQWlrDqrz86f3Cal031UZGO7LkYvZjOqiVt1EWYPyLdHJrD+aSHpeMa4GHS/O7Ml1A9rz4Jd/cfB8ZpVBeX6xWp4P0LWJLodWkc6Bnuw4ndY4QTnAtDfg7HZIOQ6b/w3jn2uc/QohRCvyzf6SXk6pucW1Wr0lt8hk/5pdaMLbusSroih8tVd9/BuuCC13v27BXuyOTadrW09GdvavzfCFAzg0KN+3bx9jxoyx/2ybCz537lyWLl3qoFE5ztkarNnd2LoHq2fhTibmYjRb6lw6npFvJMf6ZlPdbL5Go8FZr6XIZGnwZdEuZqhBeTsfV8Z3b8v47hKQt2aPTihfeuZIIT4u6LUaik0WErMLCalgScEik5m7P9/PH2fS8XDW06udN7vOpJGeV7egvMhk5rfjasncjD4htXqMf0yO4roB7e1n97tb10+Nt84d7xbsxfs39rOfTLGt7Z1cRVB+OjkXRQF/Dyf8PBzTkb42IhszUw7g7g/T34avboYdb0PUVdB+QOPsWwghWoFCo5k1B+LtP9f2/25uYcmqJhczCuxB+aELWZxIzMFZr+XqPu3K3e/mIWGcTMrhkQldGrT/kqhfDg3KR48eXa4coyqVzSNvKeLS1Ex5RBPMlIf6uuLprCenyMTp5Nw6z0k9a32uwd4uuNSgLNwWlDdW+Xp11k8XorHpdVrat3HlbFo+cWn55YLyYpOFBcv/ZMvJFFwNOhbPG8SumDR2nUkjLa9uJcsZeUaKTBZ0Wk2tG8e4OunKlNtF+Lnj42YgM9/IvGERPDklqsz7QqAtKM8uLPdYvxxJZG9cFgXW94TOl6l8aGo6W4PyU8l1n1ZQbd2mQ6+/qculrb4H7t4KBnmvE0KI+rDhWBLZpQLqtNza/d8t/RjxmQV0t/7PXWnNkk/pGVThdNdOgZ6suHNIrfYpHKfJzilvjWKt5esR/k0vU67RaOhmfTOoj3nl56xVAeE1rAqwfVAvMjZspjzeFpS3kQ+qomkK87OVsOeVud5ktvDQyr/49Xgyznotn84dyBUdfPHzUEvn6lq+biun83DW19sZeL1Oyzd3D2XVfcN4fkaPcifqAr3UoDzlkg82FgWeXHWUz/+I49v9ajO+rkHNKyi3zSm/kFHAtlMpvPzzMaITGyFAn/IaeLSF1JOw6eWG358QQrQS3+wruwxxWm0z5UUlTYdtyaL8YhM/HlSz8NcPCqvwfqJ5qlGmPDMzk1WrVrFt2zbi4uLIz88nICCAfv36MWnSJIYNG9ZQ42wV4uyBatPLlAN0D/ZiT2w6R+OzmVXHakdbptw2N7a6nA3qeaRCU80z5YcvZPHbiSTuG92p3LJXpeUXm8jIV98IKyoLFqIpCLdO+zha6iSZ2aLwyNcH+eVIIk46LR/fOpBhndT5ZH7WJnW1/XBgUzoor0+dq5gHHuipzuVPzi4blKcXqd1pnXRa+oX5kJJTxDX9ypfyNWV+7k72KoFbPt0DwPbTaax9cETDlh26+cL0d+HL62Hn+xA1HcIGN9z+hBCiFYjPLGD76VQAhkX6sTMmrVYnw41mC4WlElC2oPznQwnkFpkI93NjSEff+hm0aBKqlSmPj4/njjvuIDg4mJdeeomCggL69u3LuHHjaN++PZs2bWLChAl0796dr776qqHH3CIVFJtJtJZmdmiiQbmtVPVYPTR7s2fKa1gV4KKvfab8tfUnePvXU3z/54Uqt7NlyT1d9Hg1yLrBQtTdyC4BAHy55xzRiTlYLApPfHuIHw/Go9dq+OCm/oyybgPY51nXdU55XgMF5VWpbE55YoEatHYMcOeru4fy++Oj7Z3amwuNRkPv9j6AOj3HSa/leEK2/UNdg+o6GfreBCiw+l4ovnw3fyGEEJX77XgSigJXRPjaVxhJr8W0sdLzyaEkKP/GWhU2e2CozBdvYar1qapfv37MnTuX/fv307179wq3KSgoYPXq1bz99tucP3+exx9/vF4H2tLZlgfzctHjUw/LoTWE7qXK1xVFqdObQV0z5UW1yJTbgpFN0cnccEXlJT8XMmQ+uWj6xncLZHy3tvx6PIknvjtE92BPvvvzAjqthvdv7FeuOaGvNVOeWsu5bTa2JVo8XBovKLfNKc8qMFJoNNvL25OsMWRVWfbm4F/X9GT3mXRGdw3gvd9Ps3TnWT7eeoYrOwdc/s51NelfELMJ0mPgtxdgyr8bfp9CCNFCHTivJq6GRPrhY23MVpsKNVtVms3FjALyi038GZcB1L7Rqmi6qpUpP3bsGK+99lqlATmAq6src+bMYdeuXdx22231NsDW4qxteTB/9yZ75qtzoCcGnYbsQpM9cK2tuFrOKXe2ZsoLa5Ept73B7TidhtFc+f3jM9WKBQnKRVOm0Wh4aWZPPJ31HDyfyZd7zqPVwFvX92Vyz+By2/tb55TnFJoorsPqBbZMuXsjZsq9XQ32KSelTyrYMuWdKlnyrrlo38aNWQPa4+fhzO0jOqDVwLZTqWWWsmswrj4w4z31+90fwoV9Db9PIYRooQ5dyASgT3vvOvVyybkkUx6fWcBf5zIxWRTa+bhWe+Ui0XxUKyi/dL3w+t5elHReb6rzyQGc9Fp7Z+NjdfiwmFNotJ81rHmjt9pnyvNKrfe433qmsSIXM9UTBjKfXDR1Qd4u/HNaNwA0Gnj9uj6Vnj33cjGgs64pXpcSdtvJLc9GDMo1Gg0BHuVL2JOsQXnnts07KC8t1NeNKb3UkyqfbDvTODvtPB763Kh+//NjYGnY1S2EEKIlyi0ycTpFXd6yV3tv/NxrP20sp1DtbWSrnk3OKbJPaxoU0aY+hiuamDp3Xz9+/DhLlizhwIED9TCc1stWzt2hCa5RXlrPdmoJ+y+HE2r9GLYsuZ+7E541nLPtXIc55aVLgTZHp1S6nX2Ncum8LpqBGwaF8tqs3nw+fzCzBrSvdDutVmMvYa/Lsmi59kx59ZcyrA/2eeXWZm+KopBoLdixdTBvKe66siMAaw7Ek5BVt6qkapuwEJy9IOEA/LmscfYphBAtyNGLWSiKutxvoKdLnf7n2v7Xhvm62RNStrXPB3WQBm8tUY2C8hdeeIHXX3/d/vOmTZvo27cvf//73xk0aBDLly+v9wG2Frby9aacKQe4aXA4Gg2sPhDP7jNptXqMc+nqcw2rxQmI2mbKTZd0sdxysvKgXMrXRXOi0WiYPSiUEZ39L7utvQN7HZZFK+m+3ri9L2zzym3LoiXlFFFk1qDTaoho4u+bNdUn1IcrOvhisigs3Xm2cXbqEQhj/ql+/9sLkF37E69CCNEaHbqgzifv3d4bKJk2lp5XjMWi1OixbOXrni56e+WmrdnbFRESlLdENQrKv/322zLzyl9++WUefPBBUlNTef/99/nXv/5V7wNsLUZ2CWBSj7ZEBTfthkV9Qn2YY22S9swPR6qcm10ZW1VAbT5I13ZOeV5x2SD+eEI2SdZu95eyvelJ+bpoafxKfUCorZLu642bKbevVW79uz2dbGsW6VrlEofNlS1bvuKPc/YyxgY36E4I7AEFGbDoCtj+Npjq1hhQCCFai4PW+eS2FTXaWE+EWxTILKjZ+3hOqZVOSieJ2rgZWlx1mFBV65PMZ599xrJlyzh79iwHDhyw/7xjxw48PDz47LPPsFgsnDlzhs8++4zPPvusocfd4tw7OpL/3jKQHiHejh7KZT0xqSu+7k6cTMpl6Y6zNb6/bTm0sFo0qXDW1y5Tbgsk9FoNfaxnMDccTSy3nclssS9N117K10UL42ud31aXDuy5Dui+DqXWKrfOKbfN24ts5k3eKjM2KpDIAHdyikx8tfd84+xUp4e/LYHgPlCUDb8+B+8PgqOrQalZlkcIIVobW6a8jzUoN+i0eFs7sNd0WbRce6bcUObz6MAI3ybbEFrUTbWC8vDwcCIiInBycqJt27aEh4eTmZmJl5cXY8aMITw8nMjISDQaDREREYSHhzf0uIUD+bg58eTkKADe/vUkiVkVZ5wrY8+U13CNcsC+FFJRDbtHl+4YPaprIADP/HCUsW9s5qWfjrHzdCrFJgtJOUWYLQoGXUljKSFaClv5en00emvM7utQfq3ymBT1faRTQMsqXbfRajXcac2WL94eW6uqpFoJ6Ap3boaZH4JHEGTGwTdzYclUiP+rccYghBDNTEZesX16Zq92JQk2P/typDX7v2urkPJw1hPiXRKUS+l6y1WtoHzUqFGMGjWK/v3789NPP+Hk5MS6deuYOnUqI0eOZNSoUQQHBxMaGmr/WbRs1w1oT/8wH/KKzbz487Ea3fdcWu3nz9sy5YXGmmXKc0uVAd00OIwrO/uj12o4k5LH/7bHcuP/dtP/xY08vFL90Bns7YpWK2ciRcty6Zzy8+n5pOTU8Ox9qb+lxmSfU27LlCdbM+UtuIxvZr92+Hs4EZ9VyNo6NNesMa0W+t4ID+yHkU+A3hXO7YSPx8Cqe6Eop/HGIoQQzcDhi2qWPMLPDW+3kp4rtZ02Zl/pxEVfpvGwNHlruWo0Ee/111/nwIEDDB8+nLi4OF544QX7bUuXLmXy5Mn1PkDRNGm1Gl6c2ROtBn4+lMD2U6nVul+h0Uy8NbMeXpvy9VpnytUg3sNZT1svFz6/fTB/PjuBD27qz3UD2uPv4URukYm9Z9Wl0qTJm2iJ/KzVH2l5xWTkFTPp7a3M+nBnjR4jz2FBua18XX3/aOmZclArg+YOjQDg461nUBq7hNzZA8b+HzywD3rNBhQ4uALWPdW44xBCiCbu0CXzyW3sHdhrOG0st1SjN9tnUleDjh4hXnUbqGiyavSpqk+fPpw9e5a0tLRya5E//vjjeHnJgdKa9Ajx5tahESzdeZZnfzjCmgVDL3ufCxlqltzDWW9/o6qJumbKSy/j5OViYGqvYKb2CsZiUTh8MYvfTyRz8EImtw3vUOOxCdHUlV6eZV9cBvnFZs6l55NfbMLNqXr/DnIcFZR72ebDF5OSU0RGvhENCh39W25QDnDzkHA+2BzD0fhsdsakMbzT5bvs1zvv9jDrE+h9PSyfBQeWw5D7oG33y99XCCFagb/OZQIlnddtSp8Mr4nswpKVTgZG+DLnijB6t/fGoGt5jU2Fqla/2UsDcoDg4GDc3Vv2hyNR3qMTu+Dv4cyZ1DyWVKPpW5J1jeFgb5daNaqojznlFdFqNfQJ9eGRCV1YetsVjOoSUOOxCdHUlV6e5cD5DPv1NSlhv9zfUkPxc3dCowGzRWG9tUljG2dwdWrcLvCNrY27E7MHquvPf7z1jGMH03k8dJsOikVtAieEEIJCo5mdMeoywUM6lo2R/Gu5FGlukXVOuYsenVbDK9f2sq9+JFqmagXlK1eurPYDnj9/nh07dtR6QKJ58XIx8H/T1KZvi7acIf0yn+1tXZ9tc2xqqraZ8rxix2T3hGhKfEt9OLCd1YeS5mnVUbqkrjHpdVr7nPh/rT0OQB/f1tERfP6IDmg1sOVkCtGJDp7PPX4haPVwagOc2eLYsQghRBOwKyaNAqOZYG+XcuXlvpc0WM0tMlGdJctLzykXrUO1gvIPP/yQbt268dprr3H8+PFyt2dlZbF27VpuvPFG+vfvT1paWr0PVDRdM/u2Y3AHXwqNFr6PrfqQsnWf9K9lZ/OSJdFqlil3VMdoIZoSWxldbpGJA+cz7dcnZ1cvKLdYFPKK1RNijvhbCrDOK88vNtPex4UpoY3UkdzBwv3cmdwzCIBPtjk4W+4XCQPnq9//8gQU5zl2PEII4WC/Hk8CYFy3wHJVoLb/u6m5Rfx1LoMrXtnED3GXD79ybCfA5XNrq1GtoHzLli28+uqrbNy4kZ49e+Ll5UXnzp3p1asX7du3x8/Pj/nz5xMWFsaRI0eYMWNGQ49bNCEajdr0Ta/VcDhDy+aTKZVua8uU1zYot5evG2tXvi6ZctGaebnoMejUDwz5xSXVJik51VvW0FZxAo75W7J1YAd48eoeOLfsyvUybMuj/XDgIknZNVuGst6NelJdLi3lBKx5QNYwF0K0Woqi8NvxZADGdWtb7vbSS5Gu3HMeo1lhX4rmso07bVVpHpIpbzWq/ZueMWMGM2bMIDU1le3btxMXF0dBQQH+/v7069ePfv36odVK84HWqsv/t3ff8W1W1x/HP5Isy3vPbGeH7ISQkEUCSSDsvQmzlF3KhrasX9mrzFIoBcoom7ADITtkkL33juMRO95Tlp7fH4+lxImdeMuyv+9WLzQeSUfytaOje+65ieFcfWIn3vltF49/v5ExPRO9CfShsirLZOPDGzZTXlpRx/L1Ms/sXhv6FC9yGIvFQkxooLe3g0dty9c9v0cBVov3d7E5pcSFMmfzfi4c2oHR3WP5cXOzh+AzgztFc0KXGH7feYD3Fuzk/tN6+y6Y0Fi46D14/0xY+yV0GAYjbvZdPCIiPrJuXz7p+aWEBNo4seuRPbc8M+WZBWX8st7sh1JYYWFrZhHHdah5Kad3pjzIXuMx0rrU+euXuLg4zj333CYIRfzdbeO78cWSnezJKeGfs7fx54k9jzjGu6a8Hp3Xof4z5SpfFzHFhDq8SbkjwEpZhbvWSfmhjWfq06ixoW4/uTv920dyxoBkoG2Urh/qD2O78vvOA3y4aBe3ju/u28qfzifCpCdg2v3w818g4TjoepLv4hER8YHp683S9TE94qqdjPKsKc8rcVa5fvHOAxzXIbraxyyrcFHuMv+NU4Vn26GpbWk0YY4Azuts/hH555xt7Mo+cq1hY60pr/tMucrXReBgB3aAMT3MXQZqn5RXVpzUcvu0xhYb5uCCoR2q/eDTFpzSO4GucaEUlFbw6ZI9vg4Hhv/R3L/ccMFnUyB7m68jEhFpVjM2etaTH1m6DhAdYufQ77ADKz/HLt6RU+3xcLB0HfS5tS1RUi6NalCswchuMZRXuL0dkg+V7VlTXt/y9YbOlPsomRBpKWIOqVKZ1Nf8EFHbLdF81XldTFarhRsq15b/Z/6OY65JbHIWC5z9CrQ/Hkpz4eOLoaTmD5oiIq1JWl4Ja1PzsVjg5N4J1R4TYLMSFXywBP36UZ0BWLzjQI1/wz2l66GBNmzW5q9KE99QUi6NymKBO0/uDsCyXVU/nBmGcchMeX3L1+vXfd1XeyuLtDSxoeYXYsmRQfRrFwnUvtGbp3xdv0e+c9bAZABSc0u8nfB9yh4Ml34MER0geyt8ehVU1H6LPRERf+Vp8Da4Y9RRK0A968rDHQH8cUwKgVaDnGInmzMKqz3eM5GkJm9ti5JyaXQdY4IBs9Ok65DNGPNLK7xrZOpfvu6ZKa9fozeVAUlblxhh/u4N6RRNQuX57KJyKlzH/qKrUL9HPhdySLWPs45fTjaZ8ES4/BMIDIed8+Drm8DdQmITEWkinq3QJhxXfem6h6dC7ZQ+CYQ6AkgJNz8bL9pe/RbSnply/Vt7FL6uFGsC9U7Ky8vL2bRpExUVFcc+WNqU6JBArBZwG5BddHDGxNPkLcwRUO81ofWdKT/Y6K1trkUV8bhgaAf+OLYrf57Yk5iQQGxWC4ZhJubHUlha2ehNHxR8xma14KlmLK/FFynNJqk/XPIBWANg3Vfw6RWw+WeoOPa4EhHxN0VlFSzYZibVE2pYT+5xYtdY7DYLV4wwS9e7RxwrKTf/rVXn9RoUZcPLA2DG/4HLeezj/USdk/Li4mKuv/56QkJC6Nu3L7t37wbg9ttv5+mnn270AMX/2KwWYipLZA9dq+rZDq2+petwcKa83OWuMgt/LJ79lZVMSFsXF+bgwdP70D0hDKvV4v19zMw/dsmxp1xav0e+5WkUVN5SZso9uo2Hc94wz2/60Vxj/nwP+OY22DYTXPoSX0Rah3lbsiivcNMpJoQeCWFHPfbOCT1Y/cipDOsSA0CPSPPz6+IdB3BX81nWM5Gk/i01WPUx5O6GrdPNL4JbiTon5Q8++CCrVq1i9uzZBAUFea+fMGECn376aaMGJ/7Lsw/5oUm5ZyauvqXrcHCmHOr2gbRI63NEqpUQbv4dz6zFunJv8xkl5T5lt5l/B50taabcY+Al8IdZcMIfISzRbAC34gP44Dx4oRd8fxfs3+TrKEVEGmTGBk/X9YRjbhFqsVgIDjxYqdkpFILtVg4UlbM5s+CI4wu1Y1DNDAOWvmueH3ot+GB71qZS56R86tSpvPbaa4wePbrKIOzbty/btmk7FDFVl5R7ytcbkpQH2g4O2dJarisvq3DhdJnfRCqZEKkqofJ3tTbbounLrZbBszVkiypfP1T7IXD6s3DXBrj6O/ODU0gsFGfB0nfg3xMh88jdOURE/IHLbTBzo9nkbeIxSterY7PC0M7mHuWLth1Zwl6gnU5qtmMuHNhm9jDpf6Gvo2lUdU7K9+/fT0LCkW3/i4qKjvlNkbQdnpJYT7d1OKR8Pbz+5esBNisBlQsqa7uu3NPkDbQlmsjhqvsCrSYHv71XbwZf8s6UV7TwRjdWG6SMhbP+AXdvhiu/MrdPK8uDjy6CggxfRygiUmcr9+SSXVROeFAAw1Ji6vUYw7tUJuXbDxxx28FGb1pTfoSl/zH/O+BicIT7NpZGVuek/Pjjj+eHH37wXvYk4v/+97858cQTGy8y8WvVfdDfX5mge7Zkqi9Pk7iyitrNlHtm94Lt2u9R5HAHZ8qPXb5+MCnXBwVf8iTlLXamvDq2AOh+ClzxOcR0g7w98PFFkLPT15GJiNSJp3R9XK8E79/juhpemcwv3pF9xLpyT6M3VaUdpiADNn5vnj/+Wt/G0gTq/NN+8sknmTx5MuvXr6eiooKXX36Z9evXs2DBAubMmdMUMYofiq8sUd9fWE35enjDknJHgJXCMih11u4DqdbBitQsPqJyTXktGr0VlmoXg5agxTZ6q42QGDMxf2cipK2C14fD6D/DqD+Ze56LiLRw3q3Q+hxZOVxb/dpHEBJoI6fYyaaMAvokR3hv83wBHqGkvKqVH4K7AjoMM3f8aGXq/PXO6NGjWblyJRUVFfTv359ffvmFhIQEFi5cyNChQ5siRvFDB2fKD86+ZVcm5fEN6L4O9ZgpL1fJrUhNqvsCrSae3yWtc/OtFt3orTZiu8F1P5ul7RWlMPspMznf+GOr3HtWRFqP3dnFbM4oxGa1MK5n/ZNyu83K8ZXd2A/fGq1Q+5QfyTBg1Sfm+SFX+zaWJlKvn3a3bt14++23GzsWaUU8H/SrrCkvbHj3dTjY5KikvHZJ+cE9yvXHTeRwCRGV5et1mSlXbwaf8uuZco+4HjDlW1j3Nfz8F8jdBZ9cBn3Ph/PehICG/TshItIUPLPkw7pEExnSsKVcI7rGMHfzfhZtz+baUSne671ryvUF+EFpqyBrMwQEwXHn+DqaJlHnn/aPP/6IzWbj1FNPrXL9zz//jNvtZvLkyY0WnPivo3Vfj21oUl45U375vxeTEO4gKTKI5MggkiKCSY4MIjnKvNw+KoSkyCDvmnIl5SJHSjjkd9UwjKM27CxU9/UWIdBm/oz8dqbcw2KBfudDj0kw73lY8Bqs+wrKC+HiD8AedOzHEBFpRjM2ekrX6951/XAjusYCB/crt1b2PSrw7lOu/i1eaz43/9vzNAiKOPqxfqrOn6weeOABnn766SOuNwyDBx54QEm5AAeT8rwSJ2UVLlxug+LKme24BpavnzkgmS0ZBVS4DdLySknLK2VFDcc+OLk3UZXfZKoMSORInt/VcpebvBInUSE1/35q79SWwS8bvR2NIwwmPAopJ8H/LoMtv8BHF8LJf4WOw1vVPrQi4r/yS50sruyW3hhJef/2kYQG2sgtdrIxvYDj2pnJprfRm/6tNbldsOYL8/yAi30bSxOq8097y5YtHHfccUdc37t3b7Zu3dooQYn/iwy2Y7dZcLoMsgrLcVXuE+4IsDb4j8yt47tz00ndyC4sq0zKS0jLKyW9MkFPzytl14EiMvLLmL4+g8n9kwHNlItUxxFgIzLYTl6Jk/0FZTUm5Yd+saYPCr7VKsrXq9NtPFz5BXx0MeycB/85FaI6w4BLzFNcd19HKCJt2JxN+6lwG3SLD6VLXGiDH8+zrnxOZQn7ce0iKCh1kppbAkDHaDW/BMx/DwrTITgauk/0dTRNps6frCIjI9m+fTtdunSpcv3WrVsJDW34AJXWwWKxEBfmIC2vlKyCMlyVzXviwhyNsp+9zWohISKIhIggBnaMOuL2DWn5TH55HpsyChjTIx5QozeRmiSEO8grcbL7QDE9Eqvf99PT5A30BZevHWz01gqbonUZDdf/Agtfhw3fmmvN5z5rntoPNZPzvudDWLyvIxWRNmbmxkwAJhzX8FlyjxFdY71J+XWjU1i5JxfDgI4xwSREaAkPAKsrS9ePOxcCGlZt25LVufv6Oeecw5133sm2bdu8123dupW7776bs88+u1GDE/926LryrILG2Q6ttrrFhxFgtVBQWsHW/YWAmlOJ1GRIp2gAXvhlc43rlD29Gew2i7fZovhGoKd8vZY7UPidpH5w3j/hni1wwTvmmnOLDVKXwU/3wT/6w7qpvo5SRNqYDWn5AIxIiW20xxzR1bNfubmufNmuHACGVv673OYVZMD6qeb5Vly6DvVIyp999llCQ0Pp3bs3KSkppKSk0KdPH2JjY3n++eebIkbxU4duteTtvB7aPN9wBQZY6RpvVm4sr/wDp9k9kerdc2ovokLsrE/L562526s95uAe5QGNUu0i9ecpX2+VM+WHCgyB/hea+5rfvRFOe8bcm7aiBD6/Bhb/y9cRikgbkppTWVYe03hl5Z515XklTjak5x9MyjsrKQfg10fN5p/tBkPHEb6OpknVOSmPjIxkwYIF/PDDD9xyyy3cfffdzJgxg5kzZxIVFdUEIYq/8mx9tr+gzNt5vaHbodVFz8oyXM/aHK2DFalefLiDh880e4W8PGMLWzMLjzhGTd5aDntl9/VW0+itNsISYMRNcOMcOP56wDBnzX+8D1xOX0cnIi1ceYWbS/61kJs/XEZ+ad3/ZuSVOL1d0dtHhTRaXAE2K8NSzNnyBVuzWbk7F4AhSsphz++w6mPz/OkvgLV1V+nV69VZLBYmTZrEvffey2233cbYsWMbOy5pBTzl61mFZSyt/OavMRpj1FbvpKprY7WNk0jNzhvcnpN6xlNe4eaBL1fjdledhVVS3nK02kZvtWG1wRkvwCkPm5d//xe8f7ZZ4igiUoM1qXks3nGAn9amc/GbC0nLK6nT/T2z5LGhgQQHNm6PIs/WaB8t3kVBWQWhgTZ6J7XObb9qze2CH+8xzw++EjoM9W08zaBWSfkrr7xCaWmp9/zRTnUxd+5czjrrLNq1a4fFYmHq1KlVbn/00Ufp3bs3oaGhREdHM2HCBBYvXlyn5xDf8STlWzMLWbA1C4BJfRuvOcax9DrsD5rK10VqZrFYeOK8foQG2li6K4cPF++qcnuRkvIW42CjtzaYlIO5RdqYu+HSj8ERAbsXwL/Gwm59PhCR6m3NLPCe35hewPlvLGBjen6t7783pxiA9k3QEd2TlO/MNp9jcKdobNY2vkxsyTuQtgockXDKo76OplnU6tPVSy+9xBVXXEFQUBAvvfRSjcdZLBbuuOOOWj95UVERAwcO5LrrruP8888/4vaePXvy2muv0bVrV0pKSnjppZeYNGkSW7duJT5enVdbOk9SvmBbNgA9E8PoFh/WbM/f67Au0uq+LnJ0HaJDuH9ybx7+Zh3P/LSRk3sn0CHaLNMrOGRNufhWYFtPyj16nwF/mAWfXgH7N8J7p8NpT8OwG7S3uYhU4VmWdVrfJLZkFrBtfxEX/XMh/7pqKCO7xx3z/p6lkB2aICnv1y6CMEeAtyKtzZeu5+wy15IDnPK3NrPbRq1mynfs2EFsbKz3fE2n7durbxBUk8mTJ/P3v/+d8847r9rbL7/8ciZMmEDXrl3p27cvL774Ivn5+axevbpOzyO+cfj68dP6JTfr83eIDibkkBIjdV8XObYrh3fm+M7RFJW7+MvXazEqtzP8Zb1ZHtwUH0ikbtp0+frh4rrDDTOg73ngrjDLHRfUrWpPRFo/T1I+pmccX948khO6xFBQVsHV7/7O1BWpx7z/3sry9fZRjf9vYIDNyrAuBxPxNt3kzTDguz+Bswg6jazsIdI21ClLcTqd9O7dm++//54+ffo0VUzVKi8v56233iIyMpKBAwfWeFxZWRllZWXey/n5ZmmK0+nE6VQzmKbkeX89/40Orvqdz8Tecc3+M+iREMaqvXkAOGxoDLQwh48ZaRmeOOc4znpjIXM27+eLpbvpkRDG9PUZWC1w1fCOLeLn1ZbHjhXzi5JSp6tNvv4jWB1wzltY43pjm/MUTH+YipAEjH4XVnt4Wx470jg0hvzPlgyzfL1LTBChdgv/mTKY+75ay49rM7jz05XsyS7kj2NTatxdZM+BIgCSIxwN+rnXNHaGdYli1qb9WCzQLym0bY4tZzHW+S9g2z4Lw+ag4vQXweUyT36qLj/HOiXldrvdu7a8uXz//fdceumlFBcXk5yczPTp04mLq7nM5KmnnuKxxx474vpffvmFkJDG65YoNZs+fToApS7wDLH4IINty+axvZkrCoPLrXgKQpYunM8uTfK1SJ4xIy3HpHYWvt9t45Fv1tAuxACsDI51s2nJHDb5OrhDtMWxsz3VAtjYsWs3P/6409fhtCB96Bd/Kt32/4z121tZs3wRu2NG47ZWvxVnWxw70rg0hvxDmQtSc22AhZ2rFpG13rx+YhiUJFuZlWblhV+38tPSzfSJMugRaZB8WMqwfpd5/7St6/jxwNoGx3T42LEVg81io0sYzJ/VtsaVxV1B5+zZ9Er/BnuFOZG2PvFcti7eDGz2bXANVFxcXOtjLYanNrGWnnzySTZv3sy///1vAgIarxzYYrHw9ddfc+6551a5vqioiLS0NLKysnj77beZOXMmixcvJiEhodrHqW6mvGPHjmRlZRER0cY7GTYxp9PJ9OnTmThxIna7HcMwGPB/Myh1uvnjmBTumdSj2WN6b+EunvjRTCEW3n9Ss27JJsd2+JiRlsPpcnPBm4vZkG7OLlgtMO2OUaQ04w4KR9OWx867C3bx5E+bOLN/Ei9dPMDX4bQshhvb13/AuuEb82JILO4h1+Aeci2EJwFte+xI49AY8i/r9uVz7j8XER1i5/cHxx9x+/sLd/HET5s4NCP65IZhVcrIT3hqFjnFTr6/9UR6Hba7T10cbezsyCoiOiSQqJA2MqYMN5Z1X2Kb8wyW3J3mVZGdcJ30AEa/i1pFb5D8/Hzi4uLIy8s7Zh5a56x6yZIlzJgxg19++YX+/fsTGlr1A9pXX31V14c8qtDQULp370737t0ZMWIEPXr04J133uHBBx+s9niHw4HDcWTiZbfb9YezmRz6XvdOimBNah7nDO7gk/e/b7so7/mo0GDsdjV7a4n0+9ny2O3w3EUDOef133C5Dc4d1J6eyVG+DusIbXHsBFf2x3AZtLnXXisX/Bt+HwaL/4Ulbw+2+S9gW/AK9LvA3Os8vh/QNseONC6NIf+w84BZ5dsjMbzan9cNY7szvGs8szZl8uOaNDamF/D1ynRGdDcnAIvKKsgpNsuQO8dX/xh1Vd3YaYn/xjYJw4DNP8PM/4OMyqqD0AQ46T4sQ64mIKD66iZ/VJexUuekPCoqigsuuKCud2s0bre7yky4tGxvXjmUrMIyjmvnmyqFPskRBAZYCQ20EWSvVV9DEanUr30kD595HFNXpnLXpJ6+DkcqqdHbMQQEwsjbYfjNsPF7WPRP2LMIVn8Cqz/B1nEEESFn+jpKEWkmWyq3Q+ueUPMOQP07RNK/QyTDusRw2duL+GltGo+f2xdHgM3beT0iKIDwIH0J0yDFB2DqLbD5J/OyIwJG/QlG3AyBLaMSz1fqnJS/++67jfbkhYWFbN261Xt5x44drFy5kpiYGGJjY3niiSc4++yzSU5OJisri9dff53U1FQuuuiiRotBmlZSZBBJkUE+e/7o0EA+++OJOAKsNTbvEJGaXT2yC1eP7OLrMOQQnn3Ky9v6lmjHYguAvueap9RlsOhNWPcV1j2LGGHfBEUXQlTz7goiIs1jyc4DLN+Vw3WjU7yd13scJSn3OCElhsQIBxn5ZczZtJ9JfZNIzfFsh6beVA2yZwl8fg3k7wVboJmIj7oTQmJ8HVmLUOupQ7fbzTPPPMOoUaMYNmwYDzzwACUlJQ168qVLlzJ48GAGDx4MwF133cXgwYN5+OGHsdlsbNy4kQsuuICePXty1llnkZ2dzbx58+jbt2+DnlfalkEdo+iTrH4CItI6eJNyzZTXXvuhcMHbcOcajNgeBDtzsH13G7j1Hoq0Ro99t46nftrIf+bvYEtlUn60mXIPm9XCWQPaAfDtqn0A7M0xm3W115ag9WMYsOBVePc0MyGP6Qo3/AoTH1dCfohaz5Q/8cQTPProo0yYMIHg4GBefvllMjMz+c9//lPvJx83bhxH6zPX2OvTRURE/J2nfN2pmfK6i2hHxXn/xvrOBGzbfoWFr5qlkyLSqmQXlgPw2sytFDvNLbV6JNSuQdvZg9rx7/k7+HVDBkVlFezN9cyUKymvs/Ii+PIG2PSjebnveXDWKxCkybLD1Xqm/L///S9vvPEGP//8M1OnTuW7777jo48+wq1vmUVERJpNoM2TlNdp8xTxSOzLmg5XmOdnPA7pa3wbj0gLtT57PYvTFlNS0bDKWF8oLK0AoKCsApfbIMwRQGJE7Xbg6d8+ki6xIZQ63fyyPt1bvt4+Skl5nf36qJmQ2wLhjBfgwneVkNeg1jPlu3fv5vTTT/denjBhAhaLhX379tGhQ4cmCU5ERESqUqO3htsVO54BwZlYN/9oNh36w0ywqYGTyKE+2vAR3277lgBLAMfFHkeXyC4khiQSHxJPQnACCSEJxIfEExsci93acn5/DMOgsLyiynXdE8Jq3VvIYrFw9qD2vDJjC89O20RQ5c49WlNeR7sXw+9vm+cv+x90n+DbeFq4WiflFRUVBAVVbdhlt9txOp2NHpSIiIhUz25T+XqDWSy4Jj+Hdc9CSF8N8/8BJ93r66hEWpRoRzSJIYlkFGewOms1q7NWV3uc1WKlX1w/Tk85nVO7nEpccFwzR1pVcbnLu+f4ST3jmbN5P32S67a3+PWjUvhh9T627S/yXqfy9TqoKINvbwcMGHSlEvJaqHVSbhgG11xzTZU9wEtLS7npppuq7FWudeAiIiJNx24zZ3vKNFPeMGGJMPk5+OoGmPMMxPUwO7WLCAD3DLuHu4+/m31F+1iVuYp9RfvYX7yfzOJMMksyySzOJKs4iwqjgtX7V7N6/2qeX/I8EzpP4JJelzA0cahPdr4pKjNnyS0W+Mclg/hg0S4uGFq3qt7IEDv/vX44F7yxgPR8c59zla/XwbwXIWsThMbDpP/zdTR+odZJ+dVXX33EdVdeeWWjBiMiIiJHp0Zvjaj/hbDhW/P0+dWw5Qo47WmteRSpZLFYaB/WnvZh7au93W24ySjKYMbuGXy//XvWZa9j2s5pTNs5je5R3bmk1yWc3vV0IgKb73eqoDIpD3MEEB0ayB2n9KjX47SPCua/15/A5W8vJj7cQVRIyynRb9EyN8C8F8zzk59Vh/VaqnVS3pj7k4uIiEj9BKp8vfFYLHDBOzDnaZj/Eqz8CHbOh/Pfgk4jfB2dSItntVhJDkvmyuOu5MrjrmRD9gY+3fQpP+74ka25W3li8RM88/szDEkcwrCkYSSFJpEQbK5FTwhJICIwotFn04sOScobqmdiOPPuG4/dZvHJrL/fcbvMsnW3E3pONrutS600fLSKiIhIs9E+5Y0sIBBOeRi6T4Svb4TcXfDuZBhxC4y8HcKTfB2hiN/oE9uHR0c+yl3H38V3277ji81fsDV3K7+n/87v6b8fcXygNZD4kHg6hndkSOIQjk88ngHxA3DYatcpvTqezuuNkZQDBAfaGuVx2oQl78DeJRAYbnZb1xcZtaakXERExI8cLF/XlmiNqvOJcNNv8NP9sOpjWPgaLP4X9DvfTNDbDfJ1hCJ+IyIwgiv6XMEVfa5gT/4eZu+dzaYDm8gqySKzJJP9xfvJLcul3F1OamEqqYWpLEpbBJiJev/4/gxPGs6J7U6kb1zfOnV3L6ycKQ9tpKRcaqGsEOY+Z/7dBJjwCERWv+RBqqfRKiIi4ke8M+UuN4ZhqKSyMQVFwHn/NBu+zX8Jdi+E1Z+ap04jYcTN0PsMsGrmTKS2OkZ05Krjrjri+jJXmbdx3JacLSzNWMqS9CVkl2azLGMZyzKW8caqNwi1hzIsaRjDk4bTLqwd0UHRRDmiiHJEEREYge2w30dPUh4epDSnyRkGrP8Gfn4I8lPN6/pdAMdf79u4/JBGq4iIiB/xzJSDOVseGKCkvNH1PNU8pS6HRf+EdV/B7gXmKaoTnHQ/DFazW5GGcNgcdAjvQIfwDgxJHMIlvS/BMAx25u9kSfoSFqctZnH6YvLK8pi9Zzaz98w+4jEsWIh0RBITFEPP6J70j+vP9uwQrEFZ2Ow2ip3FhNi1v3iTyNoKP90L22aal6M6m43dep3m27j8lJJyERERP+Jp9AZms7dDk3RpZO2HwAVvw8THYcnbsPQ/kLsbvrkVgqKgz5m+jlCkVbFYLKREppASmcLFvS7G5Xax8cBGFqYtZFXmKg6UHiCnLIfc0lwKnAUYGOSW5ZJblsv2vO1M2zkNgNAUWGbA8I/NcniLxYLL7cLAwIIF8/+V/7NYsFqsBNmCiHBEEBsUS4/oHvSK6UWv6F70iO5BcIC2Q/MqLza7qy94BVzlYHPA6Dth9J/BrvepvpSUi4iI+BHPPuVgNnsLrX8/JKmtiGSzGdyYe+CXv8LSd+Db28ykPaKdr6MTabVsVht94/rSN67vEbc53U7yyvLILc0lsziTtdlrWZO1hlVpO8kuySHAXowbJ+Xu8iMfuJqWHCUVJeSU5bArfxfLM5d7r7dgoV1YOzqGdyQpNIkgWxDBAcE4Ahw4bA6CbEEEBQRhs9jIL88nryyP/PL8g6eyfEoqSrBb7QRYAigoKOD7md/jCHAQaAvEbrVjt9oJtAWaJ2sgAdaAKpcDbYEkhCTQPao77cLaYbX46MvYsgJ4+xRzD3KA7hPM2fHYbs0eSrmrnEBbYLM/b1NRUi4iIuJHAmxWrBZwG9oWrdkFhpj7mKcuhbRV8NWNMOUbrTEX8QG71U5ccBxxwXF0j+7OyPYjAXjkm7W8v2YXt47rxi2ndCSvLA8L5my4xWLBMAyMyqzcc95tuCmpKCG/PJ+0ojQ252xm04FNbDqwiezSbG8zusayM31nve9rtVi9XwY4Air/a3PgCHAQbg8nMTSRxJBEIh2RhNpDCbSaiaun/4iFyi92LQfPe/97yDEOm4PwwHAiAiOIcEQQHhiOY/bTZkIemgBnvgi9z2y2Duu5pbn8nv67d1lD75jePH/S883y3M1BSbmIiIifsduslFW4KVdS3vwCAuGC/8C/xsLOeTDrSTjlb76OSkQqFZa5AAgLshNqDyXUHtqgx8sqyWJX/i5SC1PJLM6ktKLUPLnM/5a5yih1leJyu7xJbKQj0pvMRgRGEGIPocJdQWl5KYuXLqbvwL64LW6cLnMmv9xVTrm7HKfLidPt9F4ud5nXlbnKSC1MZXvedpxuJyUVJZRUlEBZY7xjtRdoGIR3bE9QaDysfx3L+jewWCzeLz0A72XP0gCLxYLNYjNPVhsBlgBsVhtWi9V7/tDbbRYbAdYArBYrbsON0+1kZ95ONh7Y6P0yBaDIWdSqmp0qKRcREfEzgQGVSbn2KveNuO5w5kvmvubznof43jDgIl9HJSJAYZkTgLBG6r7umY0fmji0wY/ldDopWl3E6SmnY7fXfps3jwp3BTmlOVW/EDjki4H8snwyijPILM4kvzyfwvJCKtwVBysDDqsQ8Jw/lIGBYRiUucrIL8+noLyAgnJz/X65xUJ2gA3KDjT7FwIA3aO6Mzx5OMOThnN80vGtJiEHJeUiIiJ+x9PsTXuV+9DASyBzHfz2stn4LaYrdGj4h3YRaZgiz0y5o/UtKwmwBhAfEt/sz+te/BbF0+6jICic/Ms/piwo0pu8A7gNt/dylf9WLg1wG25cbhcuo/LkdlFhVHivrzAqDt5+yHGe2fT4kHiGJQ0jLjiu2V97c1FSLiIi4me8e5Vrpty3TnkEsrbAph/h86vh5t8gKNLXUYm0aQWV+5SHOeo+Ey3VyN6G9ddHCDMMwk76C8kdx/g6olZJ+6iIiIj4Gc82aFpT7mNWG5z/FkR3gbw98ON9vo5IpM0rqkzKQ1vhTHmzc7tg6s3gLIYuY+CEG30dUaulpFxERMTPeLZFU/f1FsARDue9BRYrrP4E1n3t64hE2rTCUjMpD9dMecMteBX2LIbAcDj3DbAqdWwqemdFRET8jMrXW5hOw2H0Xeb57+6EnJ2+jEakTdNMeSM5sANmP2WeP+0piOrk23haOSXlIiIifsYR4Gn0pqS8xRj3ALQfCqW58MmVUF7k64hE2hzDMCgsr1xT3kjd19skw4Cf7oOKUkgZC4Ov9HVErZ6SchERET9jtykpb3Fsdrj4AwiNh4w18M1t5gdbEWk2xeUu769dmENJeb1t+A62/AJWO5z+ArSircdaKiXlIiIifsbT6K1M5estS2R7uPi/YA2AdV/BP0fCsvfAWeLryETahMLK0nWrBYLtKl+vl9I8+Ol+8/zoOyG+p0/DaSuUlIuIiPgZu/Ypb7k6j4SzXwN7KGSuh+/+BC/2gV8fhbxUX0cn0qoVerdDC8Ci2d36+fkhKNgH0Skw5m5fR9NmKCkXERHxM2r01sINugzuWg+T/m42RyrJgfkvwT/6w+fXwJ7ffR2hSKvk6byu0vV62jQNVnwIWMxu6/ZgX0fUZigpFxER8TNq9OYHgqNg5O1wx0q45ENzj1/DZW6Z9s5E+O1lX0co0up4Oq+ryVs9ZG2F7+4wz594q1n1I81GI1ZERMTPaJ9yP2K1QZ+zzFP6Gljwmrmf+fSHzfLQ4872dYQirUaBdzs0pTi1YhiwexEsfA02/gAYENcLTv6bryNrczRiRURE/IynfF2N3vxMUn84/18QFAG/vwVf3QiRHaD9EF9HJtIqFJWpfL1WXBWw8TvzS8LUpQev73GquSe5Pch3sbVRGrEiIiJ+JlDl6/7t1KfgwA7YOh0+uRxunAPhib6OSsTvFSopPzpXBSx9Bxa+Drm7zOtsDhh4qVmyHt/Lt/G1YRqxIiIifkaN3vycLQAu/A/8ewJkbYLPr4Yp30JAoK8jE/FrSsqP4ddHzFJ1gJBYGHYDDPsDhMX7Ni5RozcRERF/o0ZvrUBQBFz6ETgiYPdC+PlBX0ck4vc83de1prwaqctg0Rvm+Ul/hzvXwviHlJC3EErKRURE/Iz2KW8l4nrA+W8DFljyb9g+x9cRifg1z5rycHVfr6qiHL65HQw39L/Y3BkiMMTXUckhlJSLiIj4GTV6a0V6nQYn/ME8//NfwO3ybTwifkzd12vw28uQuc4sWT/taV9HI9VQUi4iIuJn1OitlRn3IARFQsYaWPmxr6MR8Vvqvl6N/Ztg7rPm+dOegdBY38Yj1VJSLiIi4me0T3krExIDY+8zz8/8Pygr9G08In5Kjd4O43bDt3eAqxx6TIL+F/o6IqmBknIRERE/42n0pu7rrcgJf4DoFCjMgLnP+ToaEb9UWGYu/1BSXmnpO7BnEQSGwRkvgsXi64ikBkrKRURE/MzBRm9KyluNAAec+qR5fuFrkLnBt/GI+KHCUiegNeUA5O6GXx8zz5/yCER19G08clRKykVERPyMGr21Ur1Ph15ngLsCvv+zWXoqIrVWVDlT3ua7r7vd8M2tUF4AHUeY+5FLi6akXERExM+o0VsrNvkZsIeae5f//hYY2vZOpLYK1X3dtPQd2DEX7CFw7htgVcrX0uknJCIi4me0T3krFtURxj9onp92P/z3bNi3wrcxifgBt9tQozeA7G0w/WHz/ITHILabb+ORWvFpUj537lzOOuss2rVrh8ViYerUqd7bnE4n999/P/379yc0NJR27doxZcoU9u3b57uARUREWoDAALNZjxq9tVLDb4bRfwabw5ztemscfHkD5Oz0dWQiLVax0+U932aTcrcLpt4CzmJIGauydT/i06S8qKiIgQMH8vrrrx9xW3FxMcuXL+dvf/sby5cv56uvvmLTpk2cffbZPohURESk5Qi02QCVr7datgCY8CjcvhQGXGJet+ZzeG0Y/PwXKD7g0/BEWqLCUnOW3Ga1EGRvo8XAC1+v7LYeDue8rrJ1P+LTr5EmT57M5MmTq70tMjKS6dOnV7nutdde44QTTmD37t106tSpOUIUERFpcTz7lGumvJWL6gTnvwUjbjHLUXfMMTuzr/gATn8BBlzk6whFWoy8ksrO64E2LG1x66/MjTDz7+b50540/36I3/Cr2o68vDwsFgtRUVE1HlNWVkZZWZn3cn5+PmCWwzudzqYOsU3zvL96n6W2NGakvtr62LFiJuPlFa42+x7Ul1+Onfi+cNkXWLbPxDbzMSyZ6zG+uZWKdkMhUtscNTe/HENtwMJt+wHomRjWYn82TTJ2Ksqwrngf628vYXGV4e42AVe/S6GFvgdtSV1+zhbDaBltPS0WC19//TXnnntutbeXlpYyatQoevfuzUcffVTj4zz66KM89thjR1z/8ccfExIS0ljhioiI+MzeInhudQCRdoPHj3cd+w7SehhuRm59hvjCDeyNGsGylFt8HZFIi/D2Ritrc6yc2cnFxPYtIr1pUhajgo7Z8+mVPpUQp7mkpdCRxG89HqTUHu3j6ATM5diXX345eXl5REREHPVYv0jKnU4nF1xwAXv37mX27NlHfVHVzZR37NiRrKysY74Z0jBOp5Pp06czceJE7Ha7r8MRP6AxI/XV1sfOloxCTn9tAdEhdn5/cLyvw/ErrWLspK8h4J2TsWBQcfWPGB1O8HVEbUqrGEOtTFmFmxOemkVxuYtvbhnBcckt8zN/o4wdw41l3VfY5j6DJWeHeVV4Mu7Rd+MeeAXYNCZbivz8fOLi4mqVlLf48nWn08nFF1/Mrl27mDlz5jFfkMPhwOFwHHG93W7XH85movda6kpjRuqrrY6dkKBAwNwSrS2+/sbg12On4xAYfCWs+ICAX/8G1/+qhk4+4NdjqIVzutzerR9r4/ddWRSXu4gPdzCgY0yLX1Ne77Gz8zf48R7IXG9eDomDMXdhOf56bPYgbI0bpjRQXX7GLTop9yTkW7ZsYdasWcTGxvo6JBEREZ+zB5gfVtXorQ07+W+w7mtIXQbrp0K/830dkUiDpOeV8tqsLSzdmcOmjALO6J/Ma5cPqdV9Z2/KBOCknvEtPiGvt9w98PElUF4AjkgYdbu5faIjzNeRSSPwaVJeWFjI1q1bvZd37NjBypUriYmJITk5mQsvvJDly5fz/fff43K5SE9PByAmJobAwEBfhS0iIuJTgZUzSOUuN4ZhtN4PoVKz8EQYeTvMfsrsuNznLJWtil97e952Ply023v5xzVp5Jc6iQg69ries9ls8nZSz/gmi8+nDAO+u8NMyDsMgys+h2CtG29NfFrrtHTpUgYPHszgwYMBuOuuuxg8eDAPP/wwqampfPvtt+zdu5dBgwaRnJzsPS1YsMCXYYuIiPhU4CFlnRXuFtEaRnzhxFshJBYObIMVH/o6GpEG2ZVdDMB1o1LoEhuC24CF27KPeb99uSVszijEaoExPeKaOkzfWPkRbJsJNgec84YS8lbIpzPl48aN42h95lpIDzoREZEWxR5wcGa8vKJuay+lFXGEw9h7YdoDMOcZGHgp2IN9HZVIvezLLQFgdI9YKtxudi7cxW9bszi1b1KN98kvdfLCL5sBGNQxiqiQVlhJm78Ppj1knh//EMT39G080iT0r7iIiIifOXSm3OnSuvI27fjrzL3KC9Lg97d8HY1IvaXlmUl5u6hgRnU3Z7znb82q9li32+DzpXs4+fk5fLl8LwCXDuvUPIE2J8OA7/8MZXnQbgiceJuvI5ImoqRcRETEz9isFjzLyMuVlLdtAQ5z9gxg3otQkuvTcETqo7i8gpxiJ2Am5Sd2i8Vqge37i7wz6B4r9+Ry3j8XcO8Xq8kqLKNrXCjvXTuMi4d19EXoTWvN57B5GljtcO4bYGvRPbqlAZSUi4iI+BmLxeItWVcHdmHAJRDfG0pzYcGrvo5GpM725ZYCEO4IICLITkSQnYEdo4CDs+X7C8q474tVnPv6b6zak0tooI2HTu/NtDvHMq5Xgq9CbzoFGfDTfeb5k+6HhD6+jUealJJyERERP+SoTMqdLvVfafOsNnOLNIBFb5gf5kUa6Nf1GVz4zwXsyi5q8ufyzIYnRwV5rxtdWcI+b0sWHy7axcnPz+azpWap+vlD2jPrnnHcOLYbgQGtNJ355S9QkgNJA2D0nb6ORppYKx3FIiIirZv2Kpcqep8B7Y8HZzHMfc7X0Ugr8NnSPSzdlcNXy1Ob/Lk8SXm7qIONCj1J+Xer9vHXqWspKKugf/tIvrx5JC9ePIiEiKBqH6tV2PmbWbqOBc5+RdsdtgFKykVERPxQoHemXEm5ABYLTHjEPL/sPcjb69NwxP/ll5prvNfty2vy59qXZ5avH5qUD+4UTUigDYAgu5WHzzyOqbeOYmjnVr4dmKviYNn60Gug3WCfhiPNQ0m5iIiIH/Jsi6ZGb+KVMha6jAG3E+a/5OtoxM8VlFYAsCa1GZLyypny9ock5YEBVh46vQ9nD2zHz3eO5brRKdislpoeovVY+h/IWAtBUXDKw76ORpqJknIRERE/pEZvUq2T7jf/u/y/kNf0ZcfSenmS8oz8MjILSpv0uQ6Wr1ctSb9yRGdeuWwwnWNDm/T5W4ysLTDjMfP8KX+DkBjfxiPNRkm5iIiIH1L5ulQrZQx0HgWucs2WS4MUllV4z69LzW/S5/Im5ZHBxziyFSsvhs+mQHmhWfEy9FpfRyTNSEm5iIiIHwpUozepiXe2/H1Y+yUY6tAvdWMYBgWVa8qhaUvYDcOodk15m+Isge/vhMz1EJYIF7xj7qogbYaSchERET9k10y51CRlLPSYZM6Wf3EdfHYVFGb6OirxI2UV7irbLa5twqQ8u6ic8go3FgsktuaO6oczDNizBL67E57vBas/BYvVTMjDE30dnTSzAF8HICIiInXnKV8vb8A+5YZhYLG0gcZJbY3FApd8BPNegHnPw4bvYOd8mPwc9L/QvF3kKDzryT2aMin3lK4nhDta757jhwhy5mBd8Aqs+QSyNh+8IbITnPxXcwmKtDlKykVERPyQZ5/yMqerXvffklHA+W8s4KLjO/LwWcc1ZmjSEgQEwvgHzf3Lv7kF0tfAVzfAuq/hzBchPMnXEUoL5ildDwywUl7hZl9eKdmFZcSGORr9uarbo7xVKsrGNvUWJm35GQuVX6YGBMNx58Cgy8115NbW/6WEVE8/eRERET/k2TpoU3pBve7/3oKdFJRV8OXyvbjdWnPcaiUPgD/MgvF/BasdNv0Arw+H1Z/5OjJpwTwz5XGhgaTEmZ3P1+5rmmZvqbltYD25YcB3d2DdMg0LBu6OI+DsV+GezXD+v6DrSUrI2zj99EVERPzQiK7mVjkLt2fX+b6lThffrtoHQF6Jky2ZhY0am7QwNjucdC/8cQ4kD4LSXPjqD7Bluq8jkxbK03k9LCiAfu0jgaYrYT/Yeb0Vrydf+yVs/B7DGsDcnn/DNeV7GDIFgiJ8HZm0EErKRURE/NCJXWMBWJ+WT16x8xhHV/XzuvQqa0Z/33mgUWOTFiqxL9wwAwZfZV7++S/gqjj6faRN8pSvhwfZ6dfOTBxX781tkudKy2vl5esFGfDjPQC4R91FTmgPHwckLZGSchERET+UEBFE1/hQDAMW76h5ttwwDHZnF/PNylTmb8nCMAy+WLYXgOgQOwC/71BS3mbYAmDS3yE4BrI2mdumiRwmv/JLu/CgAEZUfgE4a9N+DhSVN9pz5BaXs3x3jncJTqtMyt1u+PZ2KMmBpP64R/3Z1xFJC6VGbyIiIn5qRNdYtu8vYtH2A0zqazbuKi6vYNWePFbsyWH5rlxW7skhq/DgB+mxPeOZvzULgAcn9+G+L1ezZMcBdWJvS4KjYNwD8NN9MOtJ6H+RymiligJvUm5nQIdI+rePZE1qHp8s2c0t47rX+fE2ZxTw89p0dmQVsSO7iB1ZReQeVuHTMTqkUWJvURa8Alt+BpsDzn3TXEoiUg0l5SIiIn7qxK6xfLx4Nwu3Z1Ne4ebPn61k2tp0XIc1brPbLPROimBjej5zN+8HYHhKDGcNbMdfpq4hPb+UvTklrNiTy/M/byLYbiMqxE50SCBRIXaiQgKJDrGTHBXMaX2TvNsWPTttI2tS83j1ssFEhQQ2++uXBjj+Ovj9LcjeCvNfggmP+DoiaUEKK5PyMEcAFouFq0d24Z7PV/HRot3cOKYrAba6Fdte//4S9hwoOeL6pIggUuJCGd41hj7J4Y0Se4uxayHMeNw8P/kZSOoHzrotNZK2Q0m5iIiIn/KUlW5Iy+ehr9fww+o0wGyYNLhTNIM7RTG4UzR920UQZLexMT2fez9fzdp9efxhTFeCA230ax/Jit25fLtqH/+cvc3b4KkmfzvzOK4fnUKp08W/5m7H5Ta4+7NVvD3leKxWzbT7DZsdJj4On1wOi94wk/Sojr6OSloIz5ryiCAzVThzQDJP/riB1NwSft2QyWn9ar+lntttkJpjJuS3ju/GccmRpMSF0iUuhJDAVpqKlObBF9eB4YL+F8PQa3wdkbRwrfQ3QUREpPWLD3fQPSGMrZmF3nXib145tMYPzL2TIvjm1lHklTiJDjVntk/oEsOK3bk8/8smDAMGd4rirok9ySl2kltcTk6Rk9ySctbszWPprhwWbc/m+tEprE/L987Iz9iYydvztvPHk7o1zwuXxtHrdHNv5J3zzBm9C972dUTSQhQcsqYcIMhu49JhHXlj9jbeX7CzTkl5YXkFnuKd20/uQZDd1ujxtji/PgYF+yCmK5z5EmhpkByDGr2JiIj4MU8XdoAbx3Y95odlq9XiTcgBhnUxt1YzDAi0WXn2ggGM6RHP2QPbMeXELvxpQg8eOasvD0zuDcCK3bkYhsHqPbkARFU2i3v2502s2J1T4/PmFTu5438reH/Bzvq8TGkKFovZ9A0LrPkMUpf5OiJpIbxbojkOzt9dOaIzNquFhduzvc3ZaiO/xJx1Dwywto2EfPciWPqOef6sV8AR5tt4xC8oKRcREfFjE49LBGBYl2juPbVXne8/rEuMdxLn9pO70yOx+nWd/dpHEmC1kFVYRmpuCav3mnsWX31iF87on4zLbfDhot3V3tfpcnPzR8v4dtU+nvppA6VOV53jlCbSbhAMvNQ8//NfzW9npM3LP2RLNI92UcFMqvx78/7CnbV+rLzKpDwyuA00Oasog2/vMM8PvhJSxvg2HvEbSspFRET82Nie8fz0pzF8eMNw7HVsvgQQGWLnnkm9uOyETtw0ruby8yC7jT7JZofulXtyWVW5Z/HAjpFcPrwTAHM278d9WJM5wzD469drWbDN3Lat1OlmsbZga1lO/hsEBMHuBbBthq+jkRbg8PJ1j6tHdgHg6+Wp5BXXrmlZm0rKf3vZ3GowNB4m/p+voxE/oqRcRETEz/VJjsARUP+y0FvHd+ep8/sfM6kf1DEKgPlbstieVQTAgA5RHN8lmpBAG1mFZaxPy69yn7fmbufTpXuwWuC4yqR+zqb99Y5VmkBkexh2g3l+5t81Wy7eRm9hhyXlw1Ni6J0UTonTxefL9tTqsfLbSlKetQXmPmeeP+1pCInxbTziV5SUi4iISK0M7hQFwDcr92EY0D4qmLgwB44AGyO7xQHmbLnHtLVpPD1tI2B2bb/9ZHN/49mbM5s3cDm2UXeCPRT2rYCNP/g6GvExz5ryiKCqibRnezSA/y7cdcT2i9VpEzPlbjd89ydwlUP3idDvAl9HJH5GSbmIiIjUimemvKRyTfiADpHe207qFQ/A7E1mwr16by53froSw4ApJ3bmmpFdGNUjDpvVwvb9Rew5UNxkcRaWVWBotrduwuJhxM3m+VlPmEmGtFk1la8DnDuoPZHBdnYfKPb+vh+NJymPqOaxWoWKMpj9FOz6DewhcMYL6rYudaakXERERGolJS60ymzXgA5R3vPjeppJ+fLduWxKL+CG95dS6nRzUs94Hj7zOCwWCxFBdoZ2igZg9uamKWF/77cdDHj0Z/7x65YmefxWbeRt4IiEzPWw7itfRyM+UuFyU1xufvEWHnTk7HZwoI1Lhpl72r9Xi90UWu1MuasCVnwIrw6Fuc+a1538V4ju7Nu4xC8pKRcREZFasVgs3tlyqDpT3jEmhK7xobjcBhe+uYDMgjJ6JYbz2uWDCThkrbpnRn1OLWbY6uqblak8+t163Aa8PW87OUXljf4crVpwNIy63Tw/60kz6ZA2p6js4O4Ih26Jdqgrh3fGYoF5W7LYtr/wqI+XX2KOo1aTlLvdsPZLeGMEfHMr5O2B8GQ48x8w4hZfRyd+Skm5iIiI1NqhSXm/9pFVbhvXMwEwS1/jwhy8c83xR8y0nVQ5o75gWzaZBaWNFtf8LVnc8/kqABwBVorLXfx34a5Ge/w2Y/hNEBILB7bB6k98HY34gGc7NEeAlcCA6lOFTrEhnNLb/H3/7zFmy73l6/6elBsGbJoG/xoLX1wH2VsgOAYm/R3uWAHHX6uydak3JeUiIiJSayekmB2FeyWGHzHzdUof80O6I8DK21OG0iE65Ij7920XQVJEEMXlLkY/PYt7Pl/F+n35RxxXV//3/XqcLoOzBrbj2QsHAPDegh0Ul2u2t04c4TD6z+b52c+Y62WlTTm4nvzoSbSn4dsXy/Z6u7VXp1WUrxsGfHs7/O8SyFgDjggY9xD8aRWMvB3swb6OUPycknIRERGptZHdYnnx4oH849JB1d721Pn9+d+NIxhcuXb8cBaLhTevGsrgTlGUu9x8sWwvp78yj8veWsTi7dn1iik1t4RNGQVYLfB/5/TljP7JdI4NIafYySe/127bJjnEsBsgLAnydsOvj6mMvY3xdF6vrsnboUZ3j6NbfChF5S6+XLa3xuNaRVK+4gPzZLHBqD+Zyfi4+yEowteRSSuhpFxERERqzWKxcP6QDvRJPvLDqMVi4bITOjGkhoTcY1DHKL6+ZRRf3TKSMwckY7NaWLg9m8v/vZhPl+yuc0yefc8Hd4omKiSQAJuVG8d2BeDf87ZTXqFO4nViD4aT/2KeX/Q6vHc6HNjh25haIafLzXer9nnLxVsKz6z3sZJyi8XClBO7APDtqn01Huf3+5RnrIMf7zXPn/xXmPi49iCXRqekXERERHxiSKdoXrt8CHPvG8/ZA9vhchvc/+Uanpm2kf0FtS+b9mzL5OkAD3DBkA7EhzvYl1d61IRBajBkCpz/tlmmu2cxvDkGVv7PLOOVRvGvOdu4/X8rePGXzb4OpYqjbYd2uGFdzOR0Z3bNWxz69ZryskL4/BqoKIXuE2DUnb6OSFopJeUiIiLiU+2jgnn50kHcfnJ3AP45exvDnviVc16bzz9+3czqvbm43dUng+UVbhZsM8vePZ3dAYLsNq4fnQLAm3O21Xh/X8kpKufc13/j3/O2+zqUmg24GG6aD51OhPICmHoTfHEtlOT4OrJWwfNl0e87Dvg4kqq8M+WOYyfRHWPMtdQHisqrXVduGIZ/l6//eA9kbTa7q5/3L7AqdZKmoZElIiIiPmexWLh7Ui/+cckg+ld2dV+1N49//LqFs1/7jeFPzeDez1cxY0MGxiGztct25VBYVkFsaCD92lXtBn/F8E6EBwWwNbOQXzdkNOvrOZZF27NZuSeXN2Zvq/J6WpzoznDND2bZrjUA1n0N/xwFO+b6OjK/tm1/IZszzK3ENmcUUOp0HeMeR/fNylSuemdxo+xoUFC5pjysFjPl4UF2YkIDAdh94MjZ8hKni4rKL8T8Lilf8RGs+h9YrHDBOxAa5+uIpBVTUi4iIiItxrmD2/Pd7aP5/aFTeOaC/pzaN5HQQBv7C8r4fNlern9/KVe/u4TU3BIA5mw215OP7RmP1Vp1O6LwIDtXjegM0GTJb05ROTe8v5Rpa9PqdL/syj3UDxSVe5OzFstqg7H3wvW/QEw3yE+F98+GFR/6OjK/NW1tuvd8hdtgY3pBgx7vnfk7mLcli48W1b0nw+HqUr4O0CnG3GVhTzVJuWeWPMBqISTQ1uDYmk3mBvjhbvP8uIegyyjfxiOtnpJyERERaXESIoK4ZFgn/nXV8Sx/eCIfXj+ca0Z2ITDAytzN+zn1pbnc9dlKvl9tlgCPO6R0/VDXjkrBEWBl5Z5cFm0/sky4rMLF5oyCeifsny3dw68bMnhzTt3K0A9UJuVgzpr7hfZD4Y9zYeDlgAHTH4byIl9H5Zd+qvwSx24zv0hasze3QY/nSYgPTfbr62Cjt9rNbHuS8upmyg8tXbf4yx7e5UWV68hLoOt4GHOXryOSNkBJuYiIiLRojgAbo3vE8ejZffnpT2MY2jmawrIKvlqeyt6cEiwWGNOj+qQ8PtzBxcd3BOCfc7ZVuc3tNpjyzu9Memkuf526lgpX3bu0z9+aBUB6Xt3Khg9Nyhdu85OkHMARBme/CtEpUJwNS/7t64j8zp4DxaxNzcdqgYsqx+bqvXn1frzCsgpyis3kd1NGAdv3F2IYBk/9uIEXftlU5y+cCj0z5Y66zZRXm5QX++F68h/vg/0bISwRzn/LrBQRaWJKykVERMRvdIsP47M/nsj7153AHSd3Z1yveO47tbd3XWt1bhzbFZvVwtzN+1m3L997/QeLdrG4ssnWR4t3c9OHyygur/2e3KVOl7dJ1/7Csjol9Ycm5Yt3ZLe4RnRHZQswy9kBfntFs+V15JnNHp4S690xYE1q/ZPy1JySKpd/WpvOt6v28a+523l15laW7apbY776lq/vPlByxG1+13l91Sew8sPKdeT/hrAEX0ckbYSSchEREfErNquFk3rGc9ekXrx37QncPK7bUY/vGBPCmQOSAXh73k4AUnNLeGbaRgDOHdQOR4CVXzdkctnbi8kqrN12bMt25VBWuQe6y22QVVh+jHscdGhSnlPsZFNGw9YUN7sBl1TOlmfBknd8HY1f8ZSuT+6fRP8OZnPCLZmF9W72dvha7u9W7eOZnzZ6L781t25LKw4m5bVLpDt6kvLsI7+c8aukPHc3/HCPef6k+yFlrG/jkTbFp0n53LlzOeuss2jXrh0Wi4WpU6dWuf2rr75i0qRJxMbGYrFYWLlypU/iFBEREf9200lm4v7TunRmp1m445NVFJe7GNYlmhcvHsTHfxhOVIidVXtyOf+NBezIOvbsr6d03SMt78iZwpp4Gr0F2c2PYn6zrtyjymz5y5otr6X0vFKW784F4NS+SSRFBBEX5sDlNlifln/0O9dgb46ZlJ/QJQarBTamF7Avr5S4MLN6ZPqGDLbvP3YzwbwSJ4ZhkO9dU167mfLOsSGVcZTgOqziw2+2Q3O74ZvbzK3/OpxwcGyLNBOfJuVFRUUMHDiQ119/vcbbR48ezTPPPNPMkYmIiEhr0ic5gpN7J+A24OudNlan5hMYYOXpCwZgtVoY2jmGL28eSceYYHYfKOa8N37jt8OS7sMdfntGfu3XledUJuUn9zbLY/1qXbmHZsvr7Od1Zun60M7RJEYEYbFYGFA5W76mnuvK91aWrw/oEMkJKTHe6x8+qy8T+iRgGPDv+TuO+hg/rUnj+L9P55J/LfJWfNRmSzSAxIggAm1WKtzGEV9M5VfOukcG1+6xfGbpO7BjDgQEw3lvah25NDufJuWTJ0/m73//O+edd161t1911VU8/PDDTJgwoZkjExERkdbm3lN70TUulG7hBjeO6cLUW0bRLT7Me3u3+DC+unkUAztEklvsZMp/fued+TuqbZSVU1TuXQc8tHM0AGm1bPZmGIa3fP30/mZZ/eIdB/xrXTlotrwevKXr/ZK81/Vrbybl9W325knKO0QHc9bAdgAM6RTFWQOS+cOYrgB8uWwvny7ZzdbMgiPG2Y6sIu79YjVOl8HvOw94l29E1DIpt1ktdIgOBo5s9pbf0mfK09fAVzfCtAfMyxMehdijL4cRaQot/GuruisrK6Os7OBasPx8sxTI6XTidDp9FVab4Hl/9T5LbWnMSH1p7Eh9dI8L5vtbTmD69OlMHJ+C3W4/YgxFBVn58Lrj+ds365m6Ko3/+349a/fm8PjZxxFkPzh7Nm9zBoYBPRJC6dcunGW7ckjNKa7VmCworaC8sincyJQogu1W8kqcbEnPo2t8aOO+6KZ23PkEzH0OS84OXIvfxj3iVl9H1OTq+/cnu6jc2xjwlF5x3vsfl2T+zNfsza3X37TdB8wvQ5IiAhnfM55Qu5WR3WKoqKhgcIdwBnaIZNXePO7/cg1glqUP7BDJoA6RDOwYyQvTt1JYVsGADhFkFZSzr/LLpSBb7V9jh+ggtmcVsXN/AcM6RXqvzykyP5OHBdpazt9rw8Cycy7WRa9h3T7Le7X7uPNwDbkWmjBO/dvVttTl59zqkvKnnnqKxx577Ijrf/nlF0JCQnwQUdszffp0X4cgfkZjRupLY0fq61hjZ1wwWLpY+Ganla9XprF0yz5u6OUiygGGAe9ssgJW2lkLyN2XD9hYsXE7P7q2HvO5s0oBArBbDebNnE5kgI0Sp4Wp0+fSO8rPZsuBjuETGJLzNs45L/Dr/na4bA5fh9Qs6vr3Z0GGBbdho2OoweqFs1hdeX1eOUAAWzIL+Pq7H3HUsXJ6Z6YNsLBj7VLKtoMFWLjn4O0XJUGc28rOAgu7i8wvheZvzWb+1oNLJsICDC5IOIAlEf67xQxgybyZWGu5tbg73/x9mLlkLaEZq73Xb9llXr9rywZ+zF9ftxfWyCxGBe1zfqdb5o9ElewGwMBCatQJbEucTK6jK/w0rVli0b9dbUNx8ZHbBNak1SXlDz74IHfddZf3cn5+Ph07dmTSpElERET4MLLWz+l0mrMPEydit7fQMiVpUTRmpL40dqS+6jJ2zgDO3ZbNnz5dzZ4iJ69uDuG1Swcya1MWa3J2YLXAbWePYG9OCd/sWoM1LJbTTx92zBhW7smFFb8THx7M6aeP5cusZaRvyaZjr/6cPrRD47zQ5uSehPHmrwTl7GBy2Drc4//q64iaVH3//nzx/jIgm4tP7MHpJ3Wtcttrm+eQWVBGpwEnepdD1EZBqZPiheZs76VnTSKshr3FL/HE7nKzOaOQlXtyWbknj5V788guKue1SwcyslssAJfV+tkPSvttJ/OnbcYR3Y7TTx/gvf791N8hN5fRJwzh1L6J9XjkRlKcTcB/z8SSvQUAwx6Ce9CVuE/4I4lRnWmuyPRvV9viqdiujVaXlDscDhyOI7+htdvtGvzNRO+11JXGjNSXxo7UV23Hzkm9k/j2tghu/GApG9MLuOI/S70dpp86vz/DusZjVJYkZxaU1eox88vM0vXYMAd2u5320aFANukFTj8dz3aY9Hf49ApsC1/FNuBCSOrv66CaXF3+/uQVO1m43RwnZwxsf8T9BnSI5NcNmWzIKGJE99rvjZ2RZa4njw6xEx0WXIuYYVBnB4M6x9b6OWojJT4cgL25JVVem6fRW0xYkO/GtmHAj3dB9hYIiYURN2M5/npsITH4qp2b/u1qG+ryM9Y+5SIiIiJH0Sk2hC9vHsnp/ZO8Cfm9p/bikmGdAEiODALMRm/VNYU7nGc7tJhQc8uq9lGV98+t/ZZqLU6fM6HP2WC44NvbwVXh64halF83ZFDhNuiVGE7XQ5oLeniavdW1A7tnj3LPXuG+0smzV3kNjd58uk/58vdh0w9gtcNVU83mhCExx7ybSHPy6Ux5YWEhW7ceXHu1Y8cOVq5cSUxMDJ06deLAgQPs3r2bffv2AbBp0yYAkpKSSEpKqvYxRURERBpbqCOA1y8fwufL9gJw0SFl5gkRZoVeeYWbnGKnN9muSc5hSXm7KHOGc18d9jlvkU5/DrbPgX0r4NdHYOL/gVXzPwA/rTW3QjutX/WfX73boqXWLSk/tPO6L3m+FMgpdpJf6iQiyEzCfb5PedZWmPagef6UhyF5wNGPF/ERn/6lXLp0KYMHD2bw4MEA3HXXXQwePJiHH34YgG+//ZbBgwdzxhlnAHDppZcyePBg3nzzTZ/FLCIiIm2TxWLh4uM7cvHxHbFYDnbAcgTYiK1MsNNrsS3agZqS8tza73PeIoUnwalPmOcXvgYfXwzFB3wbUwtQWFbB3C37AZjcv/qk3DNTvnV/IUVlFazfl8+Zr87j1/UZR33sg0m5b2fKwxwBtK8cx8t35QBQ6nRRVmEu1YgM8UFS7nabVRvOYkgZCyfe1vwxiNSST5PycePGYRjGEaf33nsPgGuuuaba2x999FFfhi0iIiJSRVJlCXt6/rFnuw8vX28X6UnKS2pV/t6iDbkKznkDAoJg63R4qR98dBEsfB0y1pvre9uYWRszKa9wkxIXSq/E8GqPSQgPIikiCMOAdfvyeXH6Ztam5vPv+duP+th7csxycV/PlAOM7RkHwOxN5hcQntJ1qwXCAn1QnLv8fdi9AOyhcM7rqtqQFk2jU0RERKSBDl1XfiyemXLP7HpipAOLBcoq3N7b/NrgK+D66RDbA5xFsOUX+Pkh+OeJ8EJv+OqPsOqTNjOLPu2Q0vVDKywO17+yhP3ndenM3GjOkK/YnUt55WxzdTwz5R19PFMOcFJPs0Hd3M1mUp53yHpya233VmssBekw/RHz/Ml/hahOzfv8InWkpFxERESkgRIjzKQ8ow5JeXRlUu4IsBEfZq5L9/sSdo/kAXDr73DTfHNtebdTICAYCtNh9Sfw9R/h5UGw8QdfR9qkSp0uZm3KBGByDevJPQZUlrC/t2Anlf0EKatwH3Wd+d4WNFM+qnssAVYL27OK2J1d7Nv15D/eC2V50G4IDP9j8z+/SB21ui3RRERERJpbQ2bKwVxXnllQRmpuiXfG1O9ZrebWaEn9YdQd4CyFPYth+yzY+CNkbYJPLoeRd8Apj4Ct9Xws/XTJbj5dsofichfF5S7aRwXTv/3Rf679Kn/ung7/0SF2coqd/L7jQLV7l3+5bC8FlVuOtW8BSXl4kJ2hnaNZvOMAczZnsjmjEPDBLP7GH2DDt2CxwdmvgNVXG5+J1J5mykVEREQaKKlyXXh6ft0bvQG0q9wWbZ8/b4t2LPYg6HoSTHgUbv7tYOOtBa/A+2eZJcetQIXLzd9/2MDy3blsTC8A4NzB7Y5aug5USdoTwh3cdFI3AJbsrFrmbxgGr83cwt2frwLg8uGdCPHFmu1qnNQrHoCPFu/mo8W7ALh1fPfmC6A0H364xzw/6g7zCyERP6CkXERERKSBkirL14/Vfb2swkVhmTm7GRvq8F7vafaW1ozboqXnlXLfF6vYtr+w2Z7Ty2Y3O7Vf/F8IDDcbcr05Brb82vyxNLLVqXkUlFYQERTA21OO57/XncCfTul5zPvFhTm8HcwvO6ETJ3aLBWDpzgO4K2fPK1xuHvp6Lc//shmAm07qxt/P6ddEr6TuxlWuK9+YXoDbgFP7JnpfR7OY8TgU7IOYrnDS/c33vCINpKRcREREpIG83dePkZTnFJnrbG1WCxHBB2c3fbEt2vsLd/LZ0r08+u26ZnvOIxx3DvxxDiT2g6JM+OgC+OI6KDj6VmAt2W9bsgAY2S2OicclMrZnPIEBtfvI/adTenBK7wSuGdmF45IjCA20kV9awaaMAorKKrjxg2X87/fdWC3w+Dl9eWBy7+ZvonYUfZLDSQg3v2yy2yw8OLlP8z35pmmw5N/m+TP/AXbfl/SL1JaSchEREZEG8qwpL6jcY7om2UVlAESHBFYpZ/Yk5anNWL6+M6sIgPlbs5p1hv4Isd3Mbu3DbwaLFdZ+Ca8PgzVf+C6mBpi/1UzKR/WIq/N9Lx7WkXeuGUZ0aCABNitDKteS/7QmjcveXsTMjZkE2a28eeVQppzYpTHDbhQWi4VJfRMBuHZUCl3iQpvniTM3wJfXAwYM+4O5TELEjygpFxEREWmgUEcAYyqTsGve/Z09B4qrPa66Jm9QtzXlpU5Xo+xnvjPbjNEw4KvlqQ1+vAYJDIHJT8MfZkHyICjNM5Osz6bA6s8hbTU4W/56++LyCpbvzgFgTPe6J+WHG9YlBoBXZm5l9d48YkID+fgPI5jU9+id3H3pgcl9+PeU47n/tN5N/2SGAftWwv8uhfJC6DIGTnuq6Z9XpJG1jK4QIiIiIn7utcuGcPG/FrIpo4Cr//M7T57fnxO6xFQpL66uyRscnCnfX1hGeYW7xnLnb1am8tBXaxjfO4HXLh9S71gNw2B3dpH38udL93DLuG7HbEbW5NoNght+hbnPw9znYP035gkAC0R3gfjeEN/LbOLV+4wWVaa8eMcBnC6D9lHBdI5teNfxE1JivOc7x4bw3rUnkNJcs8/1FOYIYMJxiU33BIYB+5YfHBs5O83ro7uYPQpsPtiCTaSBlJSLiIiINILIEDvvX3cCF/xzAduzirj0rUW0jwrm/CHtOW9we7rGhx1MysOqJuWxoYEEBlgpr3CTkV9Kx5iqCZ3LbfDMtI28NXc7AL+szzhq8n4s2UXlFJW7sFgg2G5jZ3YxS3fleGdmfcpmh/EPQs9TYcUHkLkR9m+AkhzI2WGeNv9kHhuWBGPugiFXm93dfcyznnxMj7hG+YJjcKcoBneKIiTQxsuXDiYuzHHsO7VGhgGpy2Dd17D+W8jbffC2gGDoMdHs6h/SAsavSD0oKRcRERFpJEmRQXz6xxG8OmMrP65JIzW3hFdnbuXVmVsZ3CkKR2USfXj5usVioX1UMDuyikjNLamSlOcWl3P7/1YwrzLhC7BaKK9wsyEtn4Edo+oV567K0vV2kcGM7BbL58v28vnSPS0jKfdoP8Q8gZmUFWXB/o2Vp02weRrk7YGf7oP5/4Cxd8PgqyDAd4mrdz15I5SuAzgCbHx9y6hGeSy/VXwAvr0dNn5/8Dp7qPmlzXHnmAl5YMuuHhA5FiXlIiIiIo2oQ3QIz1w4gMfO6cv09Rl8tXwvc7dksWJ3rveY6JDAI+6XHBnEjqyiKuvKN6UXcOMHS9mVXUyw3cZzFw3gi2V7mb1pPyt251SblLvdBnO37Ce/tIKzBiRXO2O7q7J0vVNMCOcP6cDny/by64bMhr/4pmKxQFi8eUoZY1536pPmTPq8FyA/FX64G+a9BGP+3GjJ+drUPBZuyyYhwkFyZDDJkUFEB9uqPfbHNWnefclHNuc2YK3ZtpnwzW3mz9dqN5PwvudCt1PMPgQirYSSchEREZEmEGS3cdbAdpw1sB2ZBaV8u3IfXy1PZXtWIWN7xh9xfOfYUBZsy+b1WVsZ1iWGdfvyueuzlRSXu+gQHcxbVx3Pce0i2JpZyOxN+1m5J7fK/Q3D4POle3lzzja2V3ZWzy0ur7ZLt2emvHNsCL2SwgFzvXtDSuKbXUAgDLseBl8Jy/9bmZzvrUzOX4TRlcl5PcvaDcPgxv8uZV8129yF2W28tWshyZEhJEcGkZ5fyvT15jZuE/okENtWy8wby875MOsp2DXfvBzTDS78j9lzQKQVUlIuIiIi0sQSwoO4YUxXbhjTFcMwqp29/uPYrszamMm2/UWc8co88ksrAHPW9bXLh3ibww3uZG6TteKwpHxjegH3fbkaAEeAlbIKN0/8sIHhKbHexNtjd2V3+E6xIUQF27FZLbjcBtlFZSRHtpzGabUS4IAT/mAm4Mvfh/kvmTOrP95jJurHnQtJ/SCxL8T3qXWSnpFfxr68UqwWs+Fael4paXmllFW4KXRaWLevgHX7CrzH26wWbj6pG7ef0r2JXmgbsGsBzHoSds4zL9sCYei1cMrD4AjzbWwiTUhJuYiIiEgzqqkBWJe4UL6+dSTXvrvEWwZ97agu/OX0PgTYDs5eD+oQBZiz3QeKyr3Jumf2u2diGF/dMorbPl7O7E37ueN/K/jmtlEE2Q+WXXvK1zvHhGK1WogNDSSzoIysgnL/S8o97EEw/I9m07cVHxxMzhf/8+AxFivE9jAT9MS+0Guy+d9qrNuXB0D3hDA+ufFEwJw9z8wr5osff6X7gGHsL3SSnldKYVkFFw7tQL/2kU3+Mlulwv3w071mIzcwS9WHTDGb+EV28G1sIs1ASbmIiIhIC5EcGcxnN53IG7O2MaBDJKf3Tz7imMgQO13jQtmeVcSqPbmM750AQEa+WWbdNS6MMEcAz180kNP+MZdNGQU8/dNGHj37YPJ5aPk6QFyYw0zKC8ua+iU2PXuQOXM+ZIrZqXvfcshYC+lroeQAZG0yT+u+gpn/B73PhLH3HlEavW5fPgD92h1MtC0WCzGhgXQIhZN7xWO3a/utOvM07cvZaZ6yt8Lvb5k/G4sNhlwFY+6GqE6+jlSk2SgpFxEREWlBIoLsPDC591GPGdQpiu1ZRazYneNNytMrk/KkSLM8Oy7MwXMXDeTad5fw3oKdnNQznvG9EygsqyC7cms2b1Ie7oA0c5/0ViPAAQMuMk9gJoMF6ZCxDjLWwO7FZgf3jd+bpwGXwCmPQGR7wGzyBnBcuwhfvYLWoXA/rPof7F50MBF3Fh15XGI/OOd1rRuXNklJuYiIiIifGdwxiq+Wp1ZZV55R2ZAsMeLgmunxvRK4dlQX3v1tJ/d+sYqf/jSWzALzuJjQQMKDzJneuMp901vFTHlNLBaISDZPPSaY1+3fBHOfgzWfw+pPzZn1ARfDkKtZV5mUqyS9Hgr3w7YZsOkn2PgDuJ2HHWCBiHYQnQLRXaD9YBg8xWzeJ9IGKSkXERER8TODOprN3lbtycXtNrBaLYfMlFft/H3/ab1ZuC2bjekF3PP5Ki4+viNgbofmEV/ZLTyroLw5wm854nvBBf+GETfDtIdgzyKzWdzy9/nYncC8gP4MPJALFfEQHAVBURAQisVd4ePAfcRZCoXpZsWB51Td5ZKcqvdrNwT6XwhxPc0kPLJjvbvii7RGSspFRERE/Ezv5HAcAVbySyvYkV1Et/gwb1J+6Ew5mFuzvXLZYM56dT5zNu8ntXIf9C6xB5PyOE9S3ppnyo+m/VC4bpq5FdeKD3Ctm0pnMulsnQHfz6hyqB04GzDWh5hJenAUhMRC8kDoNAKSBpjNyazV72fuV0pyYdUnsPoTOLADSnNrf9+k/tB9ormvePLAJgpQpHVQUi4iIiLiZ+w2Kz0Sw1ibms+2zEK6xYd5y9eTIo6cgeyZGM5fz+jD375Zx9bMQgA6xYZ6b48LbwPl68disUDKGEgZw/uRt/HbjKlcFb+NcTEHzOS0NBdK8qDMLGu3OIvBWQwF+8z775wHC18zz9sCzRnhmK7mfwOCzCTdYq08VZ632iCifeWxKRAab8bR3IoPQNZmsNkhJA72b4S1X8KG78zXeKiAIAhLhPBkCK/87+GXw5PNLytEpFaUlIuIiIj4oS6xoaxNzWdndhEFpU6Kyl3AwUZvh7tyRGfmbN7PrxsyAegco5nymqzMdDHDPZQhgy5n3Piq+447y0qZ/v2XTBwzDLuzEErzzLLtvb+bzeOyt4Cr3ExyszbX7YntoRDV0Zx5D4mBuF6QXDnzHhBsNq+zB5uJsedUXmg+f3E2YJgN7cA8X5IDubuhIA3Ki6C82DzeWVx5vgiKs8zbaxLfB4ZdD13GmEl3UJRvvjgQacWUlIuIiIj4oa5x5kz3jqwi73Zo4UEBhARW//HOYrHwzAUDOO3leewvKKNv+4NdxQ8m5f67pry8wo3VQpU93etrbeUe5X2r67xuteEMCDOblB26Jdqgy8z/ul2QtxcObIMD282k2OUEw33kqaIc8vaYHcnz9ppdyfdvPOTJvmvwa6m1yI5mTEX7ITjGLDvvdwF0GKYkXKSJKSkXERER8UNdDknK0/PMGe7qStcPFRvm4LvbRrMjq4jeSUcm5TnF5VS43I2S2DanCpeb8//5GwcKy5lx9ziCA21UuNy8OnMrI7vFMrxrbK0fq6isgh1Z5pZdfdvVo/O61QbRnc1Tt5Nrf7+KMsjdA/l7zXLywgzIXA/pa8x9vZ0l5jEVJXB4ozlHJITGmmXxcDCJdoSb+31HtIfAMAgMhcAQc0becz4oymzAFlQ5Hjwz7UrERZqNknIRERERP5RyaFJ+2B7lR5MUGXTEcTGhgVgt4DbgQFE5CcdI7luaGRszWZuaD8C6fXkc3yWGGRszeXnGFt6au51vbxtFj8TwWj3WhrR8DAMSIxzEhzuOfYfGEuCAuO7m6VhcFWZy7iw1y9kdYY0Xh5JxkWbnX1+DioiIiAhwMCnPyC9jR5bZvO3wzuu1ZbNaiAk1m73t98N15R8u2uU9vymjAMC7z3iJ08XNHy2nuLx225it22cm9/WaJW8utgBzFjwsvnETchHxCSXlIiIiIn4oKiSQ6BBzTfPi7QeAY5evH42/rivfkVXEvC1Z3sub0s2kfEPlfwG2Zhby16/XYniboNVsbWUy36+69eQiIk1ASbmIiIiIn/KsK1+1NxeAxFqUr9fEm5QXtPyZ8sz8Uj5YuJPU3BI+XmzOkgfZzY+1Gz1JeZo5433XxJ5YLfDVilQ+W7rnmI/tmSk/riXPlItIq6KkXERERMRPeUrYnS5zBrhhM+X+sVe5YRjc9OEy/vbNOsY+O4v3F5pJ+S3jzLXYmzMKyC91sjenBIApJ3bmnlN7AfDwN+u8yXp1yipcbMk0k/p+7TVTLiLNQ0m5iIiIiJ9KiQ2tcrlxytdbdlI+c2Mmy3fnYrWAy21QXuGmfVQwN4xJwWqB3GIn8zab5ezJkUFEhQRy09hujO8VT1mFm1s+Wk5BqbPax96SUYjTZRAZbKd9VHBzviwRacOUlIuIiIj4qZT4qkl5YmT9u4XHhbf8NeVut8FzP28C4Max3fjpT2O4bXx3Xr9iCCGBAd5y/q9XpALQJ9mc7bZaLbx48SDaRQaxI6uIB79aU+368nWV+5P3ax+BRV3IRaSZKCkXERER8VNdDpkpD7BaiAttQFLuBzPl369JY2N6AeGOAG46qSt9kiO459ReDOoYBUDvJHPbszmbM6tcBogODeTVy4cQYLXw/eq0Kh3bPTzbqrXozusi0uooKRcRERHxU5415QAJ4Q6s1vrP7nrWlO9voY3enC43L/7imSXvSlRI4BHH9Kzci9yzxt4zU+4xtHM0D0zuDcD/fb+BNXvzqtzumSnvq87rItKMlJSLiIiI+KlQRwCJEeYMd0M6r0PL3xLti2V72ZldTGxoINeOTqn2mENnxgH6JIcfccz1o1OYdFwi5S4317z7O6v25ALm+vQNaWaTN82Ui0hzUlIuIiIi4sc8JewNafIGEF+5pvxAURku97H3825OpU4XL/+6BYBbxncnzBFQ7XG9kg7OcDsCrFXK+z0sFgvPXTSQvu0iyC4q59K3FvHLunR2ZBVS4nQREmirUoEgItLUlJSLiIiI+LGulc3eEhuYlMeEmuXgbgNyilvWbPmHi3aRnl9KcmQQVwzvVONxnWJCvPuV90wMJ8BW/UfdyGA7n/7xRMb2jKfE6eLGD5Zx9X+WAGbJu60BywBEROpKSbmIiIiIH7v8hM6c1DOei47v0KDHsdus3sS8JTV7Kyyr4I3Z2wD40yk9CLLbajzWZrV415VXV7p+qDBHAO9cfTzXjOyC3WYhNdfc11zryUWkuVVf+yMiIiIifqF/h0jev+6ERnmsuLBADhSVk1VQDkmN8pAN9p/5OzhQVE5KXCgXDj32Fw/DU2JYvTePEV1jj3ms3Wbl0bP7cvO4bnywcBfLd+cw5cQujRC1iEjtKSkXEREREcBs9rY5o9CnM+VlFS4MA4LsNnKKynl77nYA7prYs8Zy9EPdNbEXE49LYliX6Fo/Z2JEEPec2qveMYuINISSchEREREBfL9XucttMPkf88grcfLWlOP5ZX06BWUV9EmO4Iz+ybV6jOBAGyekxDRxpCIijUdJuYiIiIgAB5Py/T5KytPzS9meVQTA5W8v8l5/76k9G7QHu4hIS+bTRm9z587lrLPOol27dlgsFqZOnVrldsMwePjhh0lOTiY4OJgJEyawZcsW3wQrIiIi0srFhVc2eivwTff13dnF3vNlFW7KKtwM7RzN+F4JPolHRKQ5+DQpLyoqYuDAgbz++uvV3v7ss8/yyiuv8Oabb7J48WJCQ0M59dRTKS0tbeZIRURERFo/X5ev78kxk/KR3WK5dlQXOsYE88hZx2GxaJZcRFovn5avT548mcmTJ1d7m2EY/OMf/+Cvf/0r55xzDgD//e9/SUxMZOrUqVx66aXNGaqIiIhIqxfv46R87wEzKe8SF8ojZ/XlkbP6+iQOEZHm1GLXlO/YsYP09HQmTJjgvS4yMpLhw4ezcOHCGpPysrIyysoO/kOSn58PgNPpxOl0Nm3QbZzn/dX7LLWlMSP1pbEj9aWxc3RRQeYe4FkFZT55j3Zlm+vJ20U4WuzPSGNI6ktjp22py8+5xSbl6enpACQmJla5PjEx0XtbdZ566ikee+yxI67/5ZdfCAkJadwgpVrTp0/3dQjiZzRmpL40dqS+NHaql1sGEMD+wlK+/+FHmru32urtNsBC1s6N/Fi4oXmfvI40hqS+NHbahuLi4mMfVKnFJuX19eCDD3LXXXd5L+fn59OxY0cmTZpERESEDyNr/ZxOJ9OnT2fixInY7XZfhyN+QGNG6ktjR+pLY+foyivcPLL8V9yGhVHjJxAdEtisz//k2jlAGWedPJIBHSKb9blrS2NI6ktjp23xVGzXRotNypOSkgDIyMggOfngvpQZGRkMGjSoxvs5HA4cDscR19vtdg3+ZqL3WupKY0bqS2NH6ktjp3p2O0QG28krcZJX6iYhsvneo1Kni4wCcwli14SIFv/z0RiS+tLYaRvq8jP2aff1o0lJSSEpKYkZM2Z4r8vPz2fx4sWceOKJPoxMREREpPWKCzNnx5t7r/K9OSUAhDkCiApRwiIibYdPZ8oLCwvZunWr9/KOHTtYuXIlMTExdOrUiTvvvJO///3v9OjRg5SUFP72t7/Rrl07zj33XN8FLSIiItKKxYU52La/iKzCuu9VvudAMQ9+tYZxveK5dlQKtjosSvdsh9YhOlhboIlIm+LTpHzp0qWMHz/ee9mzFvzqq6/mvffe47777qOoqIgbb7yR3NxcRo8ezbRp0wgKCvJVyCIiIiKtWlx45bZoBXWfKf95XTrzt2Yxf2sW369O4/mLBtA9IbxW9/Vsh9YxRo15RaRt8WlSPm7cOAzDqPF2i8XC448/zuOPP96MUYmIiIi0XQ3Zqzyn+ODs+so9uVz05kJm3TOOqFo0jNtTWb7eMVpJuYi0LS12TbmIiIiIND/PmvL6JOW5xea+vJcO60iPhDByip28PmvrMe5l2p1tzpR3igmu8/OKiPgzJeUiIiIi4hXnnSmv+5pyT1LeOymcv5zRB4D3F+xiz4Hq9+tduvMAf5u6lgNF5d415SpfF5G2Rkm5iIiIiHjFNaB8PbfETOSjQgI5qWc8o7rHUu5y8+L0zdUe/+SPG/hg0S7+8vUab+KupFxE2hol5SIiIiLi1ZBGbzlF5kx5VIgdi8XCA6eZs+VTV6ayNjWvyrGlThdrKq/7aW06+aUVgNl9XUSkLVFSLiIiIiJeB9eUlx+1IW918ko8Sbn5GP07RHLOoHYYBjwzbWOVY9em5uF0VX38uLBAQgJ92odYRKTZKSkXERERES9P+Xq5y+2dva4tT/f16BC797p7JvUi0GZl3pYs5m7e771+2a4cAMb3iqdbfCig0nURaZuUlIuIiIiIV5DdRrjDnK2uy7rysgoXxeUuAKKCD26B1jEmhKtO7AzA0z9txO02Z8c9SfmJ3WJ54eJBtI8K5txB7RvlNYiI+BMl5SIiIiJSRX3WlXtK160WCA+qWoJ+2/juhAcFsD4tn29WpWIYBst3m0n50M7RDOoYxW8PnMzVI7s0zgsQEfEjSspFREREpIpD15XXlmc7tMhgO1arpcpt0aGB3DyuGwDP/7yZLZmFZBWWE2iz0rddZCNFLSLin5SUi4iIiEgV9dkWzZOUe5q8He66USkkRwaRmlvCvZ+vAqBf+wiC7LYGRisi4t+UlIuIiIhIFfVJyj1N3qIOafJ2qCC7jT9P7AnAqr3mVmhDO0c3JEwRkVZBSbmIiIiIVJFQuaZ8e1ZRre+T55kpD64+KQe4YEgHeiWGey8rKRcRUVIuIiIiIocZ3SMOgF/XZ3gbuB3Lwe3Qqi9fB7BZLTwwubf38hAl5SIiSspFREREpKpBHaPolRhOWYWbb1em1uo+uZXJe2QN5ese43rF8+Dk3jx2dl8SwoMaHKuIiL9TUi4iIiIiVVgsFi4Z1hGAT5fuqdV9cmsxU+557D+e1E3bn4mIVFJSLiIiIiJHOG9wewJtVtam5rM2Ne+Yxx/svn70mXIREalKSbmIiIiIHCE6NJBJfRMB+HTJsWfLD92nXEREak9JuYiIiIhUy1PCPnVlKqVO11GPrU2jNxEROZKSchERERGp1qhucbSPCqagtIKf1qYd9VhPl3aVr4uI1I2SchERERGpltV6SMO3Y5Swa6ZcRKR+lJSLiIiISI0uHNoBiwUWbT/Azqyiao8pdboodbqBY2+JJiIiVSkpFxEREZEatYsK5qSe8QB8VsP2aJ4mbzarhXBHQLPFJiLSGigpFxEREZGjurSyhP3zZXupcLmPuD23xCxdjwq2Y7FYmjU2ERF/p6RcRERERI7q5N6JxIYGsr+gjFmb9h9xe06RmryJiNSXknIREREROarAACsXDO0AVN/wLc8zU64mbyIidaakXERERESO6eLjzRL2WZsyycwvrXJbTuWa8mjNlIuI1JmSchERERE5pu4JYRzfORqX2+CL5Xur3OZp9BYZrJlyEZG6UntMEREREamVS4Z1ZOmuHD5dsoeTesbz5I8biA9zEB1qJuNaUy4iUndKykVERESkVs4YkMxj361nV3YxZ746H8Mwr7fbzI7rKl8XEak7la+LiIiISK2EBAZw1sB2ABgGjO4eh91mwekys/NINXoTEakzzZSLiIiISK39eWIPXG43J/VM4PT+SXy/Oo07PlmBYUBsqJJyEZG6UlIuIiIiIrWWEB7EsxcO9F4+a2A73IbBzI2ZnNQz3oeRiYj4JyXlIiIiItIg5wxqzzmD2vs6DBERv6Q15SIiIiIiIiI+oqRcRERERERExEeUlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIj7T4pLygoIA777yTzp07ExwczMiRI1myZImvwxIRERERERFpsBaflN9www1Mnz6dDz74gDVr1jBp0iQmTJhAamqqr0MTERERERERaZAWnZSXlJTw5Zdf8uyzzzJ27Fi6d+/Oo48+Svfu3fnnP//p6/BEREREREREGiTA1wEcTUVFBS6Xi6CgoCrXBwcHM3/+/GrvU1ZWRllZmfdyfn4+AE6nE6fT2XTBivf91fsstaUxI/WlsSP1pbEjDaUxJPWlsdO21OXnbDEMw2jCWBps5MiRBAYG8vHHH5OYmMj//vc/rr76arp3786mTZuOOP7RRx/lscceO+L6jz/+mJCQkOYIWURERERERNqw4uJiLr/8cvLy8oiIiDjqsS0+Kd+2bRvXXXcdc+fOxWazMWTIEHr27MmyZcvYsGHDEcdXN1PesWNHsrKyjvlmSMM4nU6mT5/OxIkTsdvtvg5H/IDGjNSXxo7Ul8aONJTGkNSXxk7bkp+fT1xcXK2S8hZdvg7QrVs35syZQ1FREfn5+SQnJ3PJJZfQtWvXao93OBw4HI4jrrfb7Rr8zUTvtdSVxozUl8aO1JfGjjSUxpDUl8ZO21CXn3GLbvR2qNDQUJKTk8nJyeHnn3/mnHPO8XVIIiIiIiIiIg3S4mfKf/75ZwzDoFevXmzdupV7772X3r17c+211/o6NBEREREREZEGafEz5Xl5edx666307t2bKVOmMHr0aH7++WeVfIiIiIiIiIjfa/Ez5RdffDEXX3yxr8MQERERERERaXQtPilvKE9zec9+5dJ0nE4nxcXF5Ofnq5JBakVjRupLY0fqS2NHGkpjSOpLY6dt8eSftdnsrNUn5QUFBQB07NjRx5GIiIiIiIhIW1JQUEBkZORRj2nx+5Q3lNvtZt++fYSHh2OxWHwdTqvm2RN+z5492hNeakVjRupLY0fqS2NHGkpjSOpLY6dtMQyDgoIC2rVrh9V69FZurX6m3Gq10qFDB1+H0aZEREToD43UicaM1JfGjtSXxo40lMaQ1JfGTttxrBlyjxbffV1ERERERESktVJSLiIiIiIiIuIjSsql0TgcDh555BEcDoevQxE/oTEj9aWxI/WlsSMNpTEk9aWxIzVp9Y3eRERERERERFoqzZSLiIiIiIiI+IiSchEREREREREfUVIuIiIiIiIi4iNKykVERERERER8REl5G/DUU08xbNgwwsPDSUhI4Nxzz2XTpk1VjiktLeXWW28lNjaWsLAwLrjgAjIyMry3r1q1issuu4yOHTsSHBxMnz59ePnll2t8zt9++42AgAAGDRp0zPgMw+Dhhx8mOTmZ4OBgJkyYwJYtW6oc88QTTzBy5EhCQkKIioqq0+uXumkN4+Xss8+mU6dOBAUFkZyczFVXXcW+ffvq9kZInbWGsdOlSxcsFkuV09NPP123N0LqzN/HzuzZs48YN57TkiVL6v6GSJ35+xgCWL58ORMnTiQqKorY2FhuvPFGCgsL6/ZGSJ219LHz1VdfMWnSJGJjY7FYLKxcufKIY9566y3GjRtHREQEFouF3Nzc2r58aSGUlLcBc+bM4dZbb2XRokVMnz4dp9PJpEmTKCoq8h7z5z//me+++47PP/+cOXPmsG/fPs4//3zv7cuWLSMhIYEPP/yQdevW8Ze//IUHH3yQ11577Yjny83NZcqUKZxyyim1iu/ZZ5/llVde4c0332Tx4sWEhoZy6qmnUlpa6j2mvLyciy66iJtvvrkB74TURmsYL+PHj+ezzz5j06ZNfPnll2zbto0LL7ywAe+K1EZrGDsAjz/+OGlpad7T7bff5rinnAAAC1RJREFUXs93RGrL38fOyJEjq4yZtLQ0brjhBlJSUjj++OMb+O5Ibfj7GNq3bx8TJkyge/fuLF68mGnTprFu3Tquueaahr0xckwtfewUFRUxevRonnnmmRqPKS4u5rTTTuOhhx6qwyuXFsWQNiczM9MAjDlz5hiGYRi5ubmG3W43Pv/8c+8xGzZsMABj4cKFNT7OLbfcYowfP/6I6y+55BLjr3/9q/HII48YAwcOPGosbrfbSEpKMp577jnvdbm5uYbD4TD+97//HXH8u+++a0RGRh7jFUpj8ufx4vHNN98YFovFKC8vP+rjS+Pyx7HTuXNn46WXXqrlK5Sm4o9j51Dl5eVGfHy88fjjjx/1saXp+NsY+te//mUkJCQYLpfLe8zq1asNwNiyZUutXrM0jpY0dg61Y8cOAzBWrFhR4zGzZs0yACMnJ6fWjystg2bK26C8vDwAYmJiAPPbPafTyYQJE7zH9O7dm06dOrFw4cKjPo7nMTzeffddtm/fziOPPFKrWHbs2EF6enqV546MjGT48OFHfW5pPv4+Xg4cOMBHH33EyJEjsdvttXoeaRz+OnaefvppYmNjGTx4MM899xwVFRW1eg5pPP46djy+/fZbsrOzufbaa2v1HNL4/G0MlZWVERgYiNV68KN5cHAwAPPnz6/V80jjaEljR9qOAF8HIM3L7XZz5513MmrUKPr16wdAeno6gYGBR6zVTkxMJD09vdrHWbBgAZ9++ik//PCD97otW7bwwAMPMG/ePAICaje0PI+fmJhY6+eW5uPP4+X+++/ntddeo7i4mBEjRvD999/X6jmkcfjr2LnjjjsYMmQIMTExLFiwgAcffJC0tDRefPHFWj2PNJy/jp1DvfPOO5x66ql06NChVs8hjcsfx9DJJ5/MXXfdxXPPPcef/vQnioqKeOCBBwBIS0ur1fNIw7W0sSNth2bK25hbb72VtWvX8sknn9T7MdauXcs555zDI488wqRJkwBwuVxcfvnlPPbYY/Ts2bPa+3300UeEhYV5T/Pmzat3DNI8/Hm83HvvvaxYsYJffvkFm83GlClTMAyj3q9D6sZfx85dd93FuHHjGDBgADfddBMvvPACr776KmVlZfV+HVI3/jp2PPbu3cvPP//M9ddfX+/4pWH8cQz17duX999/nxdeeIGQkBCSkpJISUkhMTGxyuy5NC1/HDvSSvi6fl6az6233mp06NDB2L59e5XrZ8yYUe36k06dOhkvvvhilevWrVtnJCQkGA899FCV63NycgzAsNls3pPFYvFeN2PGDCM/P9/YsmWL91RcXGxs27at2vUxY8eONe64444jXoPWlDef1jBePPbs2WMAxoIFC+r+Rkidtaaxs3btWgMwNm7cWPc3QuqsNYydxx9/3IiPj1cPCx9pDWMoPT3dKCgoMAoLCw2r1Wp89tln9X9DpNZa4tg5lNaUt25KytsAt9tt3HrrrUa7du2MzZs3H3G7p4HFF1984b1u48aNRzSwWLt2rZGQkGDce++9RzyGy+Uy1qxZU+V08803G7169TLWrFljFBYW1hhbUlKS8fzzz3uvy8vLU6M3H2pN48Vj165dBmDMmjWrNm+B1FNrHDsffvihYbVajQMHDtTqPZD6aS1jx+12GykpKcbdd99d5/dAGqa1jKFDvfPOO0ZISIgSrCbWksfOoZSUt25KytuAm2++2YiMjDRmz55tpKWleU+HfgN30003GZ06dTJmzpxpLF261DjxxBONE0880Xv7mjVrjPj4eOPKK6+s8hiZmZk1Pm9tu0o+/fTTRlRUlPHNN98Yq1evNs455xwjJSXFKCkp8R6za9cuY8WKFcZjjz1mhIWFGStWrDBWrFhhFBQU1O9NkRr5+3hZtGiR8eqrrxorVqwwdu7cacyYMcMYOXKk0a1bN6O0tLT+b4wck7+PnQULFhgvvfSSsXLlSmPbtm3Ghx9+aMTHxxtTpkyp/5siteLvY8fj119/NQBjw4YNdX8TpEFawxh69dVXjWXLlhmbNm0yXnvtNSM4ONh4+eWX6/eGSK219LGTnZ1trFixwvjhhx8MwPjkk0+MFStWGGlpad5j0tLSjBUrVhhvv/22ARhz5841VqxYYWRnZ9fvTZFmp6S8DQCqPb377rveY0pKSoxbbrnFiI6ONkJCQozzzjuvyi/7I488Uu1jdO7cucbnre0fG7fbbfztb38zEhMTDYfDYZxyyinGpk2bqhxz9dVXV/v8mvlsfP4+XlavXm2MHz/eiImJMRwOh9GlSxfjpptuMvbu3Vuft0PqwN/HzrJly4zhw4cbkZGRRlBQkNGnTx/jySef1Jc5zcDfx47HZZddZowcObIuL10aSWsYQ1dddZURExNjBAYGGgMGDDD++9//1vVtkHpo6WPn3XffrfaxH3nkkWM+/6GvQVo2i2Go85GIiIiIiIiIL6ido4iIiIiIiIiPKCkXERERERER8REl5SIiIiIiIiI+oqRcRERERERExEeUlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIjygpFxERaeWuueYaLBYLFosFu91OYmIiEydO5D//+Q9ut7vWj/Pee+8RFRXVdIGKiIi0QUrKRURE2oDTTjuNtLQ0du7cyU8//cT48eP505/+xJlnnklFRYWvwxMREWmzlJSLiIi0AQ6Hg6SkJNq3b8+QIUN46KGH+Oabb/jpp5947733AHjxxRfp378/oaGhdOzYkVtuuYXCwkIAZs+ezbXXXkteXp531v3RRx8FoKysjHvuuYf27dsTGhrK8OHDmT17tm9eqIiIiJ9RUi4iItJGnXzyyQwcOJCvvvoKAKvVyiuvvMK6det4//33mTlzJvfddx8AI0eO5B//+AcRERGkpaWRlpbGPffcA8Btt93GwoUL+eSTT1i9ejUXXXQRp512Glu2bPHZaxMREfEXFsMwDF8HISIiIk3nmmuuITc3l6lTpx5x26WXXsrq1atZv379Ebd98cUX3HTTTWRlZQHmmvI777yT3Nxc7zG7d++ma9eu7N69m3bt2nmvnzBhAieccAJPPvlko78eERGR1iTA1wGIiIiI7xiGgcViAeDXX3/lqaeeYuPGjeTn51NRUUFpaSnFxcWEhIRUe/81a9bgcrno2bNnlevLysqIjY1t8vhFRET8nZJyERGRNmzDhg2kpKSwc+dOzjzzTG6++WaeeOIJYmJimD9/Ptdffz3l5eU1JuWFhYXYbDaWLVuGzWarcltYWFhzvAQRERG/pqRcRESkjZo5cyZr1qzhz3/+M8uWLcPtdvPCCy9gtZotZz777LMqxwcGBuJyuapcN3jwYFwuF5mZmYwZM6bZYhcREWktlJSLiIi0AWVlZaSnp+NyucjIyGDatGk89dRTnHnmmUyZMoW1a9fidDp59dVXOeuss/jtt9948803qzxGly5dKCwsZMaMGQwcOJCQkBB69uzJFVdcwZQpU3jhhRcYPHgw+/fvZ8aMGQwYMIAzzjjDR69YRETEP6j7uoiISBswbdo0kpOT6dKlC6eddhqzZs3ilVde4ZtvvsFmszFw4EBefPFFnnnmGfr168dHH33EU089VeUxRo4cyU033cQll1xCfHw8zz77LADvvvsuU6ZM4e6776ZXr16ce+65LFmyhE6dOvnipYqIiPgVdV8XERERERER8RHNlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIjygpFxEREREREfERJeUiIiIiIiIiPqKkXERERERERMRHlJSLiIiIiIiI+IiSchEREREREREfUVIuIiIiIiIi4iP/D8qtFloq3E+YAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stream = team.run_stream(task=\"Write a financial report on American airlines\")\n", + "await Console(stream)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a financial report on American airlines', type='TextMessage'), ToolCallMessage(source='Stock_Analysis_Agent', models_usage=RequestUsage(prompt_tokens=64, completion_tokens=16), content=[FunctionCall(id='call_tPh9gSfGrDu1nC2Ck5RlfbFY', arguments='{\"ticker\":\"AAL\"}', name='analyze_stock')], type='ToolCallMessage'), ToolCallResultMessage(source='Stock_Analysis_Agent', models_usage=None, content=[FunctionExecutionResult(content=\"{'ticker': 'AAL', 'current_price': 17.4, '52_week_high': 18.09, '52_week_low': 9.07, '50_day_ma': 13.376799983978271, '200_day_ma': 12.604399962425232, 'ytd_price_change': 3.9600000381469727, 'ytd_percent_change': 29.46428691803602, 'trend': 'Upward', 'volatility': 0.4461582174242901, 'plot_file_path': 'coding/AAL_stockprice.png'}\", call_id='call_tPh9gSfGrDu1nC2Ck5RlfbFY')], type='ToolCallResultMessage'), TextMessage(source='Stock_Analysis_Agent', models_usage=None, content='Tool calls:\\nanalyze_stock({\"ticker\":\"AAL\"}) = {\\'ticker\\': \\'AAL\\', \\'current_price\\': 17.4, \\'52_week_high\\': 18.09, \\'52_week_low\\': 9.07, \\'50_day_ma\\': 13.376799983978271, \\'200_day_ma\\': 12.604399962425232, \\'ytd_price_change\\': 3.9600000381469727, \\'ytd_percent_change\\': 29.46428691803602, \\'trend\\': \\'Upward\\', \\'volatility\\': 0.4461582174242901, \\'plot_file_path\\': \\'coding/AAL_stockprice.png\\'}', type='TextMessage'), ToolCallMessage(source='Google_Search_Agent', models_usage=RequestUsage(prompt_tokens=268, completion_tokens=25), content=[FunctionCall(id='call_wSHc5Kw1ix3aQDXXT23opVnU', arguments='{\"query\":\"American Airlines financial report 2023\",\"num_results\":1}', name='google_search')], type='ToolCallMessage'), ToolCallResultMessage(source='Google_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content=\"[{'title': 'American Airlines reports fourth-quarter and full-year 2023 financial ...', 'link': 'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx', 'snippet': 'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...', 'body': 'Just a moment... Enable JavaScript and cookies to continue'}]\", call_id='call_wSHc5Kw1ix3aQDXXT23opVnU')], type='ToolCallResultMessage'), TextMessage(source='Google_Search_Agent', models_usage=None, content='Tool calls:\\ngoogle_search({\"query\":\"American Airlines financial report 2023\",\"num_results\":1}) = [{\\'title\\': \\'American Airlines reports fourth-quarter and full-year 2023 financial ...\\', \\'link\\': \\'https://news.aa.com/news/news-details/2024/American-Airlines-reports-fourth-quarter-and-full-year-2023-financial-results-CORP-FI-01/default.aspx\\', \\'snippet\\': \\'Jan 25, 2024 ... American Airlines Group Inc. (NASDAQ: AAL) today reported its fourth-quarter and full-year 2023 financial results, including: Record\\\\xa0...\\', \\'body\\': \\'Just a moment... Enable JavaScript and cookies to continue\\'}]', type='TextMessage'), TextMessage(source='Report_Agent', models_usage=RequestUsage(prompt_tokens=360, completion_tokens=633), content=\"### American Airlines Financial Report\\n\\n#### Overview\\nAmerican Airlines Group Inc. (NASDAQ: AAL) is a major American airline headquartered in Fort Worth, Texas. It is known as one of the largest airlines in the world by fleet size, revenue, and passenger kilometers flown. As of the current quarter in 2023, American Airlines has shown significant financial activities and stock performance noteworthy for investors and analysts.\\n\\n#### Stock Performance\\n- **Current Stock Price**: $17.40\\n- **52-Week Range**: The stock price has ranged from $9.07 to $18.09 over the past year, indicating considerable volatility and fluctuation in market interest.\\n- **Moving Averages**: \\n - 50-Day MA: $13.38\\n - 200-Day MA: $12.60\\n These moving averages suggest a strong upward trend in recent months as the 50-day moving average is positioned above the 200-day moving average, indicating bullish momentum.\\n\\n- **YTD Price Change**: $3.96\\n- **YTD Percent Change**: 29.46%\\n The year-to-date figures demonstrate a robust upward momentum, with the stock appreciating by nearly 29.5% since the beginning of the year.\\n\\n- **Trend**: The current stock trend for American Airlines is upward, reflecting positive market sentiment and performance improvements.\\n\\n- **Volatility**: 0.446, indicating moderate volatility in the stock, which may attract risk-tolerant investors seeking dynamic movements for potential profit.\\n\\n#### Recent Financial Performance\\nAccording to the latest financial reports of 2023 (accessed through a reliable source), American Airlines reported remarkable figures for both the fourth quarter and the full year 2023. Key highlights from the report include:\\n\\n- **Revenue Growth**: American Airlines experienced substantial revenue increases, driven by high demand for travel as pandemic-related restrictions eased globally.\\n- **Profit Margins**: The company managed to enhance its profitability, largely attributed to cost management strategies and increased operational efficiency.\\n- **Challenges**: Despite positive momentum, the airline industry faces ongoing challenges including fluctuating fuel prices, geopolitical tensions, and competition pressures.\\n\\n#### Strategic Initiatives\\nAmerican Airlines has been focusing on several strategic initiatives to maintain its market leadership and improve its financial metrics:\\n1. **Fleet Modernization**: Continuation of investment in more fuel-efficient aircraft to reduce operating costs and environmental impact.\\n2. **Enhanced Customer Experience**: Introduction of new services and technology enhancements aimed at improving customer satisfaction.\\n3. **Operational Efficiency**: Streamlining processes to cut costs and increase overall effectiveness, which includes leveraging data analytics for better decision-making.\\n\\n#### Conclusion\\nAmerican Airlines is demonstrating strong market performance and financial growth amid an evolving industry landscape. The company's stock has been on an upward trend, reflecting its solid operational strategies and recovery efforts post-COVID pandemic. Investors should remain mindful of external risks while considering American Airlines as a potential investment, supported by its current upward trajectory and strategic initiatives.\\n\\nFor further details, investors are encouraged to review the full financial reports from American Airlines and assess ongoing market conditions.\\n\\n_TERMINATE_\", type='TextMessage')], stop_reason='Maximum number of turns 3 reached.')" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+UAAAIjCAYAAABlBbqXAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/GU6VOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3hUZfbA8e+09EYaSSAFQgm9S5XeEURxUWwgdrHruro/G5Z11bWLurpSVBAriIoUlQ7SlA4BQgglvfdkyv39cWcmCSmkT8r5PM88SWbuzH1ncjOZc895z6tRFEVBCCGEEEIIIYQQjU7r6AEIIYQQQgghhBCtlQTlQgghhBBCCCGEg0hQLoQQQgghhBBCOIgE5UIIIYQQQgghhINIUC6EEEIIIYQQQjiIBOVCCCGEEEIIIYSDSFAuhBBCCCGEEEI4iATlQgghhBBCCCGEg0hQLoQQQgghhBBCOIgE5UIIIUQT8vzzz6PRaEhNTXXI/ufNm0dERIRD9v3aa68RFRWFxWJxyP6bM6PRSGhoKB988IGjhyKEEKKGJCgXQgjR6D744AM0Gg2DBw+u1vZPPPEEGo2G66+/vsLbz549i0aj4T//+U+txvPjjz8yatQoAgMDcXNzo2PHjsyePZt169bZt4mPj+f555/nwIEDtdpHY1i6dCkajcZ+cXFxoUuXLtx///0kJSU5enhVys7O5tVXX+Uf//gHWm3Jx5PSz0er1RISEsLEiRPZvHlzg4zjgw8+YOnSpZfdbuXKlWg0Gv773/9WePu9996LwWDg4MGD9TzCihkMBh599FFefvllCgsLG2WfQggh6ocE5UIIIRrd8uXLiYiIYM+ePZw+fbrKbRVF4csvvyQiIoIff/yRnJyceh3Lf/7zH2bMmIFGo+Gpp57irbfeYtasWZw6dYqVK1fat4uPj2fhwoVNOii3eeGFF/j88895//33GTZsGB9++CFDhw4lPz//svf95JNPiI6OboRRlrV48WJMJhNz5swpd9uECRP4/PPPWbZsGffccw+HDh1i7Nix/PLLL/U+juoG5TfccAOTJ0/mySefLHfCY8+ePXz88cc88sgj9OnTp97HWJnbbruN1NRUVqxY0Wj7FEIIUXd6Rw9ACCFE6xIbG8vOnTv5/vvvufvuu1m+fDnPPfdcpdtv3ryZCxcu8PvvvzNp0iS+//575s6dWy9jMZlMvPjii0yYMIENGzaUuz05Oble9tPYpkyZwsCBAwG444478PPz48033+SHH36oMOgFyMvLw93dHYPB0JhDtVuyZAkzZszAxcWl3G1dunTh5ptvtv98zTXX0Lt3b95++22mTJnSmMMs48MPP6RHjx488sgj9kDYbDZz9913ExYWxvPPP9/gY1AUhcLCQlxdXfHx8WHixIksXbqU+fPnN/i+hRBC1A/JlAshhGhUy5cvp02bNkybNo3rrruO5cuXX3b77t27M2bMGMaPH3/Z7WsiNTWV7Oxshg8fXuHtgYGBgHpiYNCgQYCajbSVU5fOqH7zzTcMGDAAV1dX/P39ufnmm7l48WK5xzxx4gSzZ88mICAAV1dXunbtyv/93/9VOc64uDg6depEz549a1WGPnbsWEA9IQLqvHEPDw9iYmKYOnUqnp6e3HTTTfbbLp1TbrFYeOedd+jVqxcuLi4EBAQwefJk9u3bV2a7L774wv4a+Pr6csMNN3D+/PnLji82NpZDhw4xfvz4aj2fXr164e/vb38+27Zt429/+xthYWE4OzsTGhrKI488QkFBQZn7JSYmctttt9G+fXucnZ0JDg7m6quv5uzZswBERERw9OhRtmzZYv8djx49utJxRERE8Pzzz/Pll1+yceNGAN59910OHDjAhx9+iJubG0VFRTz33HN06tTJPrYnnniCoqKiMo+1ZMkSxo4dS2BgIM7OznTv3p0PP/ywwn1eddVVrF+/noEDB+Lq6lqmhH7ChAls376d9PT0ar2WQgghHE+CciGEEI1q+fLlXHvttTg5OTFnzhxOnTrF3r17K9y2qKiI7777zp7dnTNnDr///juJiYn1MpbAwEBcXV358ccfqwxiunXrxgsvvADAXXfdxeeff87nn3/OyJEjAXUu9+zZs9HpdLzyyivceeedfP/994wYMYLMzEz74xw6dIjBgwfz+++/c+edd/LOO+8wc+ZMfvzxx0r3HRMTw8iRI/H09GTz5s20bdu2xs8zJiYGAD8/P/t1JpOJSZMmERgYyH/+8x9mzZpV6f1vv/12Hn74YUJDQ3n11Vd58skncXFx4Y8//rBv8/LLL3PrrbfSuXNn3nzzTR5++GF+++03Ro4cWeY1qMjOnTsB6N+/f7WeT0ZGBhkZGfbn880335Cfn8+9997Le++9x6RJk3jvvfe49dZby9xv1qxZrFq1ittuu40PPviABx98kJycHM6dOwfA22+/Tfv27YmKirL/ji93wsRWon7vvfdy+vRpnn32WXtpu8ViYcaMGfznP/9h+vTpvPfee8ycOZO33nqrXH+EDz/8kPDwcP75z3/yxhtvEBoayn333ceiRYvK7TM6Opo5c+YwYcIE3nnnHfr27Wu/bcCAASiKYn9NhRBCNAOKEEII0Uj27dunAMrGjRsVRVEUi8WitG/fXnnooYcq3P7bb79VAOXUqVOKoihKdna24uLiorz11ltltouNjVUA5fXXX6/xmJ599lkFUNzd3ZUpU6YoL7/8srJ///5y2+3du1cBlCVLlpS5vri4WAkMDFR69uypFBQU2K//6aefFEB59tln7deNHDlS8fT0VOLi4so8hsVisX//3HPPKYCSkpKiHD9+XAkJCVEGDRqkpKenX/a5LFmyRAGUX3/9VUlJSVHOnz+vrFy5UvHz81NcXV2VCxcuKIqiKHPnzlUA5cknnyz3GHPnzlXCw8PtP//+++8KoDz44IPltrWN++zZs4pOp1NefvnlMrcfPnxY0ev15a6/1NNPP60ASk5OTrnbAOX2229XUlJSlOTkZGX37t3KuHHjFEB54403FEVRlPz8/HL3e+WVVxSNRmN/rTMyMqp1jPTo0UMZNWpUldtcavfu3YpWq1V8fX0VHx8fJTExUVEURfn8888VrVarbNu2rcz2H330kQIoO3bssF9X0XOYNGmS0rFjxzLXhYeHK4Cybt26CscSHx+vAMqrr75ao+cghBDCcSRTLoQQotEsX76ctm3bMmbMGAB7R/WVK1diNpsr3H7gwIF06tQJAE9PT6ZNm1avJewLFy5kxYoV9OvXj/Xr1/N///d/DBgwgP79+3P8+PHL3n/fvn0kJydz3333lZkPPW3aNKKiovj5558BSElJYevWrcyfP5+wsLAyj6HRaMo97pEjRxg1ahQRERH8+uuvtGnTptrPafz48QQEBBAaGsoNN9yAh4cHq1atol27dmW2u/feey/7WN999x0ajabCef+2cX///fdYLBZmz55Namqq/RIUFETnzp3ZtGlTlftIS0tDr9fj4eFR4e2ffvopAQEBBAYGMnjwYHbs2MGjjz7Kww8/DICrq6t927y8PFJTUxk2bBiKovDXX3/Zt3FycmLz5s1kZGRc9nnXxBVXXME999xDeno6r7zyir2a4ZtvvqFbt25ERUWVeV1s0wlKvy6ln0NWVhapqamMGjWKM2fOkJWVVWZ/HTp0YNKkSRWOxXacOGpJPSGEEDUnjd6EEEI0CrPZzMqVKxkzZox9LjDA4MGDeeONN/jtt9+YOHGi/frMzEzWrl3L/fffX6ZD+/Dhw/nuu+84efIkXbp0qZexzZkzhzlz5pCdnc3u3btZunQpK1asYPr06Rw5cqTC5mM2cXFxAHTt2rXcbVFRUWzfvh2AM2fOANCzZ89qjWn69Om0bduW9evXVxqsVmbRokV06dIFvV5P27Zt6dq1a5llxgD0ej3t27e/7GPFxMQQEhKCr69vpducOnUKRVHo3LlzhbfXtXnc1Vdfzf33349Go8HT05MePXrg7u5uv/3cuXM8++yzrFmzplzAbQtonZ2defXVV3nsscdo27YtQ4YM4aqrruLWW28lKCioTuMD7D0HbA32QH1djh8/TkBAQIX3Kd1IcMeOHTz33HPs2rWrXJf8rKwsvL297T936NCh0nEoigJUfKJHCCFE0yRBuRBCiEbx+++/k5CQwMqVK8ssNWazfPnyMkH5N998Q1FREW+88QZvvPFGhdsvXLiwXsfo5eXFhAkTmDBhAgaDgWXLlrF7925GjRpVr/upjlmzZrFs2TKWL1/O3XffXaP7XnHFFWWCw4o4OzuXC9Rry2KxoNFo+OWXX9DpdOVuv9xJBT8/P0wmEzk5OXh6epa7vX379pU2gTObzUyYMIH09HT+8Y9/EBUVhbu7OxcvXmTevHlYLBb7tg8//DDTp09n9erVrF+/nmeeeYZXXnmF33//nX79+tXwWV+exWKhV69evPnmmxXeHhoaCqgnPsaNG0dUVBRvvvkmoaGhODk5sXbtWt56660yzwHKZtUvZTsp4e/vX0/PQgghREOToFwIIUSjWL58OYGBgRU2rvr+++9ZtWoVH330kT3gWL58OT179qywbPq///0vK1asqPegvLSBAweybNkyEhISgMozj+Hh4YDafMtWlmwTHR1tv71jx46AWpZeHa+//jp6vZ777rsPT09Pbrzxxlo9j7qKjIxk/fr1pKenV5otj4yMRFEUOnToUKvqhaioKEDtwt67d+8a3ffw4cOcPHmSZcuWlWnsZuuGXtFYH3vsMR577DFOnTpF3759eeONN/jiiy+A+s0wR0ZGcvDgQcaNG1fl4/74448UFRWxZs2aMlMbLlf2XxFbFUq3bt1qPmAhhBAOIXPKhRBCNLiCggK+//57rrrqKq677rpyl/vvv5+cnBzWrFkDwPnz59m6dSuzZ8+ucPvbbruN06dPs3v37jqNKz8/n127dlV42y+//AKUlKXbyqUv7SQ+cOBAAgMD+eijj8osc/XLL79w/Phxpk2bBkBAQAAjR45k8eLF9m7fNraS49I0Gg0ff/wx1113HXPnzrW/No1t1qxZKIpS4QkQ27ivvfZadDodCxcuLPdcFEUhLS2tyn0MHToUoNwSa9Vhy8yX3q+iKLzzzjtltsvPz6ewsLDMdZGRkXh6epb5vbm7u1+2W3x1zZ49m4sXL/LJJ5+Uu62goIC8vLxKn0NWVhZLliyp8T7379+PRqOxv6ZCCCGaPsmUCyGEaHBr1qwhJyeHGTNmVHj7kCFDCAgIYPny5Vx//fWsWLECRVEq3X7q1Kno9XqWL1/O4MGD7df/9ttv5QIvgJkzZ1Y4lzs/P59hw4YxZMgQJk+eTGhoKJmZmaxevZpt27Yxc+ZMe1lzZGQkPj4+fPTRR3h6euLu7s7gwYPp0KEDr776KrfddhujRo1izpw5JCUl8c477xAREcEjjzxi39+7777LiBEj6N+/P3fddRcdOnTg7Nmz/Pzzzxw4cKDc+LRaLV988QUzZ85k9uzZrF27tlw2vqGNGTOGW265hXfffZdTp07Zl/ratm0bY8aM4f777ycyMpKXXnqJp556irNnzzJz5kw8PT2JjY1l1apV3HXXXTz++OOV7qNjx4707NmTX3/9lfnz59dofFFRUURGRvL4449z8eJFvLy8+O6778rNLT958iTjxo1j9uzZdO/eHb1ez6pVq0hKSuKGG26wbzdgwAA+/PBDXnrpJTp16kRgYGCtX/NbbrmFr7/+mnvuuYdNmzYxfPhwzGYzJ06c4Ouvv7avNT5x4kScnJyYPn06d999N7m5uXzyyScEBgbaKzWqa+PGjQwfPrzM8ndCCCGaOIf0fBdCCNGqTJ8+XXFxcVHy8vIq3WbevHmKwWBQUlNTlV69eilhYWFVPubo0aOVwMBAxWg02pdEq+zy+eefV/gYRqNR+eSTT5SZM2cq4eHhirOzs+Lm5qb069dPef3115WioqIy2//www9K9+7dFb1eX255tK+++krp16+f4uzsrPj6+io33XSTfQmy0o4cOaJcc801io+Pj+Li4qJ07dpVeeaZZ+y3l14SzSY/P18ZNWqU4uHhofzxxx+Vvia2JdH27t1b5Ws3d+5cxd3dvdLbSi+JpiiKYjKZlNdff12JiopSnJyclICAAGXKlCnllo777rvvlBEjRiju7u6Ku7u7EhUVpSxYsECJjo6ucjyKoihvvvmm4uHhUW5pMEBZsGBBlfc9duyYMn78eMXDw0Px9/dX7rzzTuXgwYNlfkepqanKggULlKioKMXd3V3x9vZWBg8erHz99ddlHisxMVGZNm2a4unpqQDVXh6tste+uLhYefXVV5UePXoozs7OSps2bZQBAwYoCxcuVLKysuzbrVmzRundu7fi4uKiREREKK+++qqyePFiBVBiY2Pt24WHhyvTpk2rcAyZmZmKk5OT8r///a9aYxZCCNE0aBSlgpo5IYQQQohGlJWVRceOHXnttde4/fbbHT2cZuntt9/mtddeIyYmpspmcEIIIZoWmVMuhBBCCIfz9vbmiSee4PXXXy/XbVxcntFo5M033+Tpp5+WgFwIIZoZyZQLIYQQQgghhBAOIplyIYQQQgghhBDCQSQoF0IIIYQQQgghHESCciGEEEIIIYQQwkEkKBdCCCGEEEIIIRxE7+gBNDSLxUJ8fDyenp5oNBpHD0cIIYQQQgghRAunKAo5OTmEhISg1VadC2/xQXl8fDyhoaGOHoYQQgghhBBCiFbm/PnztG/fvsptWnxQ7unpCagvhpeXl4NH07IZjUY2bNjAxIkTMRgMjh6OaAbkmBG1JceOqC05dkRdyTEkakuOndYlOzub0NBQezxalRYflNtK1r28vCQob2BGoxE3Nze8vLzkjUZUixwzorbk2BG1JceOqCs5hkRtybHTOlVnCrU0ehNCCCGEEEIIIRxEgnIhhBBCCCGEEMJBJCgXQgghhBBCCCEcpMXPKa8ORVEwmUyYzWZHD6VZMxqN6PV6CgsLm+VrqdPp0Ov1snSeEEIIIYQQotG0+qC8uLiYhIQE8vPzHT2UZk9RFIKCgjh//nyzDWzd3NwIDg7GycnJ0UMRQgghhBBCtAKtOii3WCzExsai0+kICQnBycmp2QaTTYHFYiE3NxcPDw+02uY1M0JRFIqLi0lJSSE2NpbOnTs3u+cghBBCCCGEaH5adVBeXFyMxWIhNDQUNzc3Rw+n2bNYLBQXF+Pi4tIsA1pXV1cMBgNxcXH25yGEEEIIIYQQDan5RU4NoDkGkKJhyLEghBBCCCGEaEwSgQghhBBCCCGEEA4iQbkQQgghhBBCCOEgEpS3YBqNhtWrVzt6GDU2evRoHn74YUcPQwghhBBCCCEanATlzVRiYiIPPPAAHTt2xNnZmdDQUKZPn85vv/3m6KHZPf/882g0GjQaDXq9noiICB555BFyc3OrvN/333/Piy++2EijFEIIIYQQQgjHadXd15urs2fPMnz4cHx8fHj99dfp1asXRqOR9evXs2DBAk6cOOHoIdr16NGDX3/9FZPJxI4dO5g/fz75+fn897//LbdtcXExTk5O+Pr6OmCkQgghhBBCCNH4JFN+CUVRyC82NfpFUZRqj/G+++5Do9GwZ88eZs2aRZcuXejRowePPvoof/zxR6X3O3z4MGPHjsXV1RU/Pz/uuuuuMlnrzZs3c8UVV+Du7o6Pjw/Dhw8nLi7OfvsPP/xA//79cXFxoWPHjixcuBCTyVTlWPV6PUFBQbRv357rr7+em266iTVr1gBqJr1v377873//o0OHDvYlyC4tXy8qKuIf//gHoaGhODs706lTJz799FP77UeOHGHKlCl4eHjQtm1bbrnlFlJTU6v9egohhBBCCCGEo0im/BIFRjPdn13f6Ps99sIk3Jwu/+tIT09n3bp1vPzyy7i7u5e73cfHp8L75eXlMWnSJIYOHcrevXtJTk7mjjvu4P7772fp0qWYTCZmzpzJnXfeyZdffklxcTF79uxBo9EAsG3bNm699VbeffddrrzySmJiYrjrrrsAeO6556r9PF1dXSkuLrb/fPr0ab777ju+//57dDpdhfe59dZb2bVrF++++y59+vQhNjbWHnRnZmYyduxY7rjjDt566y0KCgr4xz/+wezZs/n999+rPS4hhBBCCCGEcAQJypuZ06dPoygKUVFRNbrfihUrKCws5LPPPrMH8++//z7Tp0/n1VdfxWAwkJWVxVVXXUVkZCQA3bp1s99/4cKFPPnkk8ydOxeAjh078uKLL/LEE09UOyjfv38/K1asYOzYsfbriouL+eyzzwgICKjwPidPnuTrr79m48aNjB8/3r5vm/fff59+/frxr3/9y37d4sWLCQ0N5eTJk3Tp0qVaYxNCCCGEEEIIR5Cg/BKuBh3HXpjkkP1WR03K3Es7fvw4ffr0KZNdHz58OBaLhejoaEaOHMm8efOYNGkSEyZMYPz48cyePZvg4GAADh48yI4dO3j55Zft9zebzRQWFpKfn4+bm1uF+z18+DAeHh6YzWaKi4uZNm0a77//vv328PDwSgNygAMHDqDT6Rg1alSFtx88eJBNmzbh4eFR7raYmBgJyoUQQgghhGghzBaF344n4eakZ0Rnf0cPp95IUH4JjUZTrTJyR+ncuTMajaZBmrktWbKEBx98kHXr1vHVV1/x9NNPs3HjRoYMGUJubi4LFy7k2muvLXc/21zwinTt2pU1a9ag1+sJCQnBycmpzO0VleCX5urqWuXtubm59mz/pWwnFIQQQgghhBDNV5HJzJe7z7Fk51ni0vLp1c6b4Z2G26faNndNN/oUFfL19WXSpEksWrSIBx98sFxQm5mZWeG88m7durF06VLy8vLs99mxYwdarZauXbvat+vXrx/9+vXjqaeeYujQoaxYsYIhQ4bQv39/oqOj6dSpU43G6+TkVOP7lNarVy8sFgtbtmyxl6+X1r9/f7777jsiIiLQ6+VwFkIIIYQQoqV5c+NJ/rvlDADergaGd/LHaFZw0reMoFy6rzdDixYtwmw2c8UVV/Ddd99x6tQpjh8/zrvvvsvQoUMrvM9NN92Ei4sLc+fO5ciRI2zatIkHHniAW265hbZt2xIbG8tTTz3Frl27iIuLY8OGDZw6dco+r/zZZ5/ls88+Y+HChRw9epTjx4+zcuVKnn766QZ9rhEREcydO5f58+ezevVqYmNj2bx5M19//TUACxYsID09nTlz5rB3715iYmJYv349t912G2azuUHHJoQQQgghhGh4iVmFAMwe2J5dT43lySlROOlbTijbcp5JK9KxY0f+/PNPxowZw2OPPUbPnj2ZMGECv/32Gx9++GGF93Fzc2P9+vWkp6czaNAgrrvuOsaNG2ef3+3m5saJEyfsS6zdddddLFiwgLvvvhuASZMm8dNPP7FhwwYGDRrEkCFDeOuttwgPD2/w5/vhhx9y3XXXcd999xEVFcWdd95JXl4eACEhIezYsQOz2czEiRPp1asXDz/8MD4+Pmi1cngLIYQQQgjR3JnMal+tHiHeTXqqcW1plNp2DmsmsrOz8fb2JisrCy8vrzK3FRYWEhsbW2aNbFF7FouF7OxsvLy8mm1ALMdE4zIajaxdu5apU6diMBgcPRzRjMixI2pLjh1RV3IMidqSY6f27vpsHxuOJfHyNT25aXDDJwXrQ1Vx6KWaZ+QkhBBCCCGEEKJVMFvUPLJe2zLmkF9KgnIhhBBCCCGEEE2W0R6Ut8zwtWU+KyGEEEIIIYQQLYLJbAFAr5NMuRBCCCGEEEII0ahMkikXQgghhBBCCCEcQzLlQgghhBBCCCGEg9gy5QYJyoUQQgghhBBCiMZlW6dcJ+XrQgghhBBCCCFE4zJZ1PJ1gyyJJoQQQgghhBBCNC57ozddywxfW+azEvVm6dKl+Pj4OHoYQgghhBBCiFaqpHxdMuWiiXj++efRaDRlLlFRUWW2KSwsZMGCBfj5+eHh4cGsWbNISkpy0IirdvbsWTQaDTqdjosXL5a5LSEhAb1ej0aj4ezZs+XuO2nSJHQ6HXv37m2k0QohhBBCCCEak637ujR6E01Kjx49SEhIsF+2b99e5vZHHnmEH3/8kW+++YYtW7YQHx/Ptdde66DRVk+7du347LPPyly3bNky2rVrV+H2586dY+fOndx///0sXry4MYYohBBCCCGEaGS28nXJlDeArVu3Mn36dEJCQtBoNKxevbrM7Zdmg22X119/veEGpShQnNf4F0Wp0TD1ej1BQUH2i7+/v/22rKwsPv30U958803Gjh3LgAEDWLJkCTt37uSPP/6o8nGXLl1KWFgYbm5uXHPNNaSlpZW5PSYmhquvvpq2bdvi4eHBoEGD+PXXX+23v/baa/Tu3bvc4/bt25dnnnmmyn3PnTuXJUuWlLluyZIlzJ07t8LtlyxZwlVXXcW9997Ll19+SUFBQZWPL4QQQgghhGh+SpZEa5k5Zb0jd56Xl0efPn2YP39+hVnchISEMj//8ssv3H777cyaNavhBmXMh3+FNNzjV+af8eDkXu3NT506RUhICC4uLgwdOpRXXnmFsLAwAPbv34/RaGT8+PH27aOioggLC2PXrl0MGTKkwsfcvXs3t99+O6+88gozZ85k3bp1PPfcc2W2yc3NZerUqbz88ss4Ozvz2WefMX36dKKjo2nfvj033XQTr776Knv37mXQoEEA/PXXXxw6dIjvv/++yuc0Y8YMPvroI7Zv386IESPYvn07GRkZTJ8+nRdffLHMtoqisGTJEhYtWkRUVBSdOnXi22+/5ZZbbqn2ayiEEEIIIYRo+ozW8nV9C82UOzQonzJlClOmTKn09qCgoDI///DDD4wZM4aOHTs29NCatMGDB7N06VK6du1KQkICCxcu5Morr+TIkSN4enqSmJiIk5NTuQZtbdu2JTExsdLHfeedd5g8eTJPPPEEAF26dGHnzp2sW7fOvk2fPn3o06eP/ecXX3yRVatWsWbNGu677z7atWvHxIkTWbJkiT0oX7JkCaNGjbrs781gMHDzzTezePFiRowYweLFi7n55psxGAzltv3111/Jz89n0qRJANx88818+umnEpQLIYQQQgjRwpht3ddb6DrlDg3KayIpKYmff/6ZZcuWVbldUVERRUVF9p+zs7MBMBqNGI3GMtsajUYURcFisWCxrn2HzgWevFC/g68OnQvYxnAZtkAUoGfPngwaNIgOHTqwcuVKbr/9dvtzsVTweLbn26tXL+Li4gAYMWIEa9eu5fjx48ycObPM/YYMGcK6devs1+Xm5rJw4ULWrl1LQkICJpOJgoIC4uLiUKwl+HfccQd33HEH//nPf9BqtaxYsYI33nijwvGUHqfFYmHevHmMGDGCl156iW+++YYdO3ZgMpnst9u2/fTTT5k9ezZarRaLxcL111/P3//+d06dOkVkZGS1XsfKxqIoCkajEZ1OV+vHEdVj+5u89G9TiMuRY0fUlhw7oq7kGBK1JcdO7dky5SjmZvP61WSczSYoX7ZsGZ6enpdtVvbKK6+wcOHCctdv2LABNze3MtfZ5mXn5uZSXFxcr+OtscKcWt9Vq9USGRnJsWPHyM7OxsvLi+LiYs6fP4+3t7d9u4SEBHx8fMjOzubLL7+0B7suLi5kZ2djNpspKiqyn8gAtYu7oij26x555BE2b97Miy++SIcOHXB1dWXu3Lnk5uaSk6M+h1GjRuHk5MSKFStwcnKiuLiYiRMnlnnc0nJzcwF1OkOvXr3o3Lkz119/PV26dCEsLIzDhw/bt8vOziYjI4PVq1djNBr56KOP7I9jNpv56KOPLjt3vSrFxcUUFBSwdetW++sjGt7GjRsdPQTRTMmxI2pLjh1RV3IMidqSY6fmTGYdoGHLpt/xcnL0aKonPz+/2ts2m6B88eLF3HTTTbi4uFS53VNPPcWjjz5q/zk7O5vQ0FAmTpyIl5dXmW0LCws5f/48Hh4el33cpiw3N5ezZ88yd+5cvLy8uPLKKzEYDOzZs8c+/z46OpoLFy4wevRovLy86NmzZ7nH6dGjBwcOHCjzOh04cACNRmO/bt++fdx2223ceOON9n2fP38eJycnPD09ycnJoU2bNsydO5evvvoKJycnbrjhBtq2bVvp+D08PABwd3fHy8uL22+/nfvvv59Fixbh5eWFu7u7fTsvLy8+++wz2rdvX26O+saNG3nzzTf597//Xessd2FhIa6urowcObJZHxPNhdFoZOPGjUyYMKHCaQpCVEaOHVFbcuyIupJjSNSWHDu1Y7EoKLvUExkTJ4zH1715ROWVJSQr0iyC8m3bthEdHc1XX3112W2dnZ1xdnYud73BYCh38JvNZjQaDVqtFm0zmp/w+OOPM336dMLDw4mPj+e5555Dp9Nx4403otVqadOmDbfffjuPP/44/v7+eHl58cADDzB06FCGDRtW6eM+9NBDDB8+nDfffJOrr76a9evXs379egD769O5c2dWrVrFjBkz0Gg0PPPMM1gsFntnfFC75t95551069YNgB07dlT5+tpus/0e7r77bq6//np8fHzK/G5s3y9evJjrrruuXJf38PBw/vnPf7JhwwamTZtWq9dWq9Wi0WgqPF5Ew5HXW9SWHDuituTYEXUlx5CoLTl2aqbIZLZ/7+ri1Gxeu5qMs1lEop9++ikDBgwo02CsNbtw4QJz5syha9euzJ49Gz8/P/744w8CAgLs27z11ltcddVVzJo1i5EjRxIUFHTZ7udDhgzhk08+4Z133qFPnz5s2LCBp59+usw2b775Jm3atGHYsGFMnz6dSZMm0b9//3KP1blzZ4YNG0ZUVBSDBw+u0fPT6/X4+/uj15c/Z7R//34OHjxYYQd+b29vxo0bx6efflqj/QkhhBBCCCGaJpO5ZOloQzNKpNaEQzPlubm5nD592v5zbGwsBw4cwNfX1768V3Z2Nt988w1vvPGGo4bZ5KxcufKy27i4uLBo0SIWLVpUo8eeP38+8+fPL3PdY489Zv8+IiKC33//vcztCxYsAMo2llMUhfj4eO67777L7jMiIsLeJK4iffv2td9+uW3Xrl172f0JIYQQQgghmgfbGuUAOlkSrf7t27ePMWPG2H+2zQWfO3cuS5cuBdQAVFEU5syZ44ghilpISUnh66+/JjExkdtuu83RwxFCCCGEEEI0UyZzSeLPoJOgvN6NHj26yqwnwF133cVdd93VSCMS9SEoKAh/f38+/vhj2rRp4+jhCCGEEEIIIZopW6Zcpy3pYdXSNItGb6J5MZvNzapxnhBCCCGEEKJpKh2Ut1QSOQkhhBBCCCGEaJJs5esGCcqFEEIIIYQQQojGZcuU63UtN3Rtuc9MCCGEEEIIIUSzZlsSTS+ZciGEEEIIIYQQonEZreXr+hbaeR0kKBdCCCGEEEII0USZbeXrLbiRdMt9ZkIIIYQQQgghmjWTRTLlQlzWvHnzmDlzpqOHIYQQQgghhGhhjDKnXDRFr7zyCoMGDcLT05PAwEBmzpxJdHR0mW0KCwtZsGABfn5+eHh4MGvWLJKSkspsc+7cOaZNm4abmxuBgYH8/e9/x2QyNeZTqbalS5ei0Wjo1q1budu++eYbNBoNERER5W4rKCjA19cXf39/ioqKGmGkQgghhBBCiPoi5euiSdqyZQsLFizgjz/+YOPGjRiNRiZOnEheXp59m0ceeYQff/yRb775hi1bthAfH8+1115rv91sNjNt2jSKi4vZuXMny5YtY+nSpTz77LOOeErV4u7uTnJyMrt27Spz/aeffkpYWFiF9/nuu+/o0aMHUVFRrF69uhFGKYQQQgghhKgv0uitFVIUhXxjfqNfFEWp9hjXrVvHvHnz6NGjB3369GHp0qWcO3eO/fv3A5CVlcWnn37Km2++ydixYxkwYABLlixh586d/PHHHwBs2LCBY8eO8cUXX9C3b1+mTJnCiy++yKJFiyguLq5032azmUcffRQfHx/8/Px44oknyo193bp1jBgxwr7NVVddRUxMjP32sWPHcv/995e5T0pKCk5OTvz222+V7luv13PjjTeyePFi+3UXLlxg8+bN3HjjjRXe59NPP+Xmm2/m5ptv5tNPP630sYUQQgghhBBNj31JtBa8Trne0QNoagpMBQxeMbjR97v7xt24Gdxqdd+srCwAfH19Adi/fz9Go5Hx48fbt4mKiiIsLIxdu3YxZMgQdu3aRa9evWjbtq19m0mTJnHvvfdy9OhR+vXrV+G+3njjDZYuXcrixYvp1q0bb7zxBqtWrWLs2LH2bfLy8nj00Ufp3bs3ubm5PPvss1xzzTUcOHAArVbLHXfcwf33388bb7yBs7MzAF988QXt2rUr8zgVmT9/PqNHj+add97Bzc2NpUuXMnny5DLPwyYmJoZdu3bx/fffoygKjzzyCHFxcYSHh1fzlRVCCCGEEEI4kskic8pFE2exWHj44YcZPnw4PXv2BCAxMREnJyd8fHzKbNu2bVsSExPt21wayNp+tm1TkbfffpunnnqKa6+9lm7duvHRRx/h7e1dZptZs2Zx7bXX0qlTJ/r27cvixYs5fPgwx44dA7CX0f/www/2+yxdupR58+ah0VT9x9avXz86duzIt99+i6IoLF26lPnz51e47eLFi5kyZQpt2rTB19eXSZMmsWTJkiofXwghhBBCCNF02Luvt+CgXDLll3DVu7L7xt0O2W9tLFiwgCNHjrB9+/Z6Hc+5c+fo3r27/ed//vOfLFiwgISEBAYPLqkk0Ov1DBw4sEwJ+6lTp3j++efZvXs3qampWKx/SOfOnaNnz564uLhwyy23sHjxYmbPns2ff/7JkSNHWLNmTbXGNn/+fJYsWUJYWBh5eXlMnTqV999/v8w2ZrOZZcuW8c4779ivu/nmm3n88cd59tln0bbgRhFCCCGEEEK0FLbydYOUr7ceGo2m1mXkje3+++/np59+YuvWrbRv395+fVBQEMXFxWRmZpbJliclJREUFGTfZs+ePWUez9adPSgoiJCQEA4cOGC/zVYaXx1XX3014eHhfPLJJ4SEhGCxWOjZs2eZuep33HEHffv25cKFCyxZsoSxY8dWu6z8pptu4oknnuD555/nlltuQa8vfxivX7+eixcvcv3115e53mw289tvvzFhwoRqPx8hhBBCCCGEY9jK13UtOFPeck83tGCKonD//fezatUqfv/9dzp06FDm9gEDBmAwGMo0TYuOjubcuXMMHToUgKFDh3L48GGSk5Pt22zcuBEvLy+6d++OXq+nU6dO9ouvry/e3t4EBweze3dJJYHJZLI3mANIT08nOjqap59+mnHjxtGtWzcyMjLKPYdevXoxcOBAPvnkE1asWFFpCXpFfH19mTFjBlu2bKn0fp9++ik33HADBw4cKHO54YYbpOGbEEIIIYQQzYTJ2n3d0IK7r0umvBlasGABK1as4IcffsDT09M+B9zb2xtXV1e8vb25/fbbefTRR/H19cXLy4sHHniAoUOHMmTIEAAmTpxI9+7dueWWW3jttddITEzk6aefZsGCBfbmaxV56KGH+Pe//03nzp2JiorizTffJDMz0367reP6xx9/THBwMOfOnePJJ5+s8LFsDd/c3d255ppravQaLF26lA8++AA/P79yt6WkpPDjjz+yZs0a+zx7m1tvvZVrrrmG9PT0GmX/hRBCCCGEEI3PJOuUi6boww8/JCsri9GjRxMcHGy/fPXVV/Zt3nrrLa666ipmzZrFyJEjCQoK4vvvv7ffrtPp+Omnn9DpdAwdOpSbb76ZW2+9lRdeeKHKfT/22GPccsstzJ07l6FDh+Lp6VkmoNZqtaxYsYL9+/fTs2dPHnnkEV5//fUKH2vOnDno9XrmzJmDi4tLjV4DV1fXCgNygM8++wx3d3fGjRtX7rZx48bh6urKF198UaP9CSGEEEIIIRqfLVOuk0y5aEqqs6a5i4sLixYtYtGiRZVuEx4eztq1a2u0b71ez9tvv83bb79d7jZbQ7fx48fbO61XNebU1FQKCwu5/fbbL7vfefPmMW/evEpvf/jhh3n44YcB9cTBY489VuF2Tk5OFZbTCyGEEEIIIZoeW6bc0ILnlEtQLhqd0WgkLS2Np59+miFDhtC/f39HD0kIIYQQQgjRBJU0emu5Rd4t95mJJmvHjh0EBwezd+9ePvroI0cPRwghhBBCCNFESaM3IRrA6NGjq1WCL4QQQgghhGjdjNZ1yvUtOCiXTLkQQgghhBBCiCbJLN3XWwfJ2gobORaEEEIIIYRoOozWZtL6FtzorVUH5QaDAYD8/HwHj0Q0FbZjwXZsCCGEEEIIIRzHZC9fb7mha6ueU67T6fDx8SE5ORkANzc3NJqWewamoVksFoqLiyksLETbzMpLFEUhPz+f5ORkfHx80Ol0jh6SEEIIIYQQrV5J+XrLjdNadVAOEBQUBGAPzEXtKYpCQUEBrq6uzfbkho+Pj/2YEEIIIYQQQjiW0dp9vSU3emv1QblGoyE4OJjAwECMRqOjh9OsGY1Gtm7dysiRI5tl+bfBYJAMuRBCCCGEEE2IrXzdIOXrLZ9Op5OArI50Oh0mkwkXF5dmGZQLIYQQQgghmhaTtXxd14LL11vu6QYhhBBCCCGEEM2aSbqvCyGEEEIIIYQQjtEaytdb7jMTQgghhBBCCNGs2TLlUr4uhBBCCCGEEEI0spJMuQTlQgghhBBCCCFEoypp9NZyQ9eW+8yEEEIIIYQQQjRrtvJ1yZQLIYQQQgghhBCNzGgtX9dLplwIIYQQQgghhGhcZlmnXAghhBBCCCGEcAyTWcrXhRBCCCGEEEIIh7CXr8s65UIIIYQQQgghROOyla/rpXxdCCGEEEIIIYRoXEZr93UJyhvI1q1bmT59OiEhIWg0GlavXl1um+PHjzNjxgy8vb1xd3dn0KBBnDt3rvEHK4QQQgghhBCiUZmkfL1h5eXl0adPHxYtWlTh7TExMYwYMYKoqCg2b97MoUOHeOaZZ3BxcWnkkQohhBBCCCGEaGytoXxd78idT5kyhSlTplR6+//93/8xdepUXnvtNft1kZGRjTE0IYQQQgghhBAOZrR2X9e34O7rDg3Kq2KxWPj555954oknmDRpEn/99RcdOnTgqaeeYubMmZXer6ioiKKiIvvP2dnZABiNRoxGY0MPu1Wzvb7yOovqkmNG1JYcO6K25NgRdSXHkKgtOXZqxxaUayyWZvXa1WSsGkVRlAYcS7VpNBpWrVplD7gTExMJDg7Gzc2Nl156iTFjxrBu3Tr++c9/smnTJkaNGlXh4zz//PMsXLiw3PUrVqzAzc2tIZ+CEEIIIYQQQoh69OQeHQVmDf/sa6Ktq6NHU335+fnceOONZGVl4eXlVeW2TTYoj4+Pp127dsyZM4cVK1bYt5sxYwbu7u58+eWXFT5ORZny0NBQUlNTL/tiiLoxGo1s3LiRCRMmYDAYHD0c0QzIMSNqS44dUVty7Ii6kmNI1JYcO7XT58XfyC8289sjIwjzbT5J1uzsbPz9/asVlDfZ8nV/f3/0ej3du3cvc323bt3Yvn17pfdzdnbG2dm53PUGg0EO/kYir7WoKTlmRG3JsSNqS44dUVdyDInakmOnZkzWRm+uzk7N6nWryVibbF95JycnBg0aRHR0dJnrT548SXh4uINGJYQQQgghhBCisZjMLX+dcodmynNzczl9+rT959jYWA4cOICvry9hYWH8/e9/5/rrr2fkyJH2OeU//vgjmzdvdtyghRBCCCGEEEI0OItFwZoob9HrlDs0KN+3bx9jxoyx//zoo48CMHfuXJYuXco111zDRx99xCuvvMKDDz5I165d+e677xgxYoSjhiyEEEIIIYQQohHYStcBdJIpbxijR4/mcn3m5s+fz/z58xtpREIIIYQQQgghmgKTxWL/3tCC1ylvuTUAQgghhBBCCCGaLaO5JIGr17bc0LXlPjMhhBBCCCGEEM2W2VI6KJdMuRBCCCGEEEII0Whsnde1GtBKUC6EEEIIIYQQQjQeozVT3pI7r4ME5UIIIYQQQgghmiCzdU55Sy5dBwnKhRBCCCGEEEI0QUZr93UJyoUQQgghhBBCiEZmsmbKDVK+LoQQQgghhBBCNC7bOuU6yZQLIUT9MZotfLL1DL8eS8JSapkLIYQQQgghSmstmXK9owcghGhdvt1/gZfXHgcg3NeNkb4apjp4TEIIIYQQoumxZcr1OsmUCyFEvdkfl2H/Pi49ny9Oa8kpNDpwREIIIYQQoimyZcqlfF0IIerRoQuZALxzQ1/8PZxQ0HA6Jc+xgxJCCCGEEE2OyTrV0aBt2WFry352QogmJa/IxOnkXACGdvSjc6AHADESlAshhBBCiEvYgnLJlAshRD05Gp+NRYEgLxcCvVyIDHAHJCgXQgghhBDlmczqnHKDzCkXQoj6YStd793eG6BUUJ7rqCEJIYQQQogmymidU65v4d3XW/azE0I4XExKLnFpaib84IUsAPqE+gBIplwIIYQQQlTK3ErK12VJNCFEg8ktMjHz/R0owK+PjqogU67OKb+QUUCh0YyLQeegkQohhBBCiKbGtiSalK8LIUQtRSdmk1NkIrfIxNOrDxOXlg9A73Y+AAR4OOGqU7AoEJsq2XIhhBBCCFHCXr4u3deFEKJ2TiTm2L//9XgyAOF+bni7GQDQaDS0dVVvt3VlF0IIIYQQAsBszZTrW3j5ugTlQogGc9IalDuVas7Ru71PmW3auqpnQCUoF0IIIYQQpZU0epOgXAghasWWKX9ofGdcDOrbTR/rfHIbe1AuHdiFEEIIIUQptiXRWnr3dWn0JoRoEIqiEJ2kBuWjugQQ7O3CV3vPM6NvSJnt2rqpX2MkUy6EEEIIIUoxWWxzylt2plyCciFEg0jJKSIz34hWA50CPejZzptr+7cvt12QNVN+JjUPs0Vp8UteVFdWgZFfjyVxMimH24Z3IMjbxdFDEkIIIYRoVCVBuWTKhRCixmyl6xH+7lUudebrDE56LcUmC+fT84nwd2+sITY5mfnFbDiWxC+HE9h+OtU+j0qn1fDE5CgHj04IIYQQonHZytdb+pJoEpQLUQOSya2+aGtQHhXkWeV2Wg109HPjRFIup5NzW11QnpFXzIZjiaw9nMiO06n2M8IAbk468ovNZOQbHThCIYQQQgjHsH0uaumfvyUoF6KaTiXlcO0HO7ltRAcendDF0cNp8mzzybu29brstpEBHmpQnpLLeNo29NCahK0nU/hk2xl2xqRhLhWIRwV5MrVXMFN7BbE5OoWXfj5OQbHJgSMVQgghhHAMk7Vq0CCN3oQQAAcvZJFTZOLHg/ESlFeDLVPeNcjjsttGBqrZ8dayLNrvJ5K487P99mC8e7AXU3sFMaVXMJEBJa/XntgMAPKKzQ4ZpxBCCCGEI0mmXAhRRpFJDYxiU/PILjTi5WJw8IiaLrNF4aQtUx50+Ux5p4DWE5Tvj0vnvuV/YrYoXNU7mMcmdqVDJSX7bk7qXPwCCcqFEEII0QqVLInWsoPyll0HIEQ9KjJa7N8fi8924EiavnPp+RSZLLgYtIT5ul12+0hrUB6TnIuiKJfZuvnKyCtm/tJ9FBotjOkawFvX9600IIeSoDxPyteFEEII0QrZMuWGFt59vWU/OyHqUZGpJCg/cjHLgSNp+v6MU8uuOwd6VqvcKNzPHa0GcopMJOcUNfTwHGbv2XSyCoyE+bqx6Kb+l50f5eakFjNJplwIIYQQrZHJon7+bunl6xKUC1FNhcaSwOiwBOWVOnIxi+fWHAVgWCe/at3HWa8l3K/ll7DHZxYA0CPEyx5wV8XNWTLlQgghhGi9Shq9SVAuhEAy5Tbf7r/ArYv3kJpbPqMdl5bHvCV7yS0yMaSjL4+Mr35DPFuDs5YclF+0BuUhPq7V2l7mlAshhBCiNTNag3J9C+++3rKfnRD1yNboDeBMah65Ra0ze/m/bWfYejKFxdtjy1yfklNkD9a7BXvx8a0DcTHoqv24nQJbflAen1kIQLvqBuUGNZueVyRBuRBCCCFaH7O1fF0v5etCCCibKVcUOJ7QOpu92eZ8f7X3vP1ERU6hkXlL9hCXlk+oryvL5g+qcXf61hCUX7Bmytu1qWZQbi1fLzCasVhabgO85iSrwMi5tHxHD0MIIYRoFYzWzz8SlAshgLLd1wEOX2h9JezFJgvpecUApOUVs+5IIkUmM3d/vp+j8dn4uTvx+fzBBHq61Pix7UF5SssNyi9mWIPyGpavgxqYC8d78Mu/GPfm5hZ98kgIIYRoKkqWRGvZYWvLfnZC1CNbVriNm5oBPhLf+oLylEvmkX+2K45HvzrIzpg03J10LL3tCiKqWOKrKrZl0VJyisgqMNZ5rE1NodFsn4df3aDcRa9DYz0xnC/zyh3OZLaw60waRrPC7tg0Rw9HCCGEaPHMkikXQpRmK1/vH9YGaJ3N3pKy1TnRXi569FoN++My+PlwAgadho9vHUiv9t61fmxPFwNBXmqGvSVmIROy1NfO1aDDx616pf1arQZX67z8fOnA7nBn0/Iptr4PtNbpK0IIIURjkkZvQogybEuiDYhQg/LTybmcT29dc0uTrUF5p0APJvUIAkCjgbeu78vwTv51fnxbCXtMCwzK40vNJ9doqn+217Z0mmTKHS86Mcf+/YmEnCq2FEIIIUR9sK1TLkuiCSGAkkx5mK8bV0T4YlHg4a8O2Oe6tAZJ2Wr5daCnCw+O60yPEC9evbY3V/UOqZfHb8nzym3zyau7HJqNbV65ZModLzqpVFCemCPN94QQQogGZlunXCfl60IIKAnKXfQ63pjdB09nPfvjMnh/02kHj6zx2MrX23o50zXIk58fvJLZg0Lr7fEjW3AHdnvn9VoH5ZIpd7ToxJKS9dwik33deSGEEEI0DJN9TnnLDltb9rMToh4VWcvXnQ1aQn3deOmangC8+9sp9p1Nd+TQGo1tObRAr5p3V6+OTgEtNyi3l6/71Oy1swXlsla549nK121n64/JvHIhhBCiQdkqUqV8vQFt3bqV6dOnExISgkajYfXq1WVunzdvHhqNpsxl8uTJjhmsaPVsDZ6c9WqQdHXfdlzTrx0WBR5aeYDswpbXMfxSJZnyBgrKrZny8xn59jn8LYV9ObRqrlFu4+6szikvMEr5uiPlF5uIs/aQGGHtnyDzyoUQQoiGZcuUS/l6A8rLy6NPnz4sWrSo0m0mT55MQkKC/fLll1824giFKFFkD8pL/mxeuLoHob6uXMws4OlVR1CUlj3HNNk6p7ytl3ODPL6/hxPergYUBc6k5DXIPhwlPsuWKXer0f1s3dclU+5Yp5NzURTwc3fiys5qUC4d2IUQQoiGZZtTbmjh3df1jtz5lClTmDJlSpXbODs7ExQU1EgjEqJytnXKnQ0lbwqeLgbeuaEff/toF2sOxjO6awDX9m/vqCE2uKSchs2UazQaOgV6sD8ug9MpuXQP8WqQ/TQ2i0UhIVN97UJqWL5uz5TLnHKHOmEtXe8a5Em3YC/rdRKUCyGEEA3J1n29pWfKHRqUV8fmzZsJDAykTZs2jB07lpdeegk/P79Kty8qKqKoqMj+c3a2+qHJaDRiNLb88mJHsr2+LfV1LjRa3xSwlHmOvYI9eGBMJG//dppnfjhC73aehPvWLBvaHBQZzWTmq8+7jYuuXn7PFR0zHf3d2B+XwcmELIzdA+q8j6YgKbuQYrMFrQb8XGv22jnr1X9COQXFLfZvqzYa+/3meHwWAJ0C3In0V6cgxKXnk5lbYD9xIpqHlv6/SjQ8OYZEbcmxU3NG65xyjWJpdq9bTcbbpD9JTJ48mWuvvZYOHToQExPDP//5T6ZMmcKuXbvQ6XQV3ueVV15h4cKF5a7fsGEDbm4tL1BqijZu3OjoITSIgmIdoGHH1i0cu6R6O1yBSE8dMTlmbv9kGw/1MNPSqmzSCgH06DUKOzZtpAZLbV9W6WOmOFUD6Nh++DSdi07W304c6GwOgB4vg8KG9etqdN+kC1pAy5HoU6wtjG6I4TVrjfV+s/OY+nsoTo5l95YzeBl0ZBs1LFu9gQjPRhmCqGct9X+VaDxyDInakmOn+nJy1c/fu//YSeIRR4+mZvLz86u9bZMOym+44Qb797169aJ3795ERkayefNmxo0bV+F9nnrqKR599FH7z9nZ2YSGhjJx4kS8vFpGKWxTZTQa2bhxIxMmTMBgMDh6OPXKbFEw71LfQKdMHI+vu1O5bfoPL+CqRbuIyzVx2qULj4zvVO/j+Gb/BVbsucA/JnVhSEffen/8quyPy4C/9hLk48a0aVfWy2NWdMy4nUzhh8//Il/nxdSpw+plP4728+FEOHKITsFtmDr1ihrd99Rvp9mUcIag9mFMndq9gUbY/DT2+81LhzcDxcwaP5S+oT58m7KfbafTaNOxF1PrcVlA0fBa8v8q0TjkGBK1JcdOzf3ryBYoLmLkiBH0aGbTGm0V29XRpIPyS3Xs2BF/f39Onz5daVDu7OyMs3P5JlQGg0EO/kbSEl9rU6n5vB6uzhgM5f90wgMMvHJtL+5f8Rf/3RbLXaMi8XErH7zXhqIovLHhpH1N9Me+PczGR0fh7Vr2dU7KLuSHAxfZeCyJcd3acs+oyHrZP0B6gfoaBHm51Pvvt/QxExXsA8DZtHw0Wh36ZlxycDY1j7/OZ7DlZCoA7dq41fi183RVj6FCk9Li/q7qQ2O836TnFZOSWwxAt3ZtMBj0dG/nzbbTaRxLyJXfSzPVEv9XicYlx5CorZZ67MSm5qEBIvzd6+0xbd3XXZ2dmt1rVpPxNqug/MKFC6SlpREcHOzooYhWxtbkDcp2X7/UVb1DWPjjMVJyijifXlDnoHzf2XS2nExhZ0yamqkGvF0NJOcU8cra4/x7Vm8Kis1sOJbId39eZPupFKzvXRxPyOHukR3RXFJnfjwhG193pxo3a2vo5dBs2vm44mLQUmi0cD6jgA4VvLHHpOSi12oI96u/N/36pCgKS3ee5ZW1Jyi2zoUCaF/D5dCgZJ3yfOm+7jB/nVP/9jr4u+NhnT8+tKMf/91yhk3RySiKUu7vTAghhGhNcotMzFy0A51Ww84nx+JiqHiqcU3Z5pTrW/g65Q4NynNzczl9+rT959jYWA4cOICvry++vr4sXLiQWbNmERQURExMDE888QSdOnVi0qRJDhy1aI1sy6HptJrLZm5DvF1IySkiIauAXu29a73Pk0k5XPfRLvvPOq2Gf13Tkw7+Hsz+7y5W7j1PVoGRrSdTyCuVye8f5sNf5zPJLTKRnleMn4daOaIoCh9sjuH19dF0DvRg46OjajSeJOtyaIENtByajVaroaO/B8cSsjmdnFsmKN90IpmPt55h15k0PJ31bP/HWLzdmtZZ04y8Yv7+7UF+PZ4MQK923vi4GfB00XPDoLAaP56bk/o2nd/C1m1vTnbGpAEwpGNJk9EhHf1wc9KRlF3EkYvZdfpbF0IIIZq7vbHpZBWojc3OpOTV2wo6Zmu2SS/d1xvOvn37GDNmjP1n21zwuXPn8uGHH3Lo0CGWLVtGZmYmISEhTJw4kRdffLHC8nQhGlKRsfwa5ZUJ8nbh4IUsErIK67TPs6nqOt1tvZx5YGxnRnTyt5cD3TIknM//iOOXI4kAhPq6ck2/9lzbrx0R/u4M//fvXMws4GxaHn4ezpjMFp5bc5Tlu88BcCo5l6x8Y40C2mRrpjzQs2Ez5QCdAkuC8gnd2wKw/VQqty3da98mp8jEgQuZjOrSdDq07z6TxsNfHSAhqxAnnZanr+rGLUPC65RFLcmUm+prmKKGdlmD8qGRJUG5i0HHyM4BrDuayK/HkyQoF0II0ar9cSbN/n19LWurKApG6zrlzXk6Y3U4NCgfPXo0iqJUevv69esbcTRCVM6+Rnk1gvJgb7VEua5Beab1bGO3YC9uHhJe5rZ/TIkip9CIq5OOa/u3Z2B4mzKBX4S/mxqUp+YzINyXL/ecY/nuc2g06nMoNFo4kZjN4I6VLy94qZI1yhv+pFinQA8ATifn2q/bGaPOyx7a0Q8nvZYtJ1M4dL5pBOVmi8L7v5/mnd9OYlGgo787793Yjx4hdQ/U3Kzl0vmyTrlDZOQVcyxBbdQy9JK/l3HdAu1B+SMTujhieEIIIUSTsKt0UJ6UUy+PmV1osk8D9K2nPk1NVbOaUy6EoxTaM+WXnx8T7K1mkhOzCuq0zyzbmuAVvAl5OOt5+4Z+ld433M+dHafTiEtTs+1/nEkH4IExnTgSn83vJ5I5mZRTs6DcWr7e0HPKoVRQnlISlB84nwnA9D4h5Beb2HIyhYMXshp8LJeTmFXIQyv/Ynes+hrP6t+eF67uUW9rV9sz5cWSKXcE25n/Lm09CPAse0JqbFQgGg0cjc8mIavAfkJOCCGEaE2yC40cuVjymaz057e6SLQmuNq4GXB1qp856k1Vy64DEKKe2DPlhuqVrwPE1zFTnpGvdnu+tMN6dUT4uQEQm6auj3g8Uc30DYzwpUtbdVHlE4k1O4uZnN34mfKY5FwURcFsUThkDcD7hfnQJ9QHgEMXMht8LFU5dCGTq97bzu7YdNycdLw5uw9vzO5TbwE5lA7KJVPuCLb55MMi/cvd5ufhTP+wNgD2HgJCCCFEa7M3Nt3eaBjKVjrWRbw1wRXUCk56S1AuRDXYGr25VCNTHuKjvnEk1lP5uk8tGplFWLuSx6XlUVBsts9P7xbsRVSQGpRH1yAoLyg2k12oZmoDGyFTHuHnjpNOS26RiZNJucSk5JJbZMLNSUeXtp70CPFCq4HknCJ7V/jGtulEMjd8/AepuUVEBXny0wMjuLZ/+3rfj73RmwTlDmErxxtSSVXJ+G5qz4NfjyU12piEEEKIpsTWe+XKzuoJ7NjUPEylVp+pLdtnaVsVaksmQbkQ1VCjTLmXrXy9sMqeCZdjK1/3qU2m3NoQLjY1j+ikHCwK+Hs4EeDpTFdbUJ6UU+3xbT+tzuf2ctHjWY9Z4Mo46bWMsL6xrzuSaF+Sqlc7b3RaDW5OenvG/6C1rL0xnUrK4Y7P9pFfbObKzv58c89QOgZ4NMi+3EuVr9fleBI1l5xdyOnkXDQaGNLRt8JtbI0It5xMYdMJyZYLIYRoff6IVYPy6wa0x9Wgw2hWiEvPr/PjJkhQLoQorSbd19t6uaDRQLHZQlpeca33mVmg3rc2a52H+arl6zmFJvvZy27BahfMyAAP9FoNOYWmajWjUxSFt389CcBNdewkXhOTewYB8MuRBPt88n7WUmGA3tZu14ccMK98U3QyZovCFRG+LJ43CE+XhluWzTaHSq8YKY7dCdvfgrV/h03/gj8+gkNfw+lfIf0MSNBer2xZ8h4hXpX+HXYK9ODWoWojxke+PsDFzLr1khBCCCGak6x8I0fjSxqi2qYgnkqqewl7gvV/amsIyqXRmxDVYCtfr06jNye9Fn8PZ1JyikjMKsTfo3ZzsDOtmfLarMPtYtAR7O1CQlYh644kANjL1p30Wjr4u3MqOZfoxBx7uX1lNhxL4mh8Nu5OOu66smONx1JbE7q1RafVcCIxh9RctclcX+tccoDe7X34et8FDjpgXrntJMHYboEYGmqJDkWBrPO4n9/L+4aPGaP9C+fPiqq+j6svtBsA7QdCu4HQrj+4VZzhFZe383Tl88lL+79p3fjrXCaHL2Zx/4o/+equoThV4wSeEEII0dztOZuOokDHAHcCvVzoFOjB4YtZxNRDs7dE6xTF1jCnXIJyIaqhJkuigXpGLyWniISsQnq2q92yWJl1KF8HdV52QlahvUN5VFDJepFdgzzVoDwphzFRgZU+hsWi8PavpwCYNzyCNu6NtxxFG3cnhnb0Y/vpVFJz1aqBfmE+9tttmfLDF7NQFKXRMvgAf53LBMqeJKiT/HRIPgZJxyD5qPXrcSjOQQtcZT0XZHb1Qxc+FPy7QGEWFKRDfhrkpUHaKfXn0xvVi41vR+g6FYY/BB6V/65FebZM+aVLoV3KWa/jg5v6M/Xdbfx1LpP/bonhgXGdG2OIQgghhEPZKjJt/ytLMuV1XxbNVtEZIplyIQSUypRXY045qEH5oQtZJNRhWbTM/NqXr4O6VnnpNSNt5eugZs1/OpRQabO3zPxiNhxL4seD8RxPyMbDWc+djZglt5ncM8g+nz3E26XMcmxRQV446bRk5hs5n15AmLXjfENLyi4kIasQrUad415rh7+FAyvUYDwnoeJttAYI6MrS5Ei+LxzIf+6ZR5dSJ1fKMBVB0hG4sB8u7oML+yA9Ri1r3/U+7FsMQ+6FKx8Hp8Z5rZqz8+n5nEvPR6fVMKjD5asNQn3deGlmTx5aeYD3fj/N5J5BdLb2PRBCCCFaKtvSoUMjywbldV0WTVEUe/l6kATlQgiAQqMtU169NRJt6xVXZ852RYpNFvKs3bbb1KJ8HdS1ym30Wg2RgSU/d7UGdqWD8oy8YjYcS+Tnw4nsPJ2KqdTaFo9O6FLrkwN1MalHEM/8cARFgb6lsuSgluF3C/bk4IUsFqz4k2Gd/Ogf1ob+YW3KrSddn2xZ8q5BXrVf+mz/MvjxwbLX+YRBYA8I7AZte0Bgd/DrBHonPvn371wsKLAfExXSO6ul6+0GAHep1+WnQ9xO2PYGxP+pfj26GmZ+CGGDazf2VsJ2QqtPe288qvl7ntEnhB8OxPP7iWT+8d0hvrlnGDpt41VwCCGEEI0pM7/Yvuzu4A5qUN7ZFpQn52KxKGhr+X8wp8hk/9wTLOXrQgioWaM3KDmjV9tl0bKsy6FpNNS6iVhEqaC8U6BHmRMKXa0ZvNPJuazYfY5fjiSwMyYNc6lAPCrIk6m9gpnaK9h+1rOxBXg6c0WEL7tj0+3rQZc2qWcQBy9kcfiierEJ83Wjf5gPwzr5c22/dujrcd73X+fVTvC1Ll0/8j38+JD6/cD50GcOBESBSyUZcMDdWf3dFdR0WTQ3X+h2FURNgxM/wdon1Oz54knQYyYMuA0irgRty57/nFNoZMvJFMZGBdqXmLucXVWsT14ZjUbDSzN7MvGtrfx5LpPPdp3ltuEdajVmIYQQoqn744w6n7xzoIc9IRLm64aTTkuh0cLFzAJCfWtXnWf7DO3jZrA3vW3JJCgXohpKGr1Vv3wdIL6WnZizrJ3XvVwMtc60RfiXvAnamrzZtG/jipuTjvxiM/9cddh+ffdgL6b1DmZKz6AGW+Krpv51bS9++OsiNw0OL3fbfaM7MalHEPvjMvjrXAb74zI4lZzLOWvp8eoD8fi4GpjYI6jexnPAminvV5ug/Ogq+P4uQIEB82Dam+qZl8twtQaSVWbKq6LRQLfpagC+7ik4uEIdy9FV4N8VRjwCva4DXcN1kXekDzbH8OHmGJ6cEsU9oyIvu72iKCVz5CKrnk9+qRAfV56cEsXTq4/w2rpoxndrW+sPJEIIIURTZitdH1Kq94pepzYUjk7K4ar3tuPmpOO+0ZHcMjSiRo9t+wwd5NXyS9dBgnIhqsXW6M3FULPydVvXyJqyN3mrZek6QLhvSaa89HxyAK1Ww5ioQH4+lECvdt5M6RXE1J7B9vXNm5LIAA8endi1ytsjAzyYPTAUgOxCIwfOZfL6+mgOX8yq9YmRipjMFntGvt8l5fSXtX8p/PgwoECvv1U7IIeya5XXiasPXPOhOrd8/1J1ObXUaFh9D/z6PIQNUTu2h/SH4D5VZu+bE9ta9qeTqze/LTY1j8TsQpx0WgaEl6/QuJwbrwhjzcF49sSm889Vh/ls/hWN2ohQCCGEaAyXzie3GdHZn+ikHLIKjGQVGHl2zVHC/dwZ2SWgysfLyjcSnZTDoIg29kx5a1gODSQobxZyCo2cTc2nV/s6NJVqAaITc/By1TtkXkltM+UJWYW16gxe187roK5vbVsW7dKgHOCd6/vy8syeDpkr3pC8XAyM7BLAL0cSOXwxi6yCOgaypZxMyiW/2Iyns57I6lYSWMzqmuLb/qP+POA2mPYGaKtfiuVmD8prmSm/VHBvuOpNGP887PsUdi2C3EQ4tlq9AKAB/84Q0g/ChkLfG9V5683QSWsH2Oo2XtxpzZL3D/ep9om40rRaDf++thdT3tnGtlOpfLv/An+znjQSQgghWoL0vGJOWHsTDb6kIerT07px4+AwTGaFxdtj+WrfeR7+6gBrH7yy0qZtaw8n8MzqI6TlFfPqrF7E24Lyyyzd21K07ImELcRLPx1n+vvb+eHARUcPpUFZLApPfX+Yxdtjy92WnFPI9Pe2c9Mnux0wslJzyqv5Ad3WJbzYZCE9r7jG+8sssK1RXreA+f+mdePWoeEMq6AEV6/TtriAvDQvV/Wco21+fn2wrU/eO9S78sYlpiK1wVrmOYg/AJ9dXRKQj3gErnqrRgE5YJ8HXW9BuY2Llzqmhw/DLath3HNqmbt3KKBA6kk49BX89DB8PEZ9Ps1MWm6RfUm9hMzqVa7UZj75pToGePDIhC4AvPjTMZJzalc1I4QQQjRFu61Z8q5tPfHzKHvSXqPREBngQdcgTxZe3YPuwV6k5xXzwJd/YjJbymybmlvEfcv3c9/yP0mzfmb+/s+LJFpPpAdL+bpoKmxdDT/YFMOMPiEttgzy4IVMvtxzDoNOw42Dw8pkqA6cy6TYbOFCPZYi10RN1yl30mvx93AmNVddq/zSN6u/zmXw3y1neGpqVJku6Tb25dDqkCkHuKp3CFf1DqnTYzRX3tbXLruw/oLywxczMWBiom8S/Pk5JB6ChEOQfRGKcqA4DywV7M/JA6a/o87brgV7pryo/rL+ZRhcIXKMerHJTVaD8Iv7Ye//1PXT/zdOzZoHdIXQwWoDOaemN+WhtJNJJSXr8VkFl61csVgUezleRSezauKOER346VA8Ry5m89wPR/nw5gF1ejwhhBCiqSiZT171sqEuBh0f3NSfq97bzt6zGfxnw0menBKFoij8eCiB5344Qka+EZ1Ww82Dw1i2K469Z9PJKVQ/87SG5dBAgvJmISWnCIDopBy2nExhdNdAB4+oYcSl5QNgNCscuZjFwIiSP/Kj8eqJiWKTpVbl4HVV0/J1UEvYU3OLSMwqpOcl61kv2hTDr8eTUFD47y0Dy93XVr5e2+XQRElQXqNMuakYMs5C2ulSlxj1a0EGCy0KLzqb0B+ywKHLPJbeRQ1YA7ur2XH/zrV+LvZMubGeM+VV8QiELhPVyxV3ws+PwrEf4Ow29bL3f+rJhu5Xqx3kw4dX2sU9PrMAPw+nai8pWJ9OJZcs+1dotJCZb6SNe+UVIieTc0jLK8bVoKN3e5867Vuv0/LarD7MeH87vxxJ5JfDCUzpFVynxxRCCCGagl2VzCevSIS/O69d15v7lv/JR1ti6Bjgzq/HkthwLAlQex+9fl1verbz5s9zmRy+mMWxBPWzf2tYDg0kKG/yFEUhLbek/PmTbWdabFB+Ni3P/v3+uIwyQbntDxPUALk28zzroqbrlIMalB++mEV8BfNYD13IBGDDsSRiU/PocEmDtUxr9/W6lq+3ZtUKylNPwV+fQ/JxNfDOiAOl8sDXCUADRoMXhnZ91GZoQb3BLxKcPdUg1cldvdRjJ/MGz5Rfjrs//G0ZJB2BxMOQdFRdYi3jLBxYrl68Q9UTEC7e1osXuHgTk6PnzW2JdOx9JY9dP7HRhx6dmFPm54uZBVUG5TtPqx8yBnXwxakGJ+Eq0z3Ei3tGRfL+ptM888NRhnf2x6uWyxwKIYQQTUFqbpG9Eu2KDtWrKpvaK5h5wyJYuvMsT3yrZjb0Wg33j+3EfaM72f/nTu4ZVGaZ22AfyZSLJiC7wESxde6FTqthx+k0jlzMKpd5bQnOWTPloAblpR2LLxWUGxs/KLdnyg3V/5AeZl0G6WxqfpnrE7MKSbZWPygKfLr9DC/N7FVmm/po9Nba2cvXLw3KM89B3C44+j2cXFf+jk4eapDt16nUJRLcA5nzyS5i0op595bJDKnDfOOacnOu50ZvtaHRQFAv9QIw8SU494d1ebXVkHVevVwiElhkAI6/C+91VtdNH3QHeLdvlGGfSirbcT2hgsqV0nbG1E/pemn3j+3E6gMXuZBRwM7TaUzuWX9L9AkhhBCNbfeZdEBdcte3ihPdl/rn1G78dT6Tg+cz6RHixevX9aF7SNlmxFN6BvH6+mj7z7IkmmgSUnLV4M3TRc/YqEB+OBDPh1tiWHRjfwePrP6VzpT/eS7DXqaemV/MxVJzydX53Y0brNamfL1DgJr9PpNaNig4aM2SuzvpyCs2882+CzwyvkuZeee27G5dlkRrNYpyICcRchIgJ0ntIp6TSM/kcyw1xOGUrYMv/CA3CbIuQEF6qTtroMtk6DKpJAD3DKp0qbJTRSdIpRifGvwDqg9uhiYQlF9Ko4HwoeplymsQu1V9jQuzoDAbCrMwF2Sy9fBp2lgy6amJRZ92Cra/BTvehe4zoOMYaDdAzbBXUvpeF4qiEG3tvB7q68r59IJyHdjPpubx6fZY7ryyI+3auLI7tv6DcheDjh4hXlzIKCBFGr4JIYRo5nadSQXKrk9eHU56LSvuGMyf5zIY0tEPg678//6OAR50betJdFIOXi563J1bR7jaOp5lM2abTx7g4cw9oyJZczCenw8lcPfIzDrPd2xqzqWXZJRTc4s5l55PuJ97mdJ1UOeFNjZ7o7caZOg7+qtLZsWm5pW5/vAFtSRnaq9gopNyOHQhiy/+OMdD40vmHNfHOuUtVnY8nN1hndu8HdJjKtysDTBaB1iA06Vu0OggpC9EjIB+t4J/p2rtVlGUUhUMjRyUO9u6rzuofP1yDK7qiY1LbI1O5ra9ewHwIo/fZpoJOLFc/d0dXaVeAHw7whV3Qe/rwa3qhjE1kZJTRFaBEa0GRnQK4Ms954gv1YE9t8jE/KV7OZOax4nEbJ6e1p2cQhOeLnp6hNRvNVKAp7N9TEIIIURzcPhCFpuik7lvdCT6UgH0H9ZMeXXmk1/K3VnPlZ2rXq98cs8gopNyCGkly6GBBOVNXqo1U+7v4Uy3YC+u6duO7/+6yCtrT7DizsEtphN7TqHRvmxRl7YenEzKZd/ZDDUojy8blNsC5MZkXxKtBpnySGum/Hx6PkUms30+ui1T3jvUh5FdAnjgy7/4bNdZ7h7V0V6Wb59T3sjBX5OUmwwxmyBuuzUIP1N+GydPNcPtGQQebcEziHwnf579NQGAf1/bE71HoFoy3SYCnKu5xnjpYRSZMFkUoPFPltT7OuWNZN3hRPv32bhzpM0gxsybo3Z1P7Za7ex+Yb/6O133pHpx84M2HcC3g/q7amP92rY7uLap0f5tWfIIf3ci/NTpJLZMuaKoSzCesZ4023s2gxd+OgbA4A5+6Cpb8q6W/K2VMCm5NV8iUQghhGhsiqLw0Mq/OGPtfTS9j7qaT3JOIaeTc9Foyq9PXl/mXBHG5uhkZg1onKluTYEE5U2cLSi3ZVkendiFnw4lsOtMGptPpjCmhTR9s3Ve93N3YnTXQE4m5bL/XAazBrQvF5Q7JlNuC8qrnykP8HTGw1lPbpGJc2n5dG7riaIo9uYVfdp70z3Yi3Y+rlzMLOC7Py9w0+BwADLzWkmm3GLG2ZipNlozZkN+mrrGd0G6+vX8HriwF1BK7qPRqs3VIkaol7AhFQZrzhaFb9evBeCpruPLLUtXU7YsubNe2+g9Ddwbap3yBmQyW9hwTA3KAzydSckpIjYljzFdUSsVQvqqGxbnwcGVsOcTSDluPQbS4OK+sg+od4EB82D4w+BVvQ7mtiZvXQI9CbaebbetVb589zl+PBiPXqthfLe2rDuaaO9lUZ+l6zaSKRdCCNGcHLmYbT9xfSwh2x6U2+aTdwvywqeBGhIHebvww/0jGuSxmyoJypu4kky5etC3b+PGvOERfLz1DP9ee4KRnQPqPaPjCLbS9TA/N/qHqQHWn9YPyEebQqa8huuUA2g0Gjr4u3P4YhYxKXl0buvJufR8MvONOOm0RAV5oddpuX1EB1746Rj/2xbLnEFhmBWFHGuX7TYtufv6ibXof3mCyVnn4chltg3uAx1GlQThLpcvLdZpNXg668kpMpFVYKxzUG6b5++I34mrPVPeRMvXK7AnNp2MfCNt3Axc268d/916ptxUDkDtVD/odvVSmKV2dM84C+mx1u9jIe0MZJ2D3R/BviUwYC4Mvv+yY7A1eesS5EmIdZ3T+KwCLBaFtzaeBOCJyV25eUg4f/0ng6Rs9f12WKcGCMrtmXIJyoUQQjR9Pxy4aP++9EomNVkKTVSfBOVNXGqOWuroXyqguG90JCv3nCM6KYfv/7zA3waGOmp49cbW5C3Cz53+4T6AWnqanFPI6RT1g7W3q4GsAqM9a92YbNl5lxp0XwfoGKAG5bZg5KB1Pnm3YE/70g/XDwrl7V9PEpuax6/HkxgQXpL19XJpgX+iaTGw8Vk48RMaQEEDrm3QuPmp84ldfa1f26iN17pMAq+QWu3Ky9VgD8rrypHz/D2sc8ptY2gOfjmiZskndg+iU6A6XeDSpofluHirJ2CC+5S9XlHgzGbY8iqc2wV7Pka/fyl9vYegiQ+G8CsqfDhb+XqXth72THlSdiFH4rNIyyvGw1nPbcM7YNBp+cfkKB79+iD+Hs50CfSs/ROvhC1TniqZciGEEA6y72w6D3z5FxO7t+Wpqd0qrfwzWxR+PBRv/7l0UP6HNSivaZM3UbUW+Im/ZbFlVfw9S4JyHzcn7h/biX+tPcGbG08yvU9Io5fT1jfbcmjhfm4EerrQwd+d2NQ8Zry3A7NFoY2bgTBfNw5eyLKvGd6YSjLlNXudbc3ezlhPLBw6nwlQpkmfu7Oem4eE88HmGD7ZdobIwN6A2nFfX0FXymYrPRZ2vKOuC24xgVaPecgCfsnryaSrrsFgqP9g18vVwMXMgvoJyu3z/Bs/KI8M8ECrgbS8YpJzCgn0bNrLg1gsCuuPqkH55F5B9nW5Y1MqyJRXh0YDkWOg42i1SdzmV9HEbSc8fSssmQDtr4Apr0K7klUpFEXhlDUo79rWk7aezmg1YDQrrP5L/aAxpKOvvfPrNf3aYTIrRAa6o22A6iP/Uply28oSQgghRGP6aEsMCVmFLNsVx+7YdN6/sR+dKjgRvSc2naTsIvtKQRczC8guNFJQbOZMSh4aDVzRQPPJW6sW9Im/ZbLPKb+k9PbWoRG083ElIauQJTvOVvkYiVmFjH9zC8+vOYrR3PhZ5uqwZcrDrc2Y/nVNL/w9nEjMVud/9gjxtnc+b+xMuaIotVoSDUovi6Y+v0PWTHmv9mXLr+cNi8Cg07D3bAabTiQDLWQ+uaLAsR9g6VXwbl/Yv0QNyDtPhLu3YRnzDGZt3crKq+Ltqp53bO6ZclcnHR381WPp0h4LTdGf5zJIzinC00XP8Eh/OlrHHp9VSEFd5sVrNNBhJNz2M6a5aznfZhiKzgku7IFPxsKPD8PBr+DkBlKObyfQeIFAXQ4Rvs7odVr7yYzV1pK84Z38Sz20htmDQhkQ3jAfMmyZ8mKThezC5jMNQQghRMuQmV/MlpMpgPpZ5kRiDtPf28HX+86jKEqZbdccVE9eT+sdbF8n/GRijj1L3iPEyyFJipZMgvImzlbqWDpTDuq6t49N7ALAB5tPk5FXeUffrSdTOJ2cy9KdZ7lj2T7yipreB8KSTLn64X1opB8bHxnFzL5q2fLYqEB7QNzYmXKjWcH2XlXzTLk1KE/JJafQyKGLmQD0uWQ5u0AvF2b2bQfAB5vVJb4ae9mtepcRB1/Mgq9vVbObaCByLNz2C9z0jdpNu4HZ/mFk10NQbl873kG/l+7WJbouXSKwKbKVro/v1hYnvZY27k72kxm2E3B1pbS/gj8j7sF0/1/qUmoo6kmfVXfBir8R+PVVbHJ+jD2GuzG8HACvduAZ3RLaa5JJt75fjigVlDc0F4MOT+t0FGn2JoQQorH9ciQRo1khKsiTDY+MZEQnfwqMZp749hAPrTxATqH6OSchq4C1h9XVa67u246uQWomPTqpJCgfKqXr9U6C8iZMURT7MmG2Rm+lzezbjm7BXuQUmnh/U8lCzGdScsvM/biQWWD/fsvJFG74+A+yC5vO3NRCo5kEa0Y83NfNfn0bdyfevqEfh5+fyPwRHewl+o2dKS/dWM65FnPKATLyjSzdcZZCo4WOAe50aVt+Sa47R3YEsAcMzSpTXpgFcbvUDto/PgSfjIP3B0HMb6Bzhisfh4cPwy2rIHxYow3LHpTXQ2Yyw8G/lx4hXkD5xodNjaIorLMG5ZN7Btmvt2X6K2z2VhcebeHaj2Huj9BrNnQcA8F9yXZpR7ZS8n5CQTrTCn5ki9MjvGt4j5EeF+1z3RuLfV65NHsTQgjRyNYcULPfV/dtR6CnC5/Nv4InJndFp9Ww5mA8V723nR8OXOSaRTvJKjAS4efGkI5+JUF5Yg67YmQ+eUOROeVNWHaBiWJrubl/BZ2jtVoNT02J4tbFe/h8VxzzhkWQW2Ri5qId6LUa/vjnODxdDFzMUIPyqb2C2H0mncMXs7hz2T6Wzb+iScxFv5CRj6KAp7MeX/fyJx88rfNRHZUpL30SoKbl625OeoK9XUjIKuTDLWoG/JYh4RXOJ+3S1pPRXQPYHK2WFjX5sqD4v2D7W+rXzHMVbxM+HKa/C/6dGndsVrbXsH7mlKuP4e2goLx7sBqUH2/iQfnhi1lczCzAzUnHqC4B9us7+Lvz17nM+g/K7TsYqV6snv/6AN//eZHHx3fk/qGBkHiQmB/+TWT2bmbodjHDtAs++wmGP6RWcDTCHG9/D2fOpORJplwIIUSjSswq5I9YNaCe3kddVlSr1XDf6E4M7uDLg18eIC4tn4dWHgCgU6AHS28bhE6roWtbNSjfejKFs2n5aDUwSOaT1zvJlDdhtiZvns76SoPnkV0CGNHJn2KzhRd/OsZ9y/+kyGQhr9hs//B7IUMtDZ/UI4jPbr8CT2c9u2PTeXjlAcwWpcLHbUxnU0uWQ6uq+ZGtdLzxM+Xq/pz02lo1Z7JlCPOLzbgadFzbv32l295lzZZDE14OrTAbfvmHOof32A8lAblXe+g8Ca58DK5bAvfvg3k/Oywgh1JBeT10LbfNKXfU76W7NVMem5bXJKeg2NhK18d0DSzzvlUylaOBgvJLnLQ2eesc3Abc/SByLFuu+C9Til5hlXk4Fo0OYrfAF9fCR1dC9C+gNOz7oaxVLoQQwhF+OhSPosDA8Da0b+NW5rYB4b6sffBKplir267o4Mt39wyzb2fLlJ+1TjXt2c7b3sBV1B/JlDdh9iZvnlU3wnpyShRXvbedDceSylx/Ni2f3u19uGgtX2/n40qPEG8+vnUgcxfvYd3RRJ5efYR/XdPToZ2ASy+HVhXbcmRFxkYOyo01X6O8tI4B7uy0lvvM7BdSZQZ8aEc/erbz4sjFbNo0xfJ1YwF8OFxdMxqg53UwYB607aEuY9bEeNVjpjzL2n3dx0EVDP4ezrT1ciYpu4gTidkN1pCsLiorXQfoYF2JIPZyy6LVA7NFKVmjvG1JV9kQHxeOK+E8YlzAiLvfIeDIYti/DJIOw5c3qJn2gfPVE0xeIWppvO6Sf5OKAgUZkHUBchJBqwODq3rRu5Z8b/u51P1lrXIhhBCOYPvfPKNvxUvMersZ+OCm/sSl5RPq64au1CoknQLVFWBseTyZT94wJChvwmxBeUWl66X1bOfNzL4hrD4Qj16rISrYkyMXszmbmofJbCEhS52v3a6Nuk7v0Eg/3rmhL/et+JMv95wjwNOZRyd0adgnU4Vz6SWZ8qrYMuWFpsYtX7etUV7TJm82tmXRAG4aHF7lthqNhn9f25sPNp9umuvPG1yh5zVw/EeY9oZa9tuE1Wv5er5jy9dBLWFPyk7haHzTDMqjk3KITc3DSa9lTFRgmdsabE55Bc6n51NksuCs1xJWqk9FjxBvnHRa+oR6E9C+M7R/BUb+XV2q748PIXarerHRaNXA3DNYDdLz0yHluBqUV5fWYA/SH1A8CNd3QnNxJJw8p/Zi0GrB1Rdc26gntlx9wcm9UcrphRBCtHxmi8KReHX1n+FVNDjVaDRE+JdPkLkYdET4u9sr3WQ+ecOQoLwJK+m8fvly2aemdiOzwMiMPiEkZBWqQXlaHkk5RZgtCgadpszaxlN6BfPi1T15evUR3v3tFAEeTtwyNKKhnkqVbOUwEZcJyh2WKTfVLVM+ILwNoK6J3LOd92W2Vk+yfHDTgFrtq1GMfgpG/xMMTXutbKjfTHmmg7uvgxpUbopOabLLov1yWD0TP7KzPx7OZf+9RPirf98Z+UYy8oppU0H/iPoSbS1d7xToUeZsf6ivG78+OqpstYqbL0xYCANvU3skJB2DnAT1YjGVfB//Z9mduAeowbqigDEfTIXqV2MhmEqaa2IxQpERirLxI4nb9DFwfj2sqOIJaHTg7AnOXuDipX519rR+b72+w0joNK4eXi0hhBAtWVxaHoVGCy4G7WWrUisTFeTJmZQ8dFoNAyPa1PMIBUhQ3qSlVDNTDtDWy4Wlt10BqPNGAM6m5tmbvAV7u5b5cApw85BwUnOLePvXUzy75ih+Hs5M7RVcn0+hWs7Z1yiv+o3CUZly+xrlNey8btMn1IefHhhx2UqAZsPg6ugRVFtJ9/W6BeWKopCZ7/iu+N2beAf2ktL18u8jpZsenknNY0ADBuWnrEF511Kl6zaV/h22iYDp75T8bLFAXgpkX1SD8ux4NRgOjAK/zuBUxd+zoliD9IKSi6mAw0cOcnDz9wxzjqFjoI8aZFssUJCuZt/z08FcBIoZCjPVS1Yl+9jxNsz8EPreePkXRAghRKt1PKHkf+KlsUB1dWnrydrDifRs521vwCzqlwTlTVhqjhoEBFQjKC/NdhYsLi3f3uStfZuKA6mHxnUmJaeI5bvP8fDKA/i4GhjWiGv3mswWLlhPHIQ32Ux53crXgWplyEX9q6/y9fxiM0azOpnKkUG5bVm06KQcjGYLBl3T6dV5JiWX6KQc9FoNE7q1rXCbjgHuJGQVEpuaZ68gaQjR1vnknSsIyqtNqwXPtuqlpjSaknnlpa82h/H0r20IdHVmz93jy9/PlnUvzIaiHCjKVi+F1q9FOer3SUfgxE/ww/1quXvXybV8kkIIIVoKk9mCyaKUaw59IlE9kd/NuopLbfxtYCg7Y9K4u1RDYlG/JChvwuxzyi/T6O1StuA2La+YE9b1ytv5VByUazQaXri6J+l5xfxyJJG7Pt/PyruGNFoQGZ9ZiMmi4KzX0taz6nJoW/l4UWNnyq2N3lxqmSkXjmMLynMKTZgtSq3PENtK1530WlwduIxgaBs3PJz15BaZiEnJJSqo9v9g65ut6/rQSL9K59138Hdnx+m0Bm/2Zs+UBzXuOuSXY2vamZZXjMWioL30eNRo1PnkTu5AFVVLFgv8sAAOroBv5sGtqyFsSEMNWwghRDPwwJd/sfVkChsfHUVIqc/9xxPUoDwqqPYnqtv5uPL13UPrPEZROYkymrDqNnq7lKeLAX8PtTR0x+lUoKTJW0V0Wg1vXd+XIR19yS0yMW/JXuLSGmfZIlvn9TBft/IfUC/hbA2GCh2WKZc/l+am9JIdOXUoYbeXrrsaHLpSgVarITLQ2sW8kZYWqy5b6fqUCkrXbUo6sKtjN1sUPtl6xv6BoT4YzRZiUqyZ8sA6ZMobgK+1ZN9sUciwHlO1otXCjHfVJQhNBbBitjoXXgghRKsUk5LLL0cSySs221f8sbGVr9clUy4ankQZTZhtLVtbgF0TthJ229zTS9ckvJSLQcfHtw6kW7AXqblF3PLpHvtJgYYUZ+28frn55FCqfN1Rc8rrUL4uHKN0ZrumJewFxWZ7wGhb59yRpes2toaItr+dpuB8ej6HL2ah1cDEHpWXe1+6VvmPB+N5ee1xnll9pN7GEpeWh9Gs4O6kq7RCyFEMOq09ME/JLUKpy7roOgP8bSmEDla7uH9xLWTE1c9AhRBCNCtf7z1v/770ie6sAqN9aeSmVF0nypOgvIlSFIXUXOuc8hqWr0P5ILc6H069XAwsmz+IUF9XzqXn8+HmmBrvt6biUm1N3i7fBM0WFDf2nPLCOq5TLhyrNvPKjWYL96/4k5fXHufJ7w83ic7rNuHWJb7i0ppOUP7lHnXd+kERvlVW9tiWRTublofForAvLh1QTx5aLHUIUEuJTlSz5J3ael62+sYRbD1CTiXlMvGtrdzy6e7aP5iTG8xZCQHd1GZ0K29Uu78LIYRoNYxmC9/9ecH+s20OOUB0qWmsjlzSVVyeRBlN1Od/xFFstmDQaWpcvg7llxerrNHbpQI9XXjuqh4ArP7rIkZzwwbA1V0ODZpAptyBc4lF7dU0KFcUhae+P8xvJ5IBOHwhk3PWrHRT+IcWZm/k2DTK19Pzilm28ywAt4/oUOW27du4otdqKDRaSMwu5MD5TAAKjOZ6y/xH2zuvN6355Da2JS5f+OkYp5Jz2XYqlbS6VCW5+cLN34Gbv9oAbsP/1dNIhRBCNAe/HU8mNbcYg049EX08IcdeiVUf88lF43BoUL5161amT59OSEgIGo2G1atXV7rtPffcg0aj4e2332608TnKT4fieW7NUUDtjn5pF8XqiPAvyZRrNRDkXf01pUd3DcDfw5m0vGI2WQOThnIu3TqnvBrl6/Yl0ZrZOuXCsezLohWYqrX9a+uj+Xb/BbQa8HLRY1Fgw1F1vrSPq+ODcltVSVPJlH+89Qx5xWZ6tvNiQveqO5XrdVr7kmTH4rM5YZ3nBnCinuaV25q8dalL5/UGZMuU26YnAZxOrmPjO+92cM1/1e/3/g+O/VC3xxNCCNFsfLVXrVa7aXA4Wo16stz2P6Y+Oq+LxuHQKCMvL48+ffqwaNGiKrdbtWoVf/zxByEhIY00Msc5npDNI18dQFHg1qHhLBjTqVaPE1EqyA3ycqnR0kl6nZZr+7cD4Jv9Fy6zde1ZLIo9sGjSmXKjNHprzrxqkCn/dHusfdrGv6/tzbTe6nvOn+cygaYxp9wWlCdkFVBsatwTVJdKyy3is11nAXh4XJdqNcGzzSv/8VA8plIl6/XV7C26qQflpaYj2arrT9U1KAfoPB6GP6x+/+3tsOZBSD9T98cVQgjRZCVkFbDlZAoAc4dF2KeJHbP+Tz1mPfkdFdw0/yeKEg6NMqZMmcJLL73ENddcU+k2Fy9e5IEHHmD58uUYDI7/QNzQfj+RjNGsMLSjH89N71HrTs/h/iVB7uWavFXkbwPaA7DpRHKDNXxLzimiyGRBr9VUa867fU55Iwci0uitefNyVVd+vFxQvvqvi7z4k9rB+u+TujJ7UChDI/3KbOPj5vg55QEezrg56bAocCHDsdnyT7bFkl9spnd7b8Z1C6zWfWwfGNZbqw9sb3HHE3Mqu0u1FRrN9hN9XZtoqV6wt/pe176NKzcODgPqIVNuM/Zp6DoVLEb4cxm8NwC+u0M6swshRAv17b4LWBS4ooMvHfzd7RnxE4k5mC0KJxOl83pz0aTXKbdYLNxyyy38/e9/p0ePHtW6T1FREUVFJUFkdrZ6pshoNGI01n5JpMYSa13KZ1CEDxazCUstk8KuOvB1N5CeZyTY27nGzz3C14Xe7b04dCGbT7fFcO/Ijrg6VR2U2vZR3X2dTsoCIMTHBcVixniZJ6tFDY4LjeYG/10qisLTPxyjwGi2L6vlpKv+cxPVU9NjpjY8ndXj9mxqDvOX7CEpp5A+7b0ZGN6GyT3aYtBp2XYqlce/OQjA3KFh3Dk8DKPRyIBQr0seS9skjoHQNq5EJ+VyJjmbUJ+a95yoDyazhW/3q91e7x3ZAZOpetMDwqz9LWzTUEZE+rHtdBrH47Nq9NpWdOycTFA/hHi56Gnj0jR+V5ea0bst8Zn5XNs3hIMX1ffAk0nZ9TfW6z5Dc24X2p3voI35FQ5/A4e/wdJlCpZhj6C0618/+2nGGuN9R7RscgyJ2qrJsbM/LoOX1kbzzLQo+of5lLvdYlHspet/6x+C0WikS6B19aWLmfwVl0aB0YyLQUs7Lyc5Xh2gJq95kw7KX331VfR6PQ8++GC17/PKK6+wcOHCctdv2LABN7eaZ4wb21+ndICGjHMnWbs2uk6P5aXRkY6GgtSLrF17/vJ3uERXg4ZD6PhwSyz/3XKGrj4Kd3a1cLlK+I0bN1br8f9I1gA63Mx5rF279rLbpxUC6MkrLK7W9nWRY4Sv96t/Hs46BdAQd+Y0a9eeatD9tlbVPWZqI+m8epx9te+i/bqj8Tms2HOBN9ceYnSwha/PaDFZNPT3s9BXOcMvv5SU/bZ11ZFUoKZzz5w4wtqUww021upyKtYCWn7Zto+80/XTtbymTmRqSM3V4a5XKIjZx9rY6t0vOQtK/+vprE1mGzouZBby3Zq1uF7yXynPCKvjtBgtcGtnC5c2VC997OxLUX/X/gYjv/zyS62eV2PoBZzaf5rkHAA9R8+l1f97mteteHcdSeekHwnJ3If25C9oT/5CgvcA9kYsQNE26X//jaIh33dE6yDHkKit6hw738VqOZKo5d01fzC7Y/kq0egsDRcydbjoFDh/gLXxB8jNUP8P7juVwMWL8YCWHt4m1q9ruv8TW7L8/OpXNDbZ/8r79+/nnXfe4c8//6xRCfdTTz3Fo48+av85Ozub0NBQJk6ciJdX0y/dePnIFqCIq8cNo0977zo91mHdSf63/SzXjRnAmK4BNb7/mGIzph+PsTMmnaScIo5nagjpNZgB4W0q3N5oNLJx40YmTJhQrakGxzeegphYBnQNZ+rUbpfdPjmniBf+2oJJ0TBlypRal/ZXx6ELWbBPXaqoyKzup2f3KKZeWXV3aVEzNT1maiNlVxzrLqgnuIK9XXhobCSnknP57s94LuQZ+eK0mkkfHunHxzf3w+mS3gG7zcdYsUftrTBm2BUMu6Sk3REOaaM5vCMOj+AOTJ0aVa375BSaOJ+RT/d6KmHbuuoIEM+MfqFMv6p7te+XnFPEe8e2AOqc6gdmT2DtuztIzC4ivM9QBpZ6f9kXl8Gj3xwmIUtd5uuVm64kMkDNAlR07BzfeApOx3JFVChTp1Z/TI6SU2jkrSObyDJquHLsBDxdGuJv4D5MqafQ7XoXzZFvCM7azzTPY1hGPdkA+2oeGuN9R7RscgyJ2qrJsbP5+yOQGI/eO5CpU8tXOW34+hCQyLUDQpk5Xf2f1y+rkI9PbCW5SEtyEYDCM38bRo+Qph8DtUS2iu3qaLJB+bZt20hOTiYsLMx+ndls5rHHHuPtt9/m7NmzFd7P2dkZZ+fy5ZwGg6HJv3HmF5tItnZL7NzWu87jfWpqd+YO60Cob+0qBAwGA2/d0B9FUZi/dC+bolOITs5nSKeq545W97U+n6F+0O4Q4FGt7T2s084tCmh0+ho1r6up5Nzy5SZuzk3/GGquGvLvs3uID6AuB7L0tivsKxHcPboTz6w+wvqjSfRp781/bx2Iu3P5t8RhnQLsQbmfp2uTOAY6BKjzpS9kFFZrPMk5hcz6cBfn0wv45aEr6zy3rNBoZsNRdWWGa/qH1ug1CWmjx91JR16xmS5tPWnj4Uq3YC8Ss1M4nZLP0E6BmC0KH2w6zVu/nqT08uXpBSaiLtlX6WPndIp6RrpbcN3fPxuDr8FAWy9nkrKLOJtRRP+wBqrmCu4O134EXSbAt/PR7XgLXffpENK3YfbXTDSHzwWiaZNjSFRGURTe3HgSnVbDw+O7lLu9OsdOfrE6rTMxq6jcthl5xWw8pv4fvnFwhP32UD893q4Gex+d4Z386Bvu+GRCa1WT94cm2076lltu4dChQxw4cMB+CQkJ4e9//zvr16939PAahK1BkY+boV7WQ9ZpNbUOyEvTaDT0bKdm7Y/F10+HZIA463Jo4dVYDg3Kdj8vNDZsB/aLmQUADAxvg7t1Lr2rrFPeLA3r5M9vj41izf0jyiwNGOjpwkc3D+DXR0fy9T1D8aggIAcY0rHkn1kbd8c3eoNSy6JVY23v7EIjcxfv5Xy6ekyfTKp7Q7XN0SnkFJkI9nYpk9muDo1GQwdrtrufdY6c7STBsYQckrILufl/u3ljoxqQX9OvHX1C1e1KLyNWEdtz69xE1yivSKdAdaz11uytKj1nQfeZoJhh1T1gapgmnkII0dodupDFe7+f5u1fT5GZX1yrx8gpVHu1XMwssK87brP6wEWKzRa6B3vZP6OD+j+29Jrkd42MrNW+ReNzaKY8NzeX06dP23+OjY3lwIED+Pr6EhYWhp9f2TM7BoOBoKAgunbt2thDbRRnU2sWpDYmW9nL0YSsenk8RVGIS63+cmhQNigvMlloyN7KtqB8QEQb7hkVycq95xnXreo1mEXTFRlQcZCm0WjoFFj1keTv4cyD4zqTmltESKmg3pHCfdX3iHPp+VgsCtpLJ1pbFRrN3PXZvjLLjaXl1u7DQWlrDqrz86f3Cal031UZGO7LkYvZjOqiVt1EWYPyLdHJrD+aSHpeMa4GHS/O7Ml1A9rz4Jd/cfB8ZpVBeX6xWp4P0LWJLodWkc6Bnuw4ndY4QTnAtDfg7HZIOQ6b/w3jn2uc/QohRCvyzf6SXk6pucW1Wr0lt8hk/5pdaMLbusSroih8tVd9/BuuCC13v27BXuyOTadrW09GdvavzfCFAzg0KN+3bx9jxoyx/2ybCz537lyWLl3qoFE5ztkarNnd2LoHq2fhTibmYjRb6lw6npFvJMf6ZlPdbL5Go8FZr6XIZGnwZdEuZqhBeTsfV8Z3b8v47hKQt2aPTihfeuZIIT4u6LUaik0WErMLCalgScEik5m7P9/PH2fS8XDW06udN7vOpJGeV7egvMhk5rfjasncjD4htXqMf0yO4roB7e1n97tb10+Nt84d7xbsxfs39rOfTLGt7Z1cRVB+OjkXRQF/Dyf8PBzTkb42IhszUw7g7g/T34avboYdb0PUVdB+QOPsWwghWoFCo5k1B+LtP9f2/25uYcmqJhczCuxB+aELWZxIzMFZr+XqPu3K3e/mIWGcTMrhkQldGrT/kqhfDg3KR48eXa4coyqVzSNvKeLS1Ex5RBPMlIf6uuLprCenyMTp5Nw6z0k9a32uwd4uuNSgLNwWlDdW+Xp11k8XorHpdVrat3HlbFo+cWn55YLyYpOFBcv/ZMvJFFwNOhbPG8SumDR2nUkjLa9uJcsZeUaKTBZ0Wk2tG8e4OunKlNtF+Lnj42YgM9/IvGERPDklqsz7QqAtKM8uLPdYvxxJZG9cFgXW94TOl6l8aGo6W4PyU8l1n1ZQbd2mQ6+/qculrb4H7t4KBnmvE0KI+rDhWBLZpQLqtNza/d8t/RjxmQV0t/7PXWnNkk/pGVThdNdOgZ6suHNIrfYpHKfJzilvjWKt5esR/k0vU67RaOhmfTOoj3nl56xVAeE1rAqwfVAvMjZspjzeFpS3kQ+qomkK87OVsOeVud5ktvDQyr/49Xgyznotn84dyBUdfPHzUEvn6lq+biun83DW19sZeL1Oyzd3D2XVfcN4fkaPcifqAr3UoDzlkg82FgWeXHWUz/+I49v9ajO+rkHNKyi3zSm/kFHAtlMpvPzzMaITGyFAn/IaeLSF1JOw6eWG358QQrQS3+wruwxxWm0z5UUlTYdtyaL8YhM/HlSz8NcPCqvwfqJ5qlGmPDMzk1WrVrFt2zbi4uLIz88nICCAfv36MWnSJIYNG9ZQ42wV4uyBatPLlAN0D/ZiT2w6R+OzmVXHakdbptw2N7a6nA3qeaRCU80z5YcvZPHbiSTuG92p3LJXpeUXm8jIV98IKyoLFqIpCLdO+zha6iSZ2aLwyNcH+eVIIk46LR/fOpBhndT5ZH7WJnW1/XBgUzoor0+dq5gHHuipzuVPzi4blKcXqd1pnXRa+oX5kJJTxDX9ypfyNWV+7k72KoFbPt0DwPbTaax9cETDlh26+cL0d+HL62Hn+xA1HcIGN9z+hBCiFYjPLGD76VQAhkX6sTMmrVYnw41mC4WlElC2oPznQwnkFpkI93NjSEff+hm0aBKqlSmPj4/njjvuIDg4mJdeeomCggL69u3LuHHjaN++PZs2bWLChAl0796dr776qqHH3CIVFJtJtJZmdmiiQbmtVPVYPTR7s2fKa1gV4KKvfab8tfUnePvXU3z/54Uqt7NlyT1d9Hg1yLrBQtTdyC4BAHy55xzRiTlYLApPfHuIHw/Go9dq+OCm/oyybgPY51nXdU55XgMF5VWpbE55YoEatHYMcOeru4fy++Oj7Z3amwuNRkPv9j6AOj3HSa/leEK2/UNdg+o6GfreBCiw+l4ovnw3fyGEEJX77XgSigJXRPjaVxhJr8W0sdLzyaEkKP/GWhU2e2CozBdvYar1qapfv37MnTuX/fv307179wq3KSgoYPXq1bz99tucP3+exx9/vF4H2tLZlgfzctHjUw/LoTWE7qXK1xVFqdObQV0z5UW1yJTbgpFN0cnccEXlJT8XMmQ+uWj6xncLZHy3tvx6PIknvjtE92BPvvvzAjqthvdv7FeuOaGvNVOeWsu5bTa2JVo8XBovKLfNKc8qMFJoNNvL25OsMWRVWfbm4F/X9GT3mXRGdw3gvd9Ps3TnWT7eeoYrOwdc/s51NelfELMJ0mPgtxdgyr8bfp9CCNFCHTivJq6GRPrhY23MVpsKNVtVms3FjALyi038GZcB1L7Rqmi6qpUpP3bsGK+99lqlATmAq6src+bMYdeuXdx22231NsDW4qxteTB/9yZ75qtzoCcGnYbsQpM9cK2tuFrOKXe2ZsoLa5Ept73B7TidhtFc+f3jM9WKBQnKRVOm0Wh4aWZPPJ31HDyfyZd7zqPVwFvX92Vyz+By2/tb55TnFJoorsPqBbZMuXsjZsq9XQ32KSelTyrYMuWdKlnyrrlo38aNWQPa4+fhzO0jOqDVwLZTqWWWsmswrj4w4z31+90fwoV9Db9PIYRooQ5dyASgT3vvOvVyybkkUx6fWcBf5zIxWRTa+bhWe+Ui0XxUKyi/dL3w+t5elHReb6rzyQGc9Fp7Z+NjdfiwmFNotJ81rHmjt9pnyvNKrfe433qmsSIXM9UTBjKfXDR1Qd4u/HNaNwA0Gnj9uj6Vnj33cjGgs64pXpcSdtvJLc9GDMo1Gg0BHuVL2JOsQXnnts07KC8t1NeNKb3UkyqfbDvTODvtPB763Kh+//NjYGnY1S2EEKIlyi0ycTpFXd6yV3tv/NxrP20sp1DtbWSrnk3OKbJPaxoU0aY+hiuamDp3Xz9+/DhLlizhwIED9TCc1stWzt2hCa5RXlrPdmoJ+y+HE2r9GLYsuZ+7E541nLPtXIc55aVLgTZHp1S6nX2Ncum8LpqBGwaF8tqs3nw+fzCzBrSvdDutVmMvYa/Lsmi59kx59ZcyrA/2eeXWZm+KopBoLdixdTBvKe66siMAaw7Ek5BVt6qkapuwEJy9IOEA/LmscfYphBAtyNGLWSiKutxvoKdLnf7n2v7Xhvm62RNStrXPB3WQBm8tUY2C8hdeeIHXX3/d/vOmTZvo27cvf//73xk0aBDLly+v9wG2Frby9aacKQe4aXA4Gg2sPhDP7jNptXqMc+nqcw2rxQmI2mbKTZd0sdxysvKgXMrXRXOi0WiYPSiUEZ39L7utvQN7HZZFK+m+3ri9L2zzym3LoiXlFFFk1qDTaoho4u+bNdUn1IcrOvhisigs3Xm2cXbqEQhj/ql+/9sLkF37E69CCNEaHbqgzifv3d4bKJk2lp5XjMWi1OixbOXrni56e+WmrdnbFRESlLdENQrKv/322zLzyl9++WUefPBBUlNTef/99/nXv/5V7wNsLUZ2CWBSj7ZEBTfthkV9Qn2YY22S9swPR6qcm10ZW1VAbT5I13ZOeV5x2SD+eEI2SdZu95eyvelJ+bpoafxKfUCorZLu642bKbevVW79uz2dbGsW6VrlEofNlS1bvuKPc/YyxgY36E4I7AEFGbDoCtj+Npjq1hhQCCFai4PW+eS2FTXaWE+EWxTILKjZ+3hOqZVOSieJ2rgZWlx1mFBV65PMZ599xrJlyzh79iwHDhyw/7xjxw48PDz47LPPsFgsnDlzhs8++4zPPvusocfd4tw7OpL/3jKQHiHejh7KZT0xqSu+7k6cTMpl6Y6zNb6/bTm0sFo0qXDW1y5Tbgsk9FoNfaxnMDccTSy3nclssS9N117K10UL42ud31aXDuy5Dui+DqXWKrfOKbfN24ts5k3eKjM2KpDIAHdyikx8tfd84+xUp4e/LYHgPlCUDb8+B+8PgqOrQalZlkcIIVobW6a8jzUoN+i0eFs7sNd0WbRce6bcUObz6MAI3ybbEFrUTbWC8vDwcCIiInBycqJt27aEh4eTmZmJl5cXY8aMITw8nMjISDQaDREREYSHhzf0uIUD+bg58eTkKADe/vUkiVkVZ5wrY8+U13CNcsC+FFJRDbtHl+4YPaprIADP/HCUsW9s5qWfjrHzdCrFJgtJOUWYLQoGXUljKSFaClv5en00emvM7utQfq3ymBT1faRTQMsqXbfRajXcac2WL94eW6uqpFoJ6Ap3boaZH4JHEGTGwTdzYclUiP+rccYghBDNTEZesX16Zq92JQk2P/typDX7v2urkPJw1hPiXRKUS+l6y1WtoHzUqFGMGjWK/v3789NPP+Hk5MS6deuYOnUqI0eOZNSoUQQHBxMaGmr/WbRs1w1oT/8wH/KKzbz487Ea3fdcWu3nz9sy5YXGmmXKc0uVAd00OIwrO/uj12o4k5LH/7bHcuP/dtP/xY08vFL90Bns7YpWK2ciRcty6Zzy8+n5pOTU8Ox9qb+lxmSfU27LlCdbM+UtuIxvZr92+Hs4EZ9VyNo6NNesMa0W+t4ID+yHkU+A3hXO7YSPx8Cqe6Eop/HGIoQQzcDhi2qWPMLPDW+3kp4rtZ02Zl/pxEVfpvGwNHlruWo0Ee/111/nwIEDDB8+nLi4OF544QX7bUuXLmXy5Mn1PkDRNGm1Gl6c2ROtBn4+lMD2U6nVul+h0Uy8NbMeXpvy9VpnytUg3sNZT1svFz6/fTB/PjuBD27qz3UD2uPv4URukYm9Z9Wl0qTJm2iJ/KzVH2l5xWTkFTPp7a3M+nBnjR4jz2FBua18XX3/aOmZclArg+YOjQDg461nUBq7hNzZA8b+HzywD3rNBhQ4uALWPdW44xBCiCbu0CXzyW3sHdhrOG0st1SjN9tnUleDjh4hXnUbqGiyavSpqk+fPpw9e5a0tLRya5E//vjjeHnJgdKa9Ajx5tahESzdeZZnfzjCmgVDL3ufCxlqltzDWW9/o6qJumbKSy/j5OViYGqvYKb2CsZiUTh8MYvfTyRz8EImtw3vUOOxCdHUlV6eZV9cBvnFZs6l55NfbMLNqXr/DnIcFZR72ebDF5OSU0RGvhENCh39W25QDnDzkHA+2BzD0fhsdsakMbzT5bvs1zvv9jDrE+h9PSyfBQeWw5D7oG33y99XCCFagb/OZQIlnddtSp8Mr4nswpKVTgZG+DLnijB6t/fGoGt5jU2Fqla/2UsDcoDg4GDc3Vv2hyNR3qMTu+Dv4cyZ1DyWVKPpW5J1jeFgb5daNaqojznlFdFqNfQJ9eGRCV1YetsVjOoSUOOxCdHUlV6e5cD5DPv1NSlhv9zfUkPxc3dCowGzRWG9tUljG2dwdWrcLvCNrY27E7MHquvPf7z1jGMH03k8dJsOikVtAieEEIJCo5mdMeoywUM6lo2R/Gu5FGlukXVOuYsenVbDK9f2sq9+JFqmagXlK1eurPYDnj9/nh07dtR6QKJ58XIx8H/T1KZvi7acIf0yn+1tXZ9tc2xqqraZ8rxix2T3hGhKfEt9OLCd1YeS5mnVUbqkrjHpdVr7nPh/rT0OQB/f1tERfP6IDmg1sOVkCtGJDp7PPX4haPVwagOc2eLYsQghRBOwKyaNAqOZYG+XcuXlvpc0WM0tMlGdJctLzykXrUO1gvIPP/yQbt268dprr3H8+PFyt2dlZbF27VpuvPFG+vfvT1paWr0PVDRdM/u2Y3AHXwqNFr6PrfqQsnWf9K9lZ/OSJdFqlil3VMdoIZoSWxldbpGJA+cz7dcnZ1cvKLdYFPKK1RNijvhbCrDOK88vNtPex4UpoY3UkdzBwv3cmdwzCIBPtjk4W+4XCQPnq9//8gQU5zl2PEII4WC/Hk8CYFy3wHJVoLb/u6m5Rfx1LoMrXtnED3GXD79ybCfA5XNrq1GtoHzLli28+uqrbNy4kZ49e+Ll5UXnzp3p1asX7du3x8/Pj/nz5xMWFsaRI0eYMWNGQ49bNCEajdr0Ta/VcDhDy+aTKZVua8uU1zYot5evG2tXvi6ZctGaebnoMejUDwz5xSXVJik51VvW0FZxAo75W7J1YAd48eoeOLfsyvUybMuj/XDgIknZNVuGst6NelJdLi3lBKx5QNYwF0K0Woqi8NvxZADGdWtb7vbSS5Gu3HMeo1lhX4rmso07bVVpHpIpbzWq/ZueMWMGM2bMIDU1le3btxMXF0dBQQH+/v7069ePfv36odVK84HWqsv/t3ff8W1W1x/HP5Isy3vPbGeH7ISQkEUCSSDsvQmzlF3KhrasX9mrzFIoBcoom7ADITtkkL33juMRO95Tlp7fH4+lxImdeMuyv+9WLzQeSUfytaOje+65ieFcfWIn3vltF49/v5ExPRO9CfShsirLZOPDGzZTXlpRx/L1Ms/sXhv6FC9yGIvFQkxooLe3g0dty9c9v0cBVov3d7E5pcSFMmfzfi4c2oHR3WP5cXOzh+AzgztFc0KXGH7feYD3Fuzk/tN6+y6Y0Fi46D14/0xY+yV0GAYjbvZdPCIiPrJuXz7p+aWEBNo4seuRPbc8M+WZBWX8st7sh1JYYWFrZhHHdah5Kad3pjzIXuMx0rrU+euXuLg4zj333CYIRfzdbeO78cWSnezJKeGfs7fx54k9jzjGu6a8Hp3Xof4z5SpfFzHFhDq8SbkjwEpZhbvWSfmhjWfq06ixoW4/uTv920dyxoBkoG2Urh/qD2O78vvOA3y4aBe3ju/u28qfzifCpCdg2v3w818g4TjoepLv4hER8YHp683S9TE94qqdjPKsKc8rcVa5fvHOAxzXIbraxyyrcFHuMv+NU4Vn26GpbWk0YY4Azuts/hH555xt7Mo+cq1hY60pr/tMucrXReBgB3aAMT3MXQZqn5RXVpzUcvu0xhYb5uCCoR2q/eDTFpzSO4GucaEUlFbw6ZI9vg4Hhv/R3L/ccMFnUyB7m68jEhFpVjM2etaTH1m6DhAdYufQ77ADKz/HLt6RU+3xcLB0HfS5tS1RUi6NalCswchuMZRXuL0dkg+V7VlTXt/y9YbOlPsomRBpKWIOqVKZ1Nf8EFHbLdF81XldTFarhRsq15b/Z/6OY65JbHIWC5z9CrQ/Hkpz4eOLoaTmD5oiIq1JWl4Ja1PzsVjg5N4J1R4TYLMSFXywBP36UZ0BWLzjQI1/wz2l66GBNmzW5q9KE99QUi6NymKBO0/uDsCyXVU/nBmGcchMeX3L1+vXfd1XeyuLtDSxoeYXYsmRQfRrFwnUvtGbp3xdv0e+c9bAZABSc0u8nfB9yh4Ml34MER0geyt8ehVU1H6LPRERf+Vp8Da4Y9RRK0A968rDHQH8cUwKgVaDnGInmzMKqz3eM5GkJm9ti5JyaXQdY4IBs9Ok65DNGPNLK7xrZOpfvu6ZKa9fozeVAUlblxhh/u4N6RRNQuX57KJyKlzH/qKrUL9HPhdySLWPs45fTjaZ8ES4/BMIDIed8+Drm8DdQmITEWkinq3QJhxXfem6h6dC7ZQ+CYQ6AkgJNz8bL9pe/RbSnply/Vt7FL6uFGsC9U7Ky8vL2bRpExUVFcc+WNqU6JBArBZwG5BddHDGxNPkLcwRUO81ofWdKT/Y6K1trkUV8bhgaAf+OLYrf57Yk5iQQGxWC4ZhJubHUlha2ehNHxR8xma14KlmLK/FFynNJqk/XPIBWANg3Vfw6RWw+WeoOPa4EhHxN0VlFSzYZibVE2pYT+5xYtdY7DYLV4wwS9e7RxwrKTf/rVXn9RoUZcPLA2DG/4HLeezj/USdk/Li4mKuv/56QkJC6Nu3L7t37wbg9ttv5+mnn270AMX/2KwWYipLZA9dq+rZDq2+petwcKa83OWuMgt/LJ79lZVMSFsXF+bgwdP70D0hDKvV4v19zMw/dsmxp1xav0e+5WkUVN5SZso9uo2Hc94wz2/60Vxj/nwP+OY22DYTXPoSX0Rah3lbsiivcNMpJoQeCWFHPfbOCT1Y/cipDOsSA0CPSPPz6+IdB3BX81nWM5Gk/i01WPUx5O6GrdPNL4JbiTon5Q8++CCrVq1i9uzZBAUFea+fMGECn376aaMGJ/7Lsw/5oUm5ZyauvqXrcHCmHOr2gbRI63NEqpUQbv4dz6zFunJv8xkl5T5lt5l/B50taabcY+Al8IdZcMIfISzRbAC34gP44Dx4oRd8fxfs3+TrKEVEGmTGBk/X9YRjbhFqsVgIDjxYqdkpFILtVg4UlbM5s+CI4wu1Y1DNDAOWvmueH3ot+GB71qZS56R86tSpvPbaa4wePbrKIOzbty/btmk7FDFVl5R7ytcbkpQH2g4O2dJarisvq3DhdJnfRCqZEKkqofJ3tTbbounLrZbBszVkiypfP1T7IXD6s3DXBrj6O/ODU0gsFGfB0nfg3xMh88jdOURE/IHLbTBzo9nkbeIxSterY7PC0M7mHuWLth1Zwl6gnU5qtmMuHNhm9jDpf6Gvo2lUdU7K9+/fT0LCkW3/i4qKjvlNkbQdnpJYT7d1OKR8Pbz+5esBNisBlQsqa7uu3NPkDbQlmsjhqvsCrSYHv71XbwZf8s6UV7TwRjdWG6SMhbP+AXdvhiu/MrdPK8uDjy6CggxfRygiUmcr9+SSXVROeFAAw1Ji6vUYw7tUJuXbDxxx28FGb1pTfoSl/zH/O+BicIT7NpZGVuek/Pjjj+eHH37wXvYk4v/+97858cQTGy8y8WvVfdDfX5mge7Zkqi9Pk7iyitrNlHtm94Lt2u9R5HAHZ8qPXb5+MCnXBwVf8iTlLXamvDq2AOh+ClzxOcR0g7w98PFFkLPT15GJiNSJp3R9XK8E79/juhpemcwv3pF9xLpyT6M3VaUdpiADNn5vnj/+Wt/G0gTq/NN+8sknmTx5MuvXr6eiooKXX36Z9evXs2DBAubMmdMUMYofiq8sUd9fWE35enjDknJHgJXCMih11u4DqdbBitQsPqJyTXktGr0VlmoXg5agxTZ6q42QGDMxf2cipK2C14fD6D/DqD+Ze56LiLRw3q3Q+hxZOVxb/dpHEBJoI6fYyaaMAvokR3hv83wBHqGkvKqVH4K7AjoMM3f8aGXq/PXO6NGjWblyJRUVFfTv359ffvmFhIQEFi5cyNChQ5siRvFDB2fKD86+ZVcm5fEN6L4O9ZgpL1fJrUhNqvsCrSae3yWtc/OtFt3orTZiu8F1P5ul7RWlMPspMznf+GOr3HtWRFqP3dnFbM4oxGa1MK5n/ZNyu83K8ZXd2A/fGq1Q+5QfyTBg1Sfm+SFX+zaWJlKvn3a3bt14++23GzsWaUU8H/SrrCkvbHj3dTjY5KikvHZJ+cE9yvXHTeRwCRGV5et1mSlXbwaf8uuZco+4HjDlW1j3Nfz8F8jdBZ9cBn3Ph/PehICG/TshItIUPLPkw7pEExnSsKVcI7rGMHfzfhZtz+baUSne671ryvUF+EFpqyBrMwQEwXHn+DqaJlHnn/aPP/6IzWbj1FNPrXL9zz//jNvtZvLkyY0WnPivo3Vfj21oUl45U375vxeTEO4gKTKI5MggkiKCSY4MIjnKvNw+KoSkyCDvmnIl5SJHSjjkd9UwjKM27CxU9/UWIdBm/oz8dqbcw2KBfudDj0kw73lY8Bqs+wrKC+HiD8AedOzHEBFpRjM2ekrX6951/XAjusYCB/crt1b2PSrw7lOu/i1eaz43/9vzNAiKOPqxfqrOn6weeOABnn766SOuNwyDBx54QEm5AAeT8rwSJ2UVLlxug+LKme24BpavnzkgmS0ZBVS4DdLySknLK2VFDcc+OLk3UZXfZKoMSORInt/VcpebvBInUSE1/35q79SWwS8bvR2NIwwmPAopJ8H/LoMtv8BHF8LJf4WOw1vVPrQi4r/yS50sruyW3hhJef/2kYQG2sgtdrIxvYDj2pnJprfRm/6tNbldsOYL8/yAi30bSxOq8097y5YtHHfccUdc37t3b7Zu3dooQYn/iwy2Y7dZcLoMsgrLcVXuE+4IsDb4j8yt47tz00ndyC4sq0zKS0jLKyW9MkFPzytl14EiMvLLmL4+g8n9kwHNlItUxxFgIzLYTl6Jk/0FZTUm5Yd+saYPCr7VKsrXq9NtPFz5BXx0MeycB/85FaI6w4BLzFNcd19HKCJt2JxN+6lwG3SLD6VLXGiDH8+zrnxOZQn7ce0iKCh1kppbAkDHaDW/BMx/DwrTITgauk/0dTRNps6frCIjI9m+fTtdunSpcv3WrVsJDW34AJXWwWKxEBfmIC2vlKyCMlyVzXviwhyNsp+9zWohISKIhIggBnaMOuL2DWn5TH55HpsyChjTIx5QozeRmiSEO8grcbL7QDE9Eqvf99PT5A30BZevHWz01gqbonUZDdf/Agtfhw3fmmvN5z5rntoPNZPzvudDWLyvIxWRNmbmxkwAJhzX8FlyjxFdY71J+XWjU1i5JxfDgI4xwSREaAkPAKsrS9ePOxcCGlZt25LVufv6Oeecw5133sm2bdu8123dupW7776bs88+u1GDE/926LryrILG2Q6ttrrFhxFgtVBQWsHW/YWAmlOJ1GRIp2gAXvhlc43rlD29Gew2i7fZovhGoKd8vZY7UPidpH5w3j/hni1wwTvmmnOLDVKXwU/3wT/6w7qpvo5SRNqYDWn5AIxIiW20xxzR1bNfubmufNmuHACGVv673OYVZMD6qeb5Vly6DvVIyp999llCQ0Pp3bs3KSkppKSk0KdPH2JjY3n++eebIkbxU4duteTtvB7aPN9wBQZY6RpvVm4sr/wDp9k9kerdc2ovokLsrE/L562526s95uAe5QGNUu0i9ecpX2+VM+WHCgyB/hea+5rfvRFOe8bcm7aiBD6/Bhb/y9cRikgbkppTWVYe03hl5Z515XklTjak5x9MyjsrKQfg10fN5p/tBkPHEb6OpknVOSmPjIxkwYIF/PDDD9xyyy3cfffdzJgxg5kzZxIVFdUEIYq/8mx9tr+gzNt5vaHbodVFz8oyXM/aHK2DFalefLiDh880e4W8PGMLWzMLjzhGTd5aDntl9/VW0+itNsISYMRNcOMcOP56wDBnzX+8D1xOX0cnIi1ceYWbS/61kJs/XEZ+ad3/ZuSVOL1d0dtHhTRaXAE2K8NSzNnyBVuzWbk7F4AhSsphz++w6mPz/OkvgLV1V+nV69VZLBYmTZrEvffey2233cbYsWMbOy5pBTzl61mFZSyt/OavMRpj1FbvpKprY7WNk0jNzhvcnpN6xlNe4eaBL1fjdledhVVS3nK02kZvtWG1wRkvwCkPm5d//xe8f7ZZ4igiUoM1qXks3nGAn9amc/GbC0nLK6nT/T2z5LGhgQQHNm6PIs/WaB8t3kVBWQWhgTZ6J7XObb9qze2CH+8xzw++EjoM9W08zaBWSfkrr7xCaWmp9/zRTnUxd+5czjrrLNq1a4fFYmHq1KlVbn/00Ufp3bs3oaGhREdHM2HCBBYvXlyn5xDf8STlWzMLWbA1C4BJfRuvOcax9DrsD5rK10VqZrFYeOK8foQG2li6K4cPF++qcnuRkvIW42CjtzaYlIO5RdqYu+HSj8ERAbsXwL/Gwm59PhCR6m3NLPCe35hewPlvLGBjen6t7783pxiA9k3QEd2TlO/MNp9jcKdobNY2vkxsyTuQtgockXDKo76OplnU6tPVSy+9xBVXXEFQUBAvvfRSjcdZLBbuuOOOWj95UVERAwcO5LrrruP8888/4vaePXvy2muv0bVrV0pKSnjppZeYNGkSW7duJT5enVdbOk9SvmBbNgA9E8PoFh/WbM/f67Au0uq+LnJ0HaJDuH9ybx7+Zh3P/LSRk3sn0CHaLNMrOGRNufhWYFtPyj16nwF/mAWfXgH7N8J7p8NpT8OwG7S3uYhU4VmWdVrfJLZkFrBtfxEX/XMh/7pqKCO7xx3z/p6lkB2aICnv1y6CMEeAtyKtzZeu5+wy15IDnPK3NrPbRq1mynfs2EFsbKz3fE2n7durbxBUk8mTJ/P3v/+d8847r9rbL7/8ciZMmEDXrl3p27cvL774Ivn5+axevbpOzyO+cfj68dP6JTfr83eIDibkkBIjdV8XObYrh3fm+M7RFJW7+MvXazEqtzP8Zb1ZHtwUH0ikbtp0+frh4rrDDTOg73ngrjDLHRfUrWpPRFo/T1I+pmccX948khO6xFBQVsHV7/7O1BWpx7z/3sry9fZRjf9vYIDNyrAuBxPxNt3kzTDguz+Bswg6jazsIdI21ClLcTqd9O7dm++//54+ffo0VUzVKi8v56233iIyMpKBAwfWeFxZWRllZWXey/n5ZmmK0+nE6VQzmKbkeX89/40Orvqdz8Tecc3+M+iREMaqvXkAOGxoDLQwh48ZaRmeOOc4znpjIXM27+eLpbvpkRDG9PUZWC1w1fCOLeLn1ZbHjhXzi5JSp6tNvv4jWB1wzltY43pjm/MUTH+YipAEjH4XVnt4Wx470jg0hvzPlgyzfL1LTBChdgv/mTKY+75ay49rM7jz05XsyS7kj2NTatxdZM+BIgCSIxwN+rnXNHaGdYli1qb9WCzQLym0bY4tZzHW+S9g2z4Lw+ag4vQXweUyT36qLj/HOiXldrvdu7a8uXz//fdceumlFBcXk5yczPTp04mLq7nM5KmnnuKxxx474vpffvmFkJDG65YoNZs+fToApS7wDLH4IINty+axvZkrCoPLrXgKQpYunM8uTfK1SJ4xIy3HpHYWvt9t45Fv1tAuxACsDI51s2nJHDb5OrhDtMWxsz3VAtjYsWs3P/6409fhtCB96Bd/Kt32/4z121tZs3wRu2NG47ZWvxVnWxw70rg0hvxDmQtSc22AhZ2rFpG13rx+YhiUJFuZlWblhV+38tPSzfSJMugRaZB8WMqwfpd5/7St6/jxwNoGx3T42LEVg81io0sYzJ/VtsaVxV1B5+zZ9Er/BnuFOZG2PvFcti7eDGz2bXANVFxcXOtjLYanNrGWnnzySTZv3sy///1vAgIarxzYYrHw9ddfc+6551a5vqioiLS0NLKysnj77beZOXMmixcvJiEhodrHqW6mvGPHjmRlZRER0cY7GTYxp9PJ9OnTmThxIna7HcMwGPB/Myh1uvnjmBTumdSj2WN6b+EunvjRTCEW3n9Ss27JJsd2+JiRlsPpcnPBm4vZkG7OLlgtMO2OUaQ04w4KR9OWx867C3bx5E+bOLN/Ei9dPMDX4bQshhvb13/AuuEb82JILO4h1+Aeci2EJwFte+xI49AY8i/r9uVz7j8XER1i5/cHxx9x+/sLd/HET5s4NCP65IZhVcrIT3hqFjnFTr6/9UR6Hba7T10cbezsyCoiOiSQqJA2MqYMN5Z1X2Kb8wyW3J3mVZGdcJ30AEa/i1pFb5D8/Hzi4uLIy8s7Zh5a56x6yZIlzJgxg19++YX+/fsTGlr1A9pXX31V14c8qtDQULp370737t0ZMWIEPXr04J133uHBBx+s9niHw4HDcWTiZbfb9YezmRz6XvdOimBNah7nDO7gk/e/b7so7/mo0GDsdjV7a4n0+9ny2O3w3EUDOef133C5Dc4d1J6eyVG+DusIbXHsBFf2x3AZtLnXXisX/Bt+HwaL/4Ulbw+2+S9gW/AK9LvA3Os8vh/QNseONC6NIf+w84BZ5dsjMbzan9cNY7szvGs8szZl8uOaNDamF/D1ynRGdDcnAIvKKsgpNsuQO8dX/xh1Vd3YaYn/xjYJw4DNP8PM/4OMyqqD0AQ46T4sQ64mIKD66iZ/VJexUuekPCoqigsuuKCud2s0bre7yky4tGxvXjmUrMIyjmvnmyqFPskRBAZYCQ20EWSvVV9DEanUr30kD595HFNXpnLXpJ6+DkcqqdHbMQQEwsjbYfjNsPF7WPRP2LMIVn8Cqz/B1nEEESFn+jpKEWkmWyq3Q+ueUPMOQP07RNK/QyTDusRw2duL+GltGo+f2xdHgM3beT0iKIDwIH0J0yDFB2DqLbD5J/OyIwJG/QlG3AyBLaMSz1fqnJS/++67jfbkhYWFbN261Xt5x44drFy5kpiYGGJjY3niiSc4++yzSU5OJisri9dff53U1FQuuuiiRotBmlZSZBBJkUE+e/7o0EA+++OJOAKsNTbvEJGaXT2yC1eP7OLrMOQQnn3Ky9v6lmjHYguAvueap9RlsOhNWPcV1j2LGGHfBEUXQlTz7goiIs1jyc4DLN+Vw3WjU7yd13scJSn3OCElhsQIBxn5ZczZtJ9JfZNIzfFsh6beVA2yZwl8fg3k7wVboJmIj7oTQmJ8HVmLUOupQ7fbzTPPPMOoUaMYNmwYDzzwACUlJQ168qVLlzJ48GAGDx4MwF133cXgwYN5+OGHsdlsbNy4kQsuuICePXty1llnkZ2dzbx58+jbt2+DnlfalkEdo+iTrH4CItI6eJNyzZTXXvuhcMHbcOcajNgeBDtzsH13G7j1Hoq0Ro99t46nftrIf+bvYEtlUn60mXIPm9XCWQPaAfDtqn0A7M0xm3W115ag9WMYsOBVePc0MyGP6Qo3/AoTH1dCfohaz5Q/8cQTPProo0yYMIHg4GBefvllMjMz+c9//lPvJx83bhxH6zPX2OvTRURE/J2nfN2pmfK6i2hHxXn/xvrOBGzbfoWFr5qlkyLSqmQXlgPw2sytFDvNLbV6JNSuQdvZg9rx7/k7+HVDBkVlFezN9cyUKymvs/Ii+PIG2PSjebnveXDWKxCkybLD1Xqm/L///S9vvPEGP//8M1OnTuW7777jo48+wq1vmUVERJpNoM2TlNdp8xTxSOzLmg5XmOdnPA7pa3wbj0gLtT57PYvTFlNS0bDKWF8oLK0AoKCsApfbIMwRQGJE7Xbg6d8+ki6xIZQ63fyyPt1bvt4+Skl5nf36qJmQ2wLhjBfgwneVkNeg1jPlu3fv5vTTT/denjBhAhaLhX379tGhQ4cmCU5ERESqUqO3htsVO54BwZlYN/9oNh36w0ywqYGTyKE+2vAR3277lgBLAMfFHkeXyC4khiQSHxJPQnACCSEJxIfEExsci93acn5/DMOgsLyiynXdE8Jq3VvIYrFw9qD2vDJjC89O20RQ5c49WlNeR7sXw+9vm+cv+x90n+DbeFq4WiflFRUVBAVVbdhlt9txOp2NHpSIiIhUz25T+XqDWSy4Jj+Hdc9CSF8N8/8BJ93r66hEWpRoRzSJIYlkFGewOms1q7NWV3uc1WKlX1w/Tk85nVO7nEpccFwzR1pVcbnLu+f4ST3jmbN5P32S67a3+PWjUvhh9T627S/yXqfy9TqoKINvbwcMGHSlEvJaqHVSbhgG11xzTZU9wEtLS7npppuq7FWudeAiIiJNx24zZ3vKNFPeMGGJMPk5+OoGmPMMxPUwO7WLCAD3DLuHu4+/m31F+1iVuYp9RfvYX7yfzOJMMksyySzOJKs4iwqjgtX7V7N6/2qeX/I8EzpP4JJelzA0cahPdr4pKjNnyS0W+Mclg/hg0S4uGFq3qt7IEDv/vX44F7yxgPR8c59zla/XwbwXIWsThMbDpP/zdTR+odZJ+dVXX33EdVdeeWWjBiMiIiJHp0Zvjaj/hbDhW/P0+dWw5Qo47WmteRSpZLFYaB/WnvZh7au93W24ySjKYMbuGXy//XvWZa9j2s5pTNs5je5R3bmk1yWc3vV0IgKb73eqoDIpD3MEEB0ayB2n9KjX47SPCua/15/A5W8vJj7cQVRIyynRb9EyN8C8F8zzk59Vh/VaqnVS3pj7k4uIiEj9BKp8vfFYLHDBOzDnaZj/Eqz8CHbOh/Pfgk4jfB2dSItntVhJDkvmyuOu5MrjrmRD9gY+3fQpP+74ka25W3li8RM88/szDEkcwrCkYSSFJpEQbK5FTwhJICIwotFn04sOScobqmdiOPPuG4/dZvHJrL/fcbvMsnW3E3pONrutS600fLSKiIhIs9E+5Y0sIBBOeRi6T4Svb4TcXfDuZBhxC4y8HcKTfB2hiN/oE9uHR0c+yl3H38V3277ji81fsDV3K7+n/87v6b8fcXygNZD4kHg6hndkSOIQjk88ngHxA3DYatcpvTqezuuNkZQDBAfaGuVx2oQl78DeJRAYbnZb1xcZtaakXERExI8cLF/XlmiNqvOJcNNv8NP9sOpjWPgaLP4X9DvfTNDbDfJ1hCJ+IyIwgiv6XMEVfa5gT/4eZu+dzaYDm8gqySKzJJP9xfvJLcul3F1OamEqqYWpLEpbBJiJev/4/gxPGs6J7U6kb1zfOnV3L6ycKQ9tpKRcaqGsEOY+Z/7dBJjwCERWv+RBqqfRKiIi4ke8M+UuN4ZhqKSyMQVFwHn/NBu+zX8Jdi+E1Z+ap04jYcTN0PsMsGrmTKS2OkZ05Krjrjri+jJXmbdx3JacLSzNWMqS9CVkl2azLGMZyzKW8caqNwi1hzIsaRjDk4bTLqwd0UHRRDmiiHJEEREYge2w30dPUh4epDSnyRkGrP8Gfn4I8lPN6/pdAMdf79u4/JBGq4iIiB/xzJSDOVseGKCkvNH1PNU8pS6HRf+EdV/B7gXmKaoTnHQ/DFazW5GGcNgcdAjvQIfwDgxJHMIlvS/BMAx25u9kSfoSFqctZnH6YvLK8pi9Zzaz98w+4jEsWIh0RBITFEPP6J70j+vP9uwQrEFZ2Ow2ip3FhNi1v3iTyNoKP90L22aal6M6m43dep3m27j8lJJyERERP+Jp9AZms7dDk3RpZO2HwAVvw8THYcnbsPQ/kLsbvrkVgqKgz5m+jlCkVbFYLKREppASmcLFvS7G5Xax8cBGFqYtZFXmKg6UHiCnLIfc0lwKnAUYGOSW5ZJblsv2vO1M2zkNgNAUWGbA8I/NcniLxYLL7cLAwIIF8/+V/7NYsFqsBNmCiHBEEBsUS4/oHvSK6UWv6F70iO5BcIC2Q/MqLza7qy94BVzlYHPA6Dth9J/BrvepvpSUi4iI+BHPPuVgNnsLrX8/JKmtiGSzGdyYe+CXv8LSd+Db28ykPaKdr6MTabVsVht94/rSN67vEbc53U7yyvLILc0lsziTtdlrWZO1hlVpO8kuySHAXowbJ+Xu8iMfuJqWHCUVJeSU5bArfxfLM5d7r7dgoV1YOzqGdyQpNIkgWxDBAcE4Ahw4bA6CbEEEBQRhs9jIL88nryyP/PL8g6eyfEoqSrBb7QRYAigoKOD7md/jCHAQaAvEbrVjt9oJtAWaJ2sgAdaAKpcDbYEkhCTQPao77cLaYbX46MvYsgJ4+xRzD3KA7hPM2fHYbs0eSrmrnEBbYLM/b1NRUi4iIuJHAmxWrBZwG9oWrdkFhpj7mKcuhbRV8NWNMOUbrTEX8QG71U5ccBxxwXF0j+7OyPYjAXjkm7W8v2YXt47rxi2ndCSvLA8L5my4xWLBMAyMyqzcc95tuCmpKCG/PJ+0ojQ252xm04FNbDqwiezSbG8zusayM31nve9rtVi9XwY4Air/a3PgCHAQbg8nMTSRxJBEIh2RhNpDCbSaiaun/4iFyi92LQfPe/97yDEOm4PwwHAiAiOIcEQQHhiOY/bTZkIemgBnvgi9z2y2Duu5pbn8nv67d1lD75jePH/S883y3M1BSbmIiIifsduslFW4KVdS3vwCAuGC/8C/xsLOeTDrSTjlb76OSkQqFZa5AAgLshNqDyXUHtqgx8sqyWJX/i5SC1PJLM6ktKLUPLnM/5a5yih1leJyu7xJbKQj0pvMRgRGEGIPocJdQWl5KYuXLqbvwL64LW6cLnMmv9xVTrm7HKfLidPt9F4ud5nXlbnKSC1MZXvedpxuJyUVJZRUlEBZY7xjtRdoGIR3bE9QaDysfx3L+jewWCzeLz0A72XP0gCLxYLNYjNPVhsBlgBsVhtWi9V7/tDbbRYbAdYArBYrbsON0+1kZ95ONh7Y6P0yBaDIWdSqmp0qKRcREfEzgQGVSbn2KveNuO5w5kvmvubznof43jDgIl9HJSJAYZkTgLBG6r7umY0fmji0wY/ldDopWl3E6SmnY7fXfps3jwp3BTmlOVW/EDjki4H8snwyijPILM4kvzyfwvJCKtwVBysDDqsQ8Jw/lIGBYRiUucrIL8+noLyAgnJz/X65xUJ2gA3KDjT7FwIA3aO6Mzx5OMOThnN80vGtJiEHJeUiIiJ+x9PsTXuV+9DASyBzHfz2stn4LaYrdGj4h3YRaZgiz0y5o/UtKwmwBhAfEt/sz+te/BbF0+6jICic/Ms/piwo0pu8A7gNt/dylf9WLg1wG25cbhcuo/LkdlFhVHivrzAqDt5+yHGe2fT4kHiGJQ0jLjiu2V97c1FSLiIi4me8e5Vrpty3TnkEsrbAph/h86vh5t8gKNLXUYm0aQWV+5SHOeo+Ey3VyN6G9ddHCDMMwk76C8kdx/g6olZJ+6iIiIj4Gc82aFpT7mNWG5z/FkR3gbw98ON9vo5IpM0rqkzKQ1vhTHmzc7tg6s3gLIYuY+CEG30dUaulpFxERMTPeLZFU/f1FsARDue9BRYrrP4E1n3t64hE2rTCUjMpD9dMecMteBX2LIbAcDj3DbAqdWwqemdFRET8jMrXW5hOw2H0Xeb57+6EnJ2+jEakTdNMeSM5sANmP2WeP+0piOrk23haOSXlIiIifsYR4Gn0pqS8xRj3ALQfCqW58MmVUF7k64hE2hzDMCgsr1xT3kjd19skw4Cf7oOKUkgZC4Ov9HVErZ6SchERET9jtykpb3Fsdrj4AwiNh4w18M1t5gdbEWk2xeUu769dmENJeb1t+A62/AJWO5z+ArSircdaKiXlIiIifsbT6K1M5estS2R7uPi/YA2AdV/BP0fCsvfAWeLryETahMLK0nWrBYLtKl+vl9I8+Ol+8/zoOyG+p0/DaSuUlIuIiPgZu/Ypb7k6j4SzXwN7KGSuh+/+BC/2gV8fhbxUX0cn0qoVerdDC8Ci2d36+fkhKNgH0Skw5m5fR9NmKCkXERHxM2r01sINugzuWg+T/m42RyrJgfkvwT/6w+fXwJ7ffR2hSKvk6byu0vV62jQNVnwIWMxu6/ZgX0fUZigpFxER8TNq9OYHgqNg5O1wx0q45ENzj1/DZW6Z9s5E+O1lX0co0up4Oq+ryVs9ZG2F7+4wz594q1n1I81GI1ZERMTPaJ9yP2K1QZ+zzFP6Gljwmrmf+fSHzfLQ4872dYQirUaBdzs0pTi1YhiwexEsfA02/gAYENcLTv6bryNrczRiRURE/IynfF2N3vxMUn84/18QFAG/vwVf3QiRHaD9EF9HJtIqFJWpfL1WXBWw8TvzS8LUpQev73GquSe5Pch3sbVRGrEiIiJ+JlDl6/7t1KfgwA7YOh0+uRxunAPhib6OSsTvFSopPzpXBSx9Bxa+Drm7zOtsDhh4qVmyHt/Lt/G1YRqxIiIifkaN3vycLQAu/A/8ewJkbYLPr4Yp30JAoK8jE/FrSsqP4ddHzFJ1gJBYGHYDDPsDhMX7Ni5RozcRERF/o0ZvrUBQBFz6ETgiYPdC+PlBX0ck4vc83de1prwaqctg0Rvm+Ul/hzvXwviHlJC3EErKRURE/Iz2KW8l4nrA+W8DFljyb9g+x9cRifg1z5rycHVfr6qiHL65HQw39L/Y3BkiMMTXUckhlJSLiIj4GTV6a0V6nQYn/ME8//NfwO3ybTwifkzd12vw28uQuc4sWT/taV9HI9VQUi4iIuJn1OitlRn3IARFQsYaWPmxr6MR8Vvqvl6N/Ztg7rPm+dOegdBY38Yj1VJSLiIi4me0T3krExIDY+8zz8/8Pygr9G08In5Kjd4O43bDt3eAqxx6TIL+F/o6IqmBknIRERE/42n0pu7rrcgJf4DoFCjMgLnP+ToaEb9UWGYu/1BSXmnpO7BnEQSGwRkvgsXi64ikBkrKRURE/MzBRm9KyluNAAec+qR5fuFrkLnBt/GI+KHCUiegNeUA5O6GXx8zz5/yCER19G08clRKykVERPyMGr21Ur1Ph15ngLsCvv+zWXoqIrVWVDlT3ua7r7vd8M2tUF4AHUeY+5FLi6akXERExM+o0VsrNvkZsIeae5f//hYY2vZOpLYK1X3dtPQd2DEX7CFw7htgVcrX0uknJCIi4me0T3krFtURxj9onp92P/z3bNi3wrcxifgBt9tQozeA7G0w/WHz/ITHILabb+ORWvFpUj537lzOOuss2rVrh8ViYerUqd7bnE4n999/P/379yc0NJR27doxZcoU9u3b57uARUREWoDAALNZjxq9tVLDb4bRfwabw5ztemscfHkD5Oz0dWQiLVax0+U932aTcrcLpt4CzmJIGauydT/i06S8qKiIgQMH8vrrrx9xW3FxMcuXL+dvf/sby5cv56uvvmLTpk2cffbZPohURESk5Qi02QCVr7datgCY8CjcvhQGXGJet+ZzeG0Y/PwXKD7g0/BEWqLCUnOW3Ga1EGRvo8XAC1+v7LYeDue8rrJ1P+LTr5EmT57M5MmTq70tMjKS6dOnV7nutdde44QTTmD37t106tSpOUIUERFpcTz7lGumvJWL6gTnvwUjbjHLUXfMMTuzr/gATn8BBlzk6whFWoy8ksrO64E2LG1x66/MjTDz7+b50540/36I3/Cr2o68vDwsFgtRUVE1HlNWVkZZWZn3cn5+PmCWwzudzqYOsU3zvL96n6W2NGakvtr62LFiJuPlFa42+x7Ul1+Onfi+cNkXWLbPxDbzMSyZ6zG+uZWKdkMhUtscNTe/HENtwMJt+wHomRjWYn82TTJ2Ksqwrngf628vYXGV4e42AVe/S6GFvgdtSV1+zhbDaBltPS0WC19//TXnnntutbeXlpYyatQoevfuzUcffVTj4zz66KM89thjR1z/8ccfExIS0ljhioiI+MzeInhudQCRdoPHj3cd+w7SehhuRm59hvjCDeyNGsGylFt8HZFIi/D2Ritrc6yc2cnFxPYtIr1pUhajgo7Z8+mVPpUQp7mkpdCRxG89HqTUHu3j6ATM5diXX345eXl5REREHPVYv0jKnU4nF1xwAXv37mX27NlHfVHVzZR37NiRrKysY74Z0jBOp5Pp06czceJE7Ha7r8MRP6AxI/XV1sfOloxCTn9tAdEhdn5/cLyvw/ErrWLspK8h4J2TsWBQcfWPGB1O8HVEbUqrGEOtTFmFmxOemkVxuYtvbhnBcckt8zN/o4wdw41l3VfY5j6DJWeHeVV4Mu7Rd+MeeAXYNCZbivz8fOLi4mqVlLf48nWn08nFF1/Mrl27mDlz5jFfkMPhwOFwHHG93W7XH85movda6kpjRuqrrY6dkKBAwNwSrS2+/sbg12On4xAYfCWs+ICAX/8G1/+qhk4+4NdjqIVzutzerR9r4/ddWRSXu4gPdzCgY0yLX1Ne77Gz8zf48R7IXG9eDomDMXdhOf56bPYgbI0bpjRQXX7GLTop9yTkW7ZsYdasWcTGxvo6JBEREZ+zB5gfVtXorQ07+W+w7mtIXQbrp0K/830dkUiDpOeV8tqsLSzdmcOmjALO6J/Ma5cPqdV9Z2/KBOCknvEtPiGvt9w98PElUF4AjkgYdbu5faIjzNeRSSPwaVJeWFjI1q1bvZd37NjBypUriYmJITk5mQsvvJDly5fz/fff43K5SE9PByAmJobAwEBfhS0iIuJTgZUzSOUuN4ZhtN4PoVKz8EQYeTvMfsrsuNznLJWtil97e952Ply023v5xzVp5Jc6iQg69ries9ls8nZSz/gmi8+nDAO+u8NMyDsMgys+h2CtG29NfFrrtHTpUgYPHszgwYMBuOuuuxg8eDAPP/wwqampfPvtt+zdu5dBgwaRnJzsPS1YsMCXYYuIiPhU4CFlnRXuFtEaRnzhxFshJBYObIMVH/o6GpEG2ZVdDMB1o1LoEhuC24CF27KPeb99uSVszijEaoExPeKaOkzfWPkRbJsJNgec84YS8lbIpzPl48aN42h95lpIDzoREZEWxR5wcGa8vKJuay+lFXGEw9h7YdoDMOcZGHgp2IN9HZVIvezLLQFgdI9YKtxudi7cxW9bszi1b1KN98kvdfLCL5sBGNQxiqiQVlhJm78Ppj1knh//EMT39G080iT0r7iIiIifOXSm3OnSuvI27fjrzL3KC9Lg97d8HY1IvaXlmUl5u6hgRnU3Z7znb82q9li32+DzpXs4+fk5fLl8LwCXDuvUPIE2J8OA7/8MZXnQbgiceJuvI5ImoqRcRETEz9isFjzLyMuVlLdtAQ5z9gxg3otQkuvTcETqo7i8gpxiJ2Am5Sd2i8Vqge37i7wz6B4r9+Ry3j8XcO8Xq8kqLKNrXCjvXTuMi4d19EXoTWvN57B5GljtcO4bYGvRPbqlAZSUi4iI+BmLxeItWVcHdmHAJRDfG0pzYcGrvo5GpM725ZYCEO4IICLITkSQnYEdo4CDs+X7C8q474tVnPv6b6zak0tooI2HTu/NtDvHMq5Xgq9CbzoFGfDTfeb5k+6HhD6+jUealJJyERERP+SoTMqdLvVfafOsNnOLNIBFb5gf5kUa6Nf1GVz4zwXsyi5q8ufyzIYnRwV5rxtdWcI+b0sWHy7axcnPz+azpWap+vlD2jPrnnHcOLYbgQGtNJ355S9QkgNJA2D0nb6ORppYKx3FIiIirZv2Kpcqep8B7Y8HZzHMfc7X0Ugr8NnSPSzdlcNXy1Ob/Lk8SXm7qIONCj1J+Xer9vHXqWspKKugf/tIvrx5JC9ePIiEiKBqH6tV2PmbWbqOBc5+RdsdtgFKykVERPxQoHemXEm5ABYLTHjEPL/sPcjb69NwxP/ll5prvNfty2vy59qXZ5avH5qUD+4UTUigDYAgu5WHzzyOqbeOYmjnVr4dmKviYNn60Gug3WCfhiPNQ0m5iIiIH/Jsi6ZGb+KVMha6jAG3E+a/5OtoxM8VlFYAsCa1GZLyypny9ock5YEBVh46vQ9nD2zHz3eO5brRKdislpoeovVY+h/IWAtBUXDKw76ORpqJknIRERE/pEZvUq2T7jf/u/y/kNf0ZcfSenmS8oz8MjILSpv0uQ6Wr1ctSb9yRGdeuWwwnWNDm/T5W4ysLTDjMfP8KX+DkBjfxiPNRkm5iIiIH1L5ulQrZQx0HgWucs2WS4MUllV4z69LzW/S5/Im5ZHBxziyFSsvhs+mQHmhWfEy9FpfRyTNSEm5iIiIHwpUozepiXe2/H1Y+yUY6tAvdWMYBgWVa8qhaUvYDcOodk15m+Isge/vhMz1EJYIF7xj7qogbYaSchERET9k10y51CRlLPSYZM6Wf3EdfHYVFGb6OirxI2UV7irbLa5twqQ8u6ic8go3FgsktuaO6oczDNizBL67E57vBas/BYvVTMjDE30dnTSzAF8HICIiInXnKV8vb8A+5YZhYLG0gcZJbY3FApd8BPNegHnPw4bvYOd8mPwc9L/QvF3kKDzryT2aMin3lK4nhDta757jhwhy5mBd8Aqs+QSyNh+8IbITnPxXcwmKtDlKykVERPyQZ5/yMqerXvffklHA+W8s4KLjO/LwWcc1ZmjSEgQEwvgHzf3Lv7kF0tfAVzfAuq/hzBchPMnXEUoL5ildDwywUl7hZl9eKdmFZcSGORr9uarbo7xVKsrGNvUWJm35GQuVX6YGBMNx58Cgy8115NbW/6WEVE8/eRERET/k2TpoU3pBve7/3oKdFJRV8OXyvbjdWnPcaiUPgD/MgvF/BasdNv0Arw+H1Z/5OjJpwTwz5XGhgaTEmZ3P1+5rmmZvqbltYD25YcB3d2DdMg0LBu6OI+DsV+GezXD+v6DrSUrI2zj99EVERPzQiK7mVjkLt2fX+b6lThffrtoHQF6Jky2ZhY0am7QwNjucdC/8cQ4kD4LSXPjqD7Bluq8jkxbK03k9LCiAfu0jgaYrYT/Yeb0Vrydf+yVs/B7DGsDcnn/DNeV7GDIFgiJ8HZm0EErKRURE/NCJXWMBWJ+WT16x8xhHV/XzuvQqa0Z/33mgUWOTFiqxL9wwAwZfZV7++S/gqjj6faRN8pSvhwfZ6dfOTBxX781tkudKy2vl5esFGfDjPQC4R91FTmgPHwckLZGSchERET+UEBFE1/hQDAMW76h5ttwwDHZnF/PNylTmb8nCMAy+WLYXgOgQOwC/71BS3mbYAmDS3yE4BrI2mdumiRwmv/JLu/CgAEZUfgE4a9N+DhSVN9pz5BaXs3x3jncJTqtMyt1u+PZ2KMmBpP64R/3Z1xFJC6VGbyIiIn5qRNdYtu8vYtH2A0zqazbuKi6vYNWePFbsyWH5rlxW7skhq/DgB+mxPeOZvzULgAcn9+G+L1ezZMcBdWJvS4KjYNwD8NN9MOtJ6H+RymiligJvUm5nQIdI+rePZE1qHp8s2c0t47rX+fE2ZxTw89p0dmQVsSO7iB1ZReQeVuHTMTqkUWJvURa8Alt+BpsDzn3TXEoiUg0l5SIiIn7qxK6xfLx4Nwu3Z1Ne4ebPn61k2tp0XIc1brPbLPROimBjej5zN+8HYHhKDGcNbMdfpq4hPb+UvTklrNiTy/M/byLYbiMqxE50SCBRIXaiQgKJDrGTHBXMaX2TvNsWPTttI2tS83j1ssFEhQQ2++uXBjj+Ovj9LcjeCvNfggmP+DoiaUEKK5PyMEcAFouFq0d24Z7PV/HRot3cOKYrAba6Fdte//4S9hwoOeL6pIggUuJCGd41hj7J4Y0Se4uxayHMeNw8P/kZSOoHzrotNZK2Q0m5iIiIn/KUlW5Iy+ehr9fww+o0wGyYNLhTNIM7RTG4UzR920UQZLexMT2fez9fzdp9efxhTFeCA230ax/Jit25fLtqH/+cvc3b4KkmfzvzOK4fnUKp08W/5m7H5Ta4+7NVvD3leKxWzbT7DZsdJj4On1wOi94wk/Sojr6OSloIz5ryiCAzVThzQDJP/riB1NwSft2QyWn9ar+lntttkJpjJuS3ju/GccmRpMSF0iUuhJDAVpqKlObBF9eB4YL+F8PQa3wdkbRwrfQ3QUREpPWLD3fQPSGMrZmF3nXib145tMYPzL2TIvjm1lHklTiJDjVntk/oEsOK3bk8/8smDAMGd4rirok9ySl2kltcTk6Rk9ySctbszWPprhwWbc/m+tEprE/L987Iz9iYydvztvPHk7o1zwuXxtHrdHNv5J3zzBm9C972dUTSQhQcsqYcIMhu49JhHXlj9jbeX7CzTkl5YXkFnuKd20/uQZDd1ujxtji/PgYF+yCmK5z5EmhpkByDGr2JiIj4MU8XdoAbx3Y95odlq9XiTcgBhnUxt1YzDAi0WXn2ggGM6RHP2QPbMeXELvxpQg8eOasvD0zuDcCK3bkYhsHqPbkARFU2i3v2502s2J1T4/PmFTu5438reH/Bzvq8TGkKFovZ9A0LrPkMUpf5OiJpIbxbojkOzt9dOaIzNquFhduzvc3ZaiO/xJx1Dwywto2EfPciWPqOef6sV8AR5tt4xC8oKRcREfFjE49LBGBYl2juPbVXne8/rEuMdxLn9pO70yOx+nWd/dpHEmC1kFVYRmpuCav3mnsWX31iF87on4zLbfDhot3V3tfpcnPzR8v4dtU+nvppA6VOV53jlCbSbhAMvNQ8//NfzW9npM3LP2RLNI92UcFMqvx78/7CnbV+rLzKpDwyuA00Oasog2/vMM8PvhJSxvg2HvEbSspFRET82Nie8fz0pzF8eMNw7HVsvgQQGWLnnkm9uOyETtw0ruby8yC7jT7JZofulXtyWVW5Z/HAjpFcPrwTAHM278d9WJM5wzD469drWbDN3Lat1OlmsbZga1lO/hsEBMHuBbBthq+jkRbg8PJ1j6tHdgHg6+Wp5BXXrmlZm0rKf3vZ3GowNB4m/p+voxE/oqRcRETEz/VJjsARUP+y0FvHd+ep8/sfM6kf1DEKgPlbstieVQTAgA5RHN8lmpBAG1mFZaxPy69yn7fmbufTpXuwWuC4yqR+zqb99Y5VmkBkexh2g3l+5t81Wy7eRm9hhyXlw1Ni6J0UTonTxefL9tTqsfLbSlKetQXmPmeeP+1pCInxbTziV5SUi4iISK0M7hQFwDcr92EY0D4qmLgwB44AGyO7xQHmbLnHtLVpPD1tI2B2bb/9ZHN/49mbM5s3cDm2UXeCPRT2rYCNP/g6GvExz5ryiKCqibRnezSA/y7cdcT2i9VpEzPlbjd89ydwlUP3idDvAl9HJH5GSbmIiIjUimemvKRyTfiADpHe207qFQ/A7E1mwr16by53froSw4ApJ3bmmpFdGNUjDpvVwvb9Rew5UNxkcRaWVWBotrduwuJhxM3m+VlPmEmGtFk1la8DnDuoPZHBdnYfKPb+vh+NJymPqOaxWoWKMpj9FOz6DewhcMYL6rYudaakXERERGolJS60ymzXgA5R3vPjeppJ+fLduWxKL+CG95dS6nRzUs94Hj7zOCwWCxFBdoZ2igZg9uamKWF/77cdDHj0Z/7x65YmefxWbeRt4IiEzPWw7itfRyM+UuFyU1xufvEWHnTk7HZwoI1Lhpl72r9Xi90UWu1MuasCVnwIrw6Fuc+a1538V4ju7Nu4xC8pKRcREZFasVgs3tlyqDpT3jEmhK7xobjcBhe+uYDMgjJ6JYbz2uWDCThkrbpnRn1OLWbY6uqblak8+t163Aa8PW87OUXljf4crVpwNIy63Tw/60kz6ZA2p6js4O4Ih26Jdqgrh3fGYoF5W7LYtr/wqI+XX2KOo1aTlLvdsPZLeGMEfHMr5O2B8GQ48x8w4hZfRyd+Skm5iIiI1NqhSXm/9pFVbhvXMwEwS1/jwhy8c83xR8y0nVQ5o75gWzaZBaWNFtf8LVnc8/kqABwBVorLXfx34a5Ge/w2Y/hNEBILB7bB6k98HY34gGc7NEeAlcCA6lOFTrEhnNLb/H3/7zFmy73l6/6elBsGbJoG/xoLX1wH2VsgOAYm/R3uWAHHX6uydak3JeUiIiJSayekmB2FeyWGHzHzdUof80O6I8DK21OG0iE65Ij7920XQVJEEMXlLkY/PYt7Pl/F+n35RxxXV//3/XqcLoOzBrbj2QsHAPDegh0Ul2u2t04c4TD6z+b52c+Y62WlTTm4nvzoSbSn4dsXy/Z6u7VXp1WUrxsGfHs7/O8SyFgDjggY9xD8aRWMvB3swb6OUPycknIRERGptZHdYnnx4oH849JB1d721Pn9+d+NIxhcuXb8cBaLhTevGsrgTlGUu9x8sWwvp78yj8veWsTi7dn1iik1t4RNGQVYLfB/5/TljP7JdI4NIafYySe/127bJjnEsBsgLAnydsOvj6mMvY3xdF6vrsnboUZ3j6NbfChF5S6+XLa3xuNaRVK+4gPzZLHBqD+Zyfi4+yEowteRSSuhpFxERERqzWKxcP6QDvRJPvLDqMVi4bITOjGkhoTcY1DHKL6+ZRRf3TKSMwckY7NaWLg9m8v/vZhPl+yuc0yefc8Hd4omKiSQAJuVG8d2BeDf87ZTXqFO4nViD4aT/2KeX/Q6vHc6HNjh25haIafLzXer9nnLxVsKz6z3sZJyi8XClBO7APDtqn01Huf3+5RnrIMf7zXPn/xXmPi49iCXRqekXERERHxiSKdoXrt8CHPvG8/ZA9vhchvc/+Uanpm2kf0FtS+b9mzL5OkAD3DBkA7EhzvYl1d61IRBajBkCpz/tlmmu2cxvDkGVv7PLOOVRvGvOdu4/X8rePGXzb4OpYqjbYd2uGFdzOR0Z3bNWxz69ZryskL4/BqoKIXuE2DUnb6OSFopJeUiIiLiU+2jgnn50kHcfnJ3AP45exvDnviVc16bzz9+3czqvbm43dUng+UVbhZsM8vePZ3dAYLsNq4fnQLAm3O21Xh/X8kpKufc13/j3/O2+zqUmg24GG6aD51OhPICmHoTfHEtlOT4OrJWwfNl0e87Dvg4kqq8M+WOYyfRHWPMtdQHisqrXVduGIZ/l6//eA9kbTa7q5/3L7AqdZKmoZElIiIiPmexWLh7Ui/+cckg+ld2dV+1N49//LqFs1/7jeFPzeDez1cxY0MGxiGztct25VBYVkFsaCD92lXtBn/F8E6EBwWwNbOQXzdkNOvrOZZF27NZuSeXN2Zvq/J6WpzoznDND2bZrjUA1n0N/xwFO+b6OjK/tm1/IZszzK3ENmcUUOp0HeMeR/fNylSuemdxo+xoUFC5pjysFjPl4UF2YkIDAdh94MjZ8hKni4rKL8T8Lilf8RGs+h9YrHDBOxAa5+uIpBVTUi4iIiItxrmD2/Pd7aP5/aFTeOaC/pzaN5HQQBv7C8r4fNlern9/KVe/u4TU3BIA5mw215OP7RmP1Vp1O6LwIDtXjegM0GTJb05ROTe8v5Rpa9PqdL/syj3UDxSVe5OzFstqg7H3wvW/QEw3yE+F98+GFR/6OjK/NW1tuvd8hdtgY3pBgx7vnfk7mLcli48W1b0nw+HqUr4O0CnG3GVhTzVJuWeWPMBqISTQ1uDYmk3mBvjhbvP8uIegyyjfxiOtnpJyERERaXESIoK4ZFgn/nXV8Sx/eCIfXj+ca0Z2ITDAytzN+zn1pbnc9dlKvl9tlgCPO6R0/VDXjkrBEWBl5Z5cFm0/sky4rMLF5oyCeifsny3dw68bMnhzTt3K0A9UJuVgzpr7hfZD4Y9zYeDlgAHTH4byIl9H5Zd+qvwSx24zv0hasze3QY/nSYgPTfbr62Cjt9rNbHuS8upmyg8tXbf4yx7e5UWV68hLoOt4GHOXryOSNkBJuYiIiLRojgAbo3vE8ejZffnpT2MY2jmawrIKvlqeyt6cEiwWGNOj+qQ8PtzBxcd3BOCfc7ZVuc3tNpjyzu9Memkuf526lgpX3bu0z9+aBUB6Xt3Khg9Nyhdu85OkHMARBme/CtEpUJwNS/7t64j8zp4DxaxNzcdqgYsqx+bqvXn1frzCsgpyis3kd1NGAdv3F2IYBk/9uIEXftlU5y+cCj0z5Y66zZRXm5QX++F68h/vg/0bISwRzn/LrBQRaWJKykVERMRvdIsP47M/nsj7153AHSd3Z1yveO47tbd3XWt1bhzbFZvVwtzN+1m3L997/QeLdrG4ssnWR4t3c9OHyygur/2e3KVOl7dJ1/7Csjol9Ycm5Yt3ZLe4RnRHZQswy9kBfntFs+V15JnNHp4S690xYE1q/ZPy1JySKpd/WpvOt6v28a+523l15laW7apbY776lq/vPlByxG1+13l91Sew8sPKdeT/hrAEX0ckbYSSchEREfErNquFk3rGc9ekXrx37QncPK7bUY/vGBPCmQOSAXh73k4AUnNLeGbaRgDOHdQOR4CVXzdkctnbi8kqrN12bMt25VBWuQe6y22QVVh+jHscdGhSnlPsZFNGw9YUN7sBl1TOlmfBknd8HY1f8ZSuT+6fRP8OZnPCLZmF9W72dvha7u9W7eOZnzZ6L781t25LKw4m5bVLpDt6kvLsI7+c8aukPHc3/HCPef6k+yFlrG/jkTbFp0n53LlzOeuss2jXrh0Wi4WpU6dWuf2rr75i0qRJxMbGYrFYWLlypU/iFBEREf9200lm4v7TunRmp1m445NVFJe7GNYlmhcvHsTHfxhOVIidVXtyOf+NBezIOvbsr6d03SMt78iZwpp4Gr0F2c2PYn6zrtyjymz5y5otr6X0vFKW784F4NS+SSRFBBEX5sDlNlifln/0O9dgb46ZlJ/QJQarBTamF7Avr5S4MLN6ZPqGDLbvP3YzwbwSJ4ZhkO9dU167mfLOsSGVcZTgOqziw2+2Q3O74ZvbzK3/OpxwcGyLNBOfJuVFRUUMHDiQ119/vcbbR48ezTPPPNPMkYmIiEhr0ic5gpN7J+A24OudNlan5hMYYOXpCwZgtVoY2jmGL28eSceYYHYfKOa8N37jt8OS7sMdfntGfu3XledUJuUn9zbLY/1qXbmHZsvr7Od1Zun60M7RJEYEYbFYGFA5W76mnuvK91aWrw/oEMkJKTHe6x8+qy8T+iRgGPDv+TuO+hg/rUnj+L9P55J/LfJWfNRmSzSAxIggAm1WKtzGEV9M5VfOukcG1+6xfGbpO7BjDgQEw3lvah25NDufJuWTJ0/m73//O+edd161t1911VU8/PDDTJgwoZkjExERkdbm3lN70TUulG7hBjeO6cLUW0bRLT7Me3u3+DC+unkUAztEklvsZMp/fued+TuqbZSVU1TuXQc8tHM0AGm1bPZmGIa3fP30/mZZ/eIdB/xrXTlotrwevKXr/ZK81/Vrbybl9W325knKO0QHc9bAdgAM6RTFWQOS+cOYrgB8uWwvny7ZzdbMgiPG2Y6sIu79YjVOl8HvOw94l29E1DIpt1ktdIgOBo5s9pbf0mfK09fAVzfCtAfMyxMehdijL4cRaQot/GuruisrK6Os7OBasPx8sxTI6XTidDp9FVab4Hl/9T5LbWnMSH1p7Eh9dI8L5vtbTmD69OlMHJ+C3W4/YgxFBVn58Lrj+ds365m6Ko3/+349a/fm8PjZxxFkPzh7Nm9zBoYBPRJC6dcunGW7ckjNKa7VmCworaC8sincyJQogu1W8kqcbEnPo2t8aOO+6KZ23PkEzH0OS84OXIvfxj3iVl9H1OTq+/cnu6jc2xjwlF5x3vsfl2T+zNfsza3X37TdB8wvQ5IiAhnfM55Qu5WR3WKoqKhgcIdwBnaIZNXePO7/cg1glqUP7BDJoA6RDOwYyQvTt1JYVsGADhFkFZSzr/LLpSBb7V9jh+ggtmcVsXN/AcM6RXqvzykyP5OHBdpazt9rw8Cycy7WRa9h3T7Le7X7uPNwDbkWmjBO/dvVttTl59zqkvKnnnqKxx577Ijrf/nlF0JCQnwQUdszffp0X4cgfkZjRupLY0fq61hjZ1wwWLpY+Ganla9XprF0yz5u6OUiygGGAe9ssgJW2lkLyN2XD9hYsXE7P7q2HvO5s0oBArBbDebNnE5kgI0Sp4Wp0+fSO8rPZsuBjuETGJLzNs45L/Dr/na4bA5fh9Qs6vr3Z0GGBbdho2OoweqFs1hdeX1eOUAAWzIL+Pq7H3HUsXJ6Z6YNsLBj7VLKtoMFWLjn4O0XJUGc28rOAgu7i8wvheZvzWb+1oNLJsICDC5IOIAlEf67xQxgybyZWGu5tbg73/x9mLlkLaEZq73Xb9llXr9rywZ+zF9ftxfWyCxGBe1zfqdb5o9ElewGwMBCatQJbEucTK6jK/w0rVli0b9dbUNx8ZHbBNak1SXlDz74IHfddZf3cn5+Ph07dmTSpElERET4MLLWz+l0mrMPEydit7fQMiVpUTRmpL40dqS+6jJ2zgDO3ZbNnz5dzZ4iJ69uDuG1Swcya1MWa3J2YLXAbWePYG9OCd/sWoM1LJbTTx92zBhW7smFFb8THx7M6aeP5cusZaRvyaZjr/6cPrRD47zQ5uSehPHmrwTl7GBy2Drc4//q64iaVH3//nzx/jIgm4tP7MHpJ3Wtcttrm+eQWVBGpwEnepdD1EZBqZPiheZs76VnTSKshr3FL/HE7nKzOaOQlXtyWbknj5V788guKue1SwcyslssAJfV+tkPSvttJ/OnbcYR3Y7TTx/gvf791N8hN5fRJwzh1L6J9XjkRlKcTcB/z8SSvQUAwx6Ce9CVuE/4I4lRnWmuyPRvV9viqdiujVaXlDscDhyOI7+htdvtGvzNRO+11JXGjNSXxo7UV23Hzkm9k/j2tghu/GApG9MLuOI/S70dpp86vz/DusZjVJYkZxaU1eox88vM0vXYMAd2u5320aFANukFTj8dz3aY9Hf49ApsC1/FNuBCSOrv66CaXF3+/uQVO1m43RwnZwxsf8T9BnSI5NcNmWzIKGJE99rvjZ2RZa4njw6xEx0WXIuYYVBnB4M6x9b6OWojJT4cgL25JVVem6fRW0xYkO/GtmHAj3dB9hYIiYURN2M5/npsITH4qp2b/u1qG+ryM9Y+5SIiIiJH0Sk2hC9vHsnp/ZO8Cfm9p/bikmGdAEiODALMRm/VNYU7nGc7tJhQc8uq9lGV98+t/ZZqLU6fM6HP2WC44NvbwVXh64halF83ZFDhNuiVGE7XQ5oLeniavdW1A7tnj3LPXuG+0smzV3kNjd58uk/58vdh0w9gtcNVU83mhCExx7ybSHPy6Ux5YWEhW7ceXHu1Y8cOVq5cSUxMDJ06deLAgQPs3r2bffv2AbBp0yYAkpKSSEpKqvYxRURERBpbqCOA1y8fwufL9gJw0SFl5gkRZoVeeYWbnGKnN9muSc5hSXm7KHOGc18d9jlvkU5/DrbPgX0r4NdHYOL/gVXzPwA/rTW3QjutX/WfX73boqXWLSk/tPO6L3m+FMgpdpJf6iQiyEzCfb5PedZWmPagef6UhyF5wNGPF/ERn/6lXLp0KYMHD2bw4MEA3HXXXQwePJiHH34YgG+//ZbBgwdzxhlnAHDppZcyePBg3nzzTZ/FLCIiIm2TxWLh4uM7cvHxHbFYDnbAcgTYiK1MsNNrsS3agZqS8tza73PeIoUnwalPmOcXvgYfXwzFB3wbUwtQWFbB3C37AZjcv/qk3DNTvnV/IUVlFazfl8+Zr87j1/UZR33sg0m5b2fKwxwBtK8cx8t35QBQ6nRRVmEu1YgM8UFS7nabVRvOYkgZCyfe1vwxiNSST5PycePGYRjGEaf33nsPgGuuuaba2x999FFfhi0iIiJSRVJlCXt6/rFnuw8vX28X6UnKS2pV/t6iDbkKznkDAoJg63R4qR98dBEsfB0y1pvre9uYWRszKa9wkxIXSq/E8GqPSQgPIikiCMOAdfvyeXH6Ztam5vPv+duP+th7csxycV/PlAOM7RkHwOxN5hcQntJ1qwXCAn1QnLv8fdi9AOyhcM7rqtqQFk2jU0RERKSBDl1XfiyemXLP7HpipAOLBcoq3N7b/NrgK+D66RDbA5xFsOUX+Pkh+OeJ8EJv+OqPsOqTNjOLPu2Q0vVDKywO17+yhP3ndenM3GjOkK/YnUt55WxzdTwz5R19PFMOcFJPs0Hd3M1mUp53yHpya233VmssBekw/RHz/Ml/hahOzfv8InWkpFxERESkgRIjzKQ8ow5JeXRlUu4IsBEfZq5L9/sSdo/kAXDr73DTfHNtebdTICAYCtNh9Sfw9R/h5UGw8QdfR9qkSp0uZm3KBGByDevJPQZUlrC/t2Anlf0EKatwH3Wd+d4WNFM+qnssAVYL27OK2J1d7Nv15D/eC2V50G4IDP9j8z+/SB21ui3RRERERJpbQ2bKwVxXnllQRmpuiXfG1O9ZrebWaEn9YdQd4CyFPYth+yzY+CNkbYJPLoeRd8Apj4Ct9Xws/XTJbj5dsofichfF5S7aRwXTv/3Rf679Kn/ung7/0SF2coqd/L7jQLV7l3+5bC8FlVuOtW8BSXl4kJ2hnaNZvOMAczZnsjmjEPDBLP7GH2DDt2CxwdmvgNVXG5+J1J5mykVEREQaKKlyXXh6ft0bvQG0q9wWbZ8/b4t2LPYg6HoSTHgUbv7tYOOtBa/A+2eZJcetQIXLzd9/2MDy3blsTC8A4NzB7Y5aug5USdoTwh3cdFI3AJbsrFrmbxgGr83cwt2frwLg8uGdCPHFmu1qnNQrHoCPFu/mo8W7ALh1fPfmC6A0H364xzw/6g7zCyERP6CkXERERKSBkirL14/Vfb2swkVhmTm7GRvq8F7vafaW1ozboqXnlXLfF6vYtr+w2Z7Ty2Y3O7Vf/F8IDDcbcr05Brb82vyxNLLVqXkUlFYQERTA21OO57/XncCfTul5zPvFhTm8HcwvO6ETJ3aLBWDpzgO4K2fPK1xuHvp6Lc//shmAm07qxt/P6ddEr6TuxlWuK9+YXoDbgFP7JnpfR7OY8TgU7IOYrnDS/c33vCINpKRcREREpIG83dePkZTnFJnrbG1WCxHBB2c3fbEt2vsLd/LZ0r08+u26ZnvOIxx3DvxxDiT2g6JM+OgC+OI6KDj6VmAt2W9bsgAY2S2OicclMrZnPIEBtfvI/adTenBK7wSuGdmF45IjCA20kV9awaaMAorKKrjxg2X87/fdWC3w+Dl9eWBy7+ZvonYUfZLDSQg3v2yy2yw8OLlP8z35pmmw5N/m+TP/AXbfl/SL1JaSchEREZEG8qwpL6jcY7om2UVlAESHBFYpZ/Yk5anNWL6+M6sIgPlbs5p1hv4Isd3Mbu3DbwaLFdZ+Ca8PgzVf+C6mBpi/1UzKR/WIq/N9Lx7WkXeuGUZ0aCABNitDKteS/7QmjcveXsTMjZkE2a28eeVQppzYpTHDbhQWi4VJfRMBuHZUCl3iQpvniTM3wJfXAwYM+4O5TELEjygpFxEREWmgUEcAYyqTsGve/Z09B4qrPa66Jm9QtzXlpU5Xo+xnvjPbjNEw4KvlqQ1+vAYJDIHJT8MfZkHyICjNM5Osz6bA6s8hbTU4W/56++LyCpbvzgFgTPe6J+WHG9YlBoBXZm5l9d48YkID+fgPI5jU9+id3H3pgcl9+PeU47n/tN5N/2SGAftWwv8uhfJC6DIGTnuq6Z9XpJG1jK4QIiIiIn7utcuGcPG/FrIpo4Cr//M7T57fnxO6xFQpL66uyRscnCnfX1hGeYW7xnLnb1am8tBXaxjfO4HXLh9S71gNw2B3dpH38udL93DLuG7HbEbW5NoNght+hbnPw9znYP035gkAC0R3gfjeEN/LbOLV+4wWVaa8eMcBnC6D9lHBdI5teNfxE1JivOc7x4bw3rUnkNJcs8/1FOYIYMJxiU33BIYB+5YfHBs5O83ro7uYPQpsPtiCTaSBlJSLiIiINILIEDvvX3cCF/xzAduzirj0rUW0jwrm/CHtOW9we7rGhx1MysOqJuWxoYEEBlgpr3CTkV9Kx5iqCZ3LbfDMtI28NXc7AL+szzhq8n4s2UXlFJW7sFgg2G5jZ3YxS3fleGdmfcpmh/EPQs9TYcUHkLkR9m+AkhzI2WGeNv9kHhuWBGPugiFXm93dfcyznnxMj7hG+YJjcKcoBneKIiTQxsuXDiYuzHHsO7VGhgGpy2Dd17D+W8jbffC2gGDoMdHs6h/SAsavSD0oKRcRERFpJEmRQXz6xxG8OmMrP65JIzW3hFdnbuXVmVsZ3CkKR2USfXj5usVioX1UMDuyikjNLamSlOcWl3P7/1YwrzLhC7BaKK9wsyEtn4Edo+oV567K0vV2kcGM7BbL58v28vnSPS0jKfdoP8Q8gZmUFWXB/o2Vp02weRrk7YGf7oP5/4Cxd8PgqyDAd4mrdz15I5SuAzgCbHx9y6hGeSy/VXwAvr0dNn5/8Dp7qPmlzXHnmAl5YMuuHhA5FiXlIiIiIo2oQ3QIz1w4gMfO6cv09Rl8tXwvc7dksWJ3rveY6JDAI+6XHBnEjqyiKuvKN6UXcOMHS9mVXUyw3cZzFw3gi2V7mb1pPyt251SblLvdBnO37Ce/tIKzBiRXO2O7q7J0vVNMCOcP6cDny/by64bMhr/4pmKxQFi8eUoZY1536pPmTPq8FyA/FX64G+a9BGP+3GjJ+drUPBZuyyYhwkFyZDDJkUFEB9uqPfbHNWnefclHNuc2YK3ZtpnwzW3mz9dqN5PwvudCt1PMPgQirYSSchEREZEmEGS3cdbAdpw1sB2ZBaV8u3IfXy1PZXtWIWN7xh9xfOfYUBZsy+b1WVsZ1iWGdfvyueuzlRSXu+gQHcxbVx3Pce0i2JpZyOxN+1m5J7fK/Q3D4POle3lzzja2V3ZWzy0ur7ZLt2emvHNsCL2SwgFzvXtDSuKbXUAgDLseBl8Jy/9bmZzvrUzOX4TRlcl5PcvaDcPgxv8uZV8129yF2W28tWshyZEhJEcGkZ5fyvT15jZuE/okENtWy8wby875MOsp2DXfvBzTDS78j9lzQKQVUlIuIiIi0sQSwoO4YUxXbhjTFcMwqp29/uPYrszamMm2/UWc8co88ksrAHPW9bXLh3ibww3uZG6TteKwpHxjegH3fbkaAEeAlbIKN0/8sIHhKbHexNtjd2V3+E6xIUQF27FZLbjcBtlFZSRHtpzGabUS4IAT/mAm4Mvfh/kvmTOrP95jJurHnQtJ/SCxL8T3qXWSnpFfxr68UqwWs+Fael4paXmllFW4KXRaWLevgHX7CrzH26wWbj6pG7ef0r2JXmgbsGsBzHoSds4zL9sCYei1cMrD4AjzbWwiTUhJuYiIiEgzqqkBWJe4UL6+dSTXvrvEWwZ97agu/OX0PgTYDs5eD+oQBZiz3QeKyr3Jumf2u2diGF/dMorbPl7O7E37ueN/K/jmtlEE2Q+WXXvK1zvHhGK1WogNDSSzoIysgnL/S8o97EEw/I9m07cVHxxMzhf/8+AxFivE9jAT9MS+0Guy+d9qrNuXB0D3hDA+ufFEwJw9z8wr5osff6X7gGHsL3SSnldKYVkFFw7tQL/2kU3+Mlulwv3w071mIzcwS9WHTDGb+EV28G1sIs1ASbmIiIhIC5EcGcxnN53IG7O2MaBDJKf3Tz7imMgQO13jQtmeVcSqPbmM750AQEa+WWbdNS6MMEcAz180kNP+MZdNGQU8/dNGHj37YPJ5aPk6QFyYw0zKC8ua+iU2PXuQOXM+ZIrZqXvfcshYC+lroeQAZG0yT+u+gpn/B73PhLH3HlEavW5fPgD92h1MtC0WCzGhgXQIhZN7xWO3a/utOvM07cvZaZ6yt8Lvb5k/G4sNhlwFY+6GqE6+jlSk2SgpFxEREWlBIoLsPDC591GPGdQpiu1ZRazYneNNytMrk/KkSLM8Oy7MwXMXDeTad5fw3oKdnNQznvG9EygsqyC7cms2b1Ie7oA0c5/0ViPAAQMuMk9gJoMF6ZCxDjLWwO7FZgf3jd+bpwGXwCmPQGR7wGzyBnBcuwhfvYLWoXA/rPof7F50MBF3Fh15XGI/OOd1rRuXNklJuYiIiIifGdwxiq+Wp1ZZV55R2ZAsMeLgmunxvRK4dlQX3v1tJ/d+sYqf/jSWzALzuJjQQMKDzJneuMp901vFTHlNLBaISDZPPSaY1+3fBHOfgzWfw+pPzZn1ARfDkKtZV5mUqyS9Hgr3w7YZsOkn2PgDuJ2HHWCBiHYQnQLRXaD9YBg8xWzeJ9IGKSkXERER8TODOprN3lbtycXtNrBaLYfMlFft/H3/ab1ZuC2bjekF3PP5Ki4+viNgbofmEV/ZLTyroLw5wm854nvBBf+GETfDtIdgzyKzWdzy9/nYncC8gP4MPJALFfEQHAVBURAQisVd4ePAfcRZCoXpZsWB51Td5ZKcqvdrNwT6XwhxPc0kPLJjvbvii7RGSspFRERE/Ezv5HAcAVbySyvYkV1Et/gwb1J+6Ew5mFuzvXLZYM56dT5zNu8ntXIf9C6xB5PyOE9S3ppnyo+m/VC4bpq5FdeKD3Ctm0pnMulsnQHfz6hyqB04GzDWh5hJenAUhMRC8kDoNAKSBpjNyazV72fuV0pyYdUnsPoTOLADSnNrf9+k/tB9ormvePLAJgpQpHVQUi4iIiLiZ+w2Kz0Sw1ibms+2zEK6xYd5y9eTIo6cgeyZGM5fz+jD375Zx9bMQgA6xYZ6b48LbwPl68disUDKGEgZw/uRt/HbjKlcFb+NcTEHzOS0NBdK8qDMLGu3OIvBWQwF+8z775wHC18zz9sCzRnhmK7mfwOCzCTdYq08VZ632iCifeWxKRAab8bR3IoPQNZmsNkhJA72b4S1X8KG78zXeKiAIAhLhPBkCK/87+GXw5PNLytEpFaUlIuIiIj4oS6xoaxNzWdndhEFpU6Kyl3AwUZvh7tyRGfmbN7PrxsyAegco5nymqzMdDHDPZQhgy5n3Piq+447y0qZ/v2XTBwzDLuzEErzzLLtvb+bzeOyt4Cr3ExyszbX7YntoRDV0Zx5D4mBuF6QXDnzHhBsNq+zB5uJsedUXmg+f3E2YJgN7cA8X5IDubuhIA3Ki6C82DzeWVx5vgiKs8zbaxLfB4ZdD13GmEl3UJRvvjgQacWUlIuIiIj4oa5x5kz3jqwi73Zo4UEBhARW//HOYrHwzAUDOO3leewvKKNv+4NdxQ8m5f67pry8wo3VQpU93etrbeUe5X2r67xuteEMCDOblB26Jdqgy8z/ul2QtxcObIMD282k2OUEw33kqaIc8vaYHcnz9ppdyfdvPOTJvmvwa6m1yI5mTEX7ITjGLDvvdwF0GKYkXKSJKSkXERER8UNdDknK0/PMGe7qStcPFRvm4LvbRrMjq4jeSUcm5TnF5VS43I2S2DanCpeb8//5GwcKy5lx9ziCA21UuNy8OnMrI7vFMrxrbK0fq6isgh1Z5pZdfdvVo/O61QbRnc1Tt5Nrf7+KMsjdA/l7zXLywgzIXA/pa8x9vZ0l5jEVJXB4ozlHJITGmmXxcDCJdoSb+31HtIfAMAgMhcAQc0becz4oymzAFlQ5Hjwz7UrERZqNknIRERERP5RyaFJ+2B7lR5MUGXTEcTGhgVgt4DbgQFE5CcdI7luaGRszWZuaD8C6fXkc3yWGGRszeXnGFt6au51vbxtFj8TwWj3WhrR8DAMSIxzEhzuOfYfGEuCAuO7m6VhcFWZy7iw1y9kdYY0Xh5JxkWbnX1+DioiIiAhwMCnPyC9jR5bZvO3wzuu1ZbNaiAk1m73t98N15R8u2uU9vymjAMC7z3iJ08XNHy2nuLx225it22cm9/WaJW8utgBzFjwsvnETchHxCSXlIiIiIn4oKiSQ6BBzTfPi7QeAY5evH42/rivfkVXEvC1Z3sub0s2kfEPlfwG2Zhby16/XYniboNVsbWUy36+69eQiIk1ASbmIiIiIn/KsK1+1NxeAxFqUr9fEm5QXtPyZ8sz8Uj5YuJPU3BI+XmzOkgfZzY+1Gz1JeZo5433XxJ5YLfDVilQ+W7rnmI/tmSk/riXPlItIq6KkXERERMRPeUrYnS5zBrhhM+X+sVe5YRjc9OEy/vbNOsY+O4v3F5pJ+S3jzLXYmzMKyC91sjenBIApJ3bmnlN7AfDwN+u8yXp1yipcbMk0k/p+7TVTLiLNQ0m5iIiIiJ9KiQ2tcrlxytdbdlI+c2Mmy3fnYrWAy21QXuGmfVQwN4xJwWqB3GIn8zab5ezJkUFEhQRy09hujO8VT1mFm1s+Wk5BqbPax96SUYjTZRAZbKd9VHBzviwRacOUlIuIiIj4qZT4qkl5YmT9u4XHhbf8NeVut8FzP28C4Max3fjpT2O4bXx3Xr9iCCGBAd5y/q9XpALQJ9mc7bZaLbx48SDaRQaxI6uIB79aU+368nWV+5P3ax+BRV3IRaSZKCkXERER8VNdDpkpD7BaiAttQFLuBzPl369JY2N6AeGOAG46qSt9kiO459ReDOoYBUDvJHPbszmbM6tcBogODeTVy4cQYLXw/eq0Kh3bPTzbqrXozusi0uooKRcRERHxU5415QAJ4Q6s1vrP7nrWlO9voY3enC43L/7imSXvSlRI4BHH9Kzci9yzxt4zU+4xtHM0D0zuDcD/fb+BNXvzqtzumSnvq87rItKMlJSLiIiI+KlQRwCJEeYMd0M6r0PL3xLti2V72ZldTGxoINeOTqn2mENnxgH6JIcfccz1o1OYdFwi5S4317z7O6v25ALm+vQNaWaTN82Ui0hzUlIuIiIi4sc8JewNafIGEF+5pvxAURku97H3825OpU4XL/+6BYBbxncnzBFQ7XG9kg7OcDsCrFXK+z0sFgvPXTSQvu0iyC4q59K3FvHLunR2ZBVS4nQREmirUoEgItLUlJSLiIiI+LGulc3eEhuYlMeEmuXgbgNyilvWbPmHi3aRnl9KcmQQVwzvVONxnWJCvPuV90wMJ8BW/UfdyGA7n/7xRMb2jKfE6eLGD5Zx9X+WAGbJu60BywBEROpKSbmIiIiIH7v8hM6c1DOei47v0KDHsdus3sS8JTV7Kyyr4I3Z2wD40yk9CLLbajzWZrV415VXV7p+qDBHAO9cfTzXjOyC3WYhNdfc11zryUWkuVVf+yMiIiIifqF/h0jev+6ERnmsuLBADhSVk1VQDkmN8pAN9p/5OzhQVE5KXCgXDj32Fw/DU2JYvTePEV1jj3ms3Wbl0bP7cvO4bnywcBfLd+cw5cQujRC1iEjtKSkXEREREcBs9rY5o9CnM+VlFS4MA4LsNnKKynl77nYA7prYs8Zy9EPdNbEXE49LYliX6Fo/Z2JEEPec2qveMYuINISSchEREREBfL9XucttMPkf88grcfLWlOP5ZX06BWUV9EmO4Iz+ybV6jOBAGyekxDRxpCIijUdJuYiIiIgAB5Py/T5KytPzS9meVQTA5W8v8l5/76k9G7QHu4hIS+bTRm9z587lrLPOol27dlgsFqZOnVrldsMwePjhh0lOTiY4OJgJEyawZcsW3wQrIiIi0srFhVc2eivwTff13dnF3vNlFW7KKtwM7RzN+F4JPolHRKQ5+DQpLyoqYuDAgbz++uvV3v7ss8/yyiuv8Oabb7J48WJCQ0M59dRTKS0tbeZIRURERFo/X5ev78kxk/KR3WK5dlQXOsYE88hZx2GxaJZcRFovn5avT548mcmTJ1d7m2EY/OMf/+Cvf/0r55xzDgD//e9/SUxMZOrUqVx66aXNGaqIiIhIqxfv46R87wEzKe8SF8ojZ/XlkbP6+iQOEZHm1GLXlO/YsYP09HQmTJjgvS4yMpLhw4ezcOHCGpPysrIyysoO/kOSn58PgNPpxOl0Nm3QbZzn/dX7LLWlMSP1pbEj9aWxc3RRQeYe4FkFZT55j3Zlm+vJ20U4WuzPSGNI6ktjp22py8+5xSbl6enpACQmJla5PjEx0XtbdZ566ikee+yxI67/5ZdfCAkJadwgpVrTp0/3dQjiZzRmpL40dqS+NHaql1sGEMD+wlK+/+FHmru32urtNsBC1s6N/Fi4oXmfvI40hqS+NHbahuLi4mMfVKnFJuX19eCDD3LXXXd5L+fn59OxY0cmTZpERESEDyNr/ZxOJ9OnT2fixInY7XZfhyN+QGNG6ktjR+pLY+foyivcPLL8V9yGhVHjJxAdEtisz//k2jlAGWedPJIBHSKb9blrS2NI6ktjp23xVGzXRotNypOSkgDIyMggOfngvpQZGRkMGjSoxvs5HA4cDscR19vtdg3+ZqL3WupKY0bqS2NH6ktjp3p2O0QG28krcZJX6iYhsvneo1Kni4wCcwli14SIFv/z0RiS+tLYaRvq8jP2aff1o0lJSSEpKYkZM2Z4r8vPz2fx4sWceOKJPoxMREREpPWKCzNnx5t7r/K9OSUAhDkCiApRwiIibYdPZ8oLCwvZunWr9/KOHTtYuXIlMTExdOrUiTvvvJO///3v9OjRg5SUFP72t7/Rrl07zj33XN8FLSIiItKKxYU52La/iKzCuu9VvudAMQ9+tYZxveK5dlQKtjosSvdsh9YhOlhboIlIm+LTpHzp0qWMHz/ee9mzFvzqq6/mvffe47777qOoqIgbb7yR3NxcRo8ezbRp0wgKCvJVyCIiIiKtWlx45bZoBXWfKf95XTrzt2Yxf2sW369O4/mLBtA9IbxW9/Vsh9YxRo15RaRt8WlSPm7cOAzDqPF2i8XC448/zuOPP96MUYmIiIi0XQ3Zqzyn+ODs+so9uVz05kJm3TOOqFo0jNtTWb7eMVpJuYi0LS12TbmIiIiIND/PmvL6JOW5xea+vJcO60iPhDByip28PmvrMe5l2p1tzpR3igmu8/OKiPgzJeUiIiIi4hXnnSmv+5pyT1LeOymcv5zRB4D3F+xiz4Hq9+tduvMAf5u6lgNF5d415SpfF5G2Rkm5iIiIiHjFNaB8PbfETOSjQgI5qWc8o7rHUu5y8+L0zdUe/+SPG/hg0S7+8vUab+KupFxE2hol5SIiIiLi1ZBGbzlF5kx5VIgdi8XCA6eZs+VTV6ayNjWvyrGlThdrKq/7aW06+aUVgNl9XUSkLVFSLiIiIiJeB9eUlx+1IW918ko8Sbn5GP07RHLOoHYYBjwzbWOVY9em5uF0VX38uLBAQgJ92odYRKTZKSkXERERES9P+Xq5y+2dva4tT/f16BC797p7JvUi0GZl3pYs5m7e771+2a4cAMb3iqdbfCig0nURaZuUlIuIiIiIV5DdRrjDnK2uy7rysgoXxeUuAKKCD26B1jEmhKtO7AzA0z9txO02Z8c9SfmJ3WJ54eJBtI8K5txB7RvlNYiI+BMl5SIiIiJSRX3WlXtK160WCA+qWoJ+2/juhAcFsD4tn29WpWIYBst3m0n50M7RDOoYxW8PnMzVI7s0zgsQEfEjSspFREREpIpD15XXlmc7tMhgO1arpcpt0aGB3DyuGwDP/7yZLZmFZBWWE2iz0rddZCNFLSLin5SUi4iIiEgV9dkWzZOUe5q8He66USkkRwaRmlvCvZ+vAqBf+wiC7LYGRisi4t+UlIuIiIhIFfVJyj1N3qIOafJ2qCC7jT9P7AnAqr3mVmhDO0c3JEwRkVZBSbmIiIiIVJFQuaZ8e1ZRre+T55kpD64+KQe4YEgHeiWGey8rKRcRUVIuIiIiIocZ3SMOgF/XZ3gbuB3Lwe3Qqi9fB7BZLTwwubf38hAl5SIiSspFREREpKpBHaPolRhOWYWbb1em1uo+uZXJe2QN5ese43rF8+Dk3jx2dl8SwoMaHKuIiL9TUi4iIiIiVVgsFi4Z1hGAT5fuqdV9cmsxU+557D+e1E3bn4mIVFJSLiIiIiJHOG9wewJtVtam5rM2Ne+Yxx/svn70mXIREalKSbmIiIiIHCE6NJBJfRMB+HTJsWfLD92nXEREak9JuYiIiIhUy1PCPnVlKqVO11GPrU2jNxEROZKSchERERGp1qhucbSPCqagtIKf1qYd9VhPl3aVr4uI1I2SchERERGpltV6SMO3Y5Swa6ZcRKR+lJSLiIiISI0uHNoBiwUWbT/Azqyiao8pdboodbqBY2+JJiIiVSkpFxEREZEatYsK5qSe8QB8VsP2aJ4mbzarhXBHQLPFJiLSGigpFxEREZGjurSyhP3zZXupcLmPuD23xCxdjwq2Y7FYmjU2ERF/p6RcRERERI7q5N6JxIYGsr+gjFmb9h9xe06RmryJiNSXknIREREROarAACsXDO0AVN/wLc8zU64mbyIidaakXERERESO6eLjzRL2WZsyycwvrXJbTuWa8mjNlIuI1JmSchERERE5pu4JYRzfORqX2+CL5Xur3OZp9BYZrJlyEZG6UntMEREREamVS4Z1ZOmuHD5dsoeTesbz5I8biA9zEB1qJuNaUy4iUndKykVERESkVs4YkMxj361nV3YxZ746H8Mwr7fbzI7rKl8XEak7la+LiIiISK2EBAZw1sB2ABgGjO4eh91mwekys/NINXoTEakzzZSLiIiISK39eWIPXG43J/VM4PT+SXy/Oo07PlmBYUBsqJJyEZG6UlIuIiIiIrWWEB7EsxcO9F4+a2A73IbBzI2ZnNQz3oeRiYj4JyXlIiIiItIg5wxqzzmD2vs6DBERv6Q15SIiIiIiIiI+oqRcRERERERExEeUlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIj7T4pLygoIA777yTzp07ExwczMiRI1myZImvwxIRERERERFpsBaflN9www1Mnz6dDz74gDVr1jBp0iQmTJhAamqqr0MTERERERERaZAWnZSXlJTw5Zdf8uyzzzJ27Fi6d+/Oo48+Svfu3fnnP//p6/BEREREREREGiTA1wEcTUVFBS6Xi6CgoCrXBwcHM3/+/GrvU1ZWRllZmfdyfn4+AE6nE6fT2XTBivf91fsstaUxI/WlsSP1pbEjDaUxJPWlsdO21OXnbDEMw2jCWBps5MiRBAYG8vHHH5OYmMj//vc/rr76arp3786mTZuOOP7RRx/lscceO+L6jz/+mJCQkOYIWURERERERNqw4uJiLr/8cvLy8oiIiDjqsS0+Kd+2bRvXXXcdc+fOxWazMWTIEHr27MmyZcvYsGHDEcdXN1PesWNHsrKyjvlmSMM4nU6mT5/OxIkTsdvtvg5H/IDGjNSXxo7Ul8aONJTGkNSXxk7bkp+fT1xcXK2S8hZdvg7QrVs35syZQ1FREfn5+SQnJ3PJJZfQtWvXao93OBw4HI4jrrfb7Rr8zUTvtdSVxozUl8aO1JfGjjSUxpDUl8ZO21CXn3GLbvR2qNDQUJKTk8nJyeHnn3/mnHPO8XVIIiIiIiIiIg3S4mfKf/75ZwzDoFevXmzdupV7772X3r17c+211/o6NBEREREREZEGafEz5Xl5edx666307t2bKVOmMHr0aH7++WeVfIiIiIiIiIjfa/Ez5RdffDEXX3yxr8MQERERERERaXQtPilvKE9zec9+5dJ0nE4nxcXF5Ofnq5JBakVjRupLY0fqS2NHGkpjSOpLY6dt8eSftdnsrNUn5QUFBQB07NjRx5GIiIiIiIhIW1JQUEBkZORRj2nx+5Q3lNvtZt++fYSHh2OxWHwdTqvm2RN+z5492hNeakVjRupLY0fqS2NHGkpjSOpLY6dtMQyDgoIC2rVrh9V69FZurX6m3Gq10qFDB1+H0aZEREToD43UicaM1JfGjtSXxo40lMaQ1JfGTttxrBlyjxbffV1ERERERESktVJSLiIiIiIiIuIjSsql0TgcDh555BEcDoevQxE/oTEj9aWxI/WlsSMNpTEk9aWxIzVp9Y3eRERERERERFoqzZSLiIiIiIiI+IiSchEREREREREfUVIuIiIiIiIi4iNKykVERERERER8REl5G/DUU08xbNgwwsPDSUhI4Nxzz2XTpk1VjiktLeXWW28lNjaWsLAwLrjgAjIyMry3r1q1issuu4yOHTsSHBxMnz59ePnll2t8zt9++42AgAAGDRp0zPgMw+Dhhx8mOTmZ4OBgJkyYwJYtW6oc88QTTzBy5EhCQkKIioqq0+uXumkN4+Xss8+mU6dOBAUFkZyczFVXXcW+ffvq9kZInbWGsdOlSxcsFkuV09NPP123N0LqzN/HzuzZs48YN57TkiVL6v6GSJ35+xgCWL58ORMnTiQqKorY2FhuvPFGCgsL6/ZGSJ219LHz1VdfMWnSJGJjY7FYLKxcufKIY9566y3GjRtHREQEFouF3Nzc2r58aSGUlLcBc+bM4dZbb2XRokVMnz4dp9PJpEmTKCoq8h7z5z//me+++47PP/+cOXPmsG/fPs4//3zv7cuWLSMhIYEPP/yQdevW8Ze//IUHH3yQ11577Yjny83NZcqUKZxyyim1iu/ZZ5/llVde4c0332Tx4sWEhoZy6qmnUlpa6j2mvLyciy66iJtvvrkB74TURmsYL+PHj+ezzz5j06ZNfPnll2zbto0LL7ywAe+K1EZrGDsAjz/+OGlpad7T7bff5rinnAAAC1RJREFUXs93RGrL38fOyJEjq4yZtLQ0brjhBlJSUjj++OMb+O5Ibfj7GNq3bx8TJkyge/fuLF68mGnTprFu3Tquueaahr0xckwtfewUFRUxevRonnnmmRqPKS4u5rTTTuOhhx6qwyuXFsWQNiczM9MAjDlz5hiGYRi5ubmG3W43Pv/8c+8xGzZsMABj4cKFNT7OLbfcYowfP/6I6y+55BLjr3/9q/HII48YAwcOPGosbrfbSEpKMp577jnvdbm5uYbD4TD+97//HXH8u+++a0RGRh7jFUpj8ufx4vHNN98YFovFKC8vP+rjS+Pyx7HTuXNn46WXXqrlK5Sm4o9j51Dl5eVGfHy88fjjjx/1saXp+NsY+te//mUkJCQYLpfLe8zq1asNwNiyZUutXrM0jpY0dg61Y8cOAzBWrFhR4zGzZs0yACMnJ6fWjystg2bK26C8vDwAYmJiAPPbPafTyYQJE7zH9O7dm06dOrFw4cKjPo7nMTzeffddtm/fziOPPFKrWHbs2EF6enqV546MjGT48OFHfW5pPv4+Xg4cOMBHH33EyJEjsdvttXoeaRz+OnaefvppYmNjGTx4MM899xwVFRW1eg5pPP46djy+/fZbsrOzufbaa2v1HNL4/G0MlZWVERgYiNV68KN5cHAwAPPnz6/V80jjaEljR9qOAF8HIM3L7XZz5513MmrUKPr16wdAeno6gYGBR6zVTkxMJD09vdrHWbBgAZ9++ik//PCD97otW7bwwAMPMG/ePAICaje0PI+fmJhY6+eW5uPP4+X+++/ntddeo7i4mBEjRvD999/X6jmkcfjr2LnjjjsYMmQIMTExLFiwgAcffJC0tDRefPHFWj2PNJy/jp1DvfPOO5x66ql06NChVs8hjcsfx9DJJ5/MXXfdxXPPPcef/vQnioqKeOCBBwBIS0ur1fNIw7W0sSNth2bK25hbb72VtWvX8sknn9T7MdauXcs555zDI488wqRJkwBwuVxcfvnlPPbYY/Ts2bPa+3300UeEhYV5T/Pmzat3DNI8/Hm83HvvvaxYsYJffvkFm83GlClTMAyj3q9D6sZfx85dd93FuHHjGDBgADfddBMvvPACr776KmVlZfV+HVI3/jp2PPbu3cvPP//M9ddfX+/4pWH8cQz17duX999/nxdeeIGQkBCSkpJISUkhMTGxyuy5NC1/HDvSSvi6fl6az6233mp06NDB2L59e5XrZ8yYUe36k06dOhkvvvhilevWrVtnJCQkGA899FCV63NycgzAsNls3pPFYvFeN2PGDCM/P9/YsmWL91RcXGxs27at2vUxY8eONe64444jXoPWlDef1jBePPbs2WMAxoIFC+r+Rkidtaaxs3btWgMwNm7cWPc3QuqsNYydxx9/3IiPj1cPCx9pDWMoPT3dKCgoMAoLCw2r1Wp89tln9X9DpNZa4tg5lNaUt25KytsAt9tt3HrrrUa7du2MzZs3H3G7p4HFF1984b1u48aNRzSwWLt2rZGQkGDce++9RzyGy+Uy1qxZU+V08803G7169TLWrFljFBYW1hhbUlKS8fzzz3uvy8vLU6M3H2pN48Vj165dBmDMmjWrNm+B1FNrHDsffvihYbVajQMHDtTqPZD6aS1jx+12GykpKcbdd99d5/dAGqa1jKFDvfPOO0ZISIgSrCbWksfOoZSUt25KytuAm2++2YiMjDRmz55tpKWleU+HfgN30003GZ06dTJmzpxpLF261DjxxBONE0880Xv7mjVrjPj4eOPKK6+s8hiZmZk1Pm9tu0o+/fTTRlRUlPHNN98Yq1evNs455xwjJSXFKCkp8R6za9cuY8WKFcZjjz1mhIWFGStWrDBWrFhhFBQU1O9NkRr5+3hZtGiR8eqrrxorVqwwdu7cacyYMcMYOXKk0a1bN6O0tLT+b4wck7+PnQULFhgvvfSSsXLlSmPbtm3Ghx9+aMTHxxtTpkyp/5siteLvY8fj119/NQBjw4YNdX8TpEFawxh69dVXjWXLlhmbNm0yXnvtNSM4ONh4+eWX6/eGSK219LGTnZ1trFixwvjhhx8MwPjkk0+MFStWGGlpad5j0tLSjBUrVhhvv/22ARhz5841VqxYYWRnZ9fvTZFmp6S8DQCqPb377rveY0pKSoxbbrnFiI6ONkJCQozzzjuvyi/7I488Uu1jdO7cucbnre0fG7fbbfztb38zEhMTDYfDYZxyyinGpk2bqhxz9dVXV/v8mvlsfP4+XlavXm2MHz/eiImJMRwOh9GlSxfjpptuMvbu3Vuft0PqwN/HzrJly4zhw4cbkZGRRlBQkNGnTx/jySef1Jc5zcDfx47HZZddZowcObIuL10aSWsYQ1dddZURExNjBAYGGgMGDDD++9//1vVtkHpo6WPn3XffrfaxH3nkkWM+/6GvQVo2i2Go85GIiIiIiIiIL6ido4iIiIiIiIiPKCkXERERERER8REl5SIiIiIiIiI+oqRcRERERERExEeUlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIjygpFxERaeWuueYaLBYLFosFu91OYmIiEydO5D//+Q9ut7vWj/Pee+8RFRXVdIGKiIi0QUrKRURE2oDTTjuNtLQ0du7cyU8//cT48eP505/+xJlnnklFRYWvwxMREWmzlJSLiIi0AQ6Hg6SkJNq3b8+QIUN46KGH+Oabb/jpp5947733AHjxxRfp378/oaGhdOzYkVtuuYXCwkIAZs+ezbXXXkteXp531v3RRx8FoKysjHvuuYf27dsTGhrK8OHDmT17tm9eqIiIiJ9RUi4iItJGnXzyyQwcOJCvvvoKAKvVyiuvvMK6det4//33mTlzJvfddx8AI0eO5B//+AcRERGkpaWRlpbGPffcA8Btt93GwoUL+eSTT1i9ejUXXXQRp512Glu2bPHZaxMREfEXFsMwDF8HISIiIk3nmmuuITc3l6lTpx5x26WXXsrq1atZv379Ebd98cUX3HTTTWRlZQHmmvI777yT3Nxc7zG7d++ma9eu7N69m3bt2nmvnzBhAieccAJPPvlko78eERGR1iTA1wGIiIiI7xiGgcViAeDXX3/lqaeeYuPGjeTn51NRUUFpaSnFxcWEhIRUe/81a9bgcrno2bNnlevLysqIjY1t8vhFRET8nZJyERGRNmzDhg2kpKSwc+dOzjzzTG6++WaeeOIJYmJimD9/Ptdffz3l5eU1JuWFhYXYbDaWLVuGzWarcltYWFhzvAQRERG/pqRcRESkjZo5cyZr1qzhz3/+M8uWLcPtdvPCCy9gtZotZz777LMqxwcGBuJyuapcN3jwYFwuF5mZmYwZM6bZYhcREWktlJSLiIi0AWVlZaSnp+NyucjIyGDatGk89dRTnHnmmUyZMoW1a9fidDp59dVXOeuss/jtt9948803qzxGly5dKCwsZMaMGQwcOJCQkBB69uzJFVdcwZQpU3jhhRcYPHgw+/fvZ8aMGQwYMIAzzjjDR69YRETEP6j7uoiISBswbdo0kpOT6dKlC6eddhqzZs3ilVde4ZtvvsFmszFw4EBefPFFnnnmGfr168dHH33EU089VeUxRo4cyU033cQll1xCfHw8zz77LADvvvsuU6ZM4e6776ZXr16ce+65LFmyhE6dOvnipYqIiPgVdV8XERERERER8RHNlIuIiIiIiIj4iJJyERERERERER9RUi4iIiIiIiLiI0rKRURERERERHxESbmIiIiIiIiIjygpFxEREREREfERJeUiIiIiIiIiPqKkXERERERERMRHlJSLiIiIiIiI+IiSchEREREREREfUVIuIiIiIiIi4iP/D8qtFloq3E+YAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "stream = team.run_stream(task=\"Write a financial report on American airlines\")\n", - "await Console(stream)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb index c4d22fa6e..c513e8278 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/examples/literature-review.ipynb @@ -1,334 +1,334 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Literature Review\n", - "\n", - "A common task while exploring a new topic is to conduct a literature review. In this example we will explore how a multi-agent team can be configured to conduct a _simple_ literature review.\n", - "\n", - "- **Arxiv Search Agent**: Use the Arxiv API to search for papers related to a given topic and return results.\n", - "- **Google Search Agent**: Use the Google Search api to find papers related to a given topic and return results.\n", - "- **Report Agent**: Generate a report based on the information collected by the search and stock analysis agents.\n", - "\n", - "\n", - "First, let us import the necessary modules. " - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core.tools import FunctionTool\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining Tools \n", - "\n", - "Next, we will define the tools that the agents will use to perform their tasks. In this case we will define a simple function `search_arxiv` that will use the `arxiv` library to search for papers related to a given topic. \n", - "\n", - "Finally, we will wrap the functions into a `FunctionTool` class that will allow us to use it as a tool in the agents. \n", - "\n", - "Note: You will need to set the appropriate environment variables for tools as needed.\n", - "\n", - "Also install required libraries: \n", - "\n", - "```bash\n", - "!pip install arxiv\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def google_search(query: str, num_results: int = 2, max_chars: int = 500) -> list: # type: ignore[type-arg]\n", - " import os\n", - " import time\n", - "\n", - " import requests\n", - " from bs4 import BeautifulSoup\n", - " from dotenv import load_dotenv\n", - "\n", - " load_dotenv()\n", - "\n", - " api_key = os.getenv(\"GOOGLE_API_KEY\")\n", - " search_engine_id = os.getenv(\"GOOGLE_SEARCH_ENGINE_ID\")\n", - "\n", - " if not api_key or not search_engine_id:\n", - " raise ValueError(\"API key or Search Engine ID not found in environment variables\")\n", - "\n", - " url = \"https://www.googleapis.com/customsearch/v1\"\n", - " params = {\"key\": api_key, \"cx\": search_engine_id, \"q\": query, \"num\": num_results}\n", - "\n", - " response = requests.get(url, params=params) # type: ignore[arg-type]\n", - "\n", - " if response.status_code != 200:\n", - " print(response.json())\n", - " raise Exception(f\"Error in API request: {response.status_code}\")\n", - "\n", - " results = response.json().get(\"items\", [])\n", - "\n", - " def get_page_content(url: str) -> str:\n", - " try:\n", - " response = requests.get(url, timeout=10)\n", - " soup = BeautifulSoup(response.content, \"html.parser\")\n", - " text = soup.get_text(separator=\" \", strip=True)\n", - " words = text.split()\n", - " content = \"\"\n", - " for word in words:\n", - " if len(content) + len(word) + 1 > max_chars:\n", - " break\n", - " content += \" \" + word\n", - " return content.strip()\n", - " except Exception as e:\n", - " print(f\"Error fetching {url}: {str(e)}\")\n", - " return \"\"\n", - "\n", - " enriched_results = []\n", - " for item in results:\n", - " body = get_page_content(item[\"link\"])\n", - " enriched_results.append(\n", - " {\"title\": item[\"title\"], \"link\": item[\"link\"], \"snippet\": item[\"snippet\"], \"body\": body}\n", - " )\n", - " time.sleep(1) # Be respectful to the servers\n", - "\n", - " return enriched_results\n", - "\n", - "\n", - "def arxiv_search(query: str, max_results: int = 2) -> list: # type: ignore[type-arg]\n", - " \"\"\"\n", - " Search Arxiv for papers and return the results including abstracts.\n", - " \"\"\"\n", - " import arxiv\n", - "\n", - " client = arxiv.Client()\n", - " search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n", - "\n", - " results = []\n", - " for paper in client.results(search):\n", - " results.append(\n", - " {\n", - " \"title\": paper.title,\n", - " \"authors\": [author.name for author in paper.authors],\n", - " \"published\": paper.published.strftime(\"%Y-%m-%d\"),\n", - " \"abstract\": paper.summary,\n", - " \"pdf_url\": paper.pdf_url,\n", - " }\n", - " )\n", - "\n", - " # # Write results to a file\n", - " # with open('arxiv_search_results.json', 'w') as f:\n", - " # json.dump(results, f, indent=2)\n", - "\n", - " return results" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "google_search_tool = FunctionTool(\n", - " google_search, description=\"Search Google for information, returns results with a snippet and body content\"\n", - ")\n", - "arxiv_search_tool = FunctionTool(\n", - " arxiv_search, description=\"Search Arxiv for papers related to a given topic, including abstracts\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining Agents \n", - "\n", - "Next, we will define the agents that will perform the tasks. " - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "google_search_agent = AssistantAgent(\n", - " name=\"Google_Search_Agent\",\n", - " tools=[google_search_tool],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " description=\"An agent that can search Google for information, returns results with a snippet and body content\",\n", - " system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n", - ")\n", - "\n", - "arxiv_search_agent = AssistantAgent(\n", - " name=\"Arxiv_Search_Agent\",\n", - " tools=[arxiv_search_tool],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " description=\"An agent that can search Arxiv for papers related to a given topic, including abstracts\",\n", - " system_message=\"You are a helpful AI assistant. Solve tasks using your tools. Specifically, you can take into consideration the user's request and craft a search query that is most likely to return relevant academi papers.\",\n", - ")\n", - "\n", - "\n", - "report_agent = AssistantAgent(\n", - " name=\"Report_Agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " description=\"Generate a report based on a given topic\",\n", - " system_message=\"You are a helpful assistant. Your task is to synthesize data extracted into a high quality literature review including CORRECT references. You MUST write a final report that is formatted as a literature review with CORRECT references. Your response should end with the word 'TERMINATE'\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating the Team \n", - "\n", - "Finally, we will create a team of agents and configure them to perform the tasks." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "termination = TextMentionTermination(\"TERMINATE\")\n", - "team = RoundRobinGroupChat(\n", - " participants=[google_search_agent, arxiv_search_agent, report_agent], termination_condition=termination\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a literature review on no code tools for building multi agent ai systems\n", - "---------- Google_Search_Agent ----------\n", - "[FunctionCall(id='call_bNGwWFsfeTwDhtIpsI6GYISR', arguments='{\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}', name='google_search')]\n", - "[Prompt tokens: 123, Completion tokens: 29]\n", - "---------- Google_Search_Agent ----------\n", - "[FunctionExecutionResult(content='[{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', call_id='call_bNGwWFsfeTwDhtIpsI6GYISR')]\n", - "---------- Google_Search_Agent ----------\n", - "Tool calls:\n", - "google_search({\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}) = [{'title': 'Literature Review — AutoGen', 'link': 'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html', 'snippet': 'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\xa0...', 'body': 'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and'}, {'title': 'Vertex AI Agent Builder | Google Cloud', 'link': 'https://cloud.google.com/products/agent-builder', 'snippet': 'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\xa0...', 'body': 'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents'}, {'title': 'AI tools I have found useful w/ research. What do you guys think ...', 'link': 'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/', 'snippet': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I've missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", 'body': 'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.'}]\n", - "---------- Arxiv_Search_Agent ----------\n", - "[FunctionCall(id='call_ZdmwQGTO03X23GeRn6fwDN8q', arguments='{\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}', name='arxiv_search')]\n", - "[Prompt tokens: 719, Completion tokens: 28]\n", - "---------- Arxiv_Search_Agent ----------\n", - "[FunctionExecutionResult(content='[{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', call_id='call_ZdmwQGTO03X23GeRn6fwDN8q')]\n", - "---------- Arxiv_Search_Agent ----------\n", - "Tool calls:\n", - "arxiv_search({\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}) = [{'title': 'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems', 'authors': ['Victor Dibia', 'Jingya Chen', 'Gagan Bansal', 'Suff Syed', 'Adam Fourney', 'Erkang Zhu', 'Chi Wang', 'Saleema Amershi'], 'published': '2024-08-09', 'abstract': 'Multi-agent systems, where multiple agents (generative AI models + tools)\\ncollaborate, are emerging as an effective pattern for solving long-running,\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\nremains challenging for most developers. To address this challenge, we present\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\ndebugging of workflows, and a gallery of reusable agent components. We\\nhighlight four design principles for no-code multi-agent developer tools and\\ncontribute an open-source implementation at\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio', 'pdf_url': 'http://arxiv.org/pdf/2408.15247v1'}, {'title': 'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration', 'authors': ['Cory Hymel', 'Sida Peng', 'Kevin Xu', 'Charath Ranganathan'], 'published': '2024-10-29', 'abstract': 'In recent years, with the rapid advancement of large language models (LLMs),\\nmulti-agent systems have become increasingly more capable of practical\\napplication. At the same time, the software development industry has had a\\nnumber of new AI-powered tools developed that improve the software development\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\nfrequently been examined in real-world applications, we have seen comparatively\\nfew real-world examples of publicly available commercial tools working together\\nin a multi-agent system with measurable improvements. In this experiment we\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\nsharing business requirements from PRD AI, we improve the code suggestion\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\n24.5% -- demonstrating a real-world example of commercially-available AI\\nsystems working together with improved outcomes.', 'pdf_url': 'http://arxiv.org/pdf/2410.22129v1'}, {'title': 'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML', 'authors': ['Patara Trirat', 'Wonyong Jeong', 'Sung Ju Hwang'], 'published': '2024-10-03', 'abstract': \"Automated machine learning (AutoML) accelerates AI development by automating\\ntasks in the development pipeline, such as optimal model search and\\nhyperparameter tuning. Existing AutoML systems often require technical\\nexpertise to set up complex tools, which is in general time-consuming and\\nrequires a large amount of human effort. Therefore, recent works have started\\nexploiting large language models (LLM) to lessen such burden and increase the\\nusability of AutoML frameworks via a natural language interface, allowing\\nnon-expert users to build their data-driven solutions. These methods, however,\\nare usually designed only for a particular process in the AI development\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\nAutoML-Agent takes user's task descriptions, facilitates collaboration between\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\nplanning strategy to enhance exploration to search for more optimal plans. We\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\nnetwork design) each of which is solved by a specialized agent we build via\\nprompting executing in parallel, making the search process more efficient.\\nMoreover, we propose a multi-stage verification to verify executed results and\\nguide the code generation LLM in implementing successful solutions. Extensive\\nexperiments on seven downstream tasks using fourteen datasets show that\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\nprocess, yielding systems with good performance throughout the diverse domains.\", 'pdf_url': 'http://arxiv.org/pdf/2410.02958v1'}, {'title': 'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges', 'authors': ['Sivan Schwartz', 'Avi Yaeli', 'Segev Shlomov'], 'published': '2023-08-10', 'abstract': 'Trust in AI agents has been extensively studied in the literature, resulting\\nin significant advancements in our understanding of this field. However, the\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\nresearch. In the field of process automation, a new generation of AI-based\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\nthe process of building automation has become more accessible to business users\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\nAI agents discussed in existing literature, and identifies specific\\nconsiderations and challenges relevant to this new generation of automation\\nagents. We also evaluate how nascent products in this category address these\\nconsiderations. Finally, we highlight several challenges that the research\\ncommunity should address in this evolving landscape.', 'pdf_url': 'http://arxiv.org/pdf/2308.05391v1'}, {'title': 'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications', 'authors': ['Xin Pang', 'Zhucong Li', 'Jiaxiang Chen', 'Yuan Cheng', 'Yinghui Xu', 'Yuan Qi'], 'published': '2024-04-07', 'abstract': 'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\nIDE) with full-cycle capabilities that accelerates developers to build\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\nthe Integrity of its development tools and the Visuality of its components,\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\nintegrates a comprehensive development toolkit ranging from a prototyping\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\ndeployment tools all within a web-based graphical user interface. On the other\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\ntoken consumption and API calls when debugging a specific sophisticated\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\nincluding an online demo, open-source code, and a screencast video, is now\\npublicly accessible.', 'pdf_url': 'http://arxiv.org/pdf/2404.04902v1'}]\n", - "---------- Report_Agent ----------\n", - "## Literature Review on No-Code Tools for Building Multi-Agent AI Systems\n", - "\n", - "### Introduction\n", - "\n", - "The emergence of multi-agent systems (MAS) has transformed various domains by enabling collaboration among multiple agents—ranging from generative AI models to orchestrated tools—to solve complex, long-term tasks. However, the traditional development of these systems often requires substantial technical expertise, making it inaccessible for non-developers. The introduction of no-code platforms aims to shift this paradigm, allowing users without formal programming knowledge to design, debug, and deploy multi-agent systems. This review synthesizes current literature concerning no-code tools developed for building multi-agent AI systems, highlighting recent advancements and emerging trends.\n", - "\n", - "### No-Code Development Tools\n", - "\n", - "#### AutoGen Studio\n", - "\n", - "One of the prominent no-code tools is **AutoGen Studio**, developed by Dibia et al. (2024). This tool provides a web interface and a declarative specification method utilizing JSON, enabling rapid prototyping, debugging, and evaluating multi-agent workflows. The drag-and-drop capabilities streamline the design process, making complex interactions between agents more manageable. The framework operates on four primary design principles that cater specifically to no-code development, contributing to an accessible pathway for users to harness multi-agent frameworks for various applications (Dibia et al., 2024).\n", - "\n", - "#### AI2Apps Visual IDE\n", - "\n", - "Another notable tool is **AI2Apps**, described by Pang et al. (2024). It serves as a Visual Integrated Development Environment that incorporates a comprehensive set of tools from prototyping to deployment. The platform's user-friendly interface allows for the visualization of code through drag-and-drop components, facilitating smoother integration of different agents. An extension system enhances the platform's capabilities, showcasing the potential for customization and scalability in agent application development. The reported efficiency improvements in token consumption and API calls indicate substantial benefits in user-centric design (Pang et al., 2024).\n", - "\n", - "### Performance Enhancements in Multi-Agent Configurations\n", - "\n", - "Hymel et al. (2024) examined the collaborative performance of commercially available AI tools, demonstrating a measurable improvement when integrating multiple agents in a shared configuration. Their experiments showcased how cooperation between tools like Crowdbotics PRD AI and GitHub Copilot significantly improved task success rates, illustrating the practical benefits of employing no-code tools in multi-agent environments. This synergy reflects the critical need for frameworks that inherently support such integrations, especially through no-code mechanisms, to enhance user experience and productivity (Hymel et al., 2024).\n", - "\n", - "### Trust and Usability in AI Agents\n", - "\n", - "The concept of trust in AI, particularly in LLM-based automation agents, has gained attention. Schwartz et al. (2023) addressed the challenges and considerations unique to this new generation of agents, highlighting how no-code platforms ease access and usability for non-technical users. The paper emphasizes the need for further research into the trust factors integral to effective multi-agent systems, advocating for a user-centric approach in the design and evaluation of these no-code tools (Schwartz et al., 2023).\n", - "\n", - "### Full-Pipeline AutoML with Multi-Agent Systems\n", - "\n", - "The **AutoML-Agent** framework proposed by Trirat et al. (2024) brings another layer of innovation to the no-code landscape. This framework enhances existing automated machine learning processes by using multiple specialized agents that collaboratively manage the full AI development pipeline from data retrieval to model deployment. The novelty lies in its retrieval-augmented planning strategy, which allows for efficient task decomposition and parallel execution, optimizing the overall development experience for non-experts (Trirat et al., 2024).\n", - "\n", - "### Conclusion\n", - "\n", - "The literature presents a growing array of no-code tools designed to democratize the development of multi-agent systems. Innovations such as AutoGen Studio, AI2Apps, and collaborative frameworks like AutoML-Agent highlight a trend towards user-centric, efficient design that encourages participation beyond technical boundaries. Future research should continue to explore aspects of trust, usability, and integration to further refine these tools and expand their applicability across various domains.\n", - "\n", - "### References\n", - "\n", - "- Dibia, V., Chen, J., Bansal, G., Syed, S., Fourney, A., Zhu, E., Wang, C., & Amershi, S. (2024). AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems. *arXiv:2408.15247*.\n", - "- Hymel, C., Peng, S., Xu, K., & Ranganathan, C. (2024). Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration. *arXiv:2410.22129*.\n", - "- Pang, X., Li, Z., Chen, J., Cheng, Y., Xu, Y., & Qi, Y. (2024). AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications. *arXiv:2404.04902*.\n", - "- Schwartz, S., Yaeli, A., & Shlomov, S. (2023). Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges. *arXiv:2308.05391*.\n", - "- Trirat, P., Jeong, W., & Hwang, S. J. (2024). AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML. *arXiv:2410.02958*.\n", - "\n", - "TERMINATE\n", - "[Prompt tokens: 2381, Completion tokens: 1090]\n", - "---------- Summary ----------\n", - "Number of messages: 8\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 3223\n", - "Total completion tokens: 1147\n", - "Duration: 17.06 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Literature Review\n", + "\n", + "A common task while exploring a new topic is to conduct a literature review. In this example we will explore how a multi-agent team can be configured to conduct a _simple_ literature review.\n", + "\n", + "- **Arxiv Search Agent**: Use the Arxiv API to search for papers related to a given topic and return results.\n", + "- **Google Search Agent**: Use the Google Search api to find papers related to a given topic and return results.\n", + "- **Report Agent**: Generate a report based on the information collected by the search and stock analysis agents.\n", + "\n", + "\n", + "First, let us import the necessary modules. " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core.tools import FunctionTool\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining Tools \n", + "\n", + "Next, we will define the tools that the agents will use to perform their tasks. In this case we will define a simple function `search_arxiv` that will use the `arxiv` library to search for papers related to a given topic. \n", + "\n", + "Finally, we will wrap the functions into a `FunctionTool` class that will allow us to use it as a tool in the agents. \n", + "\n", + "Note: You will need to set the appropriate environment variables for tools as needed.\n", + "\n", + "Also install required libraries: \n", + "\n", + "```bash\n", + "!pip install arxiv\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def google_search(query: str, num_results: int = 2, max_chars: int = 500) -> list: # type: ignore[type-arg]\n", + " import os\n", + " import time\n", + "\n", + " import requests\n", + " from bs4 import BeautifulSoup\n", + " from dotenv import load_dotenv\n", + "\n", + " load_dotenv()\n", + "\n", + " api_key = os.getenv(\"GOOGLE_API_KEY\")\n", + " search_engine_id = os.getenv(\"GOOGLE_SEARCH_ENGINE_ID\")\n", + "\n", + " if not api_key or not search_engine_id:\n", + " raise ValueError(\"API key or Search Engine ID not found in environment variables\")\n", + "\n", + " url = \"https://www.googleapis.com/customsearch/v1\"\n", + " params = {\"key\": api_key, \"cx\": search_engine_id, \"q\": query, \"num\": num_results}\n", + "\n", + " response = requests.get(url, params=params) # type: ignore[arg-type]\n", + "\n", + " if response.status_code != 200:\n", + " print(response.json())\n", + " raise Exception(f\"Error in API request: {response.status_code}\")\n", + "\n", + " results = response.json().get(\"items\", [])\n", + "\n", + " def get_page_content(url: str) -> str:\n", + " try:\n", + " response = requests.get(url, timeout=10)\n", + " soup = BeautifulSoup(response.content, \"html.parser\")\n", + " text = soup.get_text(separator=\" \", strip=True)\n", + " words = text.split()\n", + " content = \"\"\n", + " for word in words:\n", + " if len(content) + len(word) + 1 > max_chars:\n", + " break\n", + " content += \" \" + word\n", + " return content.strip()\n", + " except Exception as e:\n", + " print(f\"Error fetching {url}: {str(e)}\")\n", + " return \"\"\n", + "\n", + " enriched_results = []\n", + " for item in results:\n", + " body = get_page_content(item[\"link\"])\n", + " enriched_results.append(\n", + " {\"title\": item[\"title\"], \"link\": item[\"link\"], \"snippet\": item[\"snippet\"], \"body\": body}\n", + " )\n", + " time.sleep(1) # Be respectful to the servers\n", + "\n", + " return enriched_results\n", + "\n", + "\n", + "def arxiv_search(query: str, max_results: int = 2) -> list: # type: ignore[type-arg]\n", + " \"\"\"\n", + " Search Arxiv for papers and return the results including abstracts.\n", + " \"\"\"\n", + " import arxiv\n", + "\n", + " client = arxiv.Client()\n", + " search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n", + "\n", + " results = []\n", + " for paper in client.results(search):\n", + " results.append(\n", + " {\n", + " \"title\": paper.title,\n", + " \"authors\": [author.name for author in paper.authors],\n", + " \"published\": paper.published.strftime(\"%Y-%m-%d\"),\n", + " \"abstract\": paper.summary,\n", + " \"pdf_url\": paper.pdf_url,\n", + " }\n", + " )\n", + "\n", + " # # Write results to a file\n", + " # with open('arxiv_search_results.json', 'w') as f:\n", + " # json.dump(results, f, indent=2)\n", + "\n", + " return results" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "google_search_tool = FunctionTool(\n", + " google_search, description=\"Search Google for information, returns results with a snippet and body content\"\n", + ")\n", + "arxiv_search_tool = FunctionTool(\n", + " arxiv_search, description=\"Search Arxiv for papers related to a given topic, including abstracts\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining Agents \n", + "\n", + "Next, we will define the agents that will perform the tasks. " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "google_search_agent = AssistantAgent(\n", + " name=\"Google_Search_Agent\",\n", + " tools=[google_search_tool],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " description=\"An agent that can search Google for information, returns results with a snippet and body content\",\n", + " system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n", + ")\n", + "\n", + "arxiv_search_agent = AssistantAgent(\n", + " name=\"Arxiv_Search_Agent\",\n", + " tools=[arxiv_search_tool],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " description=\"An agent that can search Arxiv for papers related to a given topic, including abstracts\",\n", + " system_message=\"You are a helpful AI assistant. Solve tasks using your tools. Specifically, you can take into consideration the user's request and craft a search query that is most likely to return relevant academi papers.\",\n", + ")\n", + "\n", + "\n", + "report_agent = AssistantAgent(\n", + " name=\"Report_Agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " description=\"Generate a report based on a given topic\",\n", + " system_message=\"You are a helpful assistant. Your task is to synthesize data extracted into a high quality literature review including CORRECT references. You MUST write a final report that is formatted as a literature review with CORRECT references. Your response should end with the word 'TERMINATE'\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating the Team \n", + "\n", + "Finally, we will create a team of agents and configure them to perform the tasks." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "termination = TextMentionTermination(\"TERMINATE\")\n", + "team = RoundRobinGroupChat(\n", + " participants=[google_search_agent, arxiv_search_agent, report_agent], termination_condition=termination\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a literature review on no code tools for building multi agent ai systems\n", + "---------- Google_Search_Agent ----------\n", + "[FunctionCall(id='call_bNGwWFsfeTwDhtIpsI6GYISR', arguments='{\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}', name='google_search')]\n", + "[Prompt tokens: 123, Completion tokens: 29]\n", + "---------- Google_Search_Agent ----------\n", + "[FunctionExecutionResult(content='[{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', call_id='call_bNGwWFsfeTwDhtIpsI6GYISR')]\n", + "---------- Google_Search_Agent ----------\n", + "Tool calls:\n", + "google_search({\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}) = [{'title': 'Literature Review — AutoGen', 'link': 'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html', 'snippet': 'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\xa0...', 'body': 'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and'}, {'title': 'Vertex AI Agent Builder | Google Cloud', 'link': 'https://cloud.google.com/products/agent-builder', 'snippet': 'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\xa0...', 'body': 'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents'}, {'title': 'AI tools I have found useful w/ research. What do you guys think ...', 'link': 'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/', 'snippet': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I've missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", 'body': 'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.'}]\n", + "---------- Arxiv_Search_Agent ----------\n", + "[FunctionCall(id='call_ZdmwQGTO03X23GeRn6fwDN8q', arguments='{\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}', name='arxiv_search')]\n", + "[Prompt tokens: 719, Completion tokens: 28]\n", + "---------- Arxiv_Search_Agent ----------\n", + "[FunctionExecutionResult(content='[{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', call_id='call_ZdmwQGTO03X23GeRn6fwDN8q')]\n", + "---------- Arxiv_Search_Agent ----------\n", + "Tool calls:\n", + "arxiv_search({\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}) = [{'title': 'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems', 'authors': ['Victor Dibia', 'Jingya Chen', 'Gagan Bansal', 'Suff Syed', 'Adam Fourney', 'Erkang Zhu', 'Chi Wang', 'Saleema Amershi'], 'published': '2024-08-09', 'abstract': 'Multi-agent systems, where multiple agents (generative AI models + tools)\\ncollaborate, are emerging as an effective pattern for solving long-running,\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\nremains challenging for most developers. To address this challenge, we present\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\ndebugging of workflows, and a gallery of reusable agent components. We\\nhighlight four design principles for no-code multi-agent developer tools and\\ncontribute an open-source implementation at\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio', 'pdf_url': 'http://arxiv.org/pdf/2408.15247v1'}, {'title': 'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration', 'authors': ['Cory Hymel', 'Sida Peng', 'Kevin Xu', 'Charath Ranganathan'], 'published': '2024-10-29', 'abstract': 'In recent years, with the rapid advancement of large language models (LLMs),\\nmulti-agent systems have become increasingly more capable of practical\\napplication. At the same time, the software development industry has had a\\nnumber of new AI-powered tools developed that improve the software development\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\nfrequently been examined in real-world applications, we have seen comparatively\\nfew real-world examples of publicly available commercial tools working together\\nin a multi-agent system with measurable improvements. In this experiment we\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\nsharing business requirements from PRD AI, we improve the code suggestion\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\n24.5% -- demonstrating a real-world example of commercially-available AI\\nsystems working together with improved outcomes.', 'pdf_url': 'http://arxiv.org/pdf/2410.22129v1'}, {'title': 'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML', 'authors': ['Patara Trirat', 'Wonyong Jeong', 'Sung Ju Hwang'], 'published': '2024-10-03', 'abstract': \"Automated machine learning (AutoML) accelerates AI development by automating\\ntasks in the development pipeline, such as optimal model search and\\nhyperparameter tuning. Existing AutoML systems often require technical\\nexpertise to set up complex tools, which is in general time-consuming and\\nrequires a large amount of human effort. Therefore, recent works have started\\nexploiting large language models (LLM) to lessen such burden and increase the\\nusability of AutoML frameworks via a natural language interface, allowing\\nnon-expert users to build their data-driven solutions. These methods, however,\\nare usually designed only for a particular process in the AI development\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\nAutoML-Agent takes user's task descriptions, facilitates collaboration between\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\nplanning strategy to enhance exploration to search for more optimal plans. We\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\nnetwork design) each of which is solved by a specialized agent we build via\\nprompting executing in parallel, making the search process more efficient.\\nMoreover, we propose a multi-stage verification to verify executed results and\\nguide the code generation LLM in implementing successful solutions. Extensive\\nexperiments on seven downstream tasks using fourteen datasets show that\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\nprocess, yielding systems with good performance throughout the diverse domains.\", 'pdf_url': 'http://arxiv.org/pdf/2410.02958v1'}, {'title': 'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges', 'authors': ['Sivan Schwartz', 'Avi Yaeli', 'Segev Shlomov'], 'published': '2023-08-10', 'abstract': 'Trust in AI agents has been extensively studied in the literature, resulting\\nin significant advancements in our understanding of this field. However, the\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\nresearch. In the field of process automation, a new generation of AI-based\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\nthe process of building automation has become more accessible to business users\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\nAI agents discussed in existing literature, and identifies specific\\nconsiderations and challenges relevant to this new generation of automation\\nagents. We also evaluate how nascent products in this category address these\\nconsiderations. Finally, we highlight several challenges that the research\\ncommunity should address in this evolving landscape.', 'pdf_url': 'http://arxiv.org/pdf/2308.05391v1'}, {'title': 'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications', 'authors': ['Xin Pang', 'Zhucong Li', 'Jiaxiang Chen', 'Yuan Cheng', 'Yinghui Xu', 'Yuan Qi'], 'published': '2024-04-07', 'abstract': 'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\nIDE) with full-cycle capabilities that accelerates developers to build\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\nthe Integrity of its development tools and the Visuality of its components,\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\nintegrates a comprehensive development toolkit ranging from a prototyping\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\ndeployment tools all within a web-based graphical user interface. On the other\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\ntoken consumption and API calls when debugging a specific sophisticated\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\nincluding an online demo, open-source code, and a screencast video, is now\\npublicly accessible.', 'pdf_url': 'http://arxiv.org/pdf/2404.04902v1'}]\n", + "---------- Report_Agent ----------\n", + "## Literature Review on No-Code Tools for Building Multi-Agent AI Systems\n", + "\n", + "### Introduction\n", + "\n", + "The emergence of multi-agent systems (MAS) has transformed various domains by enabling collaboration among multiple agents—ranging from generative AI models to orchestrated tools—to solve complex, long-term tasks. However, the traditional development of these systems often requires substantial technical expertise, making it inaccessible for non-developers. The introduction of no-code platforms aims to shift this paradigm, allowing users without formal programming knowledge to design, debug, and deploy multi-agent systems. This review synthesizes current literature concerning no-code tools developed for building multi-agent AI systems, highlighting recent advancements and emerging trends.\n", + "\n", + "### No-Code Development Tools\n", + "\n", + "#### AutoGen Studio\n", + "\n", + "One of the prominent no-code tools is **AutoGen Studio**, developed by Dibia et al. (2024). This tool provides a web interface and a declarative specification method utilizing JSON, enabling rapid prototyping, debugging, and evaluating multi-agent workflows. The drag-and-drop capabilities streamline the design process, making complex interactions between agents more manageable. The framework operates on four primary design principles that cater specifically to no-code development, contributing to an accessible pathway for users to harness multi-agent frameworks for various applications (Dibia et al., 2024).\n", + "\n", + "#### AI2Apps Visual IDE\n", + "\n", + "Another notable tool is **AI2Apps**, described by Pang et al. (2024). It serves as a Visual Integrated Development Environment that incorporates a comprehensive set of tools from prototyping to deployment. The platform's user-friendly interface allows for the visualization of code through drag-and-drop components, facilitating smoother integration of different agents. An extension system enhances the platform's capabilities, showcasing the potential for customization and scalability in agent application development. The reported efficiency improvements in token consumption and API calls indicate substantial benefits in user-centric design (Pang et al., 2024).\n", + "\n", + "### Performance Enhancements in Multi-Agent Configurations\n", + "\n", + "Hymel et al. (2024) examined the collaborative performance of commercially available AI tools, demonstrating a measurable improvement when integrating multiple agents in a shared configuration. Their experiments showcased how cooperation between tools like Crowdbotics PRD AI and GitHub Copilot significantly improved task success rates, illustrating the practical benefits of employing no-code tools in multi-agent environments. This synergy reflects the critical need for frameworks that inherently support such integrations, especially through no-code mechanisms, to enhance user experience and productivity (Hymel et al., 2024).\n", + "\n", + "### Trust and Usability in AI Agents\n", + "\n", + "The concept of trust in AI, particularly in LLM-based automation agents, has gained attention. Schwartz et al. (2023) addressed the challenges and considerations unique to this new generation of agents, highlighting how no-code platforms ease access and usability for non-technical users. The paper emphasizes the need for further research into the trust factors integral to effective multi-agent systems, advocating for a user-centric approach in the design and evaluation of these no-code tools (Schwartz et al., 2023).\n", + "\n", + "### Full-Pipeline AutoML with Multi-Agent Systems\n", + "\n", + "The **AutoML-Agent** framework proposed by Trirat et al. (2024) brings another layer of innovation to the no-code landscape. This framework enhances existing automated machine learning processes by using multiple specialized agents that collaboratively manage the full AI development pipeline from data retrieval to model deployment. The novelty lies in its retrieval-augmented planning strategy, which allows for efficient task decomposition and parallel execution, optimizing the overall development experience for non-experts (Trirat et al., 2024).\n", + "\n", + "### Conclusion\n", + "\n", + "The literature presents a growing array of no-code tools designed to democratize the development of multi-agent systems. Innovations such as AutoGen Studio, AI2Apps, and collaborative frameworks like AutoML-Agent highlight a trend towards user-centric, efficient design that encourages participation beyond technical boundaries. Future research should continue to explore aspects of trust, usability, and integration to further refine these tools and expand their applicability across various domains.\n", + "\n", + "### References\n", + "\n", + "- Dibia, V., Chen, J., Bansal, G., Syed, S., Fourney, A., Zhu, E., Wang, C., & Amershi, S. (2024). AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems. *arXiv:2408.15247*.\n", + "- Hymel, C., Peng, S., Xu, K., & Ranganathan, C. (2024). Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration. *arXiv:2410.22129*.\n", + "- Pang, X., Li, Z., Chen, J., Cheng, Y., Xu, Y., & Qi, Y. (2024). AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications. *arXiv:2404.04902*.\n", + "- Schwartz, S., Yaeli, A., & Shlomov, S. (2023). Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges. *arXiv:2308.05391*.\n", + "- Trirat, P., Jeong, W., & Hwang, S. J. (2024). AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML. *arXiv:2410.02958*.\n", + "\n", + "TERMINATE\n", + "[Prompt tokens: 2381, Completion tokens: 1090]\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 3223\n", + "Total completion tokens: 1147\n", + "Duration: 17.06 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a literature review on no code tools for building multi agent ai systems', type='TextMessage'), ToolCallRequestEvent(source='Google_Search_Agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=29), content=[FunctionCall(id='call_bNGwWFsfeTwDhtIpsI6GYISR', arguments='{\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}', name='google_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='Google_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', call_id='call_bNGwWFsfeTwDhtIpsI6GYISR')], type='ToolCallExecutionEvent'), TextMessage(source='Google_Search_Agent', models_usage=None, content='Tool calls:\\ngoogle_search({\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}) = [{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', type='TextMessage'), ToolCallRequestEvent(source='Arxiv_Search_Agent', models_usage=RequestUsage(prompt_tokens=719, completion_tokens=28), content=[FunctionCall(id='call_ZdmwQGTO03X23GeRn6fwDN8q', arguments='{\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}', name='arxiv_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='Arxiv_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', call_id='call_ZdmwQGTO03X23GeRn6fwDN8q')], type='ToolCallExecutionEvent'), TextMessage(source='Arxiv_Search_Agent', models_usage=None, content='Tool calls:\\narxiv_search({\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}) = [{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', type='TextMessage'), TextMessage(source='Report_Agent', models_usage=RequestUsage(prompt_tokens=2381, completion_tokens=1090), content=\"## Literature Review on No-Code Tools for Building Multi-Agent AI Systems\\n\\n### Introduction\\n\\nThe emergence of multi-agent systems (MAS) has transformed various domains by enabling collaboration among multiple agents—ranging from generative AI models to orchestrated tools—to solve complex, long-term tasks. However, the traditional development of these systems often requires substantial technical expertise, making it inaccessible for non-developers. The introduction of no-code platforms aims to shift this paradigm, allowing users without formal programming knowledge to design, debug, and deploy multi-agent systems. This review synthesizes current literature concerning no-code tools developed for building multi-agent AI systems, highlighting recent advancements and emerging trends.\\n\\n### No-Code Development Tools\\n\\n#### AutoGen Studio\\n\\nOne of the prominent no-code tools is **AutoGen Studio**, developed by Dibia et al. (2024). This tool provides a web interface and a declarative specification method utilizing JSON, enabling rapid prototyping, debugging, and evaluating multi-agent workflows. The drag-and-drop capabilities streamline the design process, making complex interactions between agents more manageable. The framework operates on four primary design principles that cater specifically to no-code development, contributing to an accessible pathway for users to harness multi-agent frameworks for various applications (Dibia et al., 2024).\\n\\n#### AI2Apps Visual IDE\\n\\nAnother notable tool is **AI2Apps**, described by Pang et al. (2024). It serves as a Visual Integrated Development Environment that incorporates a comprehensive set of tools from prototyping to deployment. The platform's user-friendly interface allows for the visualization of code through drag-and-drop components, facilitating smoother integration of different agents. An extension system enhances the platform's capabilities, showcasing the potential for customization and scalability in agent application development. The reported efficiency improvements in token consumption and API calls indicate substantial benefits in user-centric design (Pang et al., 2024).\\n\\n### Performance Enhancements in Multi-Agent Configurations\\n\\nHymel et al. (2024) examined the collaborative performance of commercially available AI tools, demonstrating a measurable improvement when integrating multiple agents in a shared configuration. Their experiments showcased how cooperation between tools like Crowdbotics PRD AI and GitHub Copilot significantly improved task success rates, illustrating the practical benefits of employing no-code tools in multi-agent environments. This synergy reflects the critical need for frameworks that inherently support such integrations, especially through no-code mechanisms, to enhance user experience and productivity (Hymel et al., 2024).\\n\\n### Trust and Usability in AI Agents\\n\\nThe concept of trust in AI, particularly in LLM-based automation agents, has gained attention. Schwartz et al. (2023) addressed the challenges and considerations unique to this new generation of agents, highlighting how no-code platforms ease access and usability for non-technical users. The paper emphasizes the need for further research into the trust factors integral to effective multi-agent systems, advocating for a user-centric approach in the design and evaluation of these no-code tools (Schwartz et al., 2023).\\n\\n### Full-Pipeline AutoML with Multi-Agent Systems\\n\\nThe **AutoML-Agent** framework proposed by Trirat et al. (2024) brings another layer of innovation to the no-code landscape. This framework enhances existing automated machine learning processes by using multiple specialized agents that collaboratively manage the full AI development pipeline from data retrieval to model deployment. The novelty lies in its retrieval-augmented planning strategy, which allows for efficient task decomposition and parallel execution, optimizing the overall development experience for non-experts (Trirat et al., 2024).\\n\\n### Conclusion\\n\\nThe literature presents a growing array of no-code tools designed to democratize the development of multi-agent systems. Innovations such as AutoGen Studio, AI2Apps, and collaborative frameworks like AutoML-Agent highlight a trend towards user-centric, efficient design that encourages participation beyond technical boundaries. Future research should continue to explore aspects of trust, usability, and integration to further refine these tools and expand their applicability across various domains.\\n\\n### References\\n\\n- Dibia, V., Chen, J., Bansal, G., Syed, S., Fourney, A., Zhu, E., Wang, C., & Amershi, S. (2024). AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems. *arXiv:2408.15247*.\\n- Hymel, C., Peng, S., Xu, K., & Ranganathan, C. (2024). Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration. *arXiv:2410.22129*.\\n- Pang, X., Li, Z., Chen, J., Cheng, Y., Xu, Y., & Qi, Y. (2024). AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications. *arXiv:2404.04902*.\\n- Schwartz, S., Yaeli, A., & Shlomov, S. (2023). Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges. *arXiv:2308.05391*.\\n- Trirat, P., Jeong, W., & Hwang, S. J. (2024). AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML. *arXiv:2410.02958*.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await Console(\n", + " team.run_stream(\n", + " task=\"Write a literature review on no code tools for building multi agent ai systems\",\n", + " )\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a literature review on no code tools for building multi agent ai systems', type='TextMessage'), ToolCallMessage(source='Google_Search_Agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=29), content=[FunctionCall(id='call_bNGwWFsfeTwDhtIpsI6GYISR', arguments='{\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}', name='google_search')], type='ToolCallMessage'), ToolCallResultMessage(source='Google_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', call_id='call_bNGwWFsfeTwDhtIpsI6GYISR')], type='ToolCallResultMessage'), TextMessage(source='Google_Search_Agent', models_usage=None, content='Tool calls:\\ngoogle_search({\"query\":\"no code tools for building multi agent AI systems literature review\",\"num_results\":3}) = [{\\'title\\': \\'Literature Review — AutoGen\\', \\'link\\': \\'https://microsoft.github.io/autogen/dev//user-guide/agentchat-user-guide/examples/literature-review.html\\', \\'snippet\\': \\'run( task=\"Write a literature review on no code tools for building multi agent ai systems\", ) ... ### Conclusion No-code tools for building multi-agent AI systems\\\\xa0...\\', \\'body\\': \\'Literature Review — AutoGen Skip to main content Back to top Ctrl + K AutoGen 0.4 is a work in progress. Go here to find the 0.2 documentation. User Guide Packages API Reference Twitter GitHub PyPI User Guide Packages API Reference Twitter GitHub PyPI AgentChat Installation Quickstart Tutorial Models Messages Agents Teams Selector Group Chat Swarm Termination Custom Agents Managing State Examples Travel Planning Company Research Literature Review Core Quick Start Core Concepts Agent and\\'}, {\\'title\\': \\'Vertex AI Agent Builder | Google Cloud\\', \\'link\\': \\'https://cloud.google.com/products/agent-builder\\', \\'snippet\\': \\'Build and deploy enterprise ready generative AI experiences · Product highlights · Easily build no code conversational AI agents · Ground in Google search and/or\\\\xa0...\\', \\'body\\': \\'Vertex AI Agent Builder | Google Cloud Page Contents Vertex AI Agent Builder is making generative AI more reliable for the enterprise. Read the blog. Vertex AI Agent Builder Build and deploy enterprise ready generative AI experiences Create AI agents and applications using natural language or a code-first approach. Easily ground your agents or apps in enterprise data with a range of options. Vertex AI Agent Builder gathers all the surfaces and tools that developers need to build their AI agents\\'}, {\\'title\\': \\'AI tools I have found useful w/ research. What do you guys think ...\\', \\'link\\': \\'https://www.reddit.com/r/PhD/comments/14d6g09/ai_tools_i_have_found_useful_w_research_what_do/\\', \\'snippet\\': \"Jun 19, 2023 ... Need help deciding on the best ones, and to identify ones I\\'ve missed: ASSISTANTS (chatbots, multi-purpose) Chat with Open Large Language Models.\", \\'body\\': \\'Reddit - Dive into anything Skip to main content Open menu Open navigation Go to Reddit Home r/PhD A chip A close button Get app Get the Reddit app Log In Log in to Reddit Expand user menu Open settings menu Log In / Sign Up Advertise on Reddit Shop Collectible Avatars Get the Reddit app Scan this QR code to download the app now Or check it out in the app stores Go to PhD r/PhD r/PhD A subreddit dedicated to PhDs. Members Online • [deleted] ADMIN MOD AI tools I have found useful w/ research.\\'}]', type='TextMessage'), ToolCallMessage(source='Arxiv_Search_Agent', models_usage=RequestUsage(prompt_tokens=719, completion_tokens=28), content=[FunctionCall(id='call_ZdmwQGTO03X23GeRn6fwDN8q', arguments='{\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}', name='arxiv_search')], type='ToolCallMessage'), ToolCallResultMessage(source='Arxiv_Search_Agent', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', call_id='call_ZdmwQGTO03X23GeRn6fwDN8q')], type='ToolCallResultMessage'), TextMessage(source='Arxiv_Search_Agent', models_usage=None, content='Tool calls:\\narxiv_search({\"query\":\"no code tools for building multi agent AI systems\",\"max_results\":5}) = [{\\'title\\': \\'AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems\\', \\'authors\\': [\\'Victor Dibia\\', \\'Jingya Chen\\', \\'Gagan Bansal\\', \\'Suff Syed\\', \\'Adam Fourney\\', \\'Erkang Zhu\\', \\'Chi Wang\\', \\'Saleema Amershi\\'], \\'published\\': \\'2024-08-09\\', \\'abstract\\': \\'Multi-agent systems, where multiple agents (generative AI models + tools)\\\\ncollaborate, are emerging as an effective pattern for solving long-running,\\\\ncomplex tasks in numerous domains. However, specifying their parameters (such\\\\nas models, tools, and orchestration mechanisms etc,.) and debugging them\\\\nremains challenging for most developers. To address this challenge, we present\\\\nAUTOGEN STUDIO, a no-code developer tool for rapidly prototyping, debugging,\\\\nand evaluating multi-agent workflows built upon the AUTOGEN framework. AUTOGEN\\\\nSTUDIO offers a web interface and a Python API for representing LLM-enabled\\\\nagents using a declarative (JSON-based) specification. It provides an intuitive\\\\ndrag-and-drop UI for agent workflow specification, interactive evaluation and\\\\ndebugging of workflows, and a gallery of reusable agent components. We\\\\nhighlight four design principles for no-code multi-agent developer tools and\\\\ncontribute an open-source implementation at\\\\nhttps://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2408.15247v1\\'}, {\\'title\\': \\'Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration\\', \\'authors\\': [\\'Cory Hymel\\', \\'Sida Peng\\', \\'Kevin Xu\\', \\'Charath Ranganathan\\'], \\'published\\': \\'2024-10-29\\', \\'abstract\\': \\'In recent years, with the rapid advancement of large language models (LLMs),\\\\nmulti-agent systems have become increasingly more capable of practical\\\\napplication. At the same time, the software development industry has had a\\\\nnumber of new AI-powered tools developed that improve the software development\\\\nlifecycle (SDLC). Academically, much attention has been paid to the role of\\\\nmulti-agent systems to the SDLC. And, while single-agent systems have\\\\nfrequently been examined in real-world applications, we have seen comparatively\\\\nfew real-world examples of publicly available commercial tools working together\\\\nin a multi-agent system with measurable improvements. In this experiment we\\\\ntest context sharing between Crowdbotics PRD AI, a tool for generating software\\\\nrequirements using AI, and GitHub Copilot, an AI pair-programming tool. By\\\\nsharing business requirements from PRD AI, we improve the code suggestion\\\\ncapabilities of GitHub Copilot by 13.8% and developer task success rate by\\\\n24.5% -- demonstrating a real-world example of commercially-available AI\\\\nsystems working together with improved outcomes.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.22129v1\\'}, {\\'title\\': \\'AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML\\', \\'authors\\': [\\'Patara Trirat\\', \\'Wonyong Jeong\\', \\'Sung Ju Hwang\\'], \\'published\\': \\'2024-10-03\\', \\'abstract\\': \"Automated machine learning (AutoML) accelerates AI development by automating\\\\ntasks in the development pipeline, such as optimal model search and\\\\nhyperparameter tuning. Existing AutoML systems often require technical\\\\nexpertise to set up complex tools, which is in general time-consuming and\\\\nrequires a large amount of human effort. Therefore, recent works have started\\\\nexploiting large language models (LLM) to lessen such burden and increase the\\\\nusability of AutoML frameworks via a natural language interface, allowing\\\\nnon-expert users to build their data-driven solutions. These methods, however,\\\\nare usually designed only for a particular process in the AI development\\\\npipeline and do not efficiently use the inherent capacity of the LLMs. This\\\\npaper proposes AutoML-Agent, a novel multi-agent framework tailored for\\\\nfull-pipeline AutoML, i.e., from data retrieval to model deployment.\\\\nAutoML-Agent takes user\\'s task descriptions, facilitates collaboration between\\\\nspecialized LLM agents, and delivers deployment-ready models. Unlike existing\\\\nwork, instead of devising a single plan, we introduce a retrieval-augmented\\\\nplanning strategy to enhance exploration to search for more optimal plans. We\\\\nalso decompose each plan into sub-tasks (e.g., data preprocessing and neural\\\\nnetwork design) each of which is solved by a specialized agent we build via\\\\nprompting executing in parallel, making the search process more efficient.\\\\nMoreover, we propose a multi-stage verification to verify executed results and\\\\nguide the code generation LLM in implementing successful solutions. Extensive\\\\nexperiments on seven downstream tasks using fourteen datasets show that\\\\nAutoML-Agent achieves a higher success rate in automating the full AutoML\\\\nprocess, yielding systems with good performance throughout the diverse domains.\", \\'pdf_url\\': \\'http://arxiv.org/pdf/2410.02958v1\\'}, {\\'title\\': \\'Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges\\', \\'authors\\': [\\'Sivan Schwartz\\', \\'Avi Yaeli\\', \\'Segev Shlomov\\'], \\'published\\': \\'2023-08-10\\', \\'abstract\\': \\'Trust in AI agents has been extensively studied in the literature, resulting\\\\nin significant advancements in our understanding of this field. However, the\\\\nrapid advancements in Large Language Models (LLMs) and the emergence of\\\\nLLM-based AI agent frameworks pose new challenges and opportunities for further\\\\nresearch. In the field of process automation, a new generation of AI-based\\\\nagents has emerged, enabling the execution of complex tasks. At the same time,\\\\nthe process of building automation has become more accessible to business users\\\\nvia user-friendly no-code tools and training mechanisms. This paper explores\\\\nthese new challenges and opportunities, analyzes the main aspects of trust in\\\\nAI agents discussed in existing literature, and identifies specific\\\\nconsiderations and challenges relevant to this new generation of automation\\\\nagents. We also evaluate how nascent products in this category address these\\\\nconsiderations. Finally, we highlight several challenges that the research\\\\ncommunity should address in this evolving landscape.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2308.05391v1\\'}, {\\'title\\': \\'AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications\\', \\'authors\\': [\\'Xin Pang\\', \\'Zhucong Li\\', \\'Jiaxiang Chen\\', \\'Yuan Cheng\\', \\'Yinghui Xu\\', \\'Yuan Qi\\'], \\'published\\': \\'2024-04-07\\', \\'abstract\\': \\'We introduce AI2Apps, a Visual Integrated Development Environment (Visual\\\\nIDE) with full-cycle capabilities that accelerates developers to build\\\\ndeployable LLM-based AI agent Applications. This Visual IDE prioritizes both\\\\nthe Integrity of its development tools and the Visuality of its components,\\\\nensuring a smooth and efficient building experience.On one hand, AI2Apps\\\\nintegrates a comprehensive development toolkit ranging from a prototyping\\\\ncanvas and AI-assisted code editor to agent debugger, management system, and\\\\ndeployment tools all within a web-based graphical user interface. On the other\\\\nhand, AI2Apps visualizes reusable front-end and back-end code as intuitive\\\\ndrag-and-drop components. Furthermore, a plugin system named AI2Apps Extension\\\\n(AAE) is designed for Extensibility, showcasing how a new plugin with 20\\\\ncomponents enables web agent to mimic human-like browsing behavior. Our case\\\\nstudy demonstrates substantial efficiency improvements, with AI2Apps reducing\\\\ntoken consumption and API calls when debugging a specific sophisticated\\\\nmultimodal agent by approximately 90% and 80%, respectively. The AI2Apps,\\\\nincluding an online demo, open-source code, and a screencast video, is now\\\\npublicly accessible.\\', \\'pdf_url\\': \\'http://arxiv.org/pdf/2404.04902v1\\'}]', type='TextMessage'), TextMessage(source='Report_Agent', models_usage=RequestUsage(prompt_tokens=2381, completion_tokens=1090), content=\"## Literature Review on No-Code Tools for Building Multi-Agent AI Systems\\n\\n### Introduction\\n\\nThe emergence of multi-agent systems (MAS) has transformed various domains by enabling collaboration among multiple agents—ranging from generative AI models to orchestrated tools—to solve complex, long-term tasks. However, the traditional development of these systems often requires substantial technical expertise, making it inaccessible for non-developers. The introduction of no-code platforms aims to shift this paradigm, allowing users without formal programming knowledge to design, debug, and deploy multi-agent systems. This review synthesizes current literature concerning no-code tools developed for building multi-agent AI systems, highlighting recent advancements and emerging trends.\\n\\n### No-Code Development Tools\\n\\n#### AutoGen Studio\\n\\nOne of the prominent no-code tools is **AutoGen Studio**, developed by Dibia et al. (2024). This tool provides a web interface and a declarative specification method utilizing JSON, enabling rapid prototyping, debugging, and evaluating multi-agent workflows. The drag-and-drop capabilities streamline the design process, making complex interactions between agents more manageable. The framework operates on four primary design principles that cater specifically to no-code development, contributing to an accessible pathway for users to harness multi-agent frameworks for various applications (Dibia et al., 2024).\\n\\n#### AI2Apps Visual IDE\\n\\nAnother notable tool is **AI2Apps**, described by Pang et al. (2024). It serves as a Visual Integrated Development Environment that incorporates a comprehensive set of tools from prototyping to deployment. The platform's user-friendly interface allows for the visualization of code through drag-and-drop components, facilitating smoother integration of different agents. An extension system enhances the platform's capabilities, showcasing the potential for customization and scalability in agent application development. The reported efficiency improvements in token consumption and API calls indicate substantial benefits in user-centric design (Pang et al., 2024).\\n\\n### Performance Enhancements in Multi-Agent Configurations\\n\\nHymel et al. (2024) examined the collaborative performance of commercially available AI tools, demonstrating a measurable improvement when integrating multiple agents in a shared configuration. Their experiments showcased how cooperation between tools like Crowdbotics PRD AI and GitHub Copilot significantly improved task success rates, illustrating the practical benefits of employing no-code tools in multi-agent environments. This synergy reflects the critical need for frameworks that inherently support such integrations, especially through no-code mechanisms, to enhance user experience and productivity (Hymel et al., 2024).\\n\\n### Trust and Usability in AI Agents\\n\\nThe concept of trust in AI, particularly in LLM-based automation agents, has gained attention. Schwartz et al. (2023) addressed the challenges and considerations unique to this new generation of agents, highlighting how no-code platforms ease access and usability for non-technical users. The paper emphasizes the need for further research into the trust factors integral to effective multi-agent systems, advocating for a user-centric approach in the design and evaluation of these no-code tools (Schwartz et al., 2023).\\n\\n### Full-Pipeline AutoML with Multi-Agent Systems\\n\\nThe **AutoML-Agent** framework proposed by Trirat et al. (2024) brings another layer of innovation to the no-code landscape. This framework enhances existing automated machine learning processes by using multiple specialized agents that collaboratively manage the full AI development pipeline from data retrieval to model deployment. The novelty lies in its retrieval-augmented planning strategy, which allows for efficient task decomposition and parallel execution, optimizing the overall development experience for non-experts (Trirat et al., 2024).\\n\\n### Conclusion\\n\\nThe literature presents a growing array of no-code tools designed to democratize the development of multi-agent systems. Innovations such as AutoGen Studio, AI2Apps, and collaborative frameworks like AutoML-Agent highlight a trend towards user-centric, efficient design that encourages participation beyond technical boundaries. Future research should continue to explore aspects of trust, usability, and integration to further refine these tools and expand their applicability across various domains.\\n\\n### References\\n\\n- Dibia, V., Chen, J., Bansal, G., Syed, S., Fourney, A., Zhu, E., Wang, C., & Amershi, S. (2024). AutoGen Studio: A No-Code Developer Tool for Building and Debugging Multi-Agent Systems. *arXiv:2408.15247*.\\n- Hymel, C., Peng, S., Xu, K., & Ranganathan, C. (2024). Improving Performance of Commercially Available AI Products in a Multi-Agent Configuration. *arXiv:2410.22129*.\\n- Pang, X., Li, Z., Chen, J., Cheng, Y., Xu, Y., & Qi, Y. (2024). AI2Apps: A Visual IDE for Building LLM-based AI Agent Applications. *arXiv:2404.04902*.\\n- Schwartz, S., Yaeli, A., & Shlomov, S. (2023). Enhancing Trust in LLM-Based AI Automation Agents: New Considerations and Future Challenges. *arXiv:2308.05391*.\\n- Trirat, P., Jeong, W., & Hwang, S. J. (2024). AutoML-Agent: A Multi-Agent LLM Framework for Full-Pipeline AutoML. *arXiv:2410.02958*.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await Console(\n", - " team.run_stream(\n", - " task=\"Write a literature review on no code tools for building multi agent ai systems\",\n", - " )\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 04a51ae43..7f5edd8e4 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -12,7 +12,7 @@ "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` get a {py:class}`~autogen_agentchat.base.Response`.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", "\n", "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", @@ -74,7 +74,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "[ToolCallMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15), content=[FunctionCall(id='call_hqVC7UJUPhKaiJwgVKkg66ak', arguments='{\"query\":\"AutoGen\"}', name='web_search')]), ToolCallResultMessage(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_hqVC7UJUPhKaiJwgVKkg66ak')])]\n", + "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15), content=[FunctionCall(id='call_hqVC7UJUPhKaiJwgVKkg66ak', arguments='{\"query\":\"AutoGen\"}', name='web_search')]), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_hqVC7UJUPhKaiJwgVKkg66ak')])]\n", "source='assistant' models_usage=RequestUsage(prompt_tokens=92, completion_tokens=14) content='AutoGen is a programming framework designed for building multi-agent applications.'\n" ] } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb index 67705a2cf..cd26728f8 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb @@ -1,313 +1,313 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Custom Agents\n", - "\n", - "You may have agents with behaviors that do not fall into a preset. \n", - "In such cases, you can build custom agents.\n", - "\n", - "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", - "class and implement the following abstract methods and attributes:\n", - "\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", - "\n", - "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n", - "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", - "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", - "yields all messages in the response." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CountDownAgent\n", - "\n", - "In this example, we create a simple agent that counts down from a given number to zero,\n", - "and produces a stream of messages with the current count." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3...\n", - "2...\n", - "1...\n", - "Done!\n" - ] - } - ], - "source": [ - "from typing import AsyncGenerator, List, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage\n", - "from autogen_core import CancellationToken\n", - "\n", - "\n", - "class CountDownAgent(BaseChatAgent):\n", - " def __init__(self, name: str, count: int = 3):\n", - " super().__init__(name, \"A simple agent that counts down.\")\n", - " self._count = count\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Calls the on_messages_stream.\n", - " response: Response | None = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " response = message\n", - " assert response is not None\n", - " return response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentMessage | Response, None]:\n", - " inner_messages: List[AgentMessage] = []\n", - " for i in range(self._count, 0, -1):\n", - " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", - " inner_messages.append(msg)\n", - " yield msg\n", - " # The response is returned at the end of the stream.\n", - " # It contains the final message and all the inner messages.\n", - " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_countdown_agent() -> None:\n", - " # Create a countdown agent.\n", - " countdown_agent = CountDownAgent(\"countdown\")\n", - "\n", - " # Run the agent with a given task and stream the response.\n", - " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", - " if isinstance(message, Response):\n", - " print(message.chat_message.content)\n", - " else:\n", - " print(message.content)\n", - "\n", - "\n", - "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", - "await run_countdown_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ArithmeticAgent\n", - "\n", - "In this example, we create an agent class that can perform simple arithmetic operations\n", - "on a given integer. Then, we will use different instances of this agent class\n", - "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", - "\n", - "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", - "after applying an arithmetic operation to the integer.\n", - "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", - "and returns a response with the result." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Callable, List, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import ChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "\n", - "class ArithmeticAgent(BaseChatAgent):\n", - " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", - " super().__init__(name, description=description)\n", - " self._operator_func = operator_func\n", - " self._message_history: List[ChatMessage] = []\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Update the message history.\n", - " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", - " self._message_history.extend(messages)\n", - " # Parse the number in the last message.\n", - " assert isinstance(self._message_history[-1], TextMessage)\n", - " number = int(self._message_history[-1].content)\n", - " # Apply the operator function to the number.\n", - " result = self._operator_func(number)\n", - " # Create a new message with the result.\n", - " response_message = TextMessage(content=str(result), source=self.name)\n", - " # Update the message history.\n", - " self._message_history.append(response_message)\n", - " # Return the response.\n", - " return Response(chat_message=response_message)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "The `on_messages` method may be called with an empty list of messages, in which\n", - "case it means the agent was called previously and is now being called again,\n", - "without any new messages from the caller. So it is important to keep a history\n", - "of the previous messages received by the agent, and use that history to generate\n", - "the response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", - "\n", - "- one that adds 1 to the input integer,\n", - "- one that subtracts 1 from the input integer,\n", - "- one that multiplies the input integer by 2,\n", - "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", - "- one that returns the input integer unchanged.\n", - "\n", - "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", - "and set the appropriate selector settings:\n", - "\n", - "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", - "- customize the selector prompt to tailor the model's response to the specific task." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Apply the operations to turn the given number into 25.\n", - "---------- user ----------\n", - "10\n", - "---------- multiply_agent ----------\n", - "20\n", - "---------- add_agent ----------\n", - "21\n", - "---------- multiply_agent ----------\n", - "42\n", - "---------- divide_agent ----------\n", - "21\n", - "---------- add_agent ----------\n", - "22\n", - "---------- add_agent ----------\n", - "23\n", - "---------- add_agent ----------\n", - "24\n", - "---------- add_agent ----------\n", - "25\n", - "---------- Summary ----------\n", - "Number of messages: 10\n", - "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", - "Total prompt tokens: 0\n", - "Total completion tokens: 0\n", - "Duration: 2.40 seconds\n" - ] - } - ], - "source": [ - "async def run_number_agents() -> None:\n", - " # Create agents for number operations.\n", - " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", - " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", - " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", - " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", - " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", - "\n", - " # The termination condition is to stop after 10 messages.\n", - " termination_condition = MaxMessageTermination(10)\n", - "\n", - " # Create a selector group chat.\n", - " selector_group_chat = SelectorGroupChat(\n", - " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " termination_condition=termination_condition,\n", - " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", - " selector_prompt=(\n", - " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", - " \"Current conversation history:\\n{history}\\n\"\n", - " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", - " ),\n", - " )\n", - "\n", - " # Run the selector group chat with a given task and stream the response.\n", - " task: List[ChatMessage] = [\n", - " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", - " TextMessage(content=\"10\", source=\"user\"),\n", - " ]\n", - " stream = selector_group_chat.run_stream(task=task)\n", - " await Console(stream)\n", - "\n", - "\n", - "# Use asyncio.run(run_number_agents()) when running in a script.\n", - "await run_number_agents()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the output, we can see that the agents have successfully transformed the input integer\n", - "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Custom Agents\n", + "\n", + "You may have agents with behaviors that do not fall into a preset. \n", + "In such cases, you can build custom agents.\n", + "\n", + "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", + "class and implement the following abstract methods and attributes:\n", + "\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", + "\n", + "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n", + "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", + "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", + "yields all messages in the response." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CountDownAgent\n", + "\n", + "In this example, we create a simple agent that counts down from a given number to zero,\n", + "and produces a stream of messages with the current count." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3...\n", + "2...\n", + "1...\n", + "Done!\n" + ] + } + ], + "source": [ + "from typing import AsyncGenerator, List, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n", + "from autogen_core import CancellationToken\n", + "\n", + "\n", + "class CountDownAgent(BaseChatAgent):\n", + " def __init__(self, name: str, count: int = 3):\n", + " super().__init__(name, \"A simple agent that counts down.\")\n", + " self._count = count\n", + "\n", + " @property\n", + " def produced_message_types(self) -> List[type[ChatMessage]]:\n", + " return [TextMessage]\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Calls the on_messages_stream.\n", + " response: Response | None = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " response = message\n", + " assert response is not None\n", + " return response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", + " inner_messages: List[AgentEvent | ChatMessage] = []\n", + " for i in range(self._count, 0, -1):\n", + " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", + " inner_messages.append(msg)\n", + " yield msg\n", + " # The response is returned at the end of the stream.\n", + " # It contains the final message and all the inner messages.\n", + " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass\n", + "\n", + "\n", + "async def run_countdown_agent() -> None:\n", + " # Create a countdown agent.\n", + " countdown_agent = CountDownAgent(\"countdown\")\n", + "\n", + " # Run the agent with a given task and stream the response.\n", + " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", + " if isinstance(message, Response):\n", + " print(message.chat_message.content)\n", + " else:\n", + " print(message.content)\n", + "\n", + "\n", + "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", + "await run_countdown_agent()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ArithmeticAgent\n", + "\n", + "In this example, we create an agent class that can perform simple arithmetic operations\n", + "on a given integer. Then, we will use different instances of this agent class\n", + "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", + "\n", + "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", + "after applying an arithmetic operation to the integer.\n", + "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", + "and returns a response with the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Callable, List, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.conditions import MaxMessageTermination\n", + "from autogen_agentchat.messages import ChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class ArithmeticAgent(BaseChatAgent):\n", + " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", + " super().__init__(name, description=description)\n", + " self._operator_func = operator_func\n", + " self._message_history: List[ChatMessage] = []\n", + "\n", + " @property\n", + " def produced_message_types(self) -> List[type[ChatMessage]]:\n", + " return [TextMessage]\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Update the message history.\n", + " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", + " self._message_history.extend(messages)\n", + " # Parse the number in the last message.\n", + " assert isinstance(self._message_history[-1], TextMessage)\n", + " number = int(self._message_history[-1].content)\n", + " # Apply the operator function to the number.\n", + " result = self._operator_func(number)\n", + " # Create a new message with the result.\n", + " response_message = TextMessage(content=str(result), source=self.name)\n", + " # Update the message history.\n", + " self._message_history.append(response_message)\n", + " # Return the response.\n", + " return Response(chat_message=response_message)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "The `on_messages` method may be called with an empty list of messages, in which\n", + "case it means the agent was called previously and is now being called again,\n", + "without any new messages from the caller. So it is important to keep a history\n", + "of the previous messages received by the agent, and use that history to generate\n", + "the response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", + "\n", + "- one that adds 1 to the input integer,\n", + "- one that subtracts 1 from the input integer,\n", + "- one that multiplies the input integer by 2,\n", + "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", + "- one that returns the input integer unchanged.\n", + "\n", + "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", + "and set the appropriate selector settings:\n", + "\n", + "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", + "- customize the selector prompt to tailor the model's response to the specific task." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Apply the operations to turn the given number into 25.\n", + "---------- user ----------\n", + "10\n", + "---------- multiply_agent ----------\n", + "20\n", + "---------- add_agent ----------\n", + "21\n", + "---------- multiply_agent ----------\n", + "42\n", + "---------- divide_agent ----------\n", + "21\n", + "---------- add_agent ----------\n", + "22\n", + "---------- add_agent ----------\n", + "23\n", + "---------- add_agent ----------\n", + "24\n", + "---------- add_agent ----------\n", + "25\n", + "---------- Summary ----------\n", + "Number of messages: 10\n", + "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", + "Total prompt tokens: 0\n", + "Total completion tokens: 0\n", + "Duration: 2.40 seconds\n" + ] + } + ], + "source": [ + "async def run_number_agents() -> None:\n", + " # Create agents for number operations.\n", + " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", + " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", + " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", + " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", + " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", + "\n", + " # The termination condition is to stop after 10 messages.\n", + " termination_condition = MaxMessageTermination(10)\n", + "\n", + " # Create a selector group chat.\n", + " selector_group_chat = SelectorGroupChat(\n", + " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " termination_condition=termination_condition,\n", + " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", + " selector_prompt=(\n", + " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", + " \"Current conversation history:\\n{history}\\n\"\n", + " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", + " ),\n", + " )\n", + "\n", + " # Run the selector group chat with a given task and stream the response.\n", + " task: List[ChatMessage] = [\n", + " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", + " TextMessage(content=\"10\", source=\"user\"),\n", + " ]\n", + " stream = selector_group_chat.run_stream(task=task)\n", + " await Console(stream)\n", + "\n", + "\n", + "# Use asyncio.run(run_number_agents()) when running in a script.\n", + "await run_number_agents()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the output, we can see that the agents have successfully transformed the input integer\n", + "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb index a7f383212..25dc78641 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb @@ -23,7 +23,7 @@ "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", "\n", "### Agent-Agent Messages\n", - "AgentChat supports many message types for agent-to-agent communication. The most common one is the {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", + "AgentChat supports many message types for agent-to-agent communication. They belong to the union type {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", "\n", "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" ] @@ -91,13 +91,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Internal Events and Messages\n", + "### Internal Events\n", "\n", - "AgentChat also supports the concept of `inner_messages` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself.\n", + "AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the union type {py:class}`~autogen_agentchat.messages.AgentEvent`.\n", "\n", - "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallMessage`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallResultMessage`, which contains the results of tool calls.\n", + "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", "\n", - "Typically, these messages are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](./custom-agents.ipynb).\n", + "Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](./custom-agents.ipynb).\n", "\n", "\n", "You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb index 74110d180..ae67661dc 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb @@ -1,504 +1,504 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Selector Group Chat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", - "\n", - "Key features include:\n", - "\n", - "- Model-based speaker selection\n", - "- Configurable participant roles and descriptions\n", - "- Prevention of consecutive turns by the same speaker (optional)\n", - "- Customizable selection prompting\n", - "- Customizable selection function to override the default model-based selection\n", - "\n", - "```{note}\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", - "```\n", - "\n", - "## How Does it Work?\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "but with a model-based next speaker selection mechanism.\n", - "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", - "the following steps are executed:\n", - "\n", - "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. You can override the model by providing a custom selection function.\n", - "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", - "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", - "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", - "\n", - "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", - "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", - "\n", - "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: Web Search/Analysis" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Sequence\n", - "\n", - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.messages import AgentMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Agents\n", - "\n", - "![Selector Group Chat](selector-group-chat.svg)\n", - "\n", - "This system uses three specialized agents:\n", - "\n", - "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", - "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", - "- **Data Analyst Agent**: An agent specialist in performing calculations equipped with `percentage_change_tool`. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", - "def search_web_tool(query: str) -> str:\n", - " if \"2006-2007\" in query:\n", - " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \"\"\"\n", - " elif \"2007-2008\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", - " elif \"2008-2009\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", - " return \"No data found.\"\n", - "\n", - "\n", - "def percentage_change_tool(start: float, end: float) -> float:\n", - " return ((end - start) / start) * 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", - "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", - "so it is recommended to provide meaningful names and descriptions." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "planning_agent = AssistantAgent(\n", - " \"PlanningAgent\",\n", - " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a planning agent.\n", - " Your job is to break down complex tasks into smaller, manageable subtasks.\n", - " Your team members are:\n", - " Web search agent: Searches for information\n", - " Data analyst: Performs calculations\n", - "\n", - " You only plan and delegate tasks - you do not execute them yourself.\n", - "\n", - " When assigning tasks, use this format:\n", - " 1. : \n", - "\n", - " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", - " \"\"\",\n", - ")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"A web search agent.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a web search agent.\n", - " Your only tool is search_tool - use it to find information.\n", - " You make only one search call at a time.\n", - " Once you have the results, you never do calculations based on them.\n", - " \"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"A data analyst agent. Useful for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"\n", - " You are a data analyst.\n", - " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", - " \"\"\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Workflow\n", - "\n", - "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", - "\n", - "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", - " ` : `\n", - "\n", - "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", - "\n", - "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", - "\n", - "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", - "\n", - "6. The workflow continues with agents being dynamically selected until either:\n", - " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", - " - An alternative termination condition is met (e.g., a maximum number of messages)\n", - "\n", - "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create the team with two termination conditions:\n", - "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", - "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", - "max_messages_termination = MaxMessageTermination(max_messages=25)\n", - "termination = text_mention_termination | max_messages_termination\n", - "\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " termination_condition=termination,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we run the team with a task to find information about an NBA player." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To address this request, we will divide the task into manageable subtasks. \n", - "\n", - "1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Web search agent: Gather the total rebounds for the identified player during the 2007-2008 season.\n", - "3. Web search agent: Gather the total rebounds for the identified player during the 2008-2009 season.\n", - "4. Data analyst: Calculate the percentage change in total rebounds for the identified player between the 2007-2008 and 2008-2009 seasons.\n", - "[Prompt tokens: 159, Completion tokens: 122]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_xdYlGP2lsqDeWdSiOlwOBNiO', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')]\n", - "[Prompt tokens: 271, Completion tokens: 26]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_xdYlGP2lsqDeWdSiOlwOBNiO')]\n", - "---------- WebSearchAgent ----------\n", - "Tool calls:\n", - "search_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_asjxKtAVGfqrYl0jMpvwCrvV', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='WebSearchAgent'), FunctionCall(id='call_8L91Kizt0KU6RNwUgvNx7S0s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='WebSearchAgent')]\n", - "[Prompt tokens: 345, Completion tokens: 68]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_asjxKtAVGfqrYl0jMpvwCrvV'), FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_8L91Kizt0KU6RNwUgvNx7S0s')]\n", - "---------- DataAnalystAgent ----------\n", - "Tool calls:\n", - "WebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool 'WebSearchAgent' is not available.\n", - "WebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool 'WebSearchAgent' is not available.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_imvRJ2jhpPdovBbx8MFjlFVS', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_U30KVmFG1aeXPbqGJjDmJ6iJ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "[Prompt tokens: 445, Completion tokens: 70]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_imvRJ2jhpPdovBbx8MFjlFVS'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_U30KVmFG1aeXPbqGJjDmJ6iJ')]\n", - "---------- WebSearchAgent ----------\n", - "Tool calls:\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_CtAnvcbitN0JiwBfiLVzb5Do', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "[Prompt tokens: 562, Completion tokens: 20]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_CtAnvcbitN0JiwBfiLVzb5Do')]\n", - "---------- DataAnalystAgent ----------\n", - "Tool calls:\n", - "percentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Summary of Findings:\n", - "\n", - "1. Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1,397 points.\n", - "2. Dwyane Wade's total rebounds during the 2007-2008 season were 214.\n", - "3. Dwyane Wade's total rebounds during the 2008-2009 season were 398.\n", - "4. The percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", - "\n", - "TERMINATE\n", - "[Prompt tokens: 590, Completion tokens: 122]\n", - "---------- Summary ----------\n", - "Number of messages: 15\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 2372\n", - "Total completion tokens: 428\n", - "Duration: 9.21 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Selector Group Chat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", + "\n", + "Key features include:\n", + "\n", + "- Model-based speaker selection\n", + "- Configurable participant roles and descriptions\n", + "- Prevention of consecutive turns by the same speaker (optional)\n", + "- Customizable selection prompting\n", + "- Customizable selection function to override the default model-based selection\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", + "```\n", + "\n", + "## How Does it Work?\n", + "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "but with a model-based next speaker selection mechanism.\n", + "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", + "the following steps are executed:\n", + "\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. You can override the model by providing a custom selection function.\n", + "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", + "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", + "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", + "\n", + "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", + "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", + "\n", + "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: Web Search/Analysis" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Sequence\n", + "\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agents\n", + "\n", + "![Selector Group Chat](selector-group-chat.svg)\n", + "\n", + "This system uses three specialized agents:\n", + "\n", + "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", + "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", + "- **Data Analyst Agent**: An agent specialist in performing calculations equipped with `percentage_change_tool`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", + "def search_web_tool(query: str) -> str:\n", + " if \"2006-2007\" in query:\n", + " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \"\"\"\n", + " elif \"2007-2008\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + " elif \"2008-2009\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + " return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + " return ((end - start) / start) * 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", + "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", + "so it is recommended to provide meaningful names and descriptions." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "planning_agent = AssistantAgent(\n", + " \"PlanningAgent\",\n", + " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " Web search agent: Searches for information\n", + " Data analyst: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. : \n", + "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + ")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"A web search agent.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"A data analyst agent. Useful for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Workflow\n", + "\n", + "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", + "\n", + "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", + " ` : `\n", + "\n", + "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", + "\n", + "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", + "\n", + "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", + "\n", + "6. The workflow continues with agents being dynamically selected until either:\n", + " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", + " - An alternative termination condition is met (e.g., a maximum number of messages)\n", + "\n", + "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the team with two termination conditions:\n", + "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", + "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination\n", + "\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " termination_condition=termination,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To address this request, we will divide the task into manageable subtasks. \n", + "\n", + "1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Web search agent: Gather the total rebounds for the identified player during the 2007-2008 season.\n", + "3. Web search agent: Gather the total rebounds for the identified player during the 2008-2009 season.\n", + "4. Data analyst: Calculate the percentage change in total rebounds for the identified player between the 2007-2008 and 2008-2009 seasons.\n", + "[Prompt tokens: 159, Completion tokens: 122]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_xdYlGP2lsqDeWdSiOlwOBNiO', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 271, Completion tokens: 26]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_xdYlGP2lsqDeWdSiOlwOBNiO')]\n", + "---------- WebSearchAgent ----------\n", + "Tool calls:\n", + "search_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_asjxKtAVGfqrYl0jMpvwCrvV', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='WebSearchAgent'), FunctionCall(id='call_8L91Kizt0KU6RNwUgvNx7S0s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='WebSearchAgent')]\n", + "[Prompt tokens: 345, Completion tokens: 68]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_asjxKtAVGfqrYl0jMpvwCrvV'), FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_8L91Kizt0KU6RNwUgvNx7S0s')]\n", + "---------- DataAnalystAgent ----------\n", + "Tool calls:\n", + "WebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool 'WebSearchAgent' is not available.\n", + "WebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool 'WebSearchAgent' is not available.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_imvRJ2jhpPdovBbx8MFjlFVS', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_U30KVmFG1aeXPbqGJjDmJ6iJ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 445, Completion tokens: 70]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_imvRJ2jhpPdovBbx8MFjlFVS'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_U30KVmFG1aeXPbqGJjDmJ6iJ')]\n", + "---------- WebSearchAgent ----------\n", + "Tool calls:\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_CtAnvcbitN0JiwBfiLVzb5Do', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 562, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_CtAnvcbitN0JiwBfiLVzb5Do')]\n", + "---------- DataAnalystAgent ----------\n", + "Tool calls:\n", + "percentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Summary of Findings:\n", + "\n", + "1. Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1,397 points.\n", + "2. Dwyane Wade's total rebounds during the 2007-2008 season were 214.\n", + "3. Dwyane Wade's total rebounds during the 2008-2009 season were 398.\n", + "4. The percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", + "\n", + "TERMINATE\n", + "[Prompt tokens: 590, Completion tokens: 122]\n", + "---------- Summary ----------\n", + "Number of messages: 15\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 2372\n", + "Total completion tokens: 428\n", + "Duration: 9.21 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=122), content='To address this request, we will divide the task into manageable subtasks. \\n\\n1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Web search agent: Gather the total rebounds for the identified player during the 2007-2008 season.\\n3. Web search agent: Gather the total rebounds for the identified player during the 2008-2009 season.\\n4. Data analyst: Calculate the percentage change in total rebounds for the identified player between the 2007-2008 and 2008-2009 seasons.', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=271, completion_tokens=26), content=[FunctionCall(id='call_xdYlGP2lsqDeWdSiOlwOBNiO', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_xdYlGP2lsqDeWdSiOlwOBNiO')], type='ToolCallExecutionEvent'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=345, completion_tokens=68), content=[FunctionCall(id='call_asjxKtAVGfqrYl0jMpvwCrvV', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='WebSearchAgent'), FunctionCall(id='call_8L91Kizt0KU6RNwUgvNx7S0s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='WebSearchAgent')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_asjxKtAVGfqrYl0jMpvwCrvV'), FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_8L91Kizt0KU6RNwUgvNx7S0s')], type='ToolCallExecutionEvent'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\nWebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool \\'WebSearchAgent\\' is not available.\\nWebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool \\'WebSearchAgent\\' is not available.', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=445, completion_tokens=70), content=[FunctionCall(id='call_imvRJ2jhpPdovBbx8MFjlFVS', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_U30KVmFG1aeXPbqGJjDmJ6iJ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_imvRJ2jhpPdovBbx8MFjlFVS'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_U30KVmFG1aeXPbqGJjDmJ6iJ')], type='ToolCallExecutionEvent'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=562, completion_tokens=20), content=[FunctionCall(id='call_CtAnvcbitN0JiwBfiLVzb5Do', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_CtAnvcbitN0JiwBfiLVzb5Do')], type='ToolCallExecutionEvent'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\npercentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=590, completion_tokens=122), content=\"Summary of Findings:\\n\\n1. Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1,397 points.\\n2. Dwyane Wade's total rebounds during the 2007-2008 season were 214.\\n3. Dwyane Wade's total rebounds during the 2008-2009 season were 398.\\n4. The percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", + "\n", + "# Use asyncio.run(...) if you are running this in a script.\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Selector Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Often times we want better control over the selection process. \n", + "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", + "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", + "\n", + "```{note}\n", + "Returning `None` from the custom selector function will use the default model-based selection.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To address this query, we'll need to break it down into a few specific tasks:\n", + "\n", + "1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. Web search agent: Find the total number of rebounds by this player in the 2007-2008 NBA season.\n", + "3. Web search agent: Find the total number of rebounds by this player in the 2008-2009 NBA season.\n", + "4. Data analyst: Calculate the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's get started with these tasks.\n", + "[Prompt tokens: 159, Completion tokens: 132]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_TSUHOBKhpHmTNoYeJzwSP5V4', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 281, Completion tokens: 26]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_TSUHOBKhpHmTNoYeJzwSP5V4')]\n", + "---------- WebSearchAgent ----------\n", + "Tool calls:\n", + "search_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\n", + "2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.\n", + "[Prompt tokens: 382, Completion tokens: 54]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_BkPBFkpuTG6c3eeoACrrRX7V', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_5LQquT7ZUAAQRf7gvckeTVdQ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 416, Completion tokens: 68]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_BkPBFkpuTG6c3eeoACrrRX7V'), FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_5LQquT7ZUAAQRf7gvckeTVdQ')]\n", + "---------- DataAnalystAgent ----------\n", + "Tool calls:\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool 'search_web_tool' is not available.\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool 'search_web_tool' is not available.\n", + "---------- PlanningAgent ----------\n", + "It seems there was a miscommunication in task assignment. Let me reassess and reassign the tasks correctly.\n", + "\n", + "1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\n", + "2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.\n", + "[Prompt tokens: 525, Completion tokens: 76]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_buIWOtu1dJqPaxJmqMyuRkpj', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_qcnHKdoPsNAzMlPvoBvqmt8n', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 599, Completion tokens: 70]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_buIWOtu1dJqPaxJmqMyuRkpj'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_qcnHKdoPsNAzMlPvoBvqmt8n')]\n", + "---------- WebSearchAgent ----------\n", + "Tool calls:\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "With this information, we can proceed to calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", + "\n", + "1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 (214 rebounds) and the 2008-2009 (398 rebounds) NBA seasons.\n", + "[Prompt tokens: 711, Completion tokens: 83]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_RjbFpLCehz1Nlk5kYmyMUenB', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 806, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_RjbFpLCehz1Nlk5kYmyMUenB')]\n", + "---------- DataAnalystAgent ----------\n", + "Tool calls:\n", + "percentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Based on the data collected, Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 NBA season, scoring a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. This represents an approximate 85.98% increase in his total rebounds.\n", + "\n", + "TERMINATE\n", + "[Prompt tokens: 834, Completion tokens: 90]\n", + "---------- Summary ----------\n", + "Number of messages: 18\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 4713\n", + "Total completion tokens: 619\n", + "Duration: 11.72 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=132), content=\"To address this query, we'll need to break it down into a few specific tasks:\\n\\n1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Web search agent: Find the total number of rebounds by this player in the 2007-2008 NBA season.\\n3. Web search agent: Find the total number of rebounds by this player in the 2008-2009 NBA season.\\n4. Data analyst: Calculate the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's get started with these tasks.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=281, completion_tokens=26), content=[FunctionCall(id='call_TSUHOBKhpHmTNoYeJzwSP5V4', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_TSUHOBKhpHmTNoYeJzwSP5V4')], type='ToolCallExecutionEvent'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=382, completion_tokens=54), content='1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\\n2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=416, completion_tokens=68), content=[FunctionCall(id='call_BkPBFkpuTG6c3eeoACrrRX7V', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_5LQquT7ZUAAQRf7gvckeTVdQ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_BkPBFkpuTG6c3eeoACrrRX7V'), FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_5LQquT7ZUAAQRf7gvckeTVdQ')], type='ToolCallExecutionEvent'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool \\'search_web_tool\\' is not available.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool \\'search_web_tool\\' is not available.', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=76), content='It seems there was a miscommunication in task assignment. Let me reassess and reassign the tasks correctly.\\n\\n1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\\n2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=599, completion_tokens=70), content=[FunctionCall(id='call_buIWOtu1dJqPaxJmqMyuRkpj', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_qcnHKdoPsNAzMlPvoBvqmt8n', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_buIWOtu1dJqPaxJmqMyuRkpj'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_qcnHKdoPsNAzMlPvoBvqmt8n')], type='ToolCallExecutionEvent'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=711, completion_tokens=83), content=\"With this information, we can proceed to calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\\n\\n1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 (214 rebounds) and the 2008-2009 (398 rebounds) NBA seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=806, completion_tokens=20), content=[FunctionCall(id='call_RjbFpLCehz1Nlk5kYmyMUenB', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_RjbFpLCehz1Nlk5kYmyMUenB')], type='ToolCallExecutionEvent'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\npercentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=834, completion_tokens=90), content=\"Based on the data collected, Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 NBA season, scoring a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. This represents an approximate 85.98% increase in his total rebounds.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name:\n", + " return planning_agent.name\n", + " return None\n", + "\n", + "\n", + "# Reset the previous team and run the chat again with the selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " termination_condition=termination,\n", + " selector_func=selector_func,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=122), content='To address this request, we will divide the task into manageable subtasks. \\n\\n1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Web search agent: Gather the total rebounds for the identified player during the 2007-2008 season.\\n3. Web search agent: Gather the total rebounds for the identified player during the 2008-2009 season.\\n4. Data analyst: Calculate the percentage change in total rebounds for the identified player between the 2007-2008 and 2008-2009 seasons.', type='TextMessage'), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=271, completion_tokens=26), content=[FunctionCall(id='call_xdYlGP2lsqDeWdSiOlwOBNiO', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_xdYlGP2lsqDeWdSiOlwOBNiO')], type='ToolCallResultMessage'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='TextMessage'), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=345, completion_tokens=68), content=[FunctionCall(id='call_asjxKtAVGfqrYl0jMpvwCrvV', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='WebSearchAgent'), FunctionCall(id='call_8L91Kizt0KU6RNwUgvNx7S0s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='WebSearchAgent')], type='ToolCallMessage'), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_asjxKtAVGfqrYl0jMpvwCrvV'), FunctionExecutionResult(content=\"Error: The tool 'WebSearchAgent' is not available.\", call_id='call_8L91Kizt0KU6RNwUgvNx7S0s')], type='ToolCallResultMessage'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\nWebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool \\'WebSearchAgent\\' is not available.\\nWebSearchAgent({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool \\'WebSearchAgent\\' is not available.', type='TextMessage'), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=445, completion_tokens=70), content=[FunctionCall(id='call_imvRJ2jhpPdovBbx8MFjlFVS', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_U30KVmFG1aeXPbqGJjDmJ6iJ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_imvRJ2jhpPdovBbx8MFjlFVS'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_U30KVmFG1aeXPbqGJjDmJ6iJ')], type='ToolCallResultMessage'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='TextMessage'), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=562, completion_tokens=20), content=[FunctionCall(id='call_CtAnvcbitN0JiwBfiLVzb5Do', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_CtAnvcbitN0JiwBfiLVzb5Do')], type='ToolCallResultMessage'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\npercentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=590, completion_tokens=122), content=\"Summary of Findings:\\n\\n1. Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1,397 points.\\n2. Dwyane Wade's total rebounds during the 2007-2008 season were 214.\\n3. Dwyane Wade's total rebounds during the 2008-2009 season were 398.\\n4. The percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", - "\n", - "# Use asyncio.run(...) if you are running this in a script.\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Custom Selector Function" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Often times we want better control over the selection process. \n", - "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", - "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", - "\n", - "```{note}\n", - "Returning `None` from the custom selector function will use the default model-based selection.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To address this query, we'll need to break it down into a few specific tasks:\n", - "\n", - "1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. Web search agent: Find the total number of rebounds by this player in the 2007-2008 NBA season.\n", - "3. Web search agent: Find the total number of rebounds by this player in the 2008-2009 NBA season.\n", - "4. Data analyst: Calculate the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's get started with these tasks.\n", - "[Prompt tokens: 159, Completion tokens: 132]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_TSUHOBKhpHmTNoYeJzwSP5V4', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')]\n", - "[Prompt tokens: 281, Completion tokens: 26]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_TSUHOBKhpHmTNoYeJzwSP5V4')]\n", - "---------- WebSearchAgent ----------\n", - "Tool calls:\n", - "search_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\n", - "2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.\n", - "[Prompt tokens: 382, Completion tokens: 54]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_BkPBFkpuTG6c3eeoACrrRX7V', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_5LQquT7ZUAAQRf7gvckeTVdQ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "[Prompt tokens: 416, Completion tokens: 68]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_BkPBFkpuTG6c3eeoACrrRX7V'), FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_5LQquT7ZUAAQRf7gvckeTVdQ')]\n", - "---------- DataAnalystAgent ----------\n", - "Tool calls:\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool 'search_web_tool' is not available.\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool 'search_web_tool' is not available.\n", - "---------- PlanningAgent ----------\n", - "It seems there was a miscommunication in task assignment. Let me reassess and reassign the tasks correctly.\n", - "\n", - "1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\n", - "2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.\n", - "[Prompt tokens: 525, Completion tokens: 76]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_buIWOtu1dJqPaxJmqMyuRkpj', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_qcnHKdoPsNAzMlPvoBvqmt8n', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "[Prompt tokens: 599, Completion tokens: 70]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_buIWOtu1dJqPaxJmqMyuRkpj'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_qcnHKdoPsNAzMlPvoBvqmt8n')]\n", - "---------- WebSearchAgent ----------\n", - "Tool calls:\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "search_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "With this information, we can proceed to calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", - "\n", - "1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 (214 rebounds) and the 2008-2009 (398 rebounds) NBA seasons.\n", - "[Prompt tokens: 711, Completion tokens: 83]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_RjbFpLCehz1Nlk5kYmyMUenB', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "[Prompt tokens: 806, Completion tokens: 20]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_RjbFpLCehz1Nlk5kYmyMUenB')]\n", - "---------- DataAnalystAgent ----------\n", - "Tool calls:\n", - "percentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Based on the data collected, Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 NBA season, scoring a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. This represents an approximate 85.98% increase in his total rebounds.\n", - "\n", - "TERMINATE\n", - "[Prompt tokens: 834, Completion tokens: 90]\n", - "---------- Summary ----------\n", - "Number of messages: 18\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 4713\n", - "Total completion tokens: 619\n", - "Duration: 11.72 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=132), content=\"To address this query, we'll need to break it down into a few specific tasks:\\n\\n1. Web search agent: Identify the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Web search agent: Find the total number of rebounds by this player in the 2007-2008 NBA season.\\n3. Web search agent: Find the total number of rebounds by this player in the 2008-2009 NBA season.\\n4. Data analyst: Calculate the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's get started with these tasks.\", type='TextMessage'), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=281, completion_tokens=26), content=[FunctionCall(id='call_TSUHOBKhpHmTNoYeJzwSP5V4', arguments='{\"query\":\"Miami Heat highest points player 2006-2007 season\"}', name='search_web_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_TSUHOBKhpHmTNoYeJzwSP5V4')], type='ToolCallResultMessage'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\":\"Miami Heat highest points player 2006-2007 season\"}) = Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=382, completion_tokens=54), content='1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\\n2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.', type='TextMessage'), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=416, completion_tokens=68), content=[FunctionCall(id='call_BkPBFkpuTG6c3eeoACrrRX7V', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_5LQquT7ZUAAQRf7gvckeTVdQ', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_BkPBFkpuTG6c3eeoACrrRX7V'), FunctionExecutionResult(content=\"Error: The tool 'search_web_tool' is not available.\", call_id='call_5LQquT7ZUAAQRf7gvckeTVdQ')], type='ToolCallResultMessage'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = Error: The tool \\'search_web_tool\\' is not available.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = Error: The tool \\'search_web_tool\\' is not available.', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=76), content='It seems there was a miscommunication in task assignment. Let me reassess and reassign the tasks correctly.\\n\\n1. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2007-2008 NBA season.\\n2. Web search agent: Find the total number of rebounds by Dwayne Wade in the 2008-2009 NBA season.', type='TextMessage'), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=599, completion_tokens=70), content=[FunctionCall(id='call_buIWOtu1dJqPaxJmqMyuRkpj', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_qcnHKdoPsNAzMlPvoBvqmt8n', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_buIWOtu1dJqPaxJmqMyuRkpj'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_qcnHKdoPsNAzMlPvoBvqmt8n')], type='ToolCallResultMessage'), TextMessage(source='WebSearchAgent', models_usage=None, content='Tool calls:\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nsearch_web_tool({\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}) = The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=711, completion_tokens=83), content=\"With this information, we can proceed to calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\\n\\n1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 (214 rebounds) and the 2008-2009 (398 rebounds) NBA seasons.\", type='TextMessage'), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=806, completion_tokens=20), content=[FunctionCall(id='call_RjbFpLCehz1Nlk5kYmyMUenB', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallMessage'), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_RjbFpLCehz1Nlk5kYmyMUenB')], type='ToolCallResultMessage'), TextMessage(source='DataAnalystAgent', models_usage=None, content='Tool calls:\\npercentage_change_tool({\"start\":214,\"end\":398}) = 85.98130841121495', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=834, completion_tokens=90), content=\"Based on the data collected, Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 NBA season, scoring a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. This represents an approximate 85.98% increase in his total rebounds.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def selector_func(messages: Sequence[AgentMessage]) -> str | None:\n", - " if messages[-1].source != planning_agent.name:\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous team and run the chat again with the selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " termination_condition=termination,\n", - " selector_func=selector_func,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb index b0feb00d0..6dd051108 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb @@ -1,532 +1,532 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Swarm\n", - "\n", - "{py:class}`~autogen_agentchat.teams.Swarm` implements a team in which agents can hand off \n", - "task to other agents based on their capabilities. \n", - "It is a multi-agent design pattern first introduced by OpenAI in \n", - "[an experimental project](https://github.com/openai/swarm).\n", - "The key idea is to let agent delegate tasks to other agents using a special tool call, while\n", - "all agents share the same message context.\n", - "This enables agents to make local decisions about task planning, rather than\n", - "relying on a central orchestrator such as in {py:class}`~autogen_agentchat.teams.SelectorGroupChat`.\n", - "\n", - "```{note}\n", - "{py:class}`~autogen_agentchat.teams.Swarm` is a high-level API. If you need more\n", - "control and customization that is not supported by this API, you can take a look\n", - "at the [Handoff Pattern](../../core-user-guide/design-patterns/handoffs.ipynb)\n", - "in the Core API documentation and implement your own version of the Swarm pattern.\n", - "```\n", - "\n", - "## How Does It Work?\n", - "\n", - "At its core, the {py:class}`~autogen_agentchat.teams.Swarm` team is a group chat\n", - "where agents take turn to generate a response. \n", - "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", - "broadcast their responses so all agents share the same mesasge context.\n", - "\n", - "Different from the other two group chat teams, at each turn,\n", - "**the speaker agent is selected based on the most recent\n", - "{py:class}`~autogen_agentchat.messages.HandoffMessage` message in the context.**\n", - "This naturally requires each agent in the team to be able to generate\n", - "{py:class}`~autogen_agentchat.messages.HandoffMessage` to signal\n", - "which other agents that it hands off to.\n", - "\n", - "For {py:class}`~autogen_agentchat.agents.AssistantAgent`, you can set the\n", - "`handoffs` argument to specify which agents it can hand off to. You can\n", - "use {py:class}`~autogen_agentchat.base.Handoff` to customize the message\n", - "content and handoff behavior.\n", - "\n", - "The overall process can be summarized as follows:\n", - "\n", - "1. Each agent has the ability to generate {py:class}`~autogen_agentchat.messages.HandoffMessage`\n", - " to signal which other agents it can hand off to. For {py:class}`~autogen_agentchat.agents.AssistantAgent`, this means setting the `handoffs` argument.\n", - "2. When the team starts on a task, the first speaker agents operate on the task and make locallized decision about whether to hand off and to whom.\n", - "3. When an agent generates a {py:class}`~autogen_agentchat.messages.HandoffMessage`, the receiving agent takes over the task with the same message context.\n", - "4. The process continues until a termination condition is met.\n", - "\n", - "In this section, we will show you two examples of how to use the {py:class}`~autogen_agentchat.teams.Swarm` team:\n", - "\n", - "1. A customer support team with human-in-the-loop handoff.\n", - "2. An automonous team for content generation." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Customer Support Example" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Customer Support](swarm_customer_support.svg)\n", - "\n", - "This system implements a flights refund scenario with two agents:\n", - "\n", - "- **Travel Agent**: Handles general travel and refund coordination.\n", - "- **Flights Refunder**: Specializes in processing flight refunds with the `refund_flight` tool.\n", - "\n", - "Additionally, we let the user interact with the agents, when agents handoff to `\"user\"`.\n", - "\n", - "#### Workflow\n", - "1. The **Travel Agent** initiates the conversation and evaluates the user's request.\n", - "2. Based on the request:\n", - " - For refund-related tasks, the Travel Agent hands off to the **Flights Refunder**.\n", - " - For information needed from the customer, either agent can hand off to the `\"user\"`.\n", - "3. The **Flights Refunder** processes refunds using the `refund_flight` tool when appropriate.\n", - "4. If an agent hands off to the `\"user\"`, the team execution will stop and wait for the user to input a response.\n", - "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandaffMessage`. This message is directed to the agent that originally requested user input.\n", - "6. The process continues until the Travel Agent determines the task is complete and terminates the workflow." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Any, Dict, List\n", - "\n", - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n", - "from autogen_agentchat.messages import HandoffMessage\n", - "from autogen_agentchat.teams import Swarm\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Tools" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def refund_flight(flight_id: str) -> str:\n", - " \"\"\"Refund a flight\"\"\"\n", - " return f\"Flight {flight_id} refunded\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Agents" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"YOUR_API_KEY\",\n", - ")\n", - "\n", - "travel_agent = AssistantAgent(\n", - " \"travel_agent\",\n", - " model_client=model_client,\n", - " handoffs=[\"flights_refunder\", \"user\"],\n", - " system_message=\"\"\"You are a travel agent.\n", - " The flights_refunder is in charge of refunding flights.\n", - " If you need information from the user, you must first send your message, then you can handoff to the user.\n", - " Use TERMINATE when the travel planning is complete.\"\"\",\n", - ")\n", - "\n", - "flights_refunder = AssistantAgent(\n", - " \"flights_refunder\",\n", - " model_client=model_client,\n", - " handoffs=[\"travel_agent\", \"user\"],\n", - " tools=[refund_flight],\n", - " system_message=\"\"\"You are an agent specialized in refunding flights.\n", - " You only need flight reference numbers to refund a flight.\n", - " You have the ability to refund a flight using the refund_flight tool.\n", - " If you need information from the user, you must first send your message, then you can handoff to the user.\n", - " When the transaction is complete, handoff to the travel agent to finalize.\"\"\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "termination = HandoffTermination(target=\"user\") | TextMentionTermination(\"TERMINATE\")\n", - "team = Swarm([travel_agent, flights_refunder], termination_condition=termination)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "I need to refund my flight.\n", - "---------- travel_agent ----------\n", - "[FunctionCall(id='call_ZQ2rGjq4Z29pd0yP2sNcuyd2', arguments='{}', name='transfer_to_flights_refunder')]\n", - "[Prompt tokens: 119, Completion tokens: 14]\n", - "---------- travel_agent ----------\n", - "[FunctionExecutionResult(content='Transferred to flights_refunder, adopting the role of flights_refunder immediately.', call_id='call_ZQ2rGjq4Z29pd0yP2sNcuyd2')]\n", - "---------- travel_agent ----------\n", - "Transferred to flights_refunder, adopting the role of flights_refunder immediately.\n", - "---------- flights_refunder ----------\n", - "Could you please provide me with the flight reference number so I can process the refund for you?\n", - "[Prompt tokens: 191, Completion tokens: 20]\n", - "---------- flights_refunder ----------\n", - "[FunctionCall(id='call_1iRfzNpxTJhRTW2ww9aQJ8sK', arguments='{}', name='transfer_to_user')]\n", - "[Prompt tokens: 219, Completion tokens: 11]\n", - "---------- flights_refunder ----------\n", - "[FunctionExecutionResult(content='Transferred to user, adopting the role of user immediately.', call_id='call_1iRfzNpxTJhRTW2ww9aQJ8sK')]\n", - "---------- flights_refunder ----------\n", - "Transferred to user, adopting the role of user immediately.\n", - "---------- Summary ----------\n", - "Number of messages: 8\n", - "Finish reason: Handoff to user from flights_refunder detected.\n", - "Total prompt tokens: 529\n", - "Total completion tokens: 45\n", - "Duration: 2.05 seconds\n", - "---------- user ----------\n", - "Sure, it's 507811\n", - "---------- flights_refunder ----------\n", - "[FunctionCall(id='call_UKCsoEBdflkvpuT9Bi2xlvTd', arguments='{\"flight_id\":\"507811\"}', name='refund_flight')]\n", - "[Prompt tokens: 266, Completion tokens: 18]\n", - "---------- flights_refunder ----------\n", - "[FunctionExecutionResult(content='Flight 507811 refunded', call_id='call_UKCsoEBdflkvpuT9Bi2xlvTd')]\n", - "---------- flights_refunder ----------\n", - "Tool calls:\n", - "refund_flight({\"flight_id\":\"507811\"}) = Flight 507811 refunded\n", - "---------- flights_refunder ----------\n", - "[FunctionCall(id='call_MQ2CXR8UhVtjNc6jG3wSQp2W', arguments='{}', name='transfer_to_travel_agent')]\n", - "[Prompt tokens: 303, Completion tokens: 13]\n", - "---------- flights_refunder ----------\n", - "[FunctionExecutionResult(content='Transferred to travel_agent, adopting the role of travel_agent immediately.', call_id='call_MQ2CXR8UhVtjNc6jG3wSQp2W')]\n", - "---------- flights_refunder ----------\n", - "Transferred to travel_agent, adopting the role of travel_agent immediately.\n", - "---------- travel_agent ----------\n", - "Your flight with reference number 507811 has been successfully refunded. If you need anything else, feel free to let me know. Safe travels! TERMINATE\n", - "[Prompt tokens: 272, Completion tokens: 32]\n", - "---------- Summary ----------\n", - "Number of messages: 8\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 841\n", - "Total completion tokens: 63\n", - "Duration: 1.64 seconds\n" - ] - } - ], - "source": [ - "task = \"I need to refund my flight.\"\n", - "\n", - "\n", - "async def run_team_stream() -> None:\n", - " task_result = await Console(team.run_stream(task=task))\n", - " last_message = task_result.messages[-1]\n", - "\n", - " while isinstance(last_message, HandoffMessage) and last_message.target == \"user\":\n", - " user_message = input(\"User: \")\n", - "\n", - " task_result = await Console(\n", - " team.run_stream(task=HandoffMessage(source=\"user\", target=last_message.source, content=user_message))\n", - " )\n", - " last_message = task_result.messages[-1]\n", - "\n", - "\n", - "await run_team_stream()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Stock Research Example" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Stock Research](swarm_stock_research.svg)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This system is designed to perform stock research tasks by leveraging four agents:\n", - "\n", - "- **Planner**: The central coordinator that delegates specific tasks to specialized agents based on their expertise. The planner ensures that each agent is utilized efficiently and oversees the overall workflow.\n", - "- **Financial Analyst**: A specialized agent responsible for analyzing financial metrics and stock data using tools such as `get_stock_data`.\n", - "- **News Analyst**: An agent focused on gathering and summarizing recent news articles relevant to the stock, using tools such as `get_news`.\n", - "- **Writer**: An agent tasked with compiling the findings from the stock and news analysis into a cohesive final report.\n", - "\n", - "#### Workflow\n", - "1. The **Planner** initiates the research process by delegating tasks to the appropriate agents in a step-by-step manner.\n", - "2. Each agent performs its task independently and appends their work to the shared **message thread/history**. Rather than directly returning results to the planner, all agents contribute to and read from this shared message history. When agents generate their work using the LLM, they have access to this shared message history, which provides context and helps track the overall progress of the task.\n", - "3. Once an agent completes its task, it hands off control back to the planner.\n", - "4. The process continues until the planner determines that all necessary tasks have been completed and decides to terminate the workflow." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Tools" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "async def get_stock_data(symbol: str) -> Dict[str, Any]:\n", - " \"\"\"Get stock market data for a given symbol\"\"\"\n", - " return {\"price\": 180.25, \"volume\": 1000000, \"pe_ratio\": 65.4, \"market_cap\": \"700B\"}\n", - "\n", - "\n", - "async def get_news(query: str) -> List[Dict[str, str]]:\n", - " \"\"\"Get recent news articles about a company\"\"\"\n", - " return [\n", - " {\n", - " \"title\": \"Tesla Expands Cybertruck Production\",\n", - " \"date\": \"2024-03-20\",\n", - " \"summary\": \"Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\",\n", - " },\n", - " {\n", - " \"title\": \"Tesla FSD Beta Shows Promise\",\n", - " \"date\": \"2024-03-19\",\n", - " \"summary\": \"Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\",\n", - " },\n", - " {\n", - " \"title\": \"Model Y Dominates Global EV Sales\",\n", - " \"date\": \"2024-03-18\",\n", - " \"summary\": \"Tesla's Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\",\n", - " },\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"YOUR_API_KEY\",\n", - ")\n", - "\n", - "planner = AssistantAgent(\n", - " \"planner\",\n", - " model_client=model_client,\n", - " handoffs=[\"financial_analyst\", \"news_analyst\", \"writer\"],\n", - " system_message=\"\"\"You are a research planning coordinator.\n", - " Coordinate market research by delegating to specialized agents:\n", - " - Financial Analyst: For stock data analysis\n", - " - News Analyst: For news gathering and analysis\n", - " - Writer: For compiling final report\n", - " Always send your plan first, then handoff to appropriate agent.\n", - " Always handoff to a single agent at a time.\n", - " Use TERMINATE when research is complete.\"\"\",\n", - ")\n", - "\n", - "financial_analyst = AssistantAgent(\n", - " \"financial_analyst\",\n", - " model_client=model_client,\n", - " handoffs=[\"planner\"],\n", - " tools=[get_stock_data],\n", - " system_message=\"\"\"You are a financial analyst.\n", - " Analyze stock market data using the get_stock_data tool.\n", - " Provide insights on financial metrics.\n", - " Always handoff back to planner when analysis is complete.\"\"\",\n", - ")\n", - "\n", - "news_analyst = AssistantAgent(\n", - " \"news_analyst\",\n", - " model_client=model_client,\n", - " handoffs=[\"planner\"],\n", - " tools=[get_news],\n", - " system_message=\"\"\"You are a news analyst.\n", - " Gather and analyze relevant news using the get_news tool.\n", - " Summarize key market insights from news.\n", - " Always handoff back to planner when analysis is complete.\"\"\",\n", - ")\n", - "\n", - "writer = AssistantAgent(\n", - " \"writer\",\n", - " model_client=model_client,\n", - " handoffs=[\"planner\"],\n", - " system_message=\"\"\"You are a financial report writer.\n", - " Compile research findings into clear, concise reports.\n", - " Always handoff back to planner when writing is complete.\"\"\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Conduct market research for TSLA stock\n", - "---------- planner ----------\n", - "[FunctionCall(id='call_BX5QaRuhmB8CxTsBlqCUIXPb', arguments='{}', name='transfer_to_financial_analyst')]\n", - "[Prompt tokens: 169, Completion tokens: 166]\n", - "---------- planner ----------\n", - "[FunctionExecutionResult(content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', call_id='call_BX5QaRuhmB8CxTsBlqCUIXPb')]\n", - "---------- planner ----------\n", - "Transferred to financial_analyst, adopting the role of financial_analyst immediately.\n", - "---------- financial_analyst ----------\n", - "[FunctionCall(id='call_SAXy1ebtA9mnaZo4ztpD2xHA', arguments='{\"symbol\":\"TSLA\"}', name='get_stock_data')]\n", - "[Prompt tokens: 136, Completion tokens: 16]\n", - "---------- financial_analyst ----------\n", - "[FunctionExecutionResult(content=\"{'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\", call_id='call_SAXy1ebtA9mnaZo4ztpD2xHA')]\n", - "---------- financial_analyst ----------\n", - "Tool calls:\n", - "get_stock_data({\"symbol\":\"TSLA\"}) = {'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\n", - "---------- financial_analyst ----------\n", - "[FunctionCall(id='call_IsdcFUfBVmtcVzfSuwQpeAwl', arguments='{}', name='transfer_to_planner')]\n", - "[Prompt tokens: 199, Completion tokens: 337]\n", - "---------- financial_analyst ----------\n", - "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_IsdcFUfBVmtcVzfSuwQpeAwl')]\n", - "---------- financial_analyst ----------\n", - "Transferred to planner, adopting the role of planner immediately.\n", - "---------- planner ----------\n", - "[FunctionCall(id='call_tN5goNFahrdcSfKnQqT0RONN', arguments='{}', name='transfer_to_news_analyst')]\n", - "[Prompt tokens: 291, Completion tokens: 14]\n", - "---------- planner ----------\n", - "[FunctionExecutionResult(content='Transferred to news_analyst, adopting the role of news_analyst immediately.', call_id='call_tN5goNFahrdcSfKnQqT0RONN')]\n", - "---------- planner ----------\n", - "Transferred to news_analyst, adopting the role of news_analyst immediately.\n", - "---------- news_analyst ----------\n", - "[FunctionCall(id='call_Owjw6ZbiPdJgNWMHWxhCKgsp', arguments='{\"query\":\"Tesla market news\"}', name='get_news')]\n", - "[Prompt tokens: 235, Completion tokens: 16]\n", - "---------- news_analyst ----------\n", - "[FunctionExecutionResult(content='[{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', call_id='call_Owjw6ZbiPdJgNWMHWxhCKgsp')]\n", - "---------- news_analyst ----------\n", - "Tool calls:\n", - "get_news({\"query\":\"Tesla market news\"}) = [{'title': 'Tesla Expands Cybertruck Production', 'date': '2024-03-20', 'summary': 'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.'}, {'title': 'Tesla FSD Beta Shows Promise', 'date': '2024-03-19', 'summary': 'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.'}, {'title': 'Model Y Dominates Global EV Sales', 'date': '2024-03-18', 'summary': \"Tesla's Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]\n", - "---------- news_analyst ----------\n", - "Here are some of the key market insights regarding Tesla (TSLA):\n", - "\n", - "1. **Expansion in Cybertruck Production**: Tesla has increased its Cybertruck production capacity at the Gigafactory in Texas to meet the high demand. This move might positively impact Tesla's revenues if the demand for the Cybertruck continues to grow.\n", - "\n", - "2. **Advancements in Full Self-Driving (FSD) Technology**: The recent beta release of Tesla's Full Self-Driving software shows significant advancements, particularly in urban navigation and safety. Progress in this area could enhance Tesla's competitive edge in the autonomous driving sector.\n", - "\n", - "3. **Dominance of Model Y in EV Sales**: Tesla's Model Y has become the best-selling electric vehicle globally, capturing a substantial market share. Such strong sales performance reinforces Tesla's leadership in the electric vehicle market.\n", - "\n", - "These developments reflect Tesla's ongoing innovation and ability to capture market demand, which could positively influence its stock performance and market position. \n", - "\n", - "I will now hand off back to the planner.\n", - "[Prompt tokens: 398, Completion tokens: 203]\n", - "---------- news_analyst ----------\n", - "[FunctionCall(id='call_pn7y6PKsBspWA17uOh3AKNMT', arguments='{}', name='transfer_to_planner')]\n", - "[Prompt tokens: 609, Completion tokens: 12]\n", - "---------- news_analyst ----------\n", - "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_pn7y6PKsBspWA17uOh3AKNMT')]\n", - "---------- news_analyst ----------\n", - "Transferred to planner, adopting the role of planner immediately.\n", - "---------- planner ----------\n", - "[FunctionCall(id='call_MmXyWuD2uJT64ZdVI5NfhYdX', arguments='{}', name='transfer_to_writer')]\n", - "[Prompt tokens: 722, Completion tokens: 11]\n", - "---------- planner ----------\n", - "[FunctionExecutionResult(content='Transferred to writer, adopting the role of writer immediately.', call_id='call_MmXyWuD2uJT64ZdVI5NfhYdX')]\n", - "---------- planner ----------\n", - "Transferred to writer, adopting the role of writer immediately.\n", - "---------- writer ----------\n", - "[FunctionCall(id='call_Pdgu39O6GMYplBiB8jp3uyN3', arguments='{}', name='transfer_to_planner')]\n", - "[Prompt tokens: 599, Completion tokens: 323]\n", - "---------- writer ----------\n", - "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_Pdgu39O6GMYplBiB8jp3uyN3')]\n", - "---------- writer ----------\n", - "Transferred to planner, adopting the role of planner immediately.\n", - "---------- planner ----------\n", - "TERMINATE\n", - "[Prompt tokens: 772, Completion tokens: 4]\n", - "---------- Summary ----------\n", - "Number of messages: 27\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 4130\n", - "Total completion tokens: 1102\n", - "Duration: 17.74 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Swarm\n", + "\n", + "{py:class}`~autogen_agentchat.teams.Swarm` implements a team in which agents can hand off \n", + "task to other agents based on their capabilities. \n", + "It is a multi-agent design pattern first introduced by OpenAI in \n", + "[an experimental project](https://github.com/openai/swarm).\n", + "The key idea is to let agent delegate tasks to other agents using a special tool call, while\n", + "all agents share the same message context.\n", + "This enables agents to make local decisions about task planning, rather than\n", + "relying on a central orchestrator such as in {py:class}`~autogen_agentchat.teams.SelectorGroupChat`.\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.Swarm` is a high-level API. If you need more\n", + "control and customization that is not supported by this API, you can take a look\n", + "at the [Handoff Pattern](../../core-user-guide/design-patterns/handoffs.ipynb)\n", + "in the Core API documentation and implement your own version of the Swarm pattern.\n", + "```\n", + "\n", + "## How Does It Work?\n", + "\n", + "At its core, the {py:class}`~autogen_agentchat.teams.Swarm` team is a group chat\n", + "where agents take turn to generate a response. \n", + "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", + "broadcast their responses so all agents share the same mesasge context.\n", + "\n", + "Different from the other two group chat teams, at each turn,\n", + "**the speaker agent is selected based on the most recent\n", + "{py:class}`~autogen_agentchat.messages.HandoffMessage` message in the context.**\n", + "This naturally requires each agent in the team to be able to generate\n", + "{py:class}`~autogen_agentchat.messages.HandoffMessage` to signal\n", + "which other agents that it hands off to.\n", + "\n", + "For {py:class}`~autogen_agentchat.agents.AssistantAgent`, you can set the\n", + "`handoffs` argument to specify which agents it can hand off to. You can\n", + "use {py:class}`~autogen_agentchat.base.Handoff` to customize the message\n", + "content and handoff behavior.\n", + "\n", + "The overall process can be summarized as follows:\n", + "\n", + "1. Each agent has the ability to generate {py:class}`~autogen_agentchat.messages.HandoffMessage`\n", + " to signal which other agents it can hand off to. For {py:class}`~autogen_agentchat.agents.AssistantAgent`, this means setting the `handoffs` argument.\n", + "2. When the team starts on a task, the first speaker agents operate on the task and make locallized decision about whether to hand off and to whom.\n", + "3. When an agent generates a {py:class}`~autogen_agentchat.messages.HandoffMessage`, the receiving agent takes over the task with the same message context.\n", + "4. The process continues until a termination condition is met.\n", + "\n", + "In this section, we will show you two examples of how to use the {py:class}`~autogen_agentchat.teams.Swarm` team:\n", + "\n", + "1. A customer support team with human-in-the-loop handoff.\n", + "2. An automonous team for content generation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Customer Support Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Customer Support](swarm_customer_support.svg)\n", + "\n", + "This system implements a flights refund scenario with two agents:\n", + "\n", + "- **Travel Agent**: Handles general travel and refund coordination.\n", + "- **Flights Refunder**: Specializes in processing flight refunds with the `refund_flight` tool.\n", + "\n", + "Additionally, we let the user interact with the agents, when agents handoff to `\"user\"`.\n", + "\n", + "#### Workflow\n", + "1. The **Travel Agent** initiates the conversation and evaluates the user's request.\n", + "2. Based on the request:\n", + " - For refund-related tasks, the Travel Agent hands off to the **Flights Refunder**.\n", + " - For information needed from the customer, either agent can hand off to the `\"user\"`.\n", + "3. The **Flights Refunder** processes refunds using the `refund_flight` tool when appropriate.\n", + "4. If an agent hands off to the `\"user\"`, the team execution will stop and wait for the user to input a response.\n", + "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandaffMessage`. This message is directed to the agent that originally requested user input.\n", + "6. The process continues until the Travel Agent determines the task is complete and terminates the workflow." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Any, Dict, List\n", + "\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n", + "from autogen_agentchat.messages import HandoffMessage\n", + "from autogen_agentchat.teams import Swarm\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tools" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def refund_flight(flight_id: str) -> str:\n", + " \"\"\"Refund a flight\"\"\"\n", + " return f\"Flight {flight_id} refunded\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agents" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "\n", + "travel_agent = AssistantAgent(\n", + " \"travel_agent\",\n", + " model_client=model_client,\n", + " handoffs=[\"flights_refunder\", \"user\"],\n", + " system_message=\"\"\"You are a travel agent.\n", + " The flights_refunder is in charge of refunding flights.\n", + " If you need information from the user, you must first send your message, then you can handoff to the user.\n", + " Use TERMINATE when the travel planning is complete.\"\"\",\n", + ")\n", + "\n", + "flights_refunder = AssistantAgent(\n", + " \"flights_refunder\",\n", + " model_client=model_client,\n", + " handoffs=[\"travel_agent\", \"user\"],\n", + " tools=[refund_flight],\n", + " system_message=\"\"\"You are an agent specialized in refunding flights.\n", + " You only need flight reference numbers to refund a flight.\n", + " You have the ability to refund a flight using the refund_flight tool.\n", + " If you need information from the user, you must first send your message, then you can handoff to the user.\n", + " When the transaction is complete, handoff to the travel agent to finalize.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "termination = HandoffTermination(target=\"user\") | TextMentionTermination(\"TERMINATE\")\n", + "team = Swarm([travel_agent, flights_refunder], termination_condition=termination)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "I need to refund my flight.\n", + "---------- travel_agent ----------\n", + "[FunctionCall(id='call_ZQ2rGjq4Z29pd0yP2sNcuyd2', arguments='{}', name='transfer_to_flights_refunder')]\n", + "[Prompt tokens: 119, Completion tokens: 14]\n", + "---------- travel_agent ----------\n", + "[FunctionExecutionResult(content='Transferred to flights_refunder, adopting the role of flights_refunder immediately.', call_id='call_ZQ2rGjq4Z29pd0yP2sNcuyd2')]\n", + "---------- travel_agent ----------\n", + "Transferred to flights_refunder, adopting the role of flights_refunder immediately.\n", + "---------- flights_refunder ----------\n", + "Could you please provide me with the flight reference number so I can process the refund for you?\n", + "[Prompt tokens: 191, Completion tokens: 20]\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_1iRfzNpxTJhRTW2ww9aQJ8sK', arguments='{}', name='transfer_to_user')]\n", + "[Prompt tokens: 219, Completion tokens: 11]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Transferred to user, adopting the role of user immediately.', call_id='call_1iRfzNpxTJhRTW2ww9aQJ8sK')]\n", + "---------- flights_refunder ----------\n", + "Transferred to user, adopting the role of user immediately.\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Handoff to user from flights_refunder detected.\n", + "Total prompt tokens: 529\n", + "Total completion tokens: 45\n", + "Duration: 2.05 seconds\n", + "---------- user ----------\n", + "Sure, it's 507811\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_UKCsoEBdflkvpuT9Bi2xlvTd', arguments='{\"flight_id\":\"507811\"}', name='refund_flight')]\n", + "[Prompt tokens: 266, Completion tokens: 18]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Flight 507811 refunded', call_id='call_UKCsoEBdflkvpuT9Bi2xlvTd')]\n", + "---------- flights_refunder ----------\n", + "Tool calls:\n", + "refund_flight({\"flight_id\":\"507811\"}) = Flight 507811 refunded\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_MQ2CXR8UhVtjNc6jG3wSQp2W', arguments='{}', name='transfer_to_travel_agent')]\n", + "[Prompt tokens: 303, Completion tokens: 13]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Transferred to travel_agent, adopting the role of travel_agent immediately.', call_id='call_MQ2CXR8UhVtjNc6jG3wSQp2W')]\n", + "---------- flights_refunder ----------\n", + "Transferred to travel_agent, adopting the role of travel_agent immediately.\n", + "---------- travel_agent ----------\n", + "Your flight with reference number 507811 has been successfully refunded. If you need anything else, feel free to let me know. Safe travels! TERMINATE\n", + "[Prompt tokens: 272, Completion tokens: 32]\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 841\n", + "Total completion tokens: 63\n", + "Duration: 1.64 seconds\n" + ] + } + ], + "source": [ + "task = \"I need to refund my flight.\"\n", + "\n", + "\n", + "async def run_team_stream() -> None:\n", + " task_result = await Console(team.run_stream(task=task))\n", + " last_message = task_result.messages[-1]\n", + "\n", + " while isinstance(last_message, HandoffMessage) and last_message.target == \"user\":\n", + " user_message = input(\"User: \")\n", + "\n", + " task_result = await Console(\n", + " team.run_stream(task=HandoffMessage(source=\"user\", target=last_message.source, content=user_message))\n", + " )\n", + " last_message = task_result.messages[-1]\n", + "\n", + "\n", + "await run_team_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Stock Research Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Stock Research](swarm_stock_research.svg)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This system is designed to perform stock research tasks by leveraging four agents:\n", + "\n", + "- **Planner**: The central coordinator that delegates specific tasks to specialized agents based on their expertise. The planner ensures that each agent is utilized efficiently and oversees the overall workflow.\n", + "- **Financial Analyst**: A specialized agent responsible for analyzing financial metrics and stock data using tools such as `get_stock_data`.\n", + "- **News Analyst**: An agent focused on gathering and summarizing recent news articles relevant to the stock, using tools such as `get_news`.\n", + "- **Writer**: An agent tasked with compiling the findings from the stock and news analysis into a cohesive final report.\n", + "\n", + "#### Workflow\n", + "1. The **Planner** initiates the research process by delegating tasks to the appropriate agents in a step-by-step manner.\n", + "2. Each agent performs its task independently and appends their work to the shared **message thread/history**. Rather than directly returning results to the planner, all agents contribute to and read from this shared message history. When agents generate their work using the LLM, they have access to this shared message history, which provides context and helps track the overall progress of the task.\n", + "3. Once an agent completes its task, it hands off control back to the planner.\n", + "4. The process continues until the planner determines that all necessary tasks have been completed and decides to terminate the workflow." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tools" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "async def get_stock_data(symbol: str) -> Dict[str, Any]:\n", + " \"\"\"Get stock market data for a given symbol\"\"\"\n", + " return {\"price\": 180.25, \"volume\": 1000000, \"pe_ratio\": 65.4, \"market_cap\": \"700B\"}\n", + "\n", + "\n", + "async def get_news(query: str) -> List[Dict[str, str]]:\n", + " \"\"\"Get recent news articles about a company\"\"\"\n", + " return [\n", + " {\n", + " \"title\": \"Tesla Expands Cybertruck Production\",\n", + " \"date\": \"2024-03-20\",\n", + " \"summary\": \"Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\",\n", + " },\n", + " {\n", + " \"title\": \"Tesla FSD Beta Shows Promise\",\n", + " \"date\": \"2024-03-19\",\n", + " \"summary\": \"Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\",\n", + " },\n", + " {\n", + " \"title\": \"Model Y Dominates Global EV Sales\",\n", + " \"date\": \"2024-03-18\",\n", + " \"summary\": \"Tesla's Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\",\n", + " },\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "\n", + "planner = AssistantAgent(\n", + " \"planner\",\n", + " model_client=model_client,\n", + " handoffs=[\"financial_analyst\", \"news_analyst\", \"writer\"],\n", + " system_message=\"\"\"You are a research planning coordinator.\n", + " Coordinate market research by delegating to specialized agents:\n", + " - Financial Analyst: For stock data analysis\n", + " - News Analyst: For news gathering and analysis\n", + " - Writer: For compiling final report\n", + " Always send your plan first, then handoff to appropriate agent.\n", + " Always handoff to a single agent at a time.\n", + " Use TERMINATE when research is complete.\"\"\",\n", + ")\n", + "\n", + "financial_analyst = AssistantAgent(\n", + " \"financial_analyst\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " tools=[get_stock_data],\n", + " system_message=\"\"\"You are a financial analyst.\n", + " Analyze stock market data using the get_stock_data tool.\n", + " Provide insights on financial metrics.\n", + " Always handoff back to planner when analysis is complete.\"\"\",\n", + ")\n", + "\n", + "news_analyst = AssistantAgent(\n", + " \"news_analyst\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " tools=[get_news],\n", + " system_message=\"\"\"You are a news analyst.\n", + " Gather and analyze relevant news using the get_news tool.\n", + " Summarize key market insights from news.\n", + " Always handoff back to planner when analysis is complete.\"\"\",\n", + ")\n", + "\n", + "writer = AssistantAgent(\n", + " \"writer\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " system_message=\"\"\"You are a financial report writer.\n", + " Compile research findings into clear, concise reports.\n", + " Always handoff back to planner when writing is complete.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Conduct market research for TSLA stock\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_BX5QaRuhmB8CxTsBlqCUIXPb', arguments='{}', name='transfer_to_financial_analyst')]\n", + "[Prompt tokens: 169, Completion tokens: 166]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', call_id='call_BX5QaRuhmB8CxTsBlqCUIXPb')]\n", + "---------- planner ----------\n", + "Transferred to financial_analyst, adopting the role of financial_analyst immediately.\n", + "---------- financial_analyst ----------\n", + "[FunctionCall(id='call_SAXy1ebtA9mnaZo4ztpD2xHA', arguments='{\"symbol\":\"TSLA\"}', name='get_stock_data')]\n", + "[Prompt tokens: 136, Completion tokens: 16]\n", + "---------- financial_analyst ----------\n", + "[FunctionExecutionResult(content=\"{'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\", call_id='call_SAXy1ebtA9mnaZo4ztpD2xHA')]\n", + "---------- financial_analyst ----------\n", + "Tool calls:\n", + "get_stock_data({\"symbol\":\"TSLA\"}) = {'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\n", + "---------- financial_analyst ----------\n", + "[FunctionCall(id='call_IsdcFUfBVmtcVzfSuwQpeAwl', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 199, Completion tokens: 337]\n", + "---------- financial_analyst ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_IsdcFUfBVmtcVzfSuwQpeAwl')]\n", + "---------- financial_analyst ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_tN5goNFahrdcSfKnQqT0RONN', arguments='{}', name='transfer_to_news_analyst')]\n", + "[Prompt tokens: 291, Completion tokens: 14]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to news_analyst, adopting the role of news_analyst immediately.', call_id='call_tN5goNFahrdcSfKnQqT0RONN')]\n", + "---------- planner ----------\n", + "Transferred to news_analyst, adopting the role of news_analyst immediately.\n", + "---------- news_analyst ----------\n", + "[FunctionCall(id='call_Owjw6ZbiPdJgNWMHWxhCKgsp', arguments='{\"query\":\"Tesla market news\"}', name='get_news')]\n", + "[Prompt tokens: 235, Completion tokens: 16]\n", + "---------- news_analyst ----------\n", + "[FunctionExecutionResult(content='[{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', call_id='call_Owjw6ZbiPdJgNWMHWxhCKgsp')]\n", + "---------- news_analyst ----------\n", + "Tool calls:\n", + "get_news({\"query\":\"Tesla market news\"}) = [{'title': 'Tesla Expands Cybertruck Production', 'date': '2024-03-20', 'summary': 'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.'}, {'title': 'Tesla FSD Beta Shows Promise', 'date': '2024-03-19', 'summary': 'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.'}, {'title': 'Model Y Dominates Global EV Sales', 'date': '2024-03-18', 'summary': \"Tesla's Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]\n", + "---------- news_analyst ----------\n", + "Here are some of the key market insights regarding Tesla (TSLA):\n", + "\n", + "1. **Expansion in Cybertruck Production**: Tesla has increased its Cybertruck production capacity at the Gigafactory in Texas to meet the high demand. This move might positively impact Tesla's revenues if the demand for the Cybertruck continues to grow.\n", + "\n", + "2. **Advancements in Full Self-Driving (FSD) Technology**: The recent beta release of Tesla's Full Self-Driving software shows significant advancements, particularly in urban navigation and safety. Progress in this area could enhance Tesla's competitive edge in the autonomous driving sector.\n", + "\n", + "3. **Dominance of Model Y in EV Sales**: Tesla's Model Y has become the best-selling electric vehicle globally, capturing a substantial market share. Such strong sales performance reinforces Tesla's leadership in the electric vehicle market.\n", + "\n", + "These developments reflect Tesla's ongoing innovation and ability to capture market demand, which could positively influence its stock performance and market position. \n", + "\n", + "I will now hand off back to the planner.\n", + "[Prompt tokens: 398, Completion tokens: 203]\n", + "---------- news_analyst ----------\n", + "[FunctionCall(id='call_pn7y6PKsBspWA17uOh3AKNMT', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 609, Completion tokens: 12]\n", + "---------- news_analyst ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_pn7y6PKsBspWA17uOh3AKNMT')]\n", + "---------- news_analyst ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_MmXyWuD2uJT64ZdVI5NfhYdX', arguments='{}', name='transfer_to_writer')]\n", + "[Prompt tokens: 722, Completion tokens: 11]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to writer, adopting the role of writer immediately.', call_id='call_MmXyWuD2uJT64ZdVI5NfhYdX')]\n", + "---------- planner ----------\n", + "Transferred to writer, adopting the role of writer immediately.\n", + "---------- writer ----------\n", + "[FunctionCall(id='call_Pdgu39O6GMYplBiB8jp3uyN3', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 599, Completion tokens: 323]\n", + "---------- writer ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_Pdgu39O6GMYplBiB8jp3uyN3')]\n", + "---------- writer ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "TERMINATE\n", + "[Prompt tokens: 772, Completion tokens: 4]\n", + "---------- Summary ----------\n", + "Number of messages: 27\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 4130\n", + "Total completion tokens: 1102\n", + "Duration: 17.74 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Conduct market research for TSLA stock', type='TextMessage'), ToolCallRequestEvent(source='planner', models_usage=RequestUsage(prompt_tokens=169, completion_tokens=166), content=[FunctionCall(id='call_BX5QaRuhmB8CxTsBlqCUIXPb', arguments='{}', name='transfer_to_financial_analyst')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', call_id='call_BX5QaRuhmB8CxTsBlqCUIXPb')], type='ToolCallExecutionEvent'), HandoffMessage(source='planner', models_usage=None, target='financial_analyst', content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', type='HandoffMessage'), ToolCallRequestEvent(source='financial_analyst', models_usage=RequestUsage(prompt_tokens=136, completion_tokens=16), content=[FunctionCall(id='call_SAXy1ebtA9mnaZo4ztpD2xHA', arguments='{\"symbol\":\"TSLA\"}', name='get_stock_data')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='financial_analyst', models_usage=None, content=[FunctionExecutionResult(content=\"{'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\", call_id='call_SAXy1ebtA9mnaZo4ztpD2xHA')], type='ToolCallExecutionEvent'), TextMessage(source='financial_analyst', models_usage=None, content='Tool calls:\\nget_stock_data({\"symbol\":\"TSLA\"}) = {\\'price\\': 180.25, \\'volume\\': 1000000, \\'pe_ratio\\': 65.4, \\'market_cap\\': \\'700B\\'}', type='TextMessage'), ToolCallRequestEvent(source='financial_analyst', models_usage=RequestUsage(prompt_tokens=199, completion_tokens=337), content=[FunctionCall(id='call_IsdcFUfBVmtcVzfSuwQpeAwl', arguments='{}', name='transfer_to_planner')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='financial_analyst', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_IsdcFUfBVmtcVzfSuwQpeAwl')], type='ToolCallExecutionEvent'), HandoffMessage(source='financial_analyst', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), ToolCallRequestEvent(source='planner', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=14), content=[FunctionCall(id='call_tN5goNFahrdcSfKnQqT0RONN', arguments='{}', name='transfer_to_news_analyst')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to news_analyst, adopting the role of news_analyst immediately.', call_id='call_tN5goNFahrdcSfKnQqT0RONN')], type='ToolCallExecutionEvent'), HandoffMessage(source='planner', models_usage=None, target='news_analyst', content='Transferred to news_analyst, adopting the role of news_analyst immediately.', type='HandoffMessage'), ToolCallRequestEvent(source='news_analyst', models_usage=RequestUsage(prompt_tokens=235, completion_tokens=16), content=[FunctionCall(id='call_Owjw6ZbiPdJgNWMHWxhCKgsp', arguments='{\"query\":\"Tesla market news\"}', name='get_news')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='news_analyst', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', call_id='call_Owjw6ZbiPdJgNWMHWxhCKgsp')], type='ToolCallExecutionEvent'), TextMessage(source='news_analyst', models_usage=None, content='Tool calls:\\nget_news({\"query\":\"Tesla market news\"}) = [{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', type='TextMessage'), TextMessage(source='news_analyst', models_usage=RequestUsage(prompt_tokens=398, completion_tokens=203), content=\"Here are some of the key market insights regarding Tesla (TSLA):\\n\\n1. **Expansion in Cybertruck Production**: Tesla has increased its Cybertruck production capacity at the Gigafactory in Texas to meet the high demand. This move might positively impact Tesla's revenues if the demand for the Cybertruck continues to grow.\\n\\n2. **Advancements in Full Self-Driving (FSD) Technology**: The recent beta release of Tesla's Full Self-Driving software shows significant advancements, particularly in urban navigation and safety. Progress in this area could enhance Tesla's competitive edge in the autonomous driving sector.\\n\\n3. **Dominance of Model Y in EV Sales**: Tesla's Model Y has become the best-selling electric vehicle globally, capturing a substantial market share. Such strong sales performance reinforces Tesla's leadership in the electric vehicle market.\\n\\nThese developments reflect Tesla's ongoing innovation and ability to capture market demand, which could positively influence its stock performance and market position. \\n\\nI will now hand off back to the planner.\", type='TextMessage'), ToolCallRequestEvent(source='news_analyst', models_usage=RequestUsage(prompt_tokens=609, completion_tokens=12), content=[FunctionCall(id='call_pn7y6PKsBspWA17uOh3AKNMT', arguments='{}', name='transfer_to_planner')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='news_analyst', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_pn7y6PKsBspWA17uOh3AKNMT')], type='ToolCallExecutionEvent'), HandoffMessage(source='news_analyst', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), ToolCallRequestEvent(source='planner', models_usage=RequestUsage(prompt_tokens=722, completion_tokens=11), content=[FunctionCall(id='call_MmXyWuD2uJT64ZdVI5NfhYdX', arguments='{}', name='transfer_to_writer')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to writer, adopting the role of writer immediately.', call_id='call_MmXyWuD2uJT64ZdVI5NfhYdX')], type='ToolCallExecutionEvent'), HandoffMessage(source='planner', models_usage=None, target='writer', content='Transferred to writer, adopting the role of writer immediately.', type='HandoffMessage'), ToolCallRequestEvent(source='writer', models_usage=RequestUsage(prompt_tokens=599, completion_tokens=323), content=[FunctionCall(id='call_Pdgu39O6GMYplBiB8jp3uyN3', arguments='{}', name='transfer_to_planner')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='writer', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_Pdgu39O6GMYplBiB8jp3uyN3')], type='ToolCallExecutionEvent'), HandoffMessage(source='writer', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), TextMessage(source='planner', models_usage=RequestUsage(prompt_tokens=772, completion_tokens=4), content='TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Define termination condition\n", + "text_termination = TextMentionTermination(\"TERMINATE\")\n", + "termination = text_termination\n", + "\n", + "research_team = Swarm(\n", + " participants=[planner, financial_analyst, news_analyst, writer], termination_condition=termination\n", + ")\n", + "\n", + "task = \"Conduct market research for TSLA stock\"\n", + "await Console(research_team.run_stream(task=task))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Conduct market research for TSLA stock', type='TextMessage'), ToolCallMessage(source='planner', models_usage=RequestUsage(prompt_tokens=169, completion_tokens=166), content=[FunctionCall(id='call_BX5QaRuhmB8CxTsBlqCUIXPb', arguments='{}', name='transfer_to_financial_analyst')], type='ToolCallMessage'), ToolCallResultMessage(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', call_id='call_BX5QaRuhmB8CxTsBlqCUIXPb')], type='ToolCallResultMessage'), HandoffMessage(source='planner', models_usage=None, target='financial_analyst', content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', type='HandoffMessage'), ToolCallMessage(source='financial_analyst', models_usage=RequestUsage(prompt_tokens=136, completion_tokens=16), content=[FunctionCall(id='call_SAXy1ebtA9mnaZo4ztpD2xHA', arguments='{\"symbol\":\"TSLA\"}', name='get_stock_data')], type='ToolCallMessage'), ToolCallResultMessage(source='financial_analyst', models_usage=None, content=[FunctionExecutionResult(content=\"{'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\", call_id='call_SAXy1ebtA9mnaZo4ztpD2xHA')], type='ToolCallResultMessage'), TextMessage(source='financial_analyst', models_usage=None, content='Tool calls:\\nget_stock_data({\"symbol\":\"TSLA\"}) = {\\'price\\': 180.25, \\'volume\\': 1000000, \\'pe_ratio\\': 65.4, \\'market_cap\\': \\'700B\\'}', type='TextMessage'), ToolCallMessage(source='financial_analyst', models_usage=RequestUsage(prompt_tokens=199, completion_tokens=337), content=[FunctionCall(id='call_IsdcFUfBVmtcVzfSuwQpeAwl', arguments='{}', name='transfer_to_planner')], type='ToolCallMessage'), ToolCallResultMessage(source='financial_analyst', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_IsdcFUfBVmtcVzfSuwQpeAwl')], type='ToolCallResultMessage'), HandoffMessage(source='financial_analyst', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), ToolCallMessage(source='planner', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=14), content=[FunctionCall(id='call_tN5goNFahrdcSfKnQqT0RONN', arguments='{}', name='transfer_to_news_analyst')], type='ToolCallMessage'), ToolCallResultMessage(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to news_analyst, adopting the role of news_analyst immediately.', call_id='call_tN5goNFahrdcSfKnQqT0RONN')], type='ToolCallResultMessage'), HandoffMessage(source='planner', models_usage=None, target='news_analyst', content='Transferred to news_analyst, adopting the role of news_analyst immediately.', type='HandoffMessage'), ToolCallMessage(source='news_analyst', models_usage=RequestUsage(prompt_tokens=235, completion_tokens=16), content=[FunctionCall(id='call_Owjw6ZbiPdJgNWMHWxhCKgsp', arguments='{\"query\":\"Tesla market news\"}', name='get_news')], type='ToolCallMessage'), ToolCallResultMessage(source='news_analyst', models_usage=None, content=[FunctionExecutionResult(content='[{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', call_id='call_Owjw6ZbiPdJgNWMHWxhCKgsp')], type='ToolCallResultMessage'), TextMessage(source='news_analyst', models_usage=None, content='Tool calls:\\nget_news({\"query\":\"Tesla market news\"}) = [{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', type='TextMessage'), TextMessage(source='news_analyst', models_usage=RequestUsage(prompt_tokens=398, completion_tokens=203), content=\"Here are some of the key market insights regarding Tesla (TSLA):\\n\\n1. **Expansion in Cybertruck Production**: Tesla has increased its Cybertruck production capacity at the Gigafactory in Texas to meet the high demand. This move might positively impact Tesla's revenues if the demand for the Cybertruck continues to grow.\\n\\n2. **Advancements in Full Self-Driving (FSD) Technology**: The recent beta release of Tesla's Full Self-Driving software shows significant advancements, particularly in urban navigation and safety. Progress in this area could enhance Tesla's competitive edge in the autonomous driving sector.\\n\\n3. **Dominance of Model Y in EV Sales**: Tesla's Model Y has become the best-selling electric vehicle globally, capturing a substantial market share. Such strong sales performance reinforces Tesla's leadership in the electric vehicle market.\\n\\nThese developments reflect Tesla's ongoing innovation and ability to capture market demand, which could positively influence its stock performance and market position. \\n\\nI will now hand off back to the planner.\", type='TextMessage'), ToolCallMessage(source='news_analyst', models_usage=RequestUsage(prompt_tokens=609, completion_tokens=12), content=[FunctionCall(id='call_pn7y6PKsBspWA17uOh3AKNMT', arguments='{}', name='transfer_to_planner')], type='ToolCallMessage'), ToolCallResultMessage(source='news_analyst', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_pn7y6PKsBspWA17uOh3AKNMT')], type='ToolCallResultMessage'), HandoffMessage(source='news_analyst', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), ToolCallMessage(source='planner', models_usage=RequestUsage(prompt_tokens=722, completion_tokens=11), content=[FunctionCall(id='call_MmXyWuD2uJT64ZdVI5NfhYdX', arguments='{}', name='transfer_to_writer')], type='ToolCallMessage'), ToolCallResultMessage(source='planner', models_usage=None, content=[FunctionExecutionResult(content='Transferred to writer, adopting the role of writer immediately.', call_id='call_MmXyWuD2uJT64ZdVI5NfhYdX')], type='ToolCallResultMessage'), HandoffMessage(source='planner', models_usage=None, target='writer', content='Transferred to writer, adopting the role of writer immediately.', type='HandoffMessage'), ToolCallMessage(source='writer', models_usage=RequestUsage(prompt_tokens=599, completion_tokens=323), content=[FunctionCall(id='call_Pdgu39O6GMYplBiB8jp3uyN3', arguments='{}', name='transfer_to_planner')], type='ToolCallMessage'), ToolCallResultMessage(source='writer', models_usage=None, content=[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_Pdgu39O6GMYplBiB8jp3uyN3')], type='ToolCallResultMessage'), HandoffMessage(source='writer', models_usage=None, target='planner', content='Transferred to planner, adopting the role of planner immediately.', type='HandoffMessage'), TextMessage(source='planner', models_usage=RequestUsage(prompt_tokens=772, completion_tokens=4), content='TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Define termination condition\n", - "text_termination = TextMentionTermination(\"TERMINATE\")\n", - "termination = text_termination\n", - "\n", - "research_team = Swarm(\n", - " participants=[planner, financial_analyst, news_analyst, writer], termination_condition=termination\n", - ")\n", - "\n", - "task = \"Conduct market research for TSLA stock\"\n", - "await Console(research_team.run_stream(task=task))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index a97e521f8..b62fbbe6e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -1,764 +1,764 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Teams\n", - "\n", - "In this section you'll learn how to create a _multi-agent team_ (or simply team) using AutoGen. A team is a group of agents that work together to achieve a common goal.\n", - "\n", - "We'll first show you how to create and run a team. We'll then explain how to observe the team's behavior, which is crucial for debugging and understanding the team's performance, and common operations to control the team's behavior.\n", - "\n", - "We'll start by focusing on a simple team with consisting of a single agent (the baseline case) and use a round robin strategy to select the agent to act. We'll then show how to create a team with multiple agents and how to implement a more sophisticated strategy to select the agent to act." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating a Team\n", - "\n", - "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` is a simple yet effective team configuration where all agents share the same context and take turns responding in a round-robin fashion. Each agent, during its turn, broadcasts its response to all other agents, ensuring that the entire team maintains a consistent context.\n", - "\n", - "We will begin by creating a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` and a {py:class}`~autogen_agentchat.conditions.TextMentionTermination` condition that stops the team when a specific word is detected in the agent's response.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "# Create an OpenAI model client.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "\n", - "# Define a tool that gets the weather for a city.\n", - "async def get_weather(city: str) -> str:\n", - " \"\"\"Get the weather for a city.\"\"\"\n", - " return f\"The weather in {city} is 72 degrees and Sunny.\"\n", - "\n", - "\n", - "# Create an assistant agent.\n", - "weather_agent = AssistantAgent(\n", - " \"assistant\",\n", - " model_client=model_client,\n", - " tools=[get_weather],\n", - " system_message=\"Respond 'TERMINATE' when task is complete.\",\n", - ")\n", - "\n", - "# Define a termination condition.\n", - "text_termination = TextMentionTermination(\"TERMINATE\")\n", - "\n", - "# Create a single-agent team.\n", - "single_agent_team = RoundRobinGroupChat([weather_agent], termination_condition=text_termination)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running a Team\n", - "\n", - "Let's calls the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n", - "to start the team with a task." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=15), content=[FunctionCall(id='call_6qWxrK1VdEVSryXKyIVpwE0h', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 72 degrees and Sunny.', call_id='call_6qWxrK1VdEVSryXKyIVpwE0h')]), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=96, completion_tokens=13), content='The weather in New York is 72 degrees and sunny.'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=125, completion_tokens=4), content='TERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")\n" - ] - } - ], - "source": [ - "async def run_team() -> None:\n", - " result = await single_agent_team.run(task=\"What is the weather in New York?\")\n", - " print(result)\n", - "\n", - "\n", - "# Use `asyncio.run(run_team())` when running in a script.\n", - "await run_team()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The team ran the same agent until the termination condition was met.\n", - "In this case, the termination condition was met when the word \"TERMINATE\" is detected in the\n", - "agent's response.\n", - "When the team stops, it returns a {py:class}`~autogen_agentchat.base.TaskResult` object with all the messages produced by the agents in the team." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Observability\n", - "\n", - "Similar to the agent's {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method, you can stream the team's messages by calling the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream` method. This method returns a generator that yields messages produced by the agents in the team as they are generated, with the final item being the task result.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='What is the weather in New York?'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=70, completion_tokens=15) content=[FunctionCall(id='call_FcJlzgDmPUgGSKLjeGajFZ7V', arguments='{\"city\":\"New York\"}', name='get_weather')]\n", - "source='assistant' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 72 degrees and Sunny.', call_id='call_FcJlzgDmPUgGSKLjeGajFZ7V')]\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=96, completion_tokens=14) content='The weather in New York is currently 72 degrees and sunny.'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=126, completion_tokens=4) content='TERMINATE'\n", - "Stop Reason: Text 'TERMINATE' mentioned\n" - ] - } - ], - "source": [ - "from autogen_agentchat.base import TaskResult\n", - "\n", - "\n", - "async def run_team_stream() -> None:\n", - " async for message in single_agent_team.run_stream(task=\"What is the weather in New York?\"):\n", - " if isinstance(message, TaskResult):\n", - " print(\"Stop Reason:\", message.stop_reason)\n", - " else:\n", - " print(message)\n", - "\n", - "\n", - "# Use `asyncio.run(run_team_stream())` when running in a script.\n", - "await run_team_stream()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As demonstrated in the example above, you can determine the reason why the team stopped by checking the {py:attr}`~autogen_agentchat.base.TaskResult.stop_reason` attribute.\n", - "\n", - "The {py:meth}`~autogen_agentchat.ui.Console` method provides a convenient way to print messages to the console with proper formatting.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What is the weather in Seattle?\n", - "---------- assistant ----------\n", - "[FunctionCall(id='call_QBqpeKQlczRYIlCIzKh43Kha', arguments='{\"city\":\"Seattle\"}', name='get_weather')]\n", - "[Prompt tokens: 69, Completion tokens: 14]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='The weather in Seattle is 72 degrees and Sunny.', call_id='call_QBqpeKQlczRYIlCIzKh43Kha')]\n", - "---------- assistant ----------\n", - "The weather in Seattle is currently 72 degrees and sunny.\n", - "[Prompt tokens: 93, Completion tokens: 13]\n", - "---------- assistant ----------\n", - "TERMINATE\n", - "[Prompt tokens: 122, Completion tokens: 4]\n", - "---------- Summary ----------\n", - "Number of messages: 5\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 284\n", - "Total completion tokens: 31\n", - "Duration: 1.82 seconds\n" - ] - } - ], - "source": [ - "from autogen_agentchat.ui import Console\n", - "\n", - "# Use `asyncio.run(single_agent_team.reset())` when running in a script.\n", - "await single_agent_team.reset() # Reset the team for the next run.\n", - "# Use `asyncio.run(single_agent_team.run_stream(task=\"What is the weather in Seattle?\"))` when running in a script.\n", - "await Console(\n", - " single_agent_team.run_stream(task=\"What is the weather in Seattle?\")\n", - ") # Stream the messages to the console." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Resetting a Team\n", - "\n", - "You can reset the team by calling the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset` method. This method will clear the team's state, including all agents." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "await single_agent_team.reset() # Reset the team for the next run." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It is usually a good idea to reset the team if the next task is not related to the previous task.\n", - "However, if the next task is related to the previous task, you don't need to reset and you can instead resume. We will cover this in the next section." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Team Usage Guide\n", - "\n", - "We will now implement a slightly more complex team with multiple agents and learn how to resume the team after stopping it and even involve the user in the conversation.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Reflection Pattern\n", - "\n", - "We will now create a team with two agents that implement the _reflection_ pattern, a multi-agent design pattern where a critic agent evaluates the responses of a primary agent.\n", - "\n", - "Learn more about the reflection pattern using the [Core API](../../core-user-guide/design-patterns/reflection.ipynb).\n", - "\n", - "In this example, we will use the {py:class}`~autogen_agentchat.agents.AssistantAgent` class for both the primary and critic agents. We will combine the {py:class}`~autogen_agentchat.conditions.TextMentionTermination` and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` conditions to stop the team.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "# Create an OpenAI model client.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent.\n", - "critic_agent = AssistantAgent(\n", - " \"critic\",\n", - " model_client=model_client,\n", - " system_message=\"Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")\n", - "\n", - "# Define a termination condition that stops the task if the critic approves.\n", - "text_termination = TextMentionTermination(\"APPROVE\")\n", - "# Define a termination condition that stops the task after 5 messages.\n", - "max_message_termination = MaxMessageTermination(5)\n", - "# Combine the termination conditions using the `|`` operator so that the\n", - "# task stops when either condition is met.\n", - "termination = text_termination | max_message_termination\n", - "\n", - "# Create a team with the primary and critic agents.\n", - "reflection_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=termination)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's give a poem-writing task to the team and see how the agents interact with each other." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a short poem about fall season.\n", - "---------- primary ----------\n", - "Golden leaves dance on the breeze, \n", - "Whispering secrets through the trees. \n", - "Crisp air nips at cheeks so bright, \n", - "As daylight fades to early night. \n", - "\n", - "Pumpkins sit on porches grand, \n", - "In a painted, harvest land. \n", - "Sweaters hug us, warm and snug, \n", - "While cider fills each steamy mug. \n", - "\n", - "In this season's gentle sway, \n", - "Nature tells of time's ballet. \n", - "With each leaf's descent and flight, \n", - "Autumn sings its soft goodnight. \n", - "[Prompt tokens: 27, Completion tokens: 107]\n", - "---------- critic ----------\n", - "Your poem is beautiful and elegantly captures the essence of the fall season. The imagery you used creates vivid pictures of autumn landscapes and activities, making it easy for the reader to visualize the scene. The rhyming and rhythm contribute to the poem's musicality, enhancing its appeal. Each stanza highlights different aspects of fall, creating a well-rounded depiction of the season.\n", - "\n", - "To make the poem even more evocative, consider including a few additional sensory details or emotions tied to the season. For instance, you might evoke the sounds of rustling leaves or the feeling of warmth from a fireplace. Overall, it's a delightful and charming poem that effectively conveys the spirit of fall.\n", - "\n", - "If these suggestions are considered, please share the revised poem for additional feedback!\n", - "[Prompt tokens: 152, Completion tokens: 148]\n", - "---------- primary ----------\n", - "Thank you for the thoughtful feedback! Here's a revised version of the poem, incorporating more sensory details and emotions:\n", - "\n", - "---\n", - "\n", - "Golden leaves dance on the breeze, \n", - "Whispering secrets through the trees. \n", - "Crisp air kisses cheeks aglow, \n", - "As twilight casts a gentle show. \n", - "\n", - "Pumpkins guard each porch with pride, \n", - "In this painted, harvest tide. \n", - "Sweaters hug us, warm and snug, \n", - "While cider steams in every mug. \n", - "\n", - "Children laugh in rustling leaves, \n", - "As branches weave autumnal eaves. \n", - "Fireplaces crackle, whisper warmth, \n", - "Embracing hearts in homey charms. \n", - "\n", - "In this season's tender sway, \n", - "Nature turns in grand ballet. \n", - "With each leaf's descent and flight, \n", - "Autumn sings its soft goodnight. \n", - "\n", - "---\n", - "\n", - "I hope this version resonates even more deeply with the spirit of fall.\n", - "[Prompt tokens: 294, Completion tokens: 178]\n", - "---------- critic ----------\n", - "Your revised poem beautifully captures the essence of the fall season with delightful sensory details and emotions. The inclusion of words like \"twilight casts a gentle show,\" \"children laugh in rustling leaves,\" and \"fireplaces crackle\" adds depth and paints a vivid picture of autumn scenes. The addition of emotions, particularly the \"embracing hearts in homey charms,\" evokes a sense of warmth and comfort associated with this season.\n", - "\n", - "The poem flows smoothly with its rhythmic quality and maintains a harmonious balance in its description of autumn. Overall, it now provides an even richer and more immersive experience for the reader. Excellent work on enhancing the sensory experience—this version resonates wonderfully with the spirit of fall. \n", - "\n", - "APPROVE\n", - "[Prompt tokens: 490, Completion tokens: 142]\n", - "---------- Summary ----------\n", - "Number of messages: 5\n", - "Finish reason: Text 'APPROVE' mentioned, Maximum number of messages 5 reached, current message count: 5\n", - "Total prompt tokens: 963\n", - "Total completion tokens: 575\n", - "Duration: 8.10 seconds\n" - ] - } - ], - "source": [ - "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"Write a short poem about fall season.\")))` when running in a script.\n", - "await Console(\n", - " reflection_team.run_stream(task=\"Write a short poem about fall season.\")\n", - ") # Stream the messages to the console." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Resuming a Team\n", - "\n", - "Let's run the team again with a new task while keeping the context about the previous task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "将这首诗用中文唐诗风格写一遍。\n", - "---------- primary ----------\n", - "金叶飘舞随风起, \n", - "林间低语秋声细。 \n", - "凉风轻拂面颊红, \n", - "夕光渐隐天色丽。 \n", - "\n", - "南瓜门前静自笑, \n", - "丰收景色绘秋貌。 \n", - "暖衣贴身享温柔, \n", - "热饮氤氲杯中绕。 \n", - "\n", - "童声笑逐落叶中, \n", - "枝叶缀出秋帷浓。 \n", - "炉火轻鸣诉温情, \n", - "温馨满怀思乡容。 \n", - "\n", - "此时佳季自徘徊, \n", - "秋之舞步若梦来。 \n", - "片片叶落随风去, \n", - "秋声袅袅道安睡。 \n", - "[Prompt tokens: 664, Completion tokens: 155]\n", - "---------- critic ----------\n", - "这首诗成功地以唐诗的风格捕捉了秋天的精髓,以古雅的语言和流畅的节奏展示了秋日的美丽。你运用了优美的意象,如“金叶飘舞”和“夕光渐隐”,将秋季的景色描绘得栩栩如生。词句简炼而意境深远,充满了诗意。\n", - "\n", - "同时,诗中融入的情感,如“温馨满怀思乡容”以及“炉火轻鸣诉温情”,有效传达了秋天的暖意与思乡之情,令人感到温暖和亲切。\n", - "\n", - "整体而言,这是一首极具唐诗魅力的作品,成功地展现了秋天的许多层面,并引发读者的共鸣。恭喜你完成了这次优雅的改编!\n", - "\n", - "APPROVE\n", - "[Prompt tokens: 837, Completion tokens: 199]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'APPROVE' mentioned\n", - "Total prompt tokens: 1501\n", - "Total completion tokens: 354\n", - "Duration: 4.44 seconds\n" - ] - } - ], - "source": [ - "# Write the poem in Chinese Tang poetry style.\n", - "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"将这首诗用中文唐诗风格写一遍。\")))` when running in a script.\n", - "await Console(reflection_team.run_stream(task=\"将这首诗用中文唐诗风格写一遍。\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Resume with another task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write the poem in Spanish.\n", - "---------- primary ----------\n", - "Certainly! Here's a translation of the poem into Spanish:\n", - "\n", - "---\n", - "\n", - "Hojas doradas bailan en la brisa, \n", - "Susurran secretos en la arboleda. \n", - "El aire fresco besa las mejillas, \n", - "Mientras el crepúsculo se despide a su manera. \n", - "\n", - "Calabazas vigilan cada entrada, \n", - "En esta tierra de cosecha pintada. \n", - "Los suéteres nos abrazan calurosos, \n", - "Mientras el vapor sube de cada taza amorosa. \n", - "\n", - "Risas de niños suenan en hojas caídas, \n", - "Mientras ramas tejen toldos de caricias. \n", - "Las chimeneas crepitan, susurran calor, \n", - "Abrazando los corazones con su amor hogareño. \n", - "\n", - "En la danza de esta estación serena, \n", - "La naturaleza gira con su escena. \n", - "Con cada hoja que desciende en el viento, \n", - "El otoño canta su suave cuento. \n", - "\n", - "---\n", - "\n", - "Espero que esta traducción refleje el mismo espíritu y encantamiento del poema original.\n", - "[Prompt tokens: 1719, Completion tokens: 209]\n", - "---------- critic ----------\n", - "Your translation of the poem into Spanish beautifully captures the essence and lyrical quality of the original. The imagery and emotions are conveyed effectively, maintaining the warmth and serene atmosphere of fall that the poem embodies. Each stanza mirrors the themes presented in the English version, like the golden leaves, harvest, and cozy reflections of autumn.\n", - "\n", - "Overall, your translation is both poetic and faithful to the original content. If you have further adjustments or specific stylistic preferences, feel free to share. Great job on this translation!\n", - "\n", - "APPROVE\n", - "[Prompt tokens: 1946, Completion tokens: 102]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'APPROVE' mentioned\n", - "Total prompt tokens: 3665\n", - "Total completion tokens: 311\n", - "Duration: 4.22 seconds\n" - ] - } - ], - "source": [ - "# Write the poem in Spanish.\n", - "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"Write the poem in Spanish.\")))` when running in a script.\n", - "await Console(reflection_team.run_stream(task=\"Write the poem in Spanish.\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Resuming a Previous Task\n", - "\n", - "We can call {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream` methods\n", - "without setting the `task` again to resume the previous task. The team will continue from where it left off." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- primary ----------\n", - "Thank you for your kind words! I'm glad the translation captures the essence and lyrical quality of the original poem. If you have any more requests or need further assistance, please feel free to let me know. I'm here to help!\n", - "[Prompt tokens: 2042, Completion tokens: 46]\n", - "---------- critic ----------\n", - "You're very welcome! I'm glad to hear that you're satisfied with the translation. If you have any more requests, whether it's poetry, translation, or any other kind of assistance, don't hesitate to reach out. I'm here to help with whatever you need. Enjoy the rest of your creative journey!\n", - "[Prompt tokens: 2106, Completion tokens: 58]\n", - "---------- primary ----------\n", - "Thank you for your encouraging words! I'm here to assist you with any other requests you might have, whether related to creativity, translation, or any other topic. Feel free to reach out whenever you need help or inspiration. Enjoy your journey in creativity!\n", - "[Prompt tokens: 2158, Completion tokens: 50]\n", - "---------- critic ----------\n", - "You're welcome! It's always a pleasure to assist you. If you ever have more questions or need inspiration, don't hesitate to reach out. Happy creating, and enjoy every moment of your creative journey!\n", - "[Prompt tokens: 2226, Completion tokens: 39]\n", - "---------- primary ----------\n", - "Thank you so much! Your encouragement is greatly appreciated. Feel free to reach out at any time for assistance or inspiration. I wish you the best on your creative journey and hope you enjoy every step of the way!\n", - "[Prompt tokens: 2259, Completion tokens: 43]\n", - "---------- Summary ----------\n", - "Number of messages: 5\n", - "Finish reason: Maximum number of messages 5 reached, current message count: 5\n", - "Total prompt tokens: 10791\n", - "Total completion tokens: 236\n", - "Duration: 5.00 seconds\n" - ] - } - ], - "source": [ - "# Use the `asyncio.run(Console(reflection_team.run_stream()))` when running in a script.\n", - "await Console(reflection_team.run_stream())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Pausing for User Input\n", - "\n", - "Sometimes, teams may require additional input from the application (e.g., the user) to continue making meaningful progress on a task. Here are two ways to achieve this:\n", - "\n", - "- Set the maximum number of turns so that the team stops after the specified number of turns.\n", - "- Use the {py:class}`~autogen_agentchat.conditions.HandoffTermination` termination condition.\n", - "\n", - "You can also create custom termination conditions. For more details, see [Termination Conditions](./termination.ipynb).\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Via Max Turns\n", - "\n", - "This method allows you to pause the team for user input by setting a maximum number of turns. For instance, you can configure the team to stop after the first agent responds by setting `max_turns` to 1. This is particularly useful in scenarios where continuous user engagement is required, such as in a chatbot.\n", - "\n", - "To implement this, set the `max_turns` parameter in the {py:meth}`~autogen_agentchat.teams.RoundRobinGroupChat` constructor.\n", - "\n", - "```python\n", - "team = RoundRobinGroupChat([...], max_turns=1)\n", - "```\n", - "\n", - "Once the team stops, the turn count will be reset. When you resume the team,\n", - "it will start from 0 again.\n", - "\n", - "Note that `max_turn` is specific to the team class and is currently only supported by\n", - "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, {py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`.\n", - "When used with termination conditions, the team will stop when either condition is met." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Via Handoff\n", - "\n", - "You can use the {py:class}`~autogen_agentchat.conditions.HandoffTermination` termination condition\n", - "to stop the team when an agent sends a {py:class}`~autogen_agentchat.messages.HandoffMessage` message.\n", - "\n", - "Let's create a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` agent\n", - "with a handoff setting.\n", - "\n", - "```{note}\n", - "The model used with {py:class}`~autogen_agentchat.agents.AssistantAgent` must support tool call\n", - "to use the handoff feature.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.base import Handoff\n", - "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "# Create an OpenAI model client.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create a lazy assistant agent that always hands off to the user.\n", - "lazy_agent = AssistantAgent(\n", - " \"lazy_assistant\",\n", - " model_client=model_client,\n", - " handoffs=[Handoff(target=\"user\", message=\"Transfer to user.\")],\n", - " system_message=\"Always transfer to user when you don't know the answer. Respond 'TERMINATE' when task is complete.\",\n", - ")\n", - "\n", - "# Define a termination condition that checks for handoff message targetting helper and text \"TERMINATE\".\n", - "handoff_termination = HandoffTermination(target=\"user\")\n", - "text_termination = TextMentionTermination(\"TERMINATE\")\n", - "termination = handoff_termination | text_termination\n", - "\n", - "# Create a single-agent team.\n", - "lazy_agent_team = RoundRobinGroupChat([lazy_agent], termination_condition=termination)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's run the team with a task that requires additional input from the user\n", - "because the agent doesn't have relevant tools to continue processing the task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What is the weather in New York?\n", - "---------- lazy_assistant ----------\n", - "[FunctionCall(id='call_YHm4KPjFIWZE95YrJWlJwcv4', arguments='{}', name='transfer_to_user')]\n", - "[Prompt tokens: 68, Completion tokens: 11]\n", - "---------- lazy_assistant ----------\n", - "[FunctionExecutionResult(content='Transfer to user.', call_id='call_YHm4KPjFIWZE95YrJWlJwcv4')]\n", - "---------- lazy_assistant ----------\n", - "Transfer to user.\n", - "---------- Summary ----------\n", - "Number of messages: 4\n", - "Finish reason: Handoff to user from lazy_assistant detected.\n", - "Total prompt tokens: 68\n", - "Total completion tokens: 11\n", - "Duration: 0.73 seconds\n" - ] - } - ], - "source": [ - "from autogen_agentchat.ui import Console\n", - "\n", - "# Use `asyncio.run(Console(lazy_agent_team.run_stream(task=\"What is the weather in New York?\")))` when running in a script.\n", - "await Console(lazy_agent_team.run_stream(task=\"What is the weather in New York?\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see the team stopped due to the handoff message was detected.\n", - "Let's continue the team by providing the information the agent needs." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "It is raining in New York.\n", - "---------- lazy_assistant ----------\n", - "I hope you stay dry! Is there anything else you would like to know or do?\n", - "[Prompt tokens: 108, Completion tokens: 19]\n", - "---------- lazy_assistant ----------\n", - "TERMINATE\n", - "[Prompt tokens: 134, Completion tokens: 4]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'TERMINATE' mentioned\n", - "Total prompt tokens: 242\n", - "Total completion tokens: 23\n", - "Duration: 6.77 seconds\n" - ] - } - ], - "source": [ - "# Use `asyncio.run(Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\")))` when running in a script.\n", - "await Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\"))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Teams\n", + "\n", + "In this section you'll learn how to create a _multi-agent team_ (or simply team) using AutoGen. A team is a group of agents that work together to achieve a common goal.\n", + "\n", + "We'll first show you how to create and run a team. We'll then explain how to observe the team's behavior, which is crucial for debugging and understanding the team's performance, and common operations to control the team's behavior.\n", + "\n", + "We'll start by focusing on a simple team with consisting of a single agent (the baseline case) and use a round robin strategy to select the agent to act. We'll then show how to create a team with multiple agents and how to implement a more sophisticated strategy to select the agent to act." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating a Team\n", + "\n", + "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` is a simple yet effective team configuration where all agents share the same context and take turns responding in a round-robin fashion. Each agent, during its turn, broadcasts its response to all other agents, ensuring that the entire team maintains a consistent context.\n", + "\n", + "We will begin by creating a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` and a {py:class}`~autogen_agentchat.conditions.TextMentionTermination` condition that stops the team when a specific word is detected in the agent's response.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "# Create an OpenAI model client.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o-2024-08-06\",\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "\n", + "# Define a tool that gets the weather for a city.\n", + "async def get_weather(city: str) -> str:\n", + " \"\"\"Get the weather for a city.\"\"\"\n", + " return f\"The weather in {city} is 72 degrees and Sunny.\"\n", + "\n", + "\n", + "# Create an assistant agent.\n", + "weather_agent = AssistantAgent(\n", + " \"assistant\",\n", + " model_client=model_client,\n", + " tools=[get_weather],\n", + " system_message=\"Respond 'TERMINATE' when task is complete.\",\n", + ")\n", + "\n", + "# Define a termination condition.\n", + "text_termination = TextMentionTermination(\"TERMINATE\")\n", + "\n", + "# Create a single-agent team.\n", + "single_agent_team = RoundRobinGroupChat([weather_agent], termination_condition=text_termination)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running a Team\n", + "\n", + "Let's calls the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n", + "to start the team with a task." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=15), content=[FunctionCall(id='call_6qWxrK1VdEVSryXKyIVpwE0h', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 72 degrees and Sunny.', call_id='call_6qWxrK1VdEVSryXKyIVpwE0h')]), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=96, completion_tokens=13), content='The weather in New York is 72 degrees and sunny.'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=125, completion_tokens=4), content='TERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")\n" + ] + } + ], + "source": [ + "async def run_team() -> None:\n", + " result = await single_agent_team.run(task=\"What is the weather in New York?\")\n", + " print(result)\n", + "\n", + "\n", + "# Use `asyncio.run(run_team())` when running in a script.\n", + "await run_team()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The team ran the same agent until the termination condition was met.\n", + "In this case, the termination condition was met when the word \"TERMINATE\" is detected in the\n", + "agent's response.\n", + "When the team stops, it returns a {py:class}`~autogen_agentchat.base.TaskResult` object with all the messages produced by the agents in the team." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Observability\n", + "\n", + "Similar to the agent's {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method, you can stream the team's messages by calling the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream` method. This method returns a generator that yields messages produced by the agents in the team as they are generated, with the final item being the task result.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='What is the weather in New York?'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=70, completion_tokens=15) content=[FunctionCall(id='call_FcJlzgDmPUgGSKLjeGajFZ7V', arguments='{\"city\":\"New York\"}', name='get_weather')]\n", + "source='assistant' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 72 degrees and Sunny.', call_id='call_FcJlzgDmPUgGSKLjeGajFZ7V')]\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=96, completion_tokens=14) content='The weather in New York is currently 72 degrees and sunny.'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=126, completion_tokens=4) content='TERMINATE'\n", + "Stop Reason: Text 'TERMINATE' mentioned\n" + ] + } + ], + "source": [ + "from autogen_agentchat.base import TaskResult\n", + "\n", + "\n", + "async def run_team_stream() -> None:\n", + " async for message in single_agent_team.run_stream(task=\"What is the weather in New York?\"):\n", + " if isinstance(message, TaskResult):\n", + " print(\"Stop Reason:\", message.stop_reason)\n", + " else:\n", + " print(message)\n", + "\n", + "\n", + "# Use `asyncio.run(run_team_stream())` when running in a script.\n", + "await run_team_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As demonstrated in the example above, you can determine the reason why the team stopped by checking the {py:attr}`~autogen_agentchat.base.TaskResult.stop_reason` attribute.\n", + "\n", + "The {py:meth}`~autogen_agentchat.ui.Console` method provides a convenient way to print messages to the console with proper formatting.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What is the weather in Seattle?\n", + "---------- assistant ----------\n", + "[FunctionCall(id='call_QBqpeKQlczRYIlCIzKh43Kha', arguments='{\"city\":\"Seattle\"}', name='get_weather')]\n", + "[Prompt tokens: 69, Completion tokens: 14]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='The weather in Seattle is 72 degrees and Sunny.', call_id='call_QBqpeKQlczRYIlCIzKh43Kha')]\n", + "---------- assistant ----------\n", + "The weather in Seattle is currently 72 degrees and sunny.\n", + "[Prompt tokens: 93, Completion tokens: 13]\n", + "---------- assistant ----------\n", + "TERMINATE\n", + "[Prompt tokens: 122, Completion tokens: 4]\n", + "---------- Summary ----------\n", + "Number of messages: 5\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 284\n", + "Total completion tokens: 31\n", + "Duration: 1.82 seconds\n" + ] + } + ], + "source": [ + "from autogen_agentchat.ui import Console\n", + "\n", + "# Use `asyncio.run(single_agent_team.reset())` when running in a script.\n", + "await single_agent_team.reset() # Reset the team for the next run.\n", + "# Use `asyncio.run(single_agent_team.run_stream(task=\"What is the weather in Seattle?\"))` when running in a script.\n", + "await Console(\n", + " single_agent_team.run_stream(task=\"What is the weather in Seattle?\")\n", + ") # Stream the messages to the console." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Resetting a Team\n", + "\n", + "You can reset the team by calling the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset` method. This method will clear the team's state, including all agents." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "await single_agent_team.reset() # Reset the team for the next run." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It is usually a good idea to reset the team if the next task is not related to the previous task.\n", + "However, if the next task is related to the previous task, you don't need to reset and you can instead resume. We will cover this in the next section." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Team Usage Guide\n", + "\n", + "We will now implement a slightly more complex team with multiple agents and learn how to resume the team after stopping it and even involve the user in the conversation.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reflection Pattern\n", + "\n", + "We will now create a team with two agents that implement the _reflection_ pattern, a multi-agent design pattern where a critic agent evaluates the responses of a primary agent.\n", + "\n", + "Learn more about the reflection pattern using the [Core API](../../core-user-guide/design-patterns/reflection.ipynb).\n", + "\n", + "In this example, we will use the {py:class}`~autogen_agentchat.agents.AssistantAgent` class for both the primary and critic agents. We will combine the {py:class}`~autogen_agentchat.conditions.TextMentionTermination` and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` conditions to stop the team.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "# Create an OpenAI model client.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o-2024-08-06\",\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", + " model_client=model_client,\n", + " system_message=\"Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", + ")\n", + "\n", + "# Define a termination condition that stops the task if the critic approves.\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "# Define a termination condition that stops the task after 5 messages.\n", + "max_message_termination = MaxMessageTermination(5)\n", + "# Combine the termination conditions using the `|`` operator so that the\n", + "# task stops when either condition is met.\n", + "termination = text_termination | max_message_termination\n", + "\n", + "# Create a team with the primary and critic agents.\n", + "reflection_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=termination)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's give a poem-writing task to the team and see how the agents interact with each other." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a short poem about fall season.\n", + "---------- primary ----------\n", + "Golden leaves dance on the breeze, \n", + "Whispering secrets through the trees. \n", + "Crisp air nips at cheeks so bright, \n", + "As daylight fades to early night. \n", + "\n", + "Pumpkins sit on porches grand, \n", + "In a painted, harvest land. \n", + "Sweaters hug us, warm and snug, \n", + "While cider fills each steamy mug. \n", + "\n", + "In this season's gentle sway, \n", + "Nature tells of time's ballet. \n", + "With each leaf's descent and flight, \n", + "Autumn sings its soft goodnight. \n", + "[Prompt tokens: 27, Completion tokens: 107]\n", + "---------- critic ----------\n", + "Your poem is beautiful and elegantly captures the essence of the fall season. The imagery you used creates vivid pictures of autumn landscapes and activities, making it easy for the reader to visualize the scene. The rhyming and rhythm contribute to the poem's musicality, enhancing its appeal. Each stanza highlights different aspects of fall, creating a well-rounded depiction of the season.\n", + "\n", + "To make the poem even more evocative, consider including a few additional sensory details or emotions tied to the season. For instance, you might evoke the sounds of rustling leaves or the feeling of warmth from a fireplace. Overall, it's a delightful and charming poem that effectively conveys the spirit of fall.\n", + "\n", + "If these suggestions are considered, please share the revised poem for additional feedback!\n", + "[Prompt tokens: 152, Completion tokens: 148]\n", + "---------- primary ----------\n", + "Thank you for the thoughtful feedback! Here's a revised version of the poem, incorporating more sensory details and emotions:\n", + "\n", + "---\n", + "\n", + "Golden leaves dance on the breeze, \n", + "Whispering secrets through the trees. \n", + "Crisp air kisses cheeks aglow, \n", + "As twilight casts a gentle show. \n", + "\n", + "Pumpkins guard each porch with pride, \n", + "In this painted, harvest tide. \n", + "Sweaters hug us, warm and snug, \n", + "While cider steams in every mug. \n", + "\n", + "Children laugh in rustling leaves, \n", + "As branches weave autumnal eaves. \n", + "Fireplaces crackle, whisper warmth, \n", + "Embracing hearts in homey charms. \n", + "\n", + "In this season's tender sway, \n", + "Nature turns in grand ballet. \n", + "With each leaf's descent and flight, \n", + "Autumn sings its soft goodnight. \n", + "\n", + "---\n", + "\n", + "I hope this version resonates even more deeply with the spirit of fall.\n", + "[Prompt tokens: 294, Completion tokens: 178]\n", + "---------- critic ----------\n", + "Your revised poem beautifully captures the essence of the fall season with delightful sensory details and emotions. The inclusion of words like \"twilight casts a gentle show,\" \"children laugh in rustling leaves,\" and \"fireplaces crackle\" adds depth and paints a vivid picture of autumn scenes. The addition of emotions, particularly the \"embracing hearts in homey charms,\" evokes a sense of warmth and comfort associated with this season.\n", + "\n", + "The poem flows smoothly with its rhythmic quality and maintains a harmonious balance in its description of autumn. Overall, it now provides an even richer and more immersive experience for the reader. Excellent work on enhancing the sensory experience—this version resonates wonderfully with the spirit of fall. \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 490, Completion tokens: 142]\n", + "---------- Summary ----------\n", + "Number of messages: 5\n", + "Finish reason: Text 'APPROVE' mentioned, Maximum number of messages 5 reached, current message count: 5\n", + "Total prompt tokens: 963\n", + "Total completion tokens: 575\n", + "Duration: 8.10 seconds\n" + ] + } + ], + "source": [ + "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"Write a short poem about fall season.\")))` when running in a script.\n", + "await Console(\n", + " reflection_team.run_stream(task=\"Write a short poem about fall season.\")\n", + ") # Stream the messages to the console." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Resuming a Team\n", + "\n", + "Let's run the team again with a new task while keeping the context about the previous task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "将这首诗用中文唐诗风格写一遍。\n", + "---------- primary ----------\n", + "金叶飘舞随风起, \n", + "林间低语秋声细。 \n", + "凉风轻拂面颊红, \n", + "夕光渐隐天色丽。 \n", + "\n", + "南瓜门前静自笑, \n", + "丰收景色绘秋貌。 \n", + "暖衣贴身享温柔, \n", + "热饮氤氲杯中绕。 \n", + "\n", + "童声笑逐落叶中, \n", + "枝叶缀出秋帷浓。 \n", + "炉火轻鸣诉温情, \n", + "温馨满怀思乡容。 \n", + "\n", + "此时佳季自徘徊, \n", + "秋之舞步若梦来。 \n", + "片片叶落随风去, \n", + "秋声袅袅道安睡。 \n", + "[Prompt tokens: 664, Completion tokens: 155]\n", + "---------- critic ----------\n", + "这首诗成功地以唐诗的风格捕捉了秋天的精髓,以古雅的语言和流畅的节奏展示了秋日的美丽。你运用了优美的意象,如“金叶飘舞”和“夕光渐隐”,将秋季的景色描绘得栩栩如生。词句简炼而意境深远,充满了诗意。\n", + "\n", + "同时,诗中融入的情感,如“温馨满怀思乡容”以及“炉火轻鸣诉温情”,有效传达了秋天的暖意与思乡之情,令人感到温暖和亲切。\n", + "\n", + "整体而言,这是一首极具唐诗魅力的作品,成功地展现了秋天的许多层面,并引发读者的共鸣。恭喜你完成了这次优雅的改编!\n", + "\n", + "APPROVE\n", + "[Prompt tokens: 837, Completion tokens: 199]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1501\n", + "Total completion tokens: 354\n", + "Duration: 4.44 seconds\n" + ] + } + ], + "source": [ + "# Write the poem in Chinese Tang poetry style.\n", + "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"将这首诗用中文唐诗风格写一遍。\")))` when running in a script.\n", + "await Console(reflection_team.run_stream(task=\"将这首诗用中文唐诗风格写一遍。\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Resume with another task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write the poem in Spanish.\n", + "---------- primary ----------\n", + "Certainly! Here's a translation of the poem into Spanish:\n", + "\n", + "---\n", + "\n", + "Hojas doradas bailan en la brisa, \n", + "Susurran secretos en la arboleda. \n", + "El aire fresco besa las mejillas, \n", + "Mientras el crepúsculo se despide a su manera. \n", + "\n", + "Calabazas vigilan cada entrada, \n", + "En esta tierra de cosecha pintada. \n", + "Los suéteres nos abrazan calurosos, \n", + "Mientras el vapor sube de cada taza amorosa. \n", + "\n", + "Risas de niños suenan en hojas caídas, \n", + "Mientras ramas tejen toldos de caricias. \n", + "Las chimeneas crepitan, susurran calor, \n", + "Abrazando los corazones con su amor hogareño. \n", + "\n", + "En la danza de esta estación serena, \n", + "La naturaleza gira con su escena. \n", + "Con cada hoja que desciende en el viento, \n", + "El otoño canta su suave cuento. \n", + "\n", + "---\n", + "\n", + "Espero que esta traducción refleje el mismo espíritu y encantamiento del poema original.\n", + "[Prompt tokens: 1719, Completion tokens: 209]\n", + "---------- critic ----------\n", + "Your translation of the poem into Spanish beautifully captures the essence and lyrical quality of the original. The imagery and emotions are conveyed effectively, maintaining the warmth and serene atmosphere of fall that the poem embodies. Each stanza mirrors the themes presented in the English version, like the golden leaves, harvest, and cozy reflections of autumn.\n", + "\n", + "Overall, your translation is both poetic and faithful to the original content. If you have further adjustments or specific stylistic preferences, feel free to share. Great job on this translation!\n", + "\n", + "APPROVE\n", + "[Prompt tokens: 1946, Completion tokens: 102]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 3665\n", + "Total completion tokens: 311\n", + "Duration: 4.22 seconds\n" + ] + } + ], + "source": [ + "# Write the poem in Spanish.\n", + "# Use `asyncio.run(Console(reflection_team.run_stream(task=\"Write the poem in Spanish.\")))` when running in a script.\n", + "await Console(reflection_team.run_stream(task=\"Write the poem in Spanish.\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Resuming a Previous Task\n", + "\n", + "We can call {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream` methods\n", + "without setting the `task` again to resume the previous task. The team will continue from where it left off." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your kind words! I'm glad the translation captures the essence and lyrical quality of the original poem. If you have any more requests or need further assistance, please feel free to let me know. I'm here to help!\n", + "[Prompt tokens: 2042, Completion tokens: 46]\n", + "---------- critic ----------\n", + "You're very welcome! I'm glad to hear that you're satisfied with the translation. If you have any more requests, whether it's poetry, translation, or any other kind of assistance, don't hesitate to reach out. I'm here to help with whatever you need. Enjoy the rest of your creative journey!\n", + "[Prompt tokens: 2106, Completion tokens: 58]\n", + "---------- primary ----------\n", + "Thank you for your encouraging words! I'm here to assist you with any other requests you might have, whether related to creativity, translation, or any other topic. Feel free to reach out whenever you need help or inspiration. Enjoy your journey in creativity!\n", + "[Prompt tokens: 2158, Completion tokens: 50]\n", + "---------- critic ----------\n", + "You're welcome! It's always a pleasure to assist you. If you ever have more questions or need inspiration, don't hesitate to reach out. Happy creating, and enjoy every moment of your creative journey!\n", + "[Prompt tokens: 2226, Completion tokens: 39]\n", + "---------- primary ----------\n", + "Thank you so much! Your encouragement is greatly appreciated. Feel free to reach out at any time for assistance or inspiration. I wish you the best on your creative journey and hope you enjoy every step of the way!\n", + "[Prompt tokens: 2259, Completion tokens: 43]\n", + "---------- Summary ----------\n", + "Number of messages: 5\n", + "Finish reason: Maximum number of messages 5 reached, current message count: 5\n", + "Total prompt tokens: 10791\n", + "Total completion tokens: 236\n", + "Duration: 5.00 seconds\n" + ] + } + ], + "source": [ + "# Use the `asyncio.run(Console(reflection_team.run_stream()))` when running in a script.\n", + "await Console(reflection_team.run_stream())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pausing for User Input\n", + "\n", + "Sometimes, teams may require additional input from the application (e.g., the user) to continue making meaningful progress on a task. Here are two ways to achieve this:\n", + "\n", + "- Set the maximum number of turns so that the team stops after the specified number of turns.\n", + "- Use the {py:class}`~autogen_agentchat.conditions.HandoffTermination` termination condition.\n", + "\n", + "You can also create custom termination conditions. For more details, see [Termination Conditions](./termination.ipynb).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Via Max Turns\n", + "\n", + "This method allows you to pause the team for user input by setting a maximum number of turns. For instance, you can configure the team to stop after the first agent responds by setting `max_turns` to 1. This is particularly useful in scenarios where continuous user engagement is required, such as in a chatbot.\n", + "\n", + "To implement this, set the `max_turns` parameter in the {py:meth}`~autogen_agentchat.teams.RoundRobinGroupChat` constructor.\n", + "\n", + "```python\n", + "team = RoundRobinGroupChat([...], max_turns=1)\n", + "```\n", + "\n", + "Once the team stops, the turn count will be reset. When you resume the team,\n", + "it will start from 0 again.\n", + "\n", + "Note that `max_turn` is specific to the team class and is currently only supported by\n", + "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, {py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`.\n", + "When used with termination conditions, the team will stop when either condition is met." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Via Handoff\n", + "\n", + "You can use the {py:class}`~autogen_agentchat.conditions.HandoffTermination` termination condition\n", + "to stop the team when an agent sends a {py:class}`~autogen_agentchat.messages.HandoffMessage` message.\n", + "\n", + "Let's create a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` agent\n", + "with a handoff setting.\n", + "\n", + "```{note}\n", + "The model used with {py:class}`~autogen_agentchat.agents.AssistantAgent` must support tool call\n", + "to use the handoff feature.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.base import Handoff\n", + "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "# Create an OpenAI model client.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o-2024-08-06\",\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create a lazy assistant agent that always hands off to the user.\n", + "lazy_agent = AssistantAgent(\n", + " \"lazy_assistant\",\n", + " model_client=model_client,\n", + " handoffs=[Handoff(target=\"user\", message=\"Transfer to user.\")],\n", + " system_message=\"Always transfer to user when you don't know the answer. Respond 'TERMINATE' when task is complete.\",\n", + ")\n", + "\n", + "# Define a termination condition that checks for handoff message targetting helper and text \"TERMINATE\".\n", + "handoff_termination = HandoffTermination(target=\"user\")\n", + "text_termination = TextMentionTermination(\"TERMINATE\")\n", + "termination = handoff_termination | text_termination\n", + "\n", + "# Create a single-agent team.\n", + "lazy_agent_team = RoundRobinGroupChat([lazy_agent], termination_condition=termination)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's run the team with a task that requires additional input from the user\n", + "because the agent doesn't have relevant tools to continue processing the task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What is the weather in New York?\n", + "---------- lazy_assistant ----------\n", + "[FunctionCall(id='call_YHm4KPjFIWZE95YrJWlJwcv4', arguments='{}', name='transfer_to_user')]\n", + "[Prompt tokens: 68, Completion tokens: 11]\n", + "---------- lazy_assistant ----------\n", + "[FunctionExecutionResult(content='Transfer to user.', call_id='call_YHm4KPjFIWZE95YrJWlJwcv4')]\n", + "---------- lazy_assistant ----------\n", + "Transfer to user.\n", + "---------- Summary ----------\n", + "Number of messages: 4\n", + "Finish reason: Handoff to user from lazy_assistant detected.\n", + "Total prompt tokens: 68\n", + "Total completion tokens: 11\n", + "Duration: 0.73 seconds\n" + ] + } + ], + "source": [ + "from autogen_agentchat.ui import Console\n", + "\n", + "# Use `asyncio.run(Console(lazy_agent_team.run_stream(task=\"What is the weather in New York?\")))` when running in a script.\n", + "await Console(lazy_agent_team.run_stream(task=\"What is the weather in New York?\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the team stopped due to the handoff message was detected.\n", + "Let's continue the team by providing the information the agent needs." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "It is raining in New York.\n", + "---------- lazy_assistant ----------\n", + "I hope you stay dry! Is there anything else you would like to know or do?\n", + "[Prompt tokens: 108, Completion tokens: 19]\n", + "---------- lazy_assistant ----------\n", + "TERMINATE\n", + "[Prompt tokens: 134, Completion tokens: 4]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 242\n", + "Total completion tokens: 23\n", + "Duration: 6.77 seconds\n" + ] + } + ], + "source": [ + "# Use `asyncio.run(Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\")))` when running in a script.\n", + "await Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\"))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index a115df68b..998e9f352 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -1,304 +1,304 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Termination \n", - "\n", - "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", - "\n", - "AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", - "\n", - "A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", - "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", - "\n", - "Some important things to note about termination conditions: \n", - "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", - "- They can be combined using the AND and OR operators.\n", - "\n", - "```{note}\n", - "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", - "the termination condition is called after each agent responds.\n", - "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", - "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Built-In Termination Conditions: \n", - "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", - "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", - "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", - "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", - "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n", - "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", - "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", - "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " temperature=1,\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent.\n", - "critic_agent = AssistantAgent(\n", - " \"critic\",\n", - " model_client=model_client,\n", - " system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Gentle rain whispers, \n", - "Cobblestones glisten softly— \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 30, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n", - "\n", - "For example:\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "\n", - "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", - "[Prompt tokens: 70, Completion tokens: 120]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 100\n", - "Total completion tokens: 139\n", - "Duration: 3.34 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Termination \n", + "\n", + "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", + "\n", + "AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", + "\n", + "A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", + "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", + "\n", + "Some important things to note about termination conditions: \n", + "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", + "- They can be combined using the AND and OR operators.\n", + "\n", + "```{note}\n", + "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", + "the termination condition is called after each agent responds.\n", + "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", + "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Built-In Termination Conditions: \n", + "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", + "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", + "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", + "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", + "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n", + "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", + "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", + "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " temperature=1,\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", + " model_client=model_client,\n", + " system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Gentle rain whispers, \n", + "Cobblestones glisten softly— \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 30, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n", + "\n", + "For example:\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "\n", + "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", + "[Prompt tokens: 70, Completion tokens: 120]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 100\n", + "Total completion tokens: 139\n", + "Duration: 3.34 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=3)\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your feedback. Here is the revised Haiku:\n", + "\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 181, Completion tokens: 32]\n", + "---------- critic ----------\n", + "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 234, Completion tokens: 54]\n", + "---------- primary ----------\n", + "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n", + "[Prompt tokens: 279, Completion tokens: 39]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 694\n", + "Total completion tokens: 125\n", + "Duration: 6.43 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The team continued from where it left off, allowing the primary agent to respond to the feedback." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Spring breeze gently hums, \n", + "Cherry blossoms in full bloom— \n", + "Paris wakes to life.\n", + "[Prompt tokens: 467, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\n", + "\n", + "APPROVE\n", + "[Prompt tokens: 746, Completion tokens: 93]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1213\n", + "Total completion tokens: 112\n", + "Duration: 2.75 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=10)\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "combined_termination = max_msg_termination | text_termination\n", + "\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", + "\n", + "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "combined_termination = max_msg_termination & text_termination" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=3)\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- primary ----------\n", - "Thank you for your feedback. Here is the revised Haiku:\n", - "\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 181, Completion tokens: 32]\n", - "---------- critic ----------\n", - "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", - "\n", - "APPROVE\n", - "[Prompt tokens: 234, Completion tokens: 54]\n", - "---------- primary ----------\n", - "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n", - "[Prompt tokens: 279, Completion tokens: 39]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 694\n", - "Total completion tokens: 125\n", - "Duration: 6.43 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The team continued from where it left off, allowing the primary agent to respond to the feedback." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Spring breeze gently hums, \n", - "Cherry blossoms in full bloom— \n", - "Paris wakes to life.\n", - "[Prompt tokens: 467, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\n", - "\n", - "APPROVE\n", - "[Prompt tokens: 746, Completion tokens: 93]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'APPROVE' mentioned\n", - "Total prompt tokens: 1213\n", - "Total completion tokens: 112\n", - "Duration: 2.75 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=10)\n", - "text_termination = TextMentionTermination(\"APPROVE\")\n", - "combined_termination = max_msg_termination | text_termination\n", - "\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", - "\n", - "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "combined_termination = max_msg_termination & text_termination" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 4e16d7001..99e8e13fe 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -23,14 +23,14 @@ from autogen_agentchat import EVENT_LOGGER_NAME from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallRequestEvent, + ToolCallExecutionEvent, ) from autogen_core import CancellationToken, FunctionCall from autogen_core.models._types import FunctionExecutionResult @@ -350,7 +350,7 @@ class OpenAIAssistantAgent(BaseChatAgent): async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """Handle incoming messages and return a response.""" await self._ensure_initialized() @@ -362,7 +362,7 @@ class OpenAIAssistantAgent(BaseChatAgent): await self.handle_text_message(message.content, cancellation_token) # Inner messages for tool calls - inner_messages: List[AgentMessage] = [] + inner_messages: List[AgentEvent | ChatMessage] = [] # Create and start a run run: Run = await cancellation_token.link_future( @@ -402,7 +402,7 @@ class OpenAIAssistantAgent(BaseChatAgent): ) # Add tool call message to inner messages - tool_call_msg = ToolCallMessage(source=self.name, content=tool_calls) + tool_call_msg = ToolCallRequestEvent(source=self.name, content=tool_calls) inner_messages.append(tool_call_msg) event_logger.debug(tool_call_msg) yield tool_call_msg @@ -414,7 +414,7 @@ class OpenAIAssistantAgent(BaseChatAgent): tool_outputs.append(FunctionExecutionResult(content=result, call_id=tool_call.id)) # Add tool result message to inner messages - tool_result_msg = ToolCallResultMessage(source=self.name, content=tool_outputs) + tool_result_msg = ToolCallExecutionEvent(source=self.name, content=tool_outputs) inner_messages.append(tool_result_msg) event_logger.debug(tool_result_msg) yield tool_result_msg diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index 16eeab9bb..6cbcfd65f 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -23,7 +23,7 @@ import aiofiles import PIL.Image from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import AgentMessage, ChatMessage, MultiModalMessage, TextMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage from autogen_core import EVENT_LOGGER_NAME, CancellationToken, FunctionCall from autogen_core import Image as AGImage from autogen_core.models import ( @@ -365,13 +365,13 @@ class MultimodalWebSurfer(BaseChatAgent): async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentMessage | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: for chat_message in messages: if isinstance(chat_message, TextMessage | MultiModalMessage): self._chat_history.append(UserMessage(content=chat_message.content, source=chat_message.source)) else: raise ValueError(f"Unexpected message in MultiModalWebSurfer: {chat_message}") - self.inner_messages: List[AgentMessage] = [] + self.inner_messages: List[AgentEvent | ChatMessage] = [] self.model_usage: List[RequestUsage] = [] try: content = await self._generate_reply(cancellation_token=cancellation_token) diff --git a/python/packages/autogen-studio/autogenstudio/teammanager.py b/python/packages/autogen-studio/autogenstudio/teammanager.py index fdad55fb4..b4ba92460 100644 --- a/python/packages/autogen-studio/autogenstudio/teammanager.py +++ b/python/packages/autogen-studio/autogenstudio/teammanager.py @@ -2,7 +2,7 @@ import time from typing import AsyncGenerator, Callable, Optional, Union from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import AgentMessage, ChatMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_core import CancellationToken from .database import Component, ComponentFactory @@ -27,7 +27,7 @@ class TeamManager: team_config: ComponentConfigInput, input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None, - ) -> AsyncGenerator[Union[AgentMessage, ChatMessage, TaskResult], None]: + ) -> AsyncGenerator[Union[AgentEvent | ChatMessage, ChatMessage, TaskResult], None]: """Stream the team's execution results""" start_time = time.time() diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py index a42ca4ba4..bc1ae0ac5 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py @@ -6,14 +6,14 @@ from uuid import UUID from autogen_agentchat.base._task import TaskResult from autogen_agentchat.messages import ( - AgentMessage, + AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallRequestEvent, + ToolCallExecutionEvent, ) from autogen_core import CancellationToken from autogen_core import Image as AGImage @@ -108,8 +108,8 @@ class WebSocketManager: MultiModalMessage, StopMessage, HandoffMessage, - ToolCallMessage, - ToolCallResultMessage, + ToolCallRequestEvent, + ToolCallExecutionEvent, ), ): await self._save_message(run_id, message) @@ -141,7 +141,7 @@ class WebSocketManager: finally: self._cancellation_tokens.pop(run_id, None) - async def _save_message(self, run_id: UUID, message: Union[AgentMessage, ChatMessage]) -> None: + async def _save_message(self, run_id: UUID, message: Union[AgentEvent | ChatMessage, ChatMessage]) -> None: """Save a message to the database""" run = await self._get_run(run_id) if run: @@ -325,7 +325,7 @@ class WebSocketManager: } elif isinstance( - message, (TextMessage, StopMessage, HandoffMessage, ToolCallMessage, ToolCallResultMessage) + message, (TextMessage, StopMessage, HandoffMessage, ToolCallRequestEvent, ToolCallExecutionEvent) ): return {"type": "message", "data": message.model_dump()} diff --git a/python/packages/autogen-studio/notebooks/tutorial.ipynb b/python/packages/autogen-studio/notebooks/tutorial.ipynb index 47fb858c4..8a267b52e 100644 --- a/python/packages/autogen-studio/notebooks/tutorial.ipynb +++ b/python/packages/autogen-studio/notebooks/tutorial.ipynb @@ -1,342 +1,342 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## AutoGen Studio Agent Workflow API Example\n", - "\n", - "This notebook focuses on demonstrating capabilities of the autogen studio workflow python api. \n", - "\n", - "- Declarative Specification of an Agent Team\n", - "- Loading the specification and running the resulting agent\n", - "\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_jcgtAVlBvTFzVpPxKX88Xsa4', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallMessage'), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_jcgtAVlBvTFzVpPxKX88Xsa4')], type='ToolCallResultMessage'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The current weather in New York is 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=5.103050947189331\n" - ] - } - ], - "source": [ - "from autogenstudio.teammanager import TeamManager\n", - "\n", - "wm = TeamManager()\n", - "result = await wm.run(task=\"What is the weather in New York?\", team_config=\"team.json\")\n", - "print(result)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='What is the weather in New York?' type='TextMessage'\n", - "source='writing_agent' models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15) content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')] type='ToolCallMessage'\n", - "source='writing_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')] type='ToolCallResultMessage'\n", - "source='writing_agent' models_usage=None content='The weather in New York is 73 degrees and Sunny.' type='TextMessage'\n", - "source='writing_agent' models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.' type='TextMessage'\n", - "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallMessage'), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')], type='ToolCallResultMessage'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=1.284574270248413\n" - ] - } - ], - "source": [ - "result_stream = wm.run_stream(task=\"What is the weather in New York?\", team_config=\"team.json\")\n", - "async for response in result_stream:\n", - " print(response)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## AutoGen Studio Database API\n", - "\n", - "Api for creating objects and serializing to a database." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Response(message='Database is ready', status=True, data=None)" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogenstudio.database import DatabaseManager\n", - "import os\n", - "# delete database\n", - "# if os.path.exists(\"test.db\"):\n", - "# os.remove(\"test.db\")\n", - "\n", - "os.makedirs(\"test\", exist_ok=True)\n", - "# create a database\n", - "dbmanager = DatabaseManager(engine_uri=\"sqlite:///test.db\", base_dir=\"test\")\n", - "dbmanager.initialize_database()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "from sqlmodel import Session, text, select\n", - "from autogenstudio.datamodel.types import ModelTypes, TeamTypes, AgentTypes, ToolConfig, ToolTypes, OpenAIModelConfig, RoundRobinTeamConfig, MaxMessageTerminationConfig, AssistantAgentConfig, TerminationTypes\n", - "\n", - "from autogenstudio.datamodel.db import Model, Team, Agent, Tool,LinkTypes\n", - "\n", - "user_id = \"guestuser@gmail.com\" \n", - "\n", - "gpt4_model = Model(user_id=user_id, config= OpenAIModelConfig(model=\"gpt-4o-2024-08-06\", model_type=ModelTypes.OPENAI).model_dump() )\n", - "\n", - "weather_tool = Tool(user_id=user_id, config=ToolConfig(name=\"get_weather\", description=\"Get the weather for a city\", content=\"async def get_weather(city: str) -> str:\\n return f\\\"The weather in {city} is 73 degrees and Sunny.\\\"\",tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n", - "\n", - "adding_tool = Tool(user_id=user_id, config=ToolConfig(name=\"add\", description=\"Add two numbers\", content=\"async def add(a: int, b: int) -> int:\\n return a + b\", tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n", - "\n", - "writing_agent = Agent(user_id=user_id,\n", - " config=AssistantAgentConfig(\n", - " name=\"writing_agent\",\n", - " tools=[weather_tool.config],\n", - " agent_type=AgentTypes.ASSISTANT,\n", - " model_client=gpt4_model.config\n", - " ).model_dump()\n", - " )\n", - "\n", - "team = Team(user_id=user_id, config=RoundRobinTeamConfig(\n", - " name=\"weather_team\",\n", - " participants=[writing_agent.config],\n", - " termination_condition=MaxMessageTerminationConfig(termination_type=TerminationTypes.MAX_MESSAGES, max_messages=5).model_dump(),\n", - " team_type=TeamTypes.ROUND_ROBIN\n", - " ).model_dump()\n", - ")\n", - "\n", - "with Session(dbmanager.engine) as session:\n", - " session.add(gpt4_model)\n", - " session.add(weather_tool)\n", - " session.add(adding_tool)\n", - " session.add(writing_agent)\n", - " session.add(team)\n", - " session.commit()\n", - "\n", - " dbmanager.link(LinkTypes.AGENT_MODEL, writing_agent.id, gpt4_model.id)\n", - " dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, weather_tool.id)\n", - " dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, adding_tool.id)\n", - " dbmanager.link(LinkTypes.TEAM_AGENT, team.id, writing_agent.id)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2 teams in database\n" - ] - } - ], - "source": [ - "all_teams = dbmanager.get(Team)\n", - "print(len(all_teams.data), \"teams in database\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Configuration Manager\n", - "\n", - "Helper class to mostly import teams/agents/models/tools etc into a database." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "from autogenstudio.database import ConfigurationManager\n", - "\n", - "config_manager = ConfigurationManager(dbmanager)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "message='Team Created Successfully' status=True data={'id': 4, 'updated_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674916), 'version': '0.0.1', 'created_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674910), 'user_id': 'user_id', 'config': {'version': '1.0.0', 'component_type': 'team', 'name': 'weather_team', 'participants': [{'version': '1.0.0', 'component_type': 'agent', 'name': 'writing_agent', 'agent_type': 'AssistantAgent', 'description': None, 'model_client': {'version': '1.0.0', 'component_type': 'model', 'model': 'gpt-4o-2024-08-06', 'model_type': 'OpenAIChatCompletionClient', 'api_key': None, 'base_url': None}, 'tools': [{'version': '1.0.0', 'component_type': 'tool', 'name': 'get_weather', 'description': 'Get the weather for a city', 'content': 'async def get_weather(city: str) -> str:\\n return f\"The weather in {city} is 73 degrees and Sunny.\"', 'tool_type': 'PythonFunction'}], 'system_message': None}], 'team_type': 'RoundRobinGroupChat', 'termination_condition': {'version': '1.0.0', 'component_type': 'termination', 'termination_type': 'MaxMessageTermination', 'max_messages': 5}, 'max_turns': None}}\n" - ] - } - ], - "source": [ - "result = await config_manager.import_component(\"team.json\", user_id=\"user_id\", check_exists=True)\n", - "print(result)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "message='Directory import complete' status=True data=[{'component': 'team', 'status': True, 'message': 'Team Created Successfully', 'id': 5}]\n" - ] - } - ], - "source": [ - "result = await config_manager.import_directory(\".\", user_id=\"user_id\", check_exists=False)\n", - "print(result)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "5 teams in database\n" - ] - } - ], - "source": [ - "all_teams = dbmanager.get(Team)\n", - "print(len(all_teams.data), \"teams in database\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Sample AgentChat Example (Python)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat, SelectorGroupChat\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "planner_agent = AssistantAgent(\n", - " \"planner_agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", - " description=\"A helpful assistant that can plan trips.\",\n", - " system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request. Respond with a single sentence\",\n", - ")\n", - "\n", - "local_agent = AssistantAgent(\n", - " \"local_agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", - " description=\"A local assistant that can suggest local activities or places to visit.\",\n", - " system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided. Respond with a single sentence\",\n", - ")\n", - "\n", - "language_agent = AssistantAgent(\n", - " \"language_agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", - " description=\"A helpful assistant that can provide language tips for a given destination.\",\n", - " system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.Respond with a single sentence\",\n", - ")\n", - "\n", - "travel_summary_agent = AssistantAgent(\n", - " \"travel_summary_agent\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", - " description=\"A helpful assistant that can summarize the travel plan.\",\n", - " system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed tfinal travel plan. You must ensure th b at the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.Respond with a single sentence\",\n", - ")\n", - "\n", - "termination = TextMentionTermination(\"TERMINATE\")\n", - "group_chat = RoundRobinGroupChat(\n", - " [planner_agent, local_agent, language_agent, travel_summary_agent], termination_condition=termination\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='Plan a 3 day trip to Nepal.' type='TextMessage'\n", - "source='planner_agent' models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n", - "source='local_agent' models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n", - "source='language_agent' models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42) content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\" type='TextMessage'\n", - "source='travel_summary_agent' models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298) content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\" type='TextMessage'\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Plan a 3 day trip to Nepal.', type='TextMessage'), TextMessage(source='planner_agent', models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='local_agent', models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='language_agent', models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42), content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\", type='TextMessage'), TextMessage(source='travel_summary_agent', models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298), content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")\n" - ] - } - ], - "source": [ - "\n", - "result = group_chat.run_stream(task=\"Plan a 3 day trip to Nepal.\")\n", - "async for response in result:\n", - " print(response)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AutoGen Studio Agent Workflow API Example\n", + "\n", + "This notebook focuses on demonstrating capabilities of the autogen studio workflow python api. \n", + "\n", + "- Declarative Specification of an Agent Team\n", + "- Loading the specification and running the resulting agent\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallRequestEvent(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_jcgtAVlBvTFzVpPxKX88Xsa4', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_jcgtAVlBvTFzVpPxKX88Xsa4')], type='ToolCallExecutionEvent'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The current weather in New York is 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=5.103050947189331\n" + ] + } + ], + "source": [ + "from autogenstudio.teammanager import TeamManager\n", + "\n", + "wm = TeamManager()\n", + "result = await wm.run(task=\"What is the weather in New York?\", team_config=\"team.json\")\n", + "print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='What is the weather in New York?' type='TextMessage'\n", + "source='writing_agent' models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15) content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')] type='ToolCallRequestEvent'\n", + "source='writing_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')] type='ToolCallExecutionEvent'\n", + "source='writing_agent' models_usage=None content='The weather in New York is 73 degrees and Sunny.' type='TextMessage'\n", + "source='writing_agent' models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.' type='TextMessage'\n", + "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallRequestEvent(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')], type='ToolCallExecutionEvent'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=1.284574270248413\n" + ] + } + ], + "source": [ + "result_stream = wm.run_stream(task=\"What is the weather in New York?\", team_config=\"team.json\")\n", + "async for response in result_stream:\n", + " print(response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AutoGen Studio Database API\n", + "\n", + "Api for creating objects and serializing to a database." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Response(message='Database is ready', status=True, data=None)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogenstudio.database import DatabaseManager\n", + "import os\n", + "# delete database\n", + "# if os.path.exists(\"test.db\"):\n", + "# os.remove(\"test.db\")\n", + "\n", + "os.makedirs(\"test\", exist_ok=True)\n", + "# create a database\n", + "dbmanager = DatabaseManager(engine_uri=\"sqlite:///test.db\", base_dir=\"test\")\n", + "dbmanager.initialize_database()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "from sqlmodel import Session, text, select\n", + "from autogenstudio.datamodel.types import ModelTypes, TeamTypes, AgentTypes, ToolConfig, ToolTypes, OpenAIModelConfig, RoundRobinTeamConfig, MaxMessageTerminationConfig, AssistantAgentConfig, TerminationTypes\n", + "\n", + "from autogenstudio.datamodel.db import Model, Team, Agent, Tool,LinkTypes\n", + "\n", + "user_id = \"guestuser@gmail.com\" \n", + "\n", + "gpt4_model = Model(user_id=user_id, config= OpenAIModelConfig(model=\"gpt-4o-2024-08-06\", model_type=ModelTypes.OPENAI).model_dump() )\n", + "\n", + "weather_tool = Tool(user_id=user_id, config=ToolConfig(name=\"get_weather\", description=\"Get the weather for a city\", content=\"async def get_weather(city: str) -> str:\\n return f\\\"The weather in {city} is 73 degrees and Sunny.\\\"\",tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n", + "\n", + "adding_tool = Tool(user_id=user_id, config=ToolConfig(name=\"add\", description=\"Add two numbers\", content=\"async def add(a: int, b: int) -> int:\\n return a + b\", tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n", + "\n", + "writing_agent = Agent(user_id=user_id,\n", + " config=AssistantAgentConfig(\n", + " name=\"writing_agent\",\n", + " tools=[weather_tool.config],\n", + " agent_type=AgentTypes.ASSISTANT,\n", + " model_client=gpt4_model.config\n", + " ).model_dump()\n", + " )\n", + "\n", + "team = Team(user_id=user_id, config=RoundRobinTeamConfig(\n", + " name=\"weather_team\",\n", + " participants=[writing_agent.config],\n", + " termination_condition=MaxMessageTerminationConfig(termination_type=TerminationTypes.MAX_MESSAGES, max_messages=5).model_dump(),\n", + " team_type=TeamTypes.ROUND_ROBIN\n", + " ).model_dump()\n", + ")\n", + "\n", + "with Session(dbmanager.engine) as session:\n", + " session.add(gpt4_model)\n", + " session.add(weather_tool)\n", + " session.add(adding_tool)\n", + " session.add(writing_agent)\n", + " session.add(team)\n", + " session.commit()\n", + "\n", + " dbmanager.link(LinkTypes.AGENT_MODEL, writing_agent.id, gpt4_model.id)\n", + " dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, weather_tool.id)\n", + " dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, adding_tool.id)\n", + " dbmanager.link(LinkTypes.TEAM_AGENT, team.id, writing_agent.id)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2 teams in database\n" + ] + } + ], + "source": [ + "all_teams = dbmanager.get(Team)\n", + "print(len(all_teams.data), \"teams in database\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration Manager\n", + "\n", + "Helper class to mostly import teams/agents/models/tools etc into a database." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from autogenstudio.database import ConfigurationManager\n", + "\n", + "config_manager = ConfigurationManager(dbmanager)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "message='Team Created Successfully' status=True data={'id': 4, 'updated_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674916), 'version': '0.0.1', 'created_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674910), 'user_id': 'user_id', 'config': {'version': '1.0.0', 'component_type': 'team', 'name': 'weather_team', 'participants': [{'version': '1.0.0', 'component_type': 'agent', 'name': 'writing_agent', 'agent_type': 'AssistantAgent', 'description': None, 'model_client': {'version': '1.0.0', 'component_type': 'model', 'model': 'gpt-4o-2024-08-06', 'model_type': 'OpenAIChatCompletionClient', 'api_key': None, 'base_url': None}, 'tools': [{'version': '1.0.0', 'component_type': 'tool', 'name': 'get_weather', 'description': 'Get the weather for a city', 'content': 'async def get_weather(city: str) -> str:\\n return f\"The weather in {city} is 73 degrees and Sunny.\"', 'tool_type': 'PythonFunction'}], 'system_message': None}], 'team_type': 'RoundRobinGroupChat', 'termination_condition': {'version': '1.0.0', 'component_type': 'termination', 'termination_type': 'MaxMessageTermination', 'max_messages': 5}, 'max_turns': None}}\n" + ] + } + ], + "source": [ + "result = await config_manager.import_component(\"team.json\", user_id=\"user_id\", check_exists=True)\n", + "print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "message='Directory import complete' status=True data=[{'component': 'team', 'status': True, 'message': 'Team Created Successfully', 'id': 5}]\n" + ] + } + ], + "source": [ + "result = await config_manager.import_directory(\".\", user_id=\"user_id\", check_exists=False)\n", + "print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "5 teams in database\n" + ] + } + ], + "source": [ + "all_teams = dbmanager.get(Team)\n", + "print(len(all_teams.data), \"teams in database\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sample AgentChat Example (Python)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat, SelectorGroupChat\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "planner_agent = AssistantAgent(\n", + " \"planner_agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", + " description=\"A helpful assistant that can plan trips.\",\n", + " system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request. Respond with a single sentence\",\n", + ")\n", + "\n", + "local_agent = AssistantAgent(\n", + " \"local_agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", + " description=\"A local assistant that can suggest local activities or places to visit.\",\n", + " system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided. Respond with a single sentence\",\n", + ")\n", + "\n", + "language_agent = AssistantAgent(\n", + " \"language_agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", + " description=\"A helpful assistant that can provide language tips for a given destination.\",\n", + " system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.Respond with a single sentence\",\n", + ")\n", + "\n", + "travel_summary_agent = AssistantAgent(\n", + " \"travel_summary_agent\",\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", + " description=\"A helpful assistant that can summarize the travel plan.\",\n", + " system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed tfinal travel plan. You must ensure th b at the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.Respond with a single sentence\",\n", + ")\n", + "\n", + "termination = TextMentionTermination(\"TERMINATE\")\n", + "group_chat = RoundRobinGroupChat(\n", + " [planner_agent, local_agent, language_agent, travel_summary_agent], termination_condition=termination\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='Plan a 3 day trip to Nepal.' type='TextMessage'\n", + "source='planner_agent' models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n", + "source='local_agent' models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n", + "source='language_agent' models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42) content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\" type='TextMessage'\n", + "source='travel_summary_agent' models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298) content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\" type='TextMessage'\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Plan a 3 day trip to Nepal.', type='TextMessage'), TextMessage(source='planner_agent', models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='local_agent', models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='language_agent', models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42), content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\", type='TextMessage'), TextMessage(source='travel_summary_agent', models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298), content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")\n" + ] + } + ], + "source": [ + "\n", + "result = group_chat.run_stream(task=\"Plan a 3 day trip to Nepal.\")\n", + "async for response in result:\n", + " print(response)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 }