mirror of
https://github.com/microsoft/autogen.git
synced 2026-04-20 03:02:16 -04:00
Python: organize packages in package directory (#420)
* Move packages to packages directory * remove screenshot * update some paths
This commit is contained in:
174
python/packages/autogen-core/.gitignore
vendored
Normal file
174
python/packages/autogen-core/.gitignore
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
.ruff_cache/
|
||||
|
||||
/docs/src/reference
|
||||
.DS_Store
|
||||
|
||||
# Generated log files
|
||||
log.jsonl
|
||||
|
||||
# Jupyter notebooks executions in docs.
|
||||
docs/**/jupyter_execute
|
||||
|
||||
# Temporary files
|
||||
tmp_code_*.py
|
||||
61
python/packages/autogen-core/README.md
Normal file
61
python/packages/autogen-core/README.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# AutoGen Core
|
||||
|
||||
- [Documentation](http://microsoft.github.io/agnext)
|
||||
- [Examples](https://github.com/microsoft/agnext/tree/main/python/samples)
|
||||
|
||||
## Package layering
|
||||
|
||||
- `base` are the foundational generic interfaces upon which all else is built. This module must not depend on any other module.
|
||||
- `application` are implementations of core components that are used to compose an application.
|
||||
- `components` are the building blocks for creating agents.
|
||||
|
||||
## Development
|
||||
|
||||
**TL;DR**, run all checks with:
|
||||
|
||||
```sh
|
||||
hatch run check
|
||||
```
|
||||
|
||||
### Setup
|
||||
|
||||
- [Install `hatch`](https://hatch.pypa.io/1.12/install/).
|
||||
|
||||
### Virtual environment
|
||||
|
||||
To get a shell with the package available (virtual environment),
|
||||
in the current directory,
|
||||
run:
|
||||
|
||||
```sh
|
||||
hatch shell
|
||||
```
|
||||
|
||||
### Common tasks
|
||||
|
||||
- Format: `hatch run check`
|
||||
- Lint: `hatch run lint`
|
||||
- Test: `hatch run pytest -n auto`
|
||||
- Mypy: `hatch run mypy`
|
||||
- Pyright: `hatch run pyright`
|
||||
- Build docs: `hatch run docs:build`
|
||||
- Auto rebuild+serve docs: `hatch run docs:serve`
|
||||
|
||||
> [!NOTE]
|
||||
> These don't need to be run in a virtual environment, `hatch` will automatically manage it for you.
|
||||
|
||||
|
||||
#### IntelliJ Support
|
||||
To enable the `hatch` virtual environment in IntelliJ, follow these steps:
|
||||
Under the `[tool.hatch.envs.default]` heading in `pyproject.toml`, add this:
|
||||
|
||||
```toml
|
||||
[tool.hatch.envs.default]
|
||||
type = "virtual"
|
||||
path = ".venv"
|
||||
```
|
||||
Run `hatch shell` in the terminal to create the virtual environment.
|
||||
|
||||
Then, in IntelliJ, go to `File` -> `Project Structure` -> `Project Settings` -> `Project` -> `Project SDK` and select the Python interpreter in the `.venv` directory.
|
||||
|
||||
Once complete, your IDE should be able to resolve, run, and debug code.
|
||||
@@ -0,0 +1,52 @@
|
||||
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0" version="24.7.5">
|
||||
<diagram name="Page-1" id="cddb7oONEilqIw1Y7nf5">
|
||||
<mxGraphModel dx="1071" dy="1138" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-6" value="Application Logic" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="290" y="390" width="240" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-1" value="Behavior Contract (Message Protocol)" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||
<mxGeometry x="290" y="430" width="240" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-2" value="Message Types" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||
<mxGeometry x="290" y="470" width="240" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-3" value="Message Routing" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
|
||||
<mxGeometry x="290" y="540" width="240" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-4" value="Protobuf + gRPC" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
|
||||
<mxGeometry x="290" y="580" width="240" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-6" value="" style="endArrow=none;html=1;rounded=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="265" y="520" as="sourcePoint" />
|
||||
<mxPoint x="555" y="520" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-7" value="Agent Communication Stack" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="570" y="560" width="90" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-8" value="Your Multi-Agent Application" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="560" y="430" width="111" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-9" value="Multi-Agent Patterns" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="130" y="410" width="120" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="aPJ7GLReoFj_4gOym3_0-3" target="aPJ7GLReoFj_4gOym3_0-3">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-11" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;labelPosition=left;verticalLabelPosition=middle;align=right;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="260" y="395" width="20" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-12" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;labelPosition=right;verticalLabelPosition=middle;align=left;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="540" y="395" width="20" height="95" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="aPJ7GLReoFj_4gOym3_0-13" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;labelPosition=right;verticalLabelPosition=middle;align=left;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="540" y="550" width="20" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,151 @@
|
||||
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0" version="24.7.5">
|
||||
<diagram name="Page-1" id="RjQ-yjMprM0l9Swcy9ED">
|
||||
<mxGraphModel dx="1071" dy="1138" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-1" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="160" y="440" width="410" height="380" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-3" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="330" y="675" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-5" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="180" y="465" width="130" height="250" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-6" value="Model Client" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
||||
<mxGeometry x="200" y="480" width="90" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-7" value="Tools" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
||||
<mxGeometry x="200" y="530" width="90" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-8" value="Memory" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
||||
<mxGeometry x="200" y="580" width="90" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-9" value="Custom Components" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
||||
<mxGeometry x="200" y="630" width="90" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-10" value="Agent" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="215" y="680" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-11" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="410" y="675" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-12" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="490" y="675" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-17" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=classic;startFill=1;" parent="1" source="8GsP74nl-6KZMK6J00mR-3" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="223" y="725" as="sourcePoint" />
|
||||
<mxPoint x="350" y="770" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-18" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=classic;startFill=1;" parent="1" source="8GsP74nl-6KZMK6J00mR-11" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="370" y="725" as="sourcePoint" />
|
||||
<mxPoint x="440" y="770" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-19" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=classic;startFill=1;" parent="1" source="8GsP74nl-6KZMK6J00mR-12" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="450" y="725" as="sourcePoint" />
|
||||
<mxPoint x="470" y="770" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-21" value="Host Agent Runtime" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="375" y="465" width="130" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-23" value="Messages" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="275" y="730" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-24" value="" style="endArrow=none;html=1;rounded=0;" parent="1" target="8GsP74nl-6KZMK6J00mR-1" edge="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="160" y="765" as="sourcePoint" />
|
||||
<mxPoint x="210" y="715" as="targetPoint" />
|
||||
<Array as="points">
|
||||
<mxPoint x="570" y="765" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-25" value="Runtime Internals" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="310" y="775" width="110" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-26" value="Messages" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="390" y="730" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-35" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="390" y="900" width="160" height="130" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-36" value="Worker Agent Runtime" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="405" y="997" width="130" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-37" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="400" y="951" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-38" value="" style="endArrow=none;html=1;rounded=0;entryX=1;entryY=0.25;entryDx=0;entryDy=0;" parent="1" edge="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="390" y="940" as="sourcePoint" />
|
||||
<mxPoint x="550" y="940" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-39" value="Runtime Internals" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="415" y="910" width="110" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-45" value="Gateway" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" parent="1" vertex="1">
|
||||
<mxGeometry x="330" y="810" width="70" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-46" value="Gateway" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" parent="1" vertex="1">
|
||||
<mxGeometry x="435" y="880" width="70" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-47" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="480" y="951" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-48" style="rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=classic;startFill=1;" parent="1" source="8GsP74nl-6KZMK6J00mR-5" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="370" y="725" as="sourcePoint" />
|
||||
<mxPoint x="250" y="770" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-50" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;exitX=0.75;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="1" source="8GsP74nl-6KZMK6J00mR-45" target="8GsP74nl-6KZMK6J00mR-46" edge="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="400" y="610" as="sourcePoint" />
|
||||
<mxPoint x="450" y="560" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-51" value="Messages" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="335" y="847" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-52" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="180" y="897" width="160" height="130" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-53" value="Worker Agent Runtime" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="195" y="994" width="130" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-54" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="190" y="948" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-55" value="" style="endArrow=none;html=1;rounded=0;entryX=1;entryY=0.25;entryDx=0;entryDy=0;" parent="1" edge="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="180" y="937" as="sourcePoint" />
|
||||
<mxPoint x="340" y="937" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-56" value="Runtime Internals" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="205" y="907" width="110" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-57" value="Gateway" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" parent="1" vertex="1">
|
||||
<mxGeometry x="225" y="877" width="70" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-58" value="Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="270" y="948" width="60" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="8GsP74nl-6KZMK6J00mR-59" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;" parent="1" source="8GsP74nl-6KZMK6J00mR-57" target="8GsP74nl-6KZMK6J00mR-45" edge="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="393" y="850" as="sourcePoint" />
|
||||
<mxPoint x="490" y="890" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,64 @@
|
||||
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0" version="24.7.5">
|
||||
<diagram name="Page-1" id="T0VXZc8v9pY_-jcHJENn">
|
||||
<mxGraphModel dx="1071" dy="1138" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-15" target="D0hljkLHYirHcF4gpA4y-19">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-15" value="CodingTaskMsg" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" vertex="1" parent="1">
|
||||
<mxGeometry x="63.75" y="270" width="105" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-16" target="D0hljkLHYirHcF4gpA4y-20">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-16" value="CodeGenMsg" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" vertex="1" parent="1">
|
||||
<mxGeometry x="65" y="373" width="102.5" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-17" value="CodingResultMsg" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" vertex="1" parent="1">
|
||||
<mxGeometry x="58.75" y="620" width="112.5" height="35" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;endArrow=none;endFill=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-19" target="D0hljkLHYirHcF4gpA4y-16">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-19" value="Coder Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||
<mxGeometry x="62.5" y="320" width="107.5" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-20" target="D0hljkLHYirHcF4gpA4y-26">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-20" value="Executor Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||
<mxGeometry x="60" y="420" width="110" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-21" target="D0hljkLHYirHcF4gpA4y-17">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-21" target="D0hljkLHYirHcF4gpA4y-29">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-21" value="Reviewer Agent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||
<mxGeometry x="60" y="540" width="110" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-26" target="D0hljkLHYirHcF4gpA4y-21">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-26" value="ExecutionResultMsg" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" vertex="1" parent="1">
|
||||
<mxGeometry x="50" y="480" width="130" height="35" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="D0hljkLHYirHcF4gpA4y-29" target="D0hljkLHYirHcF4gpA4y-19">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-29" value="ReviewMsg" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=#666666;fontColor=#333333;" vertex="1" parent="1">
|
||||
<mxGeometry x="200" y="445" width="80" height="35" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-38" value="Approved==False" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="200" y="515" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="D0hljkLHYirHcF4gpA4y-39" value="Approved==True" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="139" y="584" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,28 @@
|
||||
<mxfile host="app.diagrams.net" modified="2024-07-11T07:42:49.053Z" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0" etag="OK-OoeWcNWr8T5Wb_Dpd" version="24.6.4" type="device">
|
||||
<diagram name="Page-1" id="cddb7oONEilqIw1Y7nf5">
|
||||
<mxGraphModel dx="748" dy="690" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-12" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#666666;" parent="1" vertex="1">
|
||||
<mxGeometry x="160" y="240" width="260" height="230" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-1" value="<b>Core:</b> Agent and Agent Runtime" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
|
||||
<mxGeometry x="170" y="380" width="240" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-2" value="<b>AI Components: </b>Base Agents, Models, Tools, Memory, etc." style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="170" y="250" width="240" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-4" value="<b>Application Components</b>: Runtime Implementations, Loggings, etc." style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
|
||||
<mxGeometry x="170" y="320" width="240" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-6" value="Multi-Agent Applications" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
|
||||
<mxGeometry x="170" y="183" width="240" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="e_Se_iOjKQHndGvTYtJ0-10" value="AGNext" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="260" y="436" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,86 @@
|
||||
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0" version="24.7.6">
|
||||
<diagram name="Page-1" id="kM63aGWDAVgwnXhMnwsJ">
|
||||
<mxGraphModel dx="1773" dy="1145" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-6" value="approved=False" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="360" y="290" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-8" target="rwyUPL19n1b9p3f1DsXw-10">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-8" value="CoderAgent:<div>handle_writing_task</div>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" vertex="1" parent="1">
|
||||
<mxGeometry x="160" y="200" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-16" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-10" target="rwyUPL19n1b9p3f1DsXw-15">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-10" value="ReviewerAgent:<div>handle_review_task</div>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
|
||||
<mxGeometry x="305" y="200" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-15" target="rwyUPL19n1b9p3f1DsXw-8">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<Array as="points">
|
||||
<mxPoint x="540" y="320" />
|
||||
<mxPoint x="220" y="320" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-15" target="rwyUPL19n1b9p3f1DsXw-21">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="630" y="230" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-15" value="CoderAgent:<div>handle_review_result</div>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" vertex="1" parent="1">
|
||||
<mxGeometry x="450" y="200" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-18" value="approved=True" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="580" y="245" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-21" value="Application:<div>Receive Result</div>" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="620" y="215" width="90" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-22" target="rwyUPL19n1b9p3f1DsXw-8">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-22" value="Application:<div>Send Task</div>" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="80" y="215" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;entryX=0;entryY=0.25;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-25" target="rwyUPL19n1b9p3f1DsXw-26">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.75;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-25" target="rwyUPL19n1b9p3f1DsXw-33">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-25" value="CoderAgent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" vertex="1" parent="1">
|
||||
<mxGeometry x="260" y="620" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.75;exitDx=0;exitDy=0;entryX=1;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-26" target="rwyUPL19n1b9p3f1DsXw-25">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-26" value="ReviewerAgent" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
|
||||
<mxGeometry x="480" y="620" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-29" value="CodeReviewTask" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="395" y="600" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-30" value="CodeReviewResult" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="395" y="677" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.25;entryDx=0;entryDy=0;" edge="1" parent="1" source="rwyUPL19n1b9p3f1DsXw-31" target="rwyUPL19n1b9p3f1DsXw-25">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-31" value="CodeWritingTask" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="100" y="620" width="100" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-33" value="CodeWritingResult" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="90" y="650" width="110" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="rwyUPL19n1b9p3f1DsXw-36" value="approved=True" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="220" y="677" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,48 @@
|
||||
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0" version="24.7.5">
|
||||
<diagram name="Page-1" id="2U5l3ylZQluw78bWm4Ui">
|
||||
<mxGraphModel dx="970" dy="1091" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=0;curved=1;" parent="1" source="HfMixxTfmlLCSILu5LXk-1" target="HfMixxTfmlLCSILu5LXk-2" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<Array as="points">
|
||||
<mxPoint x="240" y="370" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-1" value="ToolAgent:handle_function_call" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="265" y="370" width="190" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-8" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0;entryDx=0;entryDy=0;curved=1;" parent="1" source="HfMixxTfmlLCSILu5LXk-2" target="HfMixxTfmlLCSILu5LXk-1" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<Array as="points">
|
||||
<mxPoint x="500" y="300" />
|
||||
<mxPoint x="500" y="370" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="HfMixxTfmlLCSILu5LXk-2" edge="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="600" y="270" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-2" value="ToolUseAgent:handle_user_message<div>model_client.create</div>" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
|
||||
<mxGeometry x="240" y="240" width="240" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="HfMixxTfmlLCSILu5LXk-3" target="HfMixxTfmlLCSILu5LXk-2" edge="1">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-3" value="send message" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="120" y="255" width="60" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-12" value="model response is tool call" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="510" y="330" width="90" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="HfMixxTfmlLCSILu5LXk-16" value="model response is text" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
|
||||
<mxGeometry x="500" y="225" width="90" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
@@ -0,0 +1,8 @@
|
||||
{%- if show_headings %}
|
||||
{{- basename | e | heading }}
|
||||
|
||||
{% endif -%}
|
||||
.. automodule:: {{ qualname }}
|
||||
{%- for option in automodule_options %}
|
||||
:{{ option }}:
|
||||
{%- endfor %}
|
||||
@@ -0,0 +1,53 @@
|
||||
{%- macro automodule(modname, options) -%}
|
||||
.. automodule:: {{ modname }}
|
||||
{%- for option in options %}
|
||||
:{{ option }}:
|
||||
{%- endfor %}
|
||||
{%- endmacro %}
|
||||
|
||||
{%- macro toctree(docnames) -%}
|
||||
.. toctree::
|
||||
:maxdepth: {{ maxdepth }}
|
||||
:hidden:
|
||||
{% for docname in docnames %}
|
||||
{{ docname }}
|
||||
{%- endfor %}
|
||||
{%- endmacro %}
|
||||
|
||||
{%- if is_namespace %}
|
||||
{{- [pkgname, "namespace"] | join(" ") | e | heading }}
|
||||
{% else %}
|
||||
{{- pkgname | e | heading }}
|
||||
{% endif %}
|
||||
|
||||
{%- if is_namespace %}
|
||||
.. py:module:: {{ pkgname }}
|
||||
{% endif %}
|
||||
|
||||
{%- if modulefirst and not is_namespace %}
|
||||
{{ automodule(pkgname, automodule_options) }}
|
||||
{% endif %}
|
||||
|
||||
{%- if subpackages %}
|
||||
|
||||
{{ toctree(subpackages) }}
|
||||
{% endif %}
|
||||
|
||||
{%- if submodules %}
|
||||
|
||||
{% if separatemodules %}
|
||||
{{ toctree(submodules) }}
|
||||
{% else %}
|
||||
{%- for submodule in submodules %}
|
||||
{% if show_headings %}
|
||||
{{- [submodule, "module"] | join(" ") | e | heading(2) }}
|
||||
{% endif %}
|
||||
{{ automodule(submodule, automodule_options) }}
|
||||
{% endfor %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
|
||||
{%- if not modulefirst and not is_namespace %}
|
||||
|
||||
{{ automodule(pkgname, automodule_options) }}
|
||||
{% endif %}
|
||||
66
python/packages/autogen-core/docs/src/conf.py
Normal file
66
python/packages/autogen-core/docs/src/conf.py
Normal file
@@ -0,0 +1,66 @@
|
||||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# For the full list of built-in configuration values, see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
project = "autogen_core"
|
||||
copyright = "2024, Microsoft"
|
||||
author = "Microsoft"
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||
|
||||
extensions = [
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx.ext.autosummary",
|
||||
"sphinx.ext.napoleon",
|
||||
"sphinxcontrib.apidoc",
|
||||
"myst_nb",
|
||||
"sphinx.ext.intersphinx",
|
||||
]
|
||||
|
||||
apidoc_module_dir = "../../src/autogen_core"
|
||||
apidoc_output_dir = "reference"
|
||||
apidoc_template_dir = "_apidoc_templates"
|
||||
apidoc_separate_modules = True
|
||||
apidoc_extra_args = ["--no-toc"]
|
||||
napoleon_custom_sections = [("Returns", "params_style")]
|
||||
apidoc_excluded_paths = ["./application/protos/"]
|
||||
|
||||
templates_path = []
|
||||
exclude_patterns = ["reference/autogen_core.rst"]
|
||||
|
||||
autoclass_content = "init"
|
||||
|
||||
# TODO: incldue all notebooks excluding those requiring remote API access.
|
||||
nb_execution_mode = "off"
|
||||
|
||||
# Guides and tutorials must succeed.
|
||||
nb_execution_raise_on_error = True
|
||||
nb_execution_timeout = 60
|
||||
|
||||
myst_heading_anchors = 5
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
||||
|
||||
html_title = "AGNext"
|
||||
|
||||
html_theme = "furo"
|
||||
html_static_path = []
|
||||
|
||||
html_theme_options = {
|
||||
"source_repository": "https://github.com/microsoft/autogen_core",
|
||||
"source_branch": "main",
|
||||
"source_directory": "python/docs/src/",
|
||||
}
|
||||
|
||||
autodoc_default_options = {
|
||||
"members": True,
|
||||
"undoc-members": True,
|
||||
}
|
||||
|
||||
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
|
||||
1
python/packages/autogen-core/docs/src/contributing.md
Normal file
1
python/packages/autogen-core/docs/src/contributing.md
Normal file
@@ -0,0 +1 @@
|
||||
# Contributing to AGNext
|
||||
@@ -0,0 +1,41 @@
|
||||
# Azure OpenAI with AAD Auth
|
||||
|
||||
This guide will show you how to use the Azure OpenAI client with Azure Active Directory (AAD) authentication.
|
||||
|
||||
The identity used must be assigned the [**Cognitive Services OpenAI User**](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role.
|
||||
|
||||
## Install Azure Identity client
|
||||
|
||||
The Azure identity client is used to authenticate with Azure Active Directory.
|
||||
|
||||
```sh
|
||||
pip install azure-identity
|
||||
```
|
||||
|
||||
## Using the Model Client
|
||||
|
||||
```python
|
||||
from autogen_core.components.models import AzureOpenAIChatCompletionClient
|
||||
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
|
||||
|
||||
# Create the token provider
|
||||
token_provider = get_bearer_token_provider(
|
||||
DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
|
||||
)
|
||||
|
||||
client = AzureOpenAIChatCompletionClient(
|
||||
model="{your-azure-deployment}",
|
||||
api_version="2024-02-01",
|
||||
azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/",
|
||||
azure_ad_token_provider=token_provider,
|
||||
model_capabilities={
|
||||
"vision":True,
|
||||
"function_calling":True,
|
||||
"json_output":True,
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
```{note}
|
||||
See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info.
|
||||
```
|
||||
@@ -0,0 +1,51 @@
|
||||
# Buffered Memory
|
||||
|
||||
Here is an example of a custom memory implementation that keeps a view of the
|
||||
last N messages:
|
||||
|
||||
```python
|
||||
from typing import Any, List, Mapping
|
||||
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import FunctionExecutionResultMessage, LLMMessage
|
||||
|
||||
|
||||
class BufferedChatMemory(ChatMemory[LLMMessage]):
|
||||
"""A buffered chat memory that keeps a view of the last n messages,
|
||||
where n is the buffer size. The buffer size is set at initialization.
|
||||
|
||||
Args:
|
||||
buffer_size (int): The size of the buffer.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, buffer_size: int) -> None:
|
||||
self._messages: List[LLMMessage] = []
|
||||
self._buffer_size = buffer_size
|
||||
|
||||
async def add_message(self, message: LLMMessage) -> None:
|
||||
"""Add a message to the memory."""
|
||||
self._messages.append(message)
|
||||
|
||||
async def get_messages(self) -> List[LLMMessage]:
|
||||
"""Get at most `buffer_size` recent messages."""
|
||||
messages = self._messages[-self._buffer_size :]
|
||||
# Handle the first message is a function call result message.
|
||||
if messages and isinstance(messages[0], FunctionExecutionResultMessage):
|
||||
# Remove the first message from the list.
|
||||
messages = messages[1:]
|
||||
return messages
|
||||
|
||||
async def clear(self) -> None:
|
||||
"""Clear the message memory."""
|
||||
self._messages = []
|
||||
|
||||
def save_state(self) -> Mapping[str, Any]:
|
||||
return {
|
||||
"messages": [message for message in self._messages],
|
||||
"buffer_size": self._buffer_size,
|
||||
}
|
||||
|
||||
def load_state(self, state: Mapping[str, Any]) -> None:
|
||||
self._messages = state["messages"]
|
||||
self._buffer_size = state["buffer_size"]
|
||||
@@ -0,0 +1,35 @@
|
||||
# Extracting Results with an Agent
|
||||
|
||||
When running a multi-agent system to solve some task, you may want to extract the result of the system once it has reached termination. This guide showcases one way to achieve this. Given that agent instances are not directly accessible from the outside, we will use an agent to publish the final result to an accessible location.
|
||||
|
||||
If you model your system to publish some `FinalResult` type then you can create an agent whose sole job is to subscribe to this and make it available externally. For simple agents like this the {py:class}`~autogen_core.components.ClosureAgent` is an option to reduce the amount of boilerplate code. This allows you to define a function that will be associated as the agent's message handler. In this example, we're going to use a queue shared between the agent and the external code to pass the result.
|
||||
|
||||
```{note}
|
||||
When considering how to extract results from a multi-agent system, you must always consider the namespace of the agent and by extension the message.
|
||||
```
|
||||
|
||||
```python
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentRuntime, AgentId, CancellationToken
|
||||
from autogen_core.components import ClosureAgent
|
||||
|
||||
import asyncio
|
||||
|
||||
@dataclass
|
||||
class FinalResult:
|
||||
result: str
|
||||
|
||||
# ...
|
||||
|
||||
queue = asyncio.Queue[tuple[str, FinalResult]]()
|
||||
|
||||
async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, cancellation_token: CancellationToken) -> None:
|
||||
namespace = id.namespace
|
||||
await queue.put((namespace, FinalResult))
|
||||
|
||||
runtime.register("OutputResult", lambda: ClosureAgent("Outputs messages", output_result))
|
||||
|
||||
# ...
|
||||
```
|
||||
|
||||
When using a `ClosureAgent` the third parameter, named `message` in this example determines what messages are subscribed to. In this case, the agent will only receive messages of type `FinalResult`. This can also be a union of types if you want to subscribe to multiple types of messages.
|
||||
@@ -0,0 +1,300 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using LangGraph-Backed Agent\n",
|
||||
"\n",
|
||||
"This example demonstrates how to create an AI agent using LangGraph.\n",
|
||||
"Based on the example in the LangGraph documentation:\n",
|
||||
"https://langchain-ai.github.io/langgraph/.\n",
|
||||
"\n",
|
||||
"First install the dependencies:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install langgraph langchain-openai azure-identity"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's import the modules."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from dataclasses import dataclass\n",
|
||||
"from typing import Any, Callable, List, Literal\n",
|
||||
"\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import AgentId, MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.tools import tool # pyright: ignore\n",
|
||||
"from langchain_openai import AzureChatOpenAI, ChatOpenAI\n",
|
||||
"from langgraph.graph import END, MessagesState, StateGraph\n",
|
||||
"from langgraph.prebuilt import ToolNode"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define our message type that will be used to communicate with the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define the tools the agent will use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool # pyright: ignore\n",
|
||||
"def get_weather(location: str) -> str:\n",
|
||||
" \"\"\"Call to surf the web.\"\"\"\n",
|
||||
" # This is a placeholder, but don't tell the LLM that...\n",
|
||||
" if \"sf\" in location.lower() or \"san francisco\" in location.lower():\n",
|
||||
" return \"It's 60 degrees and foggy.\"\n",
|
||||
" return \"It's 90 degrees and sunny.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define the agent using LangGraph's API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class LangGraphToolUseAgent(RoutedAgent):\n",
|
||||
" def __init__(self, description: str, model: ChatOpenAI, tools: List[Callable[..., Any]]) -> None: # pyright: ignore\n",
|
||||
" super().__init__(description)\n",
|
||||
" self._model = model.bind_tools(tools) # pyright: ignore\n",
|
||||
"\n",
|
||||
" # Define the function that determines whether to continue or not\n",
|
||||
" def should_continue(state: MessagesState) -> Literal[\"tools\", END]: # type: ignore\n",
|
||||
" messages = state[\"messages\"]\n",
|
||||
" last_message = messages[-1]\n",
|
||||
" # If the LLM makes a tool call, then we route to the \"tools\" node\n",
|
||||
" if last_message.tool_calls: # type: ignore\n",
|
||||
" return \"tools\"\n",
|
||||
" # Otherwise, we stop (reply to the user)\n",
|
||||
" return END\n",
|
||||
"\n",
|
||||
" # Define the function that calls the model\n",
|
||||
" async def call_model(state: MessagesState): # type: ignore\n",
|
||||
" messages = state[\"messages\"]\n",
|
||||
" response = await self._model.ainvoke(messages)\n",
|
||||
" # We return a list, because this will get added to the existing list\n",
|
||||
" return {\"messages\": [response]}\n",
|
||||
"\n",
|
||||
" tool_node = ToolNode(tools) # pyright: ignore\n",
|
||||
"\n",
|
||||
" # Define a new graph\n",
|
||||
" self._workflow = StateGraph(MessagesState)\n",
|
||||
"\n",
|
||||
" # Define the two nodes we will cycle between\n",
|
||||
" self._workflow.add_node(\"agent\", call_model) # pyright: ignore\n",
|
||||
" self._workflow.add_node(\"tools\", tool_node) # pyright: ignore\n",
|
||||
"\n",
|
||||
" # Set the entrypoint as `agent`\n",
|
||||
" # This means that this node is the first one called\n",
|
||||
" self._workflow.set_entry_point(\"agent\")\n",
|
||||
"\n",
|
||||
" # We now add a conditional edge\n",
|
||||
" self._workflow.add_conditional_edges(\n",
|
||||
" # First, we define the start node. We use `agent`.\n",
|
||||
" # This means these are the edges taken after the `agent` node is called.\n",
|
||||
" \"agent\",\n",
|
||||
" # Next, we pass in the function that will determine which node is called next.\n",
|
||||
" should_continue, # type: ignore\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # We now add a normal edge from `tools` to `agent`.\n",
|
||||
" # This means that after `tools` is called, `agent` node is called next.\n",
|
||||
" self._workflow.add_edge(\"tools\", \"agent\")\n",
|
||||
"\n",
|
||||
" # Finally, we compile it!\n",
|
||||
" # This compiles it into a LangChain Runnable,\n",
|
||||
" # meaning you can use it as you would any other runnable.\n",
|
||||
" # Note that we're (optionally) passing the memory when compiling the graph\n",
|
||||
" self._app = self._workflow.compile()\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
" # Use the Runnable\n",
|
||||
" final_state = await self._app.ainvoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful AI assistant. You can use tools to help answer questions.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=message.content),\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" config={\"configurable\": {\"thread_id\": 42}},\n",
|
||||
" )\n",
|
||||
" response = Message(content=final_state[\"messages\"][-1].content)\n",
|
||||
" return response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's test the agent. First we need to create an agent runtime and\n",
|
||||
"register the agent, by providing the agent's name and a factory function\n",
|
||||
"that will create the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"langgraph_tool_use_agent\",\n",
|
||||
" lambda: LangGraphToolUseAgent(\n",
|
||||
" \"Tool use agent\",\n",
|
||||
" ChatOpenAI(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" # api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
|
||||
" ),\n",
|
||||
" # AzureChatOpenAI(\n",
|
||||
" # azure_deployment=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n",
|
||||
" # azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n",
|
||||
" # api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n",
|
||||
" # # Using Azure Active Directory authentication.\n",
|
||||
" # azure_ad_token_provider=get_bearer_token_provider(DefaultAzureCredential()),\n",
|
||||
" # # Using API key.\n",
|
||||
" # # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
|
||||
" # ),\n",
|
||||
" [get_weather],\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"agent = AgentId(\"langgraph_tool_use_agent\", key=\"default\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Start the agent runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime.start()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Send a direct message to the agent, and print the response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The weather in San Francisco is currently 60 degrees and foggy.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = await runtime.send_message(Message(\"What's the weather in SF?\"), agent)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Stop the agent runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await runtime.stop()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,533 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using LlamaIndex-Backed Agent\n",
|
||||
"\n",
|
||||
"This example demonstrates how to create an AI agent using LlamaIndex.\n",
|
||||
"\n",
|
||||
"First install the dependencies:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install \"llama-index-readers-web\" \"llama-index-readers-wikipedia\" \"llama-index-tools-wikipedia\" \"llama-index-embeddings-azure-openai\" \"llama-index-llms-azure-openai\" \"llama-index\" \"azure-identity\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's import the modules."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"import os\n",
|
||||
"from dataclasses import dataclass\n",
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import AgentId, MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
|
||||
"from llama_index.core import Settings\n",
|
||||
"from llama_index.core.agent import ReActAgent\n",
|
||||
"from llama_index.core.agent.runner.base import AgentRunner\n",
|
||||
"from llama_index.core.base.llms.types import (\n",
|
||||
" ChatMessage,\n",
|
||||
" MessageRole,\n",
|
||||
")\n",
|
||||
"from llama_index.core.chat_engine.types import AgentChatResponse\n",
|
||||
"from llama_index.core.memory import ChatSummaryMemoryBuffer\n",
|
||||
"from llama_index.core.memory.types import BaseMemory\n",
|
||||
"from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
|
||||
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
|
||||
"from llama_index.llms.azure_openai import AzureOpenAI\n",
|
||||
"from llama_index.llms.openai import OpenAI\n",
|
||||
"from llama_index.tools.wikipedia import WikipediaToolSpec"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define our message type that will be used to communicate with the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@dataclass\n",
|
||||
"class Resource:\n",
|
||||
" content: str\n",
|
||||
" node_id: str\n",
|
||||
" score: Optional[float] = None\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str\n",
|
||||
" sources: Optional[List[Resource]] = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define the agent using LlamaIndex's API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class LlamaIndexAgent(RoutedAgent):\n",
|
||||
" def __init__(self, description: str, llama_index_agent: AgentRunner, memory: BaseMemory | None = None) -> None:\n",
|
||||
" super().__init__(description)\n",
|
||||
"\n",
|
||||
" self._llama_index_agent = llama_index_agent\n",
|
||||
" self._memory = memory\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
"        # retrieve history messages from memory\n",
|
||||
" history_messages: List[ChatMessage] = []\n",
|
||||
"\n",
|
||||
" response: AgentChatResponse # pyright: ignore\n",
|
||||
" if self._memory is not None:\n",
|
||||
" history_messages = self._memory.get(input=message.content)\n",
|
||||
"\n",
|
||||
" response = await self._llama_index_agent.achat(message=message.content, history_messages=history_messages) # pyright: ignore\n",
|
||||
" else:\n",
|
||||
" response = await self._llama_index_agent.achat(message=message.content) # pyright: ignore\n",
|
||||
"\n",
|
||||
" if isinstance(response, AgentChatResponse):\n",
|
||||
" if self._memory is not None:\n",
|
||||
" self._memory.put(ChatMessage(role=MessageRole.USER, content=message.content))\n",
|
||||
" self._memory.put(ChatMessage(role=MessageRole.ASSISTANT, content=response.response))\n",
|
||||
"\n",
|
||||
" assert isinstance(response.response, str)\n",
|
||||
"\n",
|
||||
" resources: List[Resource] = [\n",
|
||||
" Resource(content=source_node.get_text(), score=source_node.score, node_id=source_node.id_)\n",
|
||||
" for source_node in response.source_nodes\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" tools: List[Resource] = [\n",
|
||||
" Resource(content=source.content, node_id=source.tool_name) for source in response.sources\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" resources.extend(tools)\n",
|
||||
" return Message(content=response.response, sources=resources)\n",
|
||||
" else:\n",
|
||||
" return Message(content=\"I'm sorry, I don't have an answer for you.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Setting up LlamaIndex."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# llm = AzureOpenAI(\n",
|
||||
"# deployment_name=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n",
|
||||
"# temperature=0.0,\n",
|
||||
"# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n",
|
||||
"# # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
|
||||
"# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n",
|
||||
"# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n",
|
||||
"# )\n",
|
||||
"llm = OpenAI(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" temperature=0.0,\n",
|
||||
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# embed_model = AzureOpenAIEmbedding(\n",
|
||||
"# deployment_name=os.getenv(\"AZURE_OPENAI_EMBEDDING_MODEL\"),\n",
|
||||
"# temperature=0.0,\n",
|
||||
"# azure_ad_token_provider = get_bearer_token_provider(DefaultAzureCredential()),\n",
|
||||
"# api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
|
||||
"# azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n",
|
||||
"# api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n",
|
||||
"# )\n",
|
||||
"embed_model = OpenAIEmbedding(\n",
|
||||
" model=\"text-embedding-ada-002\",\n",
|
||||
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"Settings.llm = llm\n",
|
||||
"Settings.embed_model = embed_model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create the tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"wiki_spec = WikipediaToolSpec()\n",
|
||||
"wikipedia_tool = wiki_spec.to_tool_list()[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's test the agent. First we need to create an agent runtime and\n",
|
||||
"register the agent, by providing the agent's name and a factory function\n",
|
||||
"that will create the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"chat_agent\",\n",
|
||||
" lambda: LlamaIndexAgent(\n",
|
||||
" description=\"Llama Index Agent\",\n",
|
||||
" llama_index_agent=ReActAgent.from_tools(\n",
|
||||
" tools=[wikipedia_tool],\n",
|
||||
" llm=llm,\n",
|
||||
" max_iterations=8,\n",
|
||||
" memory=ChatSummaryMemoryBuffer(llm=llm, token_limit=16000),\n",
|
||||
" verbose=True,\n",
|
||||
" ),\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"agent = AgentId(\"chat_agent\", \"default\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Start the agent runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime.start()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Send a direct message to the agent, and print the response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"> Running step 3cbf60cd-9827-4dfe-a3a9-eaff2bed9b75. Step input: What are the best movies from studio Ghibli?\n",
|
||||
"\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
|
||||
"Action: search_data\n",
|
||||
"Action Input: {'query': 'best movies from Studio Ghibli'}\n",
|
||||
"\u001b[0m\u001b[1;3;34mObservation: This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Works ==\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Feature films ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Television ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Short films ===\n",
|
||||
"\n",
|
||||
"These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Commercials ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Video games ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Stage productions ===\n",
|
||||
"Princess Mononoke (2013)\n",
|
||||
"Nausicaä of the Valley of the Wind (2019)\n",
|
||||
"Spirited Away (2022)\n",
|
||||
"My Neighbour Totoro (2022)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Other works ===\n",
|
||||
"The works listed here consist of works that do not fall into the above categories. All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Exhibitions ===\n",
|
||||
"A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Related works ==\n",
|
||||
"These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. The list also includes works created in cooperation with Studio Ghibli.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Pre-Ghibli ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Cooperative works ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Distributive works ===\n",
|
||||
"These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Contributive works ===\n",
|
||||
"Studio Ghibli has made contributions to the following anime series and movies:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Significant achievements ==\n",
|
||||
"The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n",
|
||||
"The highest-grossing film of 1991 in Japan: Only Yesterday\n",
|
||||
"The highest-grossing film of 1992 in Japan: Porco Rosso\n",
|
||||
"The highest-grossing film of 1994 in Japan: Pom Poko\n",
|
||||
"The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n",
|
||||
"The highest-grossing film of 2002 in Japan: Spirited Away\n",
|
||||
"The highest-grossing film of 2008 in Japan: Ponyo\n",
|
||||
"The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n",
|
||||
"The highest-grossing film of 2013 in Japan: The Wind Rises\n",
|
||||
"The first Studio Ghibli film to use computer graphics: Pom Poko\n",
|
||||
"The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n",
|
||||
"The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n",
|
||||
"The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n",
|
||||
"The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Notes ==\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== References ==\n",
|
||||
"\u001b[0m> Running step 561e3dd3-d98b-4d37-b612-c99387182ee0. Step input: None\n",
|
||||
"\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer.\n",
|
||||
"Answer: Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n",
|
||||
"\n",
|
||||
"1. **Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n",
|
||||
"2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n",
|
||||
"3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n",
|
||||
"4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n",
|
||||
"5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n",
|
||||
"6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n",
|
||||
"7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n",
|
||||
"8. **The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n",
|
||||
"9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n",
|
||||
"10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n",
|
||||
"\n",
|
||||
"These films are celebrated for their storytelling, animation quality, and emotional depth.\n",
|
||||
"\u001b[0m"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Studio Ghibli has produced many acclaimed films over the years. Some of the best and most popular movies from Studio Ghibli include:\n",
|
||||
"\n",
|
||||
"1. **Spirited Away (2001)** - Directed by Hayao Miyazaki, this film won the Academy Award for Best Animated Feature and is one of the highest-grossing films in Japanese history.\n",
|
||||
"2. **My Neighbor Totoro (1988)** - Another classic by Hayao Miyazaki, this film is beloved for its heartwarming story and iconic characters.\n",
|
||||
"3. **Princess Mononoke (1997)** - This epic fantasy film, also directed by Miyazaki, is known for its complex themes and stunning animation.\n",
|
||||
"4. **Howl's Moving Castle (2004)** - Based on the novel by Diana Wynne Jones, this film features a magical story and beautiful animation.\n",
|
||||
"5. **Kiki's Delivery Service (1989)** - A charming coming-of-age story about a young witch starting her own delivery service.\n",
|
||||
"6. **Grave of the Fireflies (1988)** - Directed by Isao Takahata, this poignant film is a heartbreaking tale of two siblings struggling to survive during World War II.\n",
|
||||
"7. **Ponyo (2008)** - A delightful and visually stunning film about a young fish-girl who wants to become human.\n",
|
||||
"8. **The Wind Rises (2013)** - A more mature film by Miyazaki, focusing on the life of an aircraft designer during wartime Japan.\n",
|
||||
"9. **The Secret World of Arrietty (2010)** - Based on Mary Norton's novel \"The Borrowers,\" this film tells the story of tiny people living secretly in a human house.\n",
|
||||
"10. **Whisper of the Heart (1995)** - A touching story about a young girl discovering her passion for writing.\n",
|
||||
"\n",
|
||||
"These films are celebrated for their storytelling, animation quality, and emotional depth.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = Message(content=\"What are the best movies from studio Ghibli?\")\n",
|
||||
"response = await runtime.send_message(message, agent)\n",
|
||||
"assert isinstance(response, Message)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"This is a list of works (films, television, shorts etc.) by the Japanese animation studio Studio Ghibli.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Works ==\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Feature films ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Television ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Short films ===\n",
|
||||
"\n",
|
||||
"These are short films, including those created for television, theatrical release, and the Ghibli Museum. Original video animation releases and music videos (theatrical and television) are also listed in this section.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Commercials ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Video games ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Stage productions ===\n",
|
||||
"Princess Mononoke (2013)\n",
|
||||
"Nausicaä of the Valley of the Wind (2019)\n",
|
||||
"Spirited Away (2022)\n",
|
||||
"My Neighbour Totoro (2022)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Other works ===\n",
|
||||
"The works listed here consist of works that do not fall into the above categories. All of these films have been released on DVD or Blu-ray in Japan as part of the Ghibli Gakujutsu Library.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Exhibitions ===\n",
|
||||
"A selection of layout designs for animated productions was exhibited in the Studio Ghibli Layout Designs: Understanding the Secrets of Takahata and Miyazaki Animation exhibition tour, which started in the Museum of Contemporary Art Tokyo (July 28, 2008 to September 28, 2008) and subsequently travelled to different museums throughout Japan and Asia, concluding its tour of Japan in the Fukuoka Asian Art Museum (October 12, 2013 to January 26, 2014) and its tour of Asia in the Hong Kong Heritage Museum (May 14, 2014 to August 31, 2014). Between October 4, 2014 and March 1, 2015 the layout designs were exhibited at Art Ludique in Paris. The exhibition catalogues contain annotated reproductions of the displayed artwork.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Related works ==\n",
|
||||
"These works were not created by Studio Ghibli, but were produced by a variety of studios and people who went on to form or join Studio Ghibli. This includes members of Topcraft that went on to create Studio Ghibli in 1985; works produced by Toei Animation, TMS Entertainment, Nippon Animation or other studios and featuring involvement by Hayao Miyazaki, Isao Takahata or other Ghibli staffers. The list also includes works created in cooperation with Studio Ghibli.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Pre-Ghibli ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Cooperative works ===\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Distributive works ===\n",
|
||||
"These Western animated films (plus one Japanese film) have been distributed by Studio Ghibli, and now through their label, Ghibli Museum Library.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"=== Contributive works ===\n",
|
||||
"Studio Ghibli has made contributions to the following anime series and movies:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Significant achievements ==\n",
|
||||
"The highest-grossing film of 1989 in Japan: Kiki's Delivery Service\n",
|
||||
"The highest-grossing film of 1991 in Japan: Only Yesterday\n",
|
||||
"The highest-grossing film of 1992 in Japan: Porco Rosso\n",
|
||||
"The highest-grossing film of 1994 in Japan: Pom Poko\n",
|
||||
"The highest-grossing film of 1995 in Japan; the first Japanese film in Dolby Digital: Whisper of the Heart\n",
|
||||
"The highest-grossing film of 2002 in Japan: Spirited Away\n",
|
||||
"The highest-grossing film of 2008 in Japan: Ponyo\n",
|
||||
"The highest-grossing Japanese film of 2010 in Japan: The Secret World of Arrietty\n",
|
||||
"The highest-grossing film of 2013 in Japan: The Wind Rises\n",
|
||||
"The first Studio Ghibli film to use computer graphics: Pom Poko\n",
|
||||
"The first Miyazaki feature to use computer graphics, and the first Studio Ghibli film to use digital coloring; the first animated feature in Japan's history to gross more than 10 billion yen at the box office and the first animated film ever to win a National Academy Award for Best Picture of the Year: Princess Mononoke\n",
|
||||
"The first Studio Ghibli film to be shot using a 100% digital process: My Neighbors the Yamadas\n",
|
||||
"The first Miyazaki feature to be shot using a 100% digital process; the first film to gross $200 million worldwide before opening in North America; the film to finally overtake Titanic at the Japanese box office, becoming the top-grossing film in the history of Japanese cinema: Spirited Away\n",
|
||||
"The first anime and traditionally animated winner of the Academy Award for Best Animated Feature: Spirited Away at the 75th Academy Awards. They would later win this award for a second time with The Boy and the Heron at the 96th Academy Awards, marking the second time a traditionally animated film won the award.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== Notes ==\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"== References ==\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"if response.sources is not None:\n",
|
||||
" for source in response.sources:\n",
|
||||
" print(source.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Stop the agent runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await runtime.stop()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,842 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAI Assistant Agent\n",
|
||||
"\n",
|
||||
"[Open AI Assistant](https://platform.openai.com/docs/assistants/overview) \n",
|
||||
"and [Azure OpenAI Assistant](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistant)\n",
|
||||
"are server-side APIs for building\n",
|
||||
"agents.\n",
|
||||
"They can be used to build agents in AGNext. This cookbook demonstrates how to\n",
|
||||
"use OpenAI Assistant to create an agent that can run code and do Q&A over documents.\n",
|
||||
"\n",
|
||||
"## Message Protocol\n",
|
||||
"\n",
|
||||
"First, we need to specify the message protocol for the agent backed by \n",
|
||||
"OpenAI Assistant. The message protocol defines the structure of messages\n",
|
||||
"handled and published by the agent. \n",
|
||||
"For illustration, we define a simple\n",
|
||||
"message protocol of 4 message types: `Message`, `Reset`, `UploadForCodeInterpreter` and `UploadForFileSearch`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class TextMessage:\n",
|
||||
" content: str\n",
|
||||
" source: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Reset:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class UploadForCodeInterpreter:\n",
|
||||
" file_path: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class UploadForFileSearch:\n",
|
||||
" file_path: str\n",
|
||||
" vector_store_id: str"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `TextMessage` message type is used to communicate with the agent. It has a\n",
|
||||
"`content` field that contains the message content, and a `source` field\n",
|
||||
"for the sender. The `Reset` message type is a control message that resets\n",
|
||||
"the memory of the agent. It has no fields. This is useful when we need to\n",
|
||||
"start a new conversation with the agent.\n",
|
||||
"\n",
|
||||
"The `UploadForCodeInterpreter` message type is used to upload data files\n",
|
||||
"for the code interpreter and `UploadForFileSearch` message type is used to upload\n",
|
||||
"documents for file search. Both message types have a `file_path` field that contains\n",
|
||||
"the local path to the file to be uploaded.\n",
|
||||
"\n",
|
||||
"## Defining the Agent\n",
|
||||
"\n",
|
||||
"Next, we define the agent class.\n",
|
||||
"The agent class constructor has the following arguments: `description`,\n",
|
||||
"`client`, `assistant_id`, `thread_id`, and `assistant_event_handler_factory`.\n",
|
||||
"The `client` argument is the OpenAI async client object, and the\n",
|
||||
"`assistant_event_handler_factory` is for creating an assistant event handler\n",
|
||||
"to handle OpenAI Assistant events.\n",
|
||||
"This can be used to create streaming output from the assistant.\n",
|
||||
"\n",
|
||||
"The agent class has the following message handlers:\n",
|
||||
"- `handle_message`: Handles the `TextMessage` message type, and sends back the\n",
|
||||
" response from the assistant.\n",
|
||||
"- `handle_reset`: Handles the `Reset` message type, and resets the memory\n",
|
||||
" of the assistant agent.\n",
|
||||
"- `handle_upload_for_code_interpreter`: Handles the `UploadForCodeInterpreter`\n",
|
||||
" message type, and uploads the file to the code interpreter.\n",
|
||||
"- `handle_upload_for_file_search`: Handles the `UploadForFileSearch`\n",
|
||||
" message type, and uploads the document to the file search.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The memory of the assistant is stored inside a thread, which is kept in the\n",
|
||||
"server side. The thread is referenced by the `thread_id` argument."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"import os\n",
|
||||
"from typing import Any, Callable, List\n",
|
||||
"\n",
|
||||
"import aiofiles\n",
|
||||
"from autogen_core.base import AgentId, MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from openai import AsyncAssistantEventHandler, AsyncClient\n",
|
||||
"from openai.types.beta.thread import ToolResources, ToolResourcesFileSearch\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class OpenAIAssistantAgent(RoutedAgent):\n",
|
||||
" \"\"\"An agent implementation that uses the OpenAI Assistant API to generate\n",
|
||||
" responses.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" description (str): The description of the agent.\n",
|
||||
" client (openai.AsyncClient): The client to use for the OpenAI API.\n",
|
||||
" assistant_id (str): The assistant ID to use for the OpenAI API.\n",
|
||||
" thread_id (str): The thread ID to use for the OpenAI API.\n",
|
||||
" assistant_event_handler_factory (Callable[[], AsyncAssistantEventHandler], optional):\n",
|
||||
" A factory function to create an async assistant event handler. Defaults to None.\n",
|
||||
" If provided, the agent will use the streaming mode with the event handler.\n",
|
||||
" If not provided, the agent will use the blocking mode to generate responses.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" description: str,\n",
|
||||
" client: AsyncClient,\n",
|
||||
" assistant_id: str,\n",
|
||||
" thread_id: str,\n",
|
||||
" assistant_event_handler_factory: Callable[[], AsyncAssistantEventHandler],\n",
|
||||
" ) -> None:\n",
|
||||
" super().__init__(description)\n",
|
||||
" self._client = client\n",
|
||||
" self._assistant_id = assistant_id\n",
|
||||
" self._thread_id = thread_id\n",
|
||||
" self._assistant_event_handler_factory = assistant_event_handler_factory\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_message(self, message: TextMessage, ctx: MessageContext) -> TextMessage:\n",
|
||||
" \"\"\"Handle a message. This method adds the message to the thread and publishes a response.\"\"\"\n",
|
||||
" # Save the message to the thread.\n",
|
||||
" await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(\n",
|
||||
" self._client.beta.threads.messages.create(\n",
|
||||
" thread_id=self._thread_id,\n",
|
||||
" content=message.content,\n",
|
||||
" role=\"user\",\n",
|
||||
" metadata={\"sender\": message.source},\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" # Generate a response.\n",
|
||||
" async with self._client.beta.threads.runs.stream(\n",
|
||||
" thread_id=self._thread_id,\n",
|
||||
" assistant_id=self._assistant_id,\n",
|
||||
" event_handler=self._assistant_event_handler_factory(),\n",
|
||||
" ) as stream:\n",
|
||||
" await ctx.cancellation_token.link_future(asyncio.ensure_future(stream.until_done()))\n",
|
||||
"\n",
|
||||
" # Get the last message.\n",
|
||||
" messages = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, order=\"desc\", limit=1))\n",
|
||||
" )\n",
|
||||
" last_message_content = messages.data[0].content\n",
|
||||
"\n",
|
||||
" # Get the text content from the last message.\n",
|
||||
" text_content = [content for content in last_message_content if content.type == \"text\"]\n",
|
||||
" if not text_content:\n",
|
||||
" raise ValueError(f\"Expected text content in the last message: {last_message_content}\")\n",
|
||||
"\n",
|
||||
" return TextMessage(content=text_content[0].text.value, source=self.metadata[\"type\"])\n",
|
||||
"\n",
|
||||
" @message_handler()\n",
|
||||
" async def on_reset(self, message: Reset, ctx: MessageContext) -> None:\n",
|
||||
" \"\"\"Handle a reset message. This method deletes all messages in the thread.\"\"\"\n",
|
||||
" # Get all messages in this thread.\n",
|
||||
" all_msgs: List[str] = []\n",
|
||||
" while True:\n",
|
||||
" if not all_msgs:\n",
|
||||
" msgs = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id))\n",
|
||||
" )\n",
|
||||
" else:\n",
|
||||
" msgs = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, after=all_msgs[-1]))\n",
|
||||
" )\n",
|
||||
" for msg in msgs.data:\n",
|
||||
" all_msgs.append(msg.id)\n",
|
||||
" if not msgs.has_next_page():\n",
|
||||
" break\n",
|
||||
" # Delete all the messages.\n",
|
||||
" for msg_id in all_msgs:\n",
|
||||
" status = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(\n",
|
||||
" self._client.beta.threads.messages.delete(message_id=msg_id, thread_id=self._thread_id)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" assert status.deleted is True\n",
|
||||
"\n",
|
||||
" @message_handler()\n",
|
||||
" async def on_upload_for_code_interpreter(self, message: UploadForCodeInterpreter, ctx: MessageContext) -> None:\n",
|
||||
" \"\"\"Handle an upload for code interpreter. This method uploads a file and updates the thread with the file.\"\"\"\n",
|
||||
" # Get the file content.\n",
|
||||
" async with aiofiles.open(message.file_path, mode=\"rb\") as f:\n",
|
||||
" file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(f.read()))\n",
|
||||
" file_name = os.path.basename(message.file_path)\n",
|
||||
" # Upload the file.\n",
|
||||
" file = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(self._client.files.create(file=(file_name, file_content), purpose=\"assistants\"))\n",
|
||||
" )\n",
|
||||
" # Get existing file ids from tool resources.\n",
|
||||
" thread = await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(self._client.beta.threads.retrieve(thread_id=self._thread_id))\n",
|
||||
" )\n",
|
||||
" tool_resources: ToolResources = thread.tool_resources if thread.tool_resources else ToolResources()\n",
|
||||
" assert tool_resources.code_interpreter is not None\n",
|
||||
" if tool_resources.code_interpreter.file_ids:\n",
|
||||
" file_ids = tool_resources.code_interpreter.file_ids\n",
|
||||
" else:\n",
|
||||
" file_ids = [file.id]\n",
|
||||
" # Update thread with new file.\n",
|
||||
" await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(\n",
|
||||
" self._client.beta.threads.update(\n",
|
||||
" thread_id=self._thread_id,\n",
|
||||
" tool_resources={\n",
|
||||
" \"code_interpreter\": {\"file_ids\": file_ids},\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" @message_handler()\n",
|
||||
" async def on_upload_for_file_search(self, message: UploadForFileSearch, ctx: MessageContext) -> None:\n",
|
||||
" \"\"\"Handle an upload for file search. This method uploads a file and updates the vector store.\"\"\"\n",
|
||||
" # Get the file content.\n",
|
||||
" async with aiofiles.open(message.file_path, mode=\"rb\") as file:\n",
|
||||
" file_content = await ctx.cancellation_token.link_future(asyncio.ensure_future(file.read()))\n",
|
||||
" file_name = os.path.basename(message.file_path)\n",
|
||||
" # Upload the file.\n",
|
||||
" await ctx.cancellation_token.link_future(\n",
|
||||
" asyncio.ensure_future(\n",
|
||||
" self._client.beta.vector_stores.file_batches.upload_and_poll(\n",
|
||||
" vector_store_id=message.vector_store_id,\n",
|
||||
" files=[(file_name, file_content)],\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The agent class is a thin wrapper around the OpenAI Assistant API to implement\n",
|
||||
"the message protocol. More features, such as multi-modal message handling,\n",
|
||||
"can be added by extending the message protocol."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Assistant Event Handler\n",
|
||||
"\n",
|
||||
"The assistant event handler provides call-backs for handling Assistant API\n",
|
||||
"specific events. This is useful for handling streaming output from the assistant\n",
|
||||
"and further user interface integration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from openai import AsyncAssistantEventHandler, AsyncClient\n",
|
||||
"from openai.types.beta.threads import Message, Text, TextDelta\n",
|
||||
"from openai.types.beta.threads.runs import RunStep, RunStepDelta\n",
|
||||
"from typing_extensions import override\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class EventHandler(AsyncAssistantEventHandler):\n",
|
||||
" @override\n",
|
||||
" async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:\n",
|
||||
" print(delta.value, end=\"\", flush=True)\n",
|
||||
"\n",
|
||||
" @override\n",
|
||||
" async def on_run_step_created(self, run_step: RunStep) -> None:\n",
|
||||
" details = run_step.step_details\n",
|
||||
" if details.type == \"tool_calls\":\n",
|
||||
" for tool in details.tool_calls:\n",
|
||||
" if tool.type == \"code_interpreter\":\n",
|
||||
" print(\"\\nGenerating code to interpret:\\n\\n```python\")\n",
|
||||
"\n",
|
||||
" @override\n",
|
||||
" async def on_run_step_done(self, run_step: RunStep) -> None:\n",
|
||||
" details = run_step.step_details\n",
|
||||
" if details.type == \"tool_calls\":\n",
|
||||
" for tool in details.tool_calls:\n",
|
||||
" if tool.type == \"code_interpreter\":\n",
|
||||
" print(\"\\n```\\nExecuting code...\")\n",
|
||||
"\n",
|
||||
" @override\n",
|
||||
" async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:\n",
|
||||
" details = delta.step_details\n",
|
||||
" if details is not None and details.type == \"tool_calls\":\n",
|
||||
" for tool in details.tool_calls or []:\n",
|
||||
" if tool.type == \"code_interpreter\" and tool.code_interpreter and tool.code_interpreter.input:\n",
|
||||
" print(tool.code_interpreter.input, end=\"\", flush=True)\n",
|
||||
"\n",
|
||||
" @override\n",
|
||||
" async def on_message_created(self, message: Message) -> None:\n",
|
||||
" print(f\"{'-'*80}\\nAssistant:\\n\")\n",
|
||||
"\n",
|
||||
" @override\n",
|
||||
" async def on_message_done(self, message: Message) -> None:\n",
|
||||
" # print a citation to the file searched\n",
|
||||
" if not message.content:\n",
|
||||
" return\n",
|
||||
" content = message.content[0]\n",
|
||||
" if not content.type == \"text\":\n",
|
||||
" return\n",
|
||||
" text_content = content.text\n",
|
||||
" annotations = text_content.annotations\n",
|
||||
" citations: List[str] = []\n",
|
||||
" for index, annotation in enumerate(annotations):\n",
|
||||
" text_content.value = text_content.value.replace(annotation.text, f\"[{index}]\")\n",
|
||||
" if file_citation := getattr(annotation, \"file_citation\", None):\n",
|
||||
" client = AsyncClient()\n",
|
||||
" cited_file = await client.files.retrieve(file_citation.file_id)\n",
|
||||
" citations.append(f\"[{index}] {cited_file.filename}\")\n",
|
||||
" if citations:\n",
|
||||
" print(\"\\n\".join(citations))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the Agent\n",
|
||||
"\n",
|
||||
"First we need to use the `openai` client to create the actual assistant,\n",
|
||||
"thread, and vector store. Our AGNext agent will be using these."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import openai\n",
|
||||
"\n",
|
||||
"# Create an assistant with code interpreter and file search tools.\n",
|
||||
"oai_assistant = openai.beta.assistants.create(\n",
|
||||
" model=\"gpt-4o-mini\",\n",
|
||||
" description=\"An AI assistant that helps with everyday tasks.\",\n",
|
||||
" instructions=\"Help the user with their task.\",\n",
|
||||
" tools=[{\"type\": \"code_interpreter\"}, {\"type\": \"file_search\"}],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create a vector store to be used for file search.\n",
|
||||
"vector_store = openai.beta.vector_stores.create()\n",
|
||||
"\n",
|
||||
"# Create a thread which is used as the memory for the assistant.\n",
|
||||
"thread = openai.beta.threads.create(\n",
|
||||
" tool_resources={\"file_search\": {\"vector_store_ids\": [vector_store.id]}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Then, we create a runtime, and register an agent factory function for this \n",
|
||||
"agent with the runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"assistant\",\n",
|
||||
" lambda: OpenAIAssistantAgent(\n",
|
||||
" description=\"OpenAI Assistant Agent\",\n",
|
||||
" client=openai.AsyncClient(),\n",
|
||||
" assistant_id=oai_assistant.id,\n",
|
||||
" thread_id=thread.id,\n",
|
||||
" assistant_event_handler_factory=lambda: EventHandler(),\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"agent = AgentId(\"assistant\", \"default\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's turn on logging to see what's happening under the hood."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.WARNING)\n",
|
||||
"logging.getLogger(\"autogen_core\").setLevel(logging.DEBUG)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's send a greeting message to the agent, and see the response streamed back."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type TextMessage to assistant: {'content': 'Hello, how are you today!', 'source': 'user'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Assistant:\n",
|
||||
"\n",
|
||||
"Hello! I'm here and ready to assist you. How can I help you today?"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"Hello! I'm here and ready to assist you. How can I help you today?\", 'source': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(TextMessage(content=\"Hello, how are you today!\", source=\"user\"), agent)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Assistant with Code Interpreter\n",
|
||||
"\n",
|
||||
"Let's ask some math question to the agent, and see it uses the code interpreter\n",
|
||||
"to answer the question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type TextMessage to assistant: {'content': 'What is 1332322 x 123212?', 'source': 'user'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"# Calculating the product of 1332322 and 123212\n",
|
||||
"result = 1332322 * 123212\n",
|
||||
"result\n",
|
||||
"```\n",
|
||||
"Executing code...\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Assistant:\n",
|
||||
"\n",
|
||||
"The product of 1,332,322 and 123,212 is 164,158,058,264."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The product of 1,332,322 and 123,212 is 164,158,058,264.', 'source': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(TextMessage(content=\"What is 1332322 x 123212?\", source=\"user\"), agent)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's get some data from Seattle Open Data portal. We will be using the\n",
|
||||
"[City of Seattle Wage Data](https://data.seattle.gov/City-Business/City-of-Seattle-Wage-Data/2khk-5ukd/).\n",
|
||||
"Let's download it first."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"response = requests.get(\"https://data.seattle.gov/resource/2khk-5ukd.csv\")\n",
|
||||
"with open(\"seattle_city_wages.csv\", \"wb\") as file:\n",
|
||||
" file.write(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's send the file to the agent using an `UploadForCodeInterpreter` message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type UploadForCodeInterpreter to assistant: {'file_path': 'seattle_city_wages.csv'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type UploadForCodeInterpreter sent by Unknown\n",
|
||||
"INFO:autogen_core:Resolving response with message type NoneType for recipient None from assistant: None\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(UploadForCodeInterpreter(file_path=\"seattle_city_wages.csv\"), agent)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now ask some questions about the data to the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type TextMessage to assistant: {'content': 'Take a look at the uploaded CSV file.', 'source': 'user'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Load the uploaded CSV file to examine its contents\n",
|
||||
"file_path = '/mnt/data/file-oEvRiyGyHc2jZViKyDqL8aoh'\n",
|
||||
"csv_data = pd.read_csv(file_path)\n",
|
||||
"\n",
|
||||
"# Display the first few rows of the dataframe to understand its structure\n",
|
||||
"csv_data.head()\n",
|
||||
"```\n",
|
||||
"Executing code...\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Assistant:\n",
|
||||
"\n",
|
||||
"The uploaded CSV file contains the following columns:\n",
|
||||
"\n",
|
||||
"1. **department**: The department in which the individual works.\n",
|
||||
"2. **last_name**: The last name of the employee.\n",
|
||||
"3. **first_name**: The first name of the employee.\n",
|
||||
"4. **job_title**: The job title of the employee.\n",
|
||||
"5. **hourly_rate**: The hourly rate for the employee's position.\n",
|
||||
"\n",
|
||||
"Here are the first few entries from the file:\n",
|
||||
"\n",
|
||||
"| department | last_name | first_name | job_title | hourly_rate |\n",
|
||||
"|--------------------------------|-----------|------------|------------------------------------|-------------|\n",
|
||||
"| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\n",
|
||||
"| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\n",
|
||||
"| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\n",
|
||||
"| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\n",
|
||||
"| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\n",
|
||||
"\n",
|
||||
"If you need any specific analysis or information from this data, please let me know!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Resolving response with message type TextMessage for recipient None from assistant: {'content': \"The uploaded CSV file contains the following columns:\\n\\n1. **department**: The department in which the individual works.\\n2. **last_name**: The last name of the employee.\\n3. **first_name**: The first name of the employee.\\n4. **job_title**: The job title of the employee.\\n5. **hourly_rate**: The hourly rate for the employee's position.\\n\\nHere are the first few entries from the file:\\n\\n| department | last_name | first_name | job_title | hourly_rate |\\n|--------------------------------|-----------|------------|------------------------------------|-------------|\\n| Police Department | Aagard | Lori | Pol Capt-Precinct | 112.70 |\\n| Police Department | Aakervik | Dag | Pol Ofcr-Detective | 75.61 |\\n| Seattle City Light | Aaltonen | Evan | Pwrline Clear Tree Trimmer | 53.06 |\\n| Seattle Public Utilities | Aar | Abdimallik | Civil Engrng Spec,Sr | 64.43 |\\n| Seattle Dept of Transportation | Abad | Abigail | Admin Spec II-BU | 37.40 |\\n\\nIf you need any specific analysis or information from this data, please let me know!\", 'source': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(TextMessage(content=\"Take a look at the uploaded CSV file.\", source=\"user\"), agent)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type TextMessage to assistant: {'content': 'What are the top-10 salaries?', 'source': 'user'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"# Sorting the data by hourly_rate in descending order and selecting the top 10 salaries\n",
|
||||
"top_10_salaries = csv_data[['first_name', 'last_name', 'job_title', 'hourly_rate']].sort_values(by='hourly_rate', ascending=False).head(10)\n",
|
||||
"top_10_salaries.reset_index(drop=True, inplace=True)\n",
|
||||
"top_10_salaries\n",
|
||||
"```\n",
|
||||
"Executing code...\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Assistant:\n",
|
||||
"\n",
|
||||
"Here are the top 10 salaries based on the hourly rates from the CSV file:\n",
|
||||
"\n",
|
||||
"| First Name | Last Name | Job Title | Hourly Rate |\n",
|
||||
"|------------|-----------|------------------------------------|-------------|\n",
|
||||
"| Eric | Barden | Executive4 | 139.61 |\n",
|
||||
"| Idris | Beauregard| Executive3 | 115.90 |\n",
|
||||
"| Lori | Aagard | Pol Capt-Precinct | 112.70 |\n",
|
||||
"| Krista | Bair | Pol Capt-Precinct | 108.74 |\n",
|
||||
"| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\n",
|
||||
"| Ginger | Armbruster| Executive2 | 102.42 |\n",
|
||||
"| William | Andersen | Executive2 | 102.42 |\n",
|
||||
"| Valarie | Anderson | Executive2 | 102.42 |\n",
|
||||
"| Paige | Alderete | Executive2 | 102.42 |\n",
|
||||
"| Kathryn | Aisenberg | Executive2 | 100.65 |\n",
|
||||
"\n",
|
||||
"If you need any further details or analysis, let me know!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'Here are the top 10 salaries based on the hourly rates from the CSV file:\\n\\n| First Name | Last Name | Job Title | Hourly Rate |\\n|------------|-----------|------------------------------------|-------------|\\n| Eric | Barden | Executive4 | 139.61 |\\n| Idris | Beauregard| Executive3 | 115.90 |\\n| Lori | Aagard | Pol Capt-Precinct | 112.70 |\\n| Krista | Bair | Pol Capt-Precinct | 108.74 |\\n| Amy | Bannister | Fire Chief, Dep Adm-80 Hrs | 104.07 |\\n| Ginger | Armbruster| Executive2 | 102.42 |\\n| William | Andersen | Executive2 | 102.42 |\\n| Valarie | Anderson | Executive2 | 102.42 |\\n| Paige | Alderete | Executive2 | 102.42 |\\n| Kathryn | Aisenberg | Executive2 | 100.65 |\\n\\nIf you need any further details or analysis, let me know!', 'source': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(TextMessage(content=\"What are the top-10 salaries?\", source=\"user\"), agent)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Assistant with File Search\n",
|
||||
"\n",
|
||||
"Let's try the Q&A over document feature. We first download Wikipedia page\n",
|
||||
"on the Third Anglo-Afghan War."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = requests.get(\"https://en.wikipedia.org/wiki/Third_Anglo-Afghan_War\")\n",
|
||||
"with open(\"third_anglo_afghan_war.html\", \"wb\") as file:\n",
|
||||
" file.write(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Send the file to the agent using an `UploadForFileSearch` message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type UploadForFileSearch to assistant: {'file_path': 'third_anglo_afghan_war.html', 'vector_store_id': 'vs_h3xxPbJFnd1iZ9WdjsQwNdrp'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type UploadForFileSearch sent by Unknown\n",
|
||||
"INFO:autogen_core:Resolving response with message type NoneType for recipient None from assistant: None\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(\n",
|
||||
" UploadForFileSearch(file_path=\"third_anglo_afghan_war.html\", vector_store_id=vector_store.id), agent\n",
|
||||
")\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's ask some questions about the document to the agent. Before asking,\n",
|
||||
"we reset the agent memory to start a new conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Sending message of type Reset to assistant: {}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type Reset sent by Unknown\n",
|
||||
"INFO:autogen_core:Resolving response with message type NoneType for recipient None from assistant: None\n",
|
||||
"INFO:autogen_core:Sending message of type TextMessage to assistant: {'content': 'When and where was the treaty of Rawalpindi signed? Answer using the document provided.', 'source': 'user'}\n",
|
||||
"INFO:autogen_core:Calling message handler for assistant:default with message type TextMessage sent by Unknown\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Assistant:\n",
|
||||
"\n",
|
||||
"The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan【6:0†source】."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:autogen_core:Resolving response with message type TextMessage for recipient None from assistant: {'content': 'The Treaty of Rawalpindi was signed on **8 August 1919**. The location of the signing was in **Rawalpindi**, which is in present-day Pakistan【6:0†source】.', 'source': 'assistant'}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0] third_anglo_afghan_war.html\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(Reset(), agent)\n",
|
||||
"await runtime.send_message(\n",
|
||||
" TextMessage(\n",
|
||||
" content=\"When and where was the treaty of Rawalpindi signed? Answer using the document provided.\", source=\"user\"\n",
|
||||
" ),\n",
|
||||
" agent,\n",
|
||||
")\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That's it! We have successfully built an agent backed by OpenAI Assistant."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
# Termination using Intervention Handler
|
||||
|
||||
```{note}
|
||||
This method is only really valid for single-tenant applications. If multiple parallel users are using the application via namespaces, this approach will not work without modification.
|
||||
```
|
||||
|
||||
There are many different ways to handle termination in `autogen_core`. Ultimately, the goal is to detect that the runtime no longer needs to be executed and you can proceed to finalization tasks. One way to do this is to use an `InterventionHandler` to detect a termination message and then act on it.
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from autogen_core.base import AgentId, CancellationToken
|
||||
from autogen_core.base.intervention import DefaultInterventionHandler
|
||||
```
|
||||
|
||||
First, we define a dataclass that will be used to signal termination.
|
||||
|
||||
```python
|
||||
@dataclass
class Termination:
    """Message published by an agent to signal that the runtime should stop."""

    # Human-readable explanation of why termination was requested.
    reason: str
|
||||
```
|
||||
|
||||
We code our agent to publish a termination message when it decides it is time to terminate.
|
||||
|
||||
```python
|
||||
class AnAgent(RoutedAgent):
    """Example agent that requests termination after handling three messages."""

    def __init__(self) -> None:
        super().__init__("MyAgent")
        # Count of messages handled so far.
        self.received = 0

    @message_handler
    async def on_new_message(self, message: str, cancellation_token: CancellationToken) -> None:
        """Count the incoming message and publish a Termination once the limit is exceeded."""
        self.received += 1
        if self.received > 3:
            # publish_message is a coroutine; without the await the message is
            # never actually published (RuntimeWarning: coroutine never awaited).
            await self.publish_message(Termination(reason="Reached maximum number of messages"))
|
||||
```
|
||||
|
||||
Next, we create an `InterventionHandler` that will detect the termination message and act on it. This one hooks into message publishing, and when it encounters `Termination` it alters its internal state to indicate that termination has been requested.
|
||||
```python
|
||||
|
||||
class TerminationHandler(DefaultInterventionHandler):
    """Intervention handler that records a published ``Termination`` message.

    The runtime calls ``on_publish`` for every published message; when a
    ``Termination`` is observed its value is stored so the driving loop can
    detect that the application should stop.
    """

    def __init__(self) -> None:
        # Stored under a private name. The original code assigned to
        # ``self.termination_value`` here, which collides with the read-only
        # property of the same name below: the assignment raises
        # AttributeError, and the property getter recurses into itself.
        self._termination_value: Termination | None = None

    async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any:
        """Record the message if it is a Termination; always pass it through unchanged."""
        if isinstance(message, Termination):
            self._termination_value = message
        return message

    @property
    def termination_value(self) -> Termination | None:
        """The ``Termination`` message seen so far, or ``None`` if none yet."""
        return self._termination_value

    @property
    def has_terminated(self) -> bool:
        """Whether a ``Termination`` message has been published."""
        return self._termination_value is not None
|
||||
```
|
||||
|
||||
Finally, we add this handler to the runtime and use it to detect termination and cease running the `process_next` loop once it has encountered termination.
|
||||
|
||||
```python
|
||||
async def main() -> None:
    # Install the termination handler so every published message is inspected
    # by TerminationHandler.on_publish before delivery.
    termination_handler = TerminationHandler()
    runtime = SingleThreadedAgentRuntime(
        intervention_handler=termination_handler
    )

    # Add Agents and kick off task

    # Process one message at a time until a Termination message is observed.
    while not termination_handler.has_terminated:
        await runtime.process_next()

    # Report why the run terminated.
    print(termination_handler.termination_value)


if __name__ == "__main__":
    asyncio.run(main())
|
||||
```
|
||||
@@ -0,0 +1,66 @@
|
||||
# Using Type Routed Agent
|
||||
|
||||
To make it easier to implement agents that respond to certain message types there is a base class called {py:class}`~autogen_core.components.RoutedAgent`. This class provides a simple decorator pattern for associating message types with message handlers.
|
||||
|
||||
The decorator {py:func}`autogen_core.components.message_handler` should be added to functions in the class that are intended to handle messages. These functions have a specific signature that needs to be followed for it to be recognized as a message handler.
|
||||
|
||||
- The function must be an `async` function.
|
||||
- The function must be decorated with the `message_handler` decorator.
|
||||
- The function must have exactly 3 arguments.
|
||||
- `self`
|
||||
- `message`: The message to be handled, this must be type hinted with the message type that it is intended to handle.
|
||||
- `cancellation_token`: A {py:class}`autogen_core.base.CancellationToken` object
|
||||
- The function must be type hinted with what message types it can return.
|
||||
|
||||
```{tip}
|
||||
Handlers can handle more than one message type by accepting a Union of the message types. It can also return more than one message type by returning a Union of the message types.
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
The following is an example of a simple agent that broadcasts the fact it received messages, and resets its internal counter when it receives a reset message.
|
||||
|
||||
One important thing to point out is that when an agent is constructed it must be passed a runtime object. This allows the agent to communicate with other agents via the runtime.
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Union
|
||||
from autogen_core.components import RoutedAgent, message_handler, Image
|
||||
from autogen_core.base import AgentRuntime, CancellationToken
|
||||
|
||||
@dataclass
|
||||
class TextMessage:
|
||||
content: str
|
||||
source: str
|
||||
|
||||
@dataclass
|
||||
class MultiModalMessage:
|
||||
content: List[Union[str, Image]]
|
||||
source: str
|
||||
|
||||
@dataclass
|
||||
class Reset:
|
||||
pass
|
||||
|
||||
|
||||
class MyAgent(RoutedAgent):
|
||||
def __init__(self):
|
||||
super().__init__(description="I am a demo agent")
|
||||
self._received_count = 0
|
||||
|
||||
@message_handler()
|
||||
async def on_text_message(
|
||||
self, message: TextMessage | MultiModalMessage, cancellation_token: CancellationToken
|
||||
) -> None:
|
||||
self._received_count += 1
|
||||
await self.publish_message(
|
||||
TextMessage(
|
||||
content=f"I received a message from {message.source}. Message received #{self._received_count}",
|
||||
source=self.metadata["type"],
|
||||
)
|
||||
)
|
||||
|
||||
@message_handler()
|
||||
async def on_reset(self, message: Reset, cancellation_token: CancellationToken) -> None:
|
||||
self._received_count = 0
|
||||
```
|
||||
@@ -0,0 +1,32 @@
|
||||
# Agent and Multi-Agent Application
|
||||
|
||||
An agent is a software entity that
|
||||
communicates via messages, maintains a state,
|
||||
and performs actions in response to messages or a change in its state.
|
||||
Actions can result in changes to the agent's state and external effects,
|
||||
for example, updating message history, sending a message, executing code,
|
||||
or making external API calls.
|
||||
|
||||
A wide variety of software applications can be modeled as a collection of independent
|
||||
agents that communicate with each other:
|
||||
sensors on a factory floor,
|
||||
distributed services powering web applications,
|
||||
business workflows involving multiple stakeholders,
|
||||
and more recently, artificial intelligence (AI) agents powered by language models
|
||||
(e.g., GPT-4) that can write code and interact with
|
||||
other software systems.
|
||||
We refer to them as multi-agent applications.
|
||||
|
||||
```{note}
|
||||
AI agents make use of language models as part of
|
||||
their software stacks to perform actions.
|
||||
```
|
||||
|
||||
In a multi-agent application, agents can live in the same process, on the same machine,
|
||||
or on different machines and across organizational boundaries.
|
||||
They can be implemented using different AI models, instructions, and programming languages.
|
||||
They can collaborate and work toward a common goal.
|
||||
|
||||
Each agent is a self-contained unit:
|
||||
developers can build, test and deploy it independently, and reuse it for different scenarios.
|
||||
Agents are composable: simple agents can form complex applications.
|
||||
@@ -0,0 +1,54 @@
|
||||
# Agent Identity and Lifecycle
|
||||
|
||||
In AGNext, the agent runtime manages agents' identities
|
||||
and lifecycles.
|
||||
The application does not create agents directly; rather,
|
||||
it registers an agent type with a factory function for
|
||||
agent instances.
|
||||
In this section, we explain how agents are identified
|
||||
and created by the runtime.
|
||||
|
||||
## Agent ID
|
||||
|
||||
Agent ID uniquely identifies an agent instance within
|
||||
an agent runtime -- including distributed runtime.
|
||||
It is the "address" of the agent instance for receiving messages.
|
||||
It has two components: agent type and agent key.
|
||||
|
||||
```{note}
|
||||
Agent ID = (Agent Type, Agent Key)
|
||||
```
|
||||
|
||||
The agent type is not an agent class.
|
||||
It associates an agent with a specific
|
||||
factory function, which produces instances of agents
|
||||
of the same agent type.
|
||||
For example, different factory functions can produce the same
|
||||
agent class but with different constructor parameters.
|
||||
The agent key is an instance identifier
|
||||
for the given agent type.
|
||||
|
||||
In a multi-agent application, agent types are
|
||||
typically defined directly by the application, i.e., they
|
||||
are defined in the application code.
|
||||
On the other hand, agent keys are typically generated given
|
||||
messages delivered to the agents, i.e., they are defined
|
||||
by the application data.
|
||||
|
||||
For example, a runtime has registered the agent type `"code_reviewer"`
|
||||
with a factory function producing agent instances that perform
|
||||
code reviews. Each code review request has a unique ID `review_request_id`
|
||||
to mark a dedicated
|
||||
session.
|
||||
In this case, each request can be handled by a new instance
|
||||
with an agent ID, `("code_reviewer", review_request_id)`.
|
||||
|
||||
## Agent Lifecycle
|
||||
|
||||
When a runtime delivers a message to an agent instance given its ID,
|
||||
it either fetches the instance,
|
||||
or creates it if it does not exist.
|
||||
|
||||
The runtime is also responsible for "paging in" or "out" agent instances
|
||||
to conserve resources and balance load across multiple machines.
|
||||
This is not implemented yet.
|
||||
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 114 KiB |
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 196 KiB |
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 109 KiB |
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 100 KiB |
@@ -0,0 +1,51 @@
|
||||
# AGNext Application Stack
|
||||
|
||||
AGNext is designed to be an unopinionated framework that can be used to build
|
||||
a wide variety of multi-agent applications. It is not tied to any specific
|
||||
agent abstraction or multi-agent pattern.
|
||||
|
||||
The following diagram shows the AGNext application stack.
|
||||
|
||||

|
||||
|
||||
At the bottom of the stack is the base messaging and routing facilities that
|
||||
enable agents to communicate with each other. These are managed by the
|
||||
agent runtime, and for most applications, developers only need to interact
|
||||
with the high-level APIs provided by the runtime (see [Agent and Agent Runtime](../getting-started/agent-and-agent-runtime.ipynb)).
|
||||
|
||||
On top of the communication stack, developers need to define the
|
||||
types of the messages that agents exchange. A set of message types
|
||||
forms a behavior contract that agents must adhere to, and the
|
||||
implementation of the contracts determines how agents handle messages.
|
||||
The behavior contract is sometimes referred to as the message protocol.
|
||||
It is the developer's responsibility to implement the behavior contract.
|
||||
Multi-agent patterns are design patterns that emerge from behavior contracts
|
||||
(see [Multi-Agent Design Patterns](../getting-started/multi-agent-design-patterns.md)).
|
||||
|
||||
## An Example Application
|
||||
|
||||
Consider a concrete example of a multi-agent application for
|
||||
code generation. The application consists of three agents:
|
||||
Coder Agent, Executor Agent, and Reviewer Agent.
|
||||
The following diagram shows the data flow between the agents,
|
||||
and the message types exchanged between them.
|
||||
|
||||

|
||||
|
||||
In this example, the behavior contract consists of the following:
|
||||
|
||||
- `CodingTaskMsg` message from application to the Coder Agent
|
||||
- `CodeGenMsg` from Coder Agent to Executor Agent
|
||||
- `ExecutionResultMsg` from Executor Agent to Reviewer Agent
|
||||
- `ReviewMsg` from Reviewer Agent to Coder Agent
|
||||
- `CodingResultMsg` from the Reviewer Agent to the application
|
||||
|
||||
The behavior contract is implemented by the agents' handling of these messages. For example, the Reviewer Agent listens for `ExecutionResultMsg`
|
||||
and evaluates the code execution result to decide whether to approve or reject,
|
||||
if approved, it sends a `CodingResultMsg` to the application,
|
||||
otherwise, it sends a `ReviewMsg` to the Coder Agent for another round of
|
||||
code generation.
|
||||
|
||||
This behavior contract is a case of a multi-agent pattern called Reflection,
|
||||
where a generation result is reviewed by another round of generation,
|
||||
to improve the overall quality.
|
||||
@@ -0,0 +1,56 @@
|
||||
# AGNext Architecture
|
||||
|
||||
AGNext is a framework for building multi-agent applications with AI agents.
|
||||
At the foundation level, it provides a runtime environment to facilitate
|
||||
communication between agents, manage their identities and lifecycles,
|
||||
and enforce security and privacy boundaries.
|
||||
|
||||
## Runtime Architecture
|
||||
|
||||
The following diagram shows the runtime architecture of AGNext.
|
||||
|
||||

|
||||
|
||||
Agents communicate via messages through the runtime.
|
||||
A runtime, as shown in the diagram,
|
||||
can consist of a hosted runtime and multiple worker runtimes.
|
||||
Agents in worker runtimes communicate with other agents via the hosted runtime
|
||||
through gateways, while agents in the hosted runtime communicate
|
||||
directly with each other.
|
||||
Most single-process applications need only an embedded hosted runtime.
|
||||
|
||||
AGNext also offers a set of unopinionated and extensible components for building AI agents.
|
||||
It does not prescribe an abstraction for AI agents, rather, it provides
|
||||
a minimal base layer that can be extended to suit the application's needs.
|
||||
Developers can build agents quickly by using the provided components including
|
||||
routed agent, AI model clients, tools for AI models, code execution sandboxes,
|
||||
memory stores, and more.
|
||||
|
||||
## API Layers
|
||||
|
||||
The API consists of the following layers:
|
||||
|
||||
- {py:mod}`autogen_core.base`
|
||||
- {py:mod}`autogen_core.application`
|
||||
- {py:mod}`autogen_core.components`
|
||||
|
||||
The following diagram shows the relationship between the layers.
|
||||
|
||||

|
||||
|
||||
The {py:mod}`autogen_core.base` layer defines the
|
||||
core interfaces and base classes for agents, messages, and runtime.
|
||||
This layer is the foundation of the framework and is used by the other layers.
|
||||
|
||||
The {py:mod}`autogen_core.application` layer provides concrete implementations of
|
||||
runtime and utilities like logging for building multi-agent applications.
|
||||
|
||||
The {py:mod}`autogen_core.components` layer provides reusable components for building
|
||||
AI agents, including type-routed agents, AI model clients, tools for AI models,
|
||||
code execution sandboxes, and memory stores.
|
||||
|
||||
The layers are loosely coupled and can be used independently. For example,
|
||||
you can swap out the runtime in the {py:mod}`autogen_core.application` layer with your own
|
||||
runtime implementation.
|
||||
You can also skip the components in the {py:mod}`autogen_core.components` layer and
|
||||
build your own components.
|
||||
@@ -0,0 +1,76 @@
|
||||
# Topic and Subscription in Broadcast
|
||||
|
||||
In AGNext, there are two ways for runtime to deliver messages,
|
||||
direct messaging or broadcast. Direct messaging is one to one: the sender
|
||||
must provide the recipient's agent ID. On the other hand,
|
||||
broadcast is one to many and the sender does not provide recipients'
|
||||
agent IDs.
|
||||
|
||||
Many scenarios are suitable for broadcast.
|
||||
For example, in event-driven workflows, agents do not always know who
|
||||
will handle their messages, and a workflow can be composed of agents
|
||||
with no inter-dependencies.
|
||||
This section focuses on the core concepts in broadcast: topic and subscription.
|
||||
|
||||
## Topic
|
||||
|
||||
A topic defines the scope of a broadcast message.
|
||||
In essence, AGNext agent runtime implements a publish-subscribe model through
|
||||
its broadcast API: when publishing a message, the topic must be specified.
|
||||
It is an indirection over agent IDs.
|
||||
|
||||
A topic consists of two components: topic type and topic source.
|
||||
|
||||
```{note}
|
||||
Topic = (Topic Type, Topic Source)
|
||||
```
|
||||
|
||||
Similar to [agent ID](./agent-identity-and-lifecycle.md#agent-id),
|
||||
which also has two components, topic type is usually defined by
|
||||
application code to mark the type of messages the topic is for.
|
||||
For example, a GitHub agent may use `"GitHub_Issues"` as the topic type
|
||||
when publishing messages about new issues.
|
||||
|
||||
Topic source is the unique identifier for a topic within a topic type.
|
||||
It is typically defined by application data.
|
||||
For example, the GitHub agent may use `"github.com/{repo_name}/issues/{issue_number}"`
|
||||
as the topic source to uniquely identify the topic.
|
||||
Topic source allows the publisher to limit the scope of messages and create
|
||||
silos.
|
||||
|
||||
## Subscription
|
||||
|
||||
A subscription maps a topic to agent IDs.
|
||||
|
||||
If a topic has no subscription, messages published to this topic will
|
||||
not be delivered to any agent.
|
||||
If a topic has many subscriptions, messages will be delivered
|
||||
following all the subscriptions to every recipient agent only once.
|
||||
Applications can add or remove subscriptions using agent runtime's API.
|
||||
|
||||
### Type-based Subscription
|
||||
|
||||
A type-based subscription maps a topic type to an agent type
|
||||
(see [agent ID](./agent-identity-and-lifecycle.md#agent-id)).
|
||||
It declares a mapping from topics to agent IDs without knowing the
|
||||
exact topic sources and agent keys.
|
||||
The mechanism is simple: any topic matching the type-based subscription's
|
||||
topic type will be mapped to an agent ID with the subscription's agent type
|
||||
and the agent key assigned to the value of the topic source.
|
||||
|
||||
```{note}
|
||||
Type-Based Subscription = Topic Type --> Agent Type
|
||||
```
|
||||
|
||||
For example, a type-based subscription maps topic type `"GitHub_Issues"`
|
||||
to agent type `"Triage_Agent"`.
|
||||
When a broadcast message is published to the topic
|
||||
`("GitHub_Issues", "github.com/microsoft/autogen/issues/99")`,
|
||||
the subscription maps the topic to an agent instance with ID
|
||||
`("Triage_Agent", "github.com/microsoft/autogen/issues/99")`,
|
||||
and the runtime will deliver the message to that agent, creating it
|
||||
if it does not exist.
|
||||
|
||||
Generally speaking, type-based subscription is the preferred way to declare
|
||||
subscriptions. It is portable and data-independent:
|
||||
developers do not need to write application code that depends on specific agent IDs.
|
||||
@@ -0,0 +1,262 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agent and Agent Runtime\n",
|
||||
"\n",
|
||||
"In this and the following section, we focus on the core concepts of AGNext:\n",
|
||||
"agents, agent runtime, messages, and communication.\n",
|
||||
"You will not find any AI models or tools here, just the foundational\n",
|
||||
"building blocks for building multi-agent applications."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"An agent in AGNext is an entity defined by the base class {py:class}`autogen_core.base.BaseAgent`.\n",
|
||||
"It has a unique identifier of the type {py:class}`autogen_core.base.AgentId`,\n",
|
||||
"a metadata dictionary of the type {py:class}`autogen_core.base.AgentMetadata`,\n",
|
||||
"and method for handling messages {py:meth}`autogen_core.base.BaseAgent.on_message`.\n",
|
||||
"\n",
|
||||
"An agent runtime is the execution environment for agents in AGNext.\n",
|
||||
"Similar to the runtime environment of a programming language,\n",
|
||||
"an agent runtime provides the necessary infrastructure to facilitate communication\n",
|
||||
"between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n",
|
||||
"debugging.\n",
|
||||
"For local development, developers can use {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n",
|
||||
"which can be embedded in a Python application.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"Agents are not directly instantiated and managed by application code.\n",
|
||||
"Instead, they are created by the runtime when needed and managed by the runtime.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Implementing an Agent\n",
|
||||
"\n",
|
||||
"To implement an agent, developers must subclass the {py:class}`~autogen_core.base.BaseAgent` class,\n",
|
||||
"declare the message types it can handle in the {py:attr}`~autogen_core.base.AgentMetadata.subscriptions` metadata,\n",
|
||||
"and implement the {py:meth}`~autogen_core.base.BaseAgent.on_message` method.\n",
|
||||
"This method is invoked when the agent receives a message. For example,\n",
|
||||
"the following agent handles a simple message type and simply prints message it receives:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"from autogen_core.base import AgentId, BaseAgent, MessageContext\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class MyMessage:\n",
|
||||
" content: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyAgent(BaseAgent):\n",
|
||||
" def __init__(self) -> None:\n",
|
||||
" super().__init__(\"MyAgent\")\n",
|
||||
"\n",
|
||||
" async def on_message(self, message: MyMessage, ctx: MessageContext) -> None:\n",
|
||||
" print(f\"Received message: {message.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For convenience, developers can subclass the {py:class}`~autogen_core.components.RoutedAgent` class\n",
|
||||
"which provides an easy-to use API to implement different message handlers for different message types.\n",
|
||||
"See the section on message handlers below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Registering Agent Type\n",
|
||||
"\n",
|
||||
"To make agents available to the runtime, developers can use the\n",
|
||||
"{py:meth}`~autogen_core.base.AgentRuntime.register` method.\n",
|
||||
"The process of registration associates an agent type and a factory function\n",
|
||||
"that creates an instance of the agent type.\n",
|
||||
"The factory function is used to allow automatic creation of agent instances \n",
|
||||
"when they are needed.\n",
|
||||
"Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n",
|
||||
"about agent type and identity.\n",
|
||||
"\n",
|
||||
"For example, to register an agent type with the \n",
|
||||
"{py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n",
|
||||
"the following code can be used:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AgentType(type='my_agent')"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\"my_agent\", lambda: MyAgent())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once an agent type is registered, we can send a direct message to an agent instance\n",
|
||||
"using an {py:class}`~autogen_core.base.AgentId`.\n",
|
||||
"The runtime will create the instance the first time it delivers a\n",
|
||||
"message to this instance."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received message: Hello, World!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_id = AgentId(\"my_agent\", \"default\")\n",
|
||||
"runtime.start() # Start processing messages in the background.\n",
|
||||
"await runtime.send_message(MyMessage(content=\"Hello, World!\"), agent_id)\n",
|
||||
"await runtime.stop() # Stop processing messages in the background."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"Because the runtime manages the lifecycle of agents, an {py:class}`~autogen_core.base.AgentId`\n",
|
||||
"is only used to communicate with the agent or retrieve its metadata (e.g., description).\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Running the Single-Threaded Agent Runtime\n",
|
||||
"\n",
|
||||
"The above code snippet uses `runtime.start()` to start a background task\n",
|
||||
"to process and deliver messages to recipients' message handlers.\n",
|
||||
"This is a feature of the\n",
|
||||
"local embedded runtime {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`.\n",
|
||||
"\n",
|
||||
"To stop the background task immediately, use the `stop()` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"# ... Send messages, publish messages, etc.\n",
|
||||
"await runtime.stop() # This will return immediately but will not cancel\n",
|
||||
"# any in-progress message handling."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can resume the background task by calling `start()` again.\n",
|
||||
"\n",
|
||||
"For batch scenarios such as running benchmarks for evaluating agents,\n",
|
||||
"you may want to wait for the background task to stop automatically when\n",
|
||||
"there are no unprocessed messages and no agent is handling messages --\n",
|
||||
"the batch may be considered complete.\n",
|
||||
"You can achieve this by using the `stop_when_idle()` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"# ... Send messages, publish messages, etc.\n",
|
||||
"await runtime.stop_when_idle() # This will block until the runtime is idle."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also directly process messages one-by-one without a background task using:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await runtime.process_next()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Other runtime implementations will have their own ways of running the runtime."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 61 KiB |
File diff suppressed because one or more lines are too long
@@ -0,0 +1,37 @@
|
||||
# Installation
|
||||
|
||||
The repo is private, so the installation process is a bit more involved than usual.
|
||||
|
||||
## Option 1: Install from a local clone
|
||||
|
||||
Make a clone of the repo:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/microsoft/autogen_core.git
|
||||
```
|
||||
|
||||
You can install the package by running:
|
||||
|
||||
```sh
|
||||
cd autogen_core/python
|
||||
pip install .
|
||||
```
|
||||
|
||||
## Option 2: Install from GitHub
|
||||
|
||||
To install the package from GitHub, you will need to authenticate with GitHub.
|
||||
|
||||
```sh
|
||||
GITHUB_TOKEN=$(gh auth token)
|
||||
pip install "git+https://oauth2:$GITHUB_TOKEN@github.com/microsoft/autogen_core.git#subdirectory=python"
|
||||
```
|
||||
|
||||
### Using a Personal Access Token instead of `gh` CLI
|
||||
|
||||
If you don't have the `gh` CLI installed, you can generate a personal access token from the GitHub website.
|
||||
|
||||
1. Go to [New fine-grained personal access token](https://github.com/settings/personal-access-tokens/new)
|
||||
2. Set `Resource Owner` to `Microsoft`
|
||||
3. Set `Repository Access` to `Only select repositories` and select `Microsoft/autogen_core`
|
||||
4. Set `Permissions` to `Repository permissions` and select `Contents: Read`
|
||||
5. Use the generated token for `GITHUB_TOKEN` in the command above
|
||||
@@ -0,0 +1,531 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Message and Communication\n",
|
||||
"\n",
|
||||
"An agent in AGNext can react to, send, and publish messages,\n",
|
||||
"and messages are the only means through which agents can communicate\n",
|
||||
"with each other."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Messages\n",
|
||||
"\n",
|
||||
"Messages are serializable objects, they can be defined using:\n",
|
||||
"\n",
|
||||
"- A subclass of Pydantic's {py:class}`pydantic.BaseModel`, or\n",
|
||||
"- A dataclass\n",
|
||||
"\n",
|
||||
"For example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class TextMessage:\n",
|
||||
" content: str\n",
|
||||
" source: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class ImageMessage:\n",
|
||||
" url: str\n",
|
||||
" source: str"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"Messages are purely data, and should not contain any logic.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Message Handlers\n",
|
||||
"\n",
|
||||
"When an agent receives a message, the runtime will invoke the agent's message handler\n",
|
||||
"({py:meth}`~agnext.base.Agent.on_message`) which should implement the agent's message handling logic.\n",
|
||||
"If this message cannot be handled by the agent, the agent should raise a\n",
|
||||
"{py:class}`~agnext.base.exceptions.CantHandleException`.\n",
|
||||
"\n",
|
||||
"For convenience, the {py:class}`~agnext.components.RoutedAgent` base class\n",
|
||||
"provides the {py:meth}`~agnext.components.message_handler` decorator\n",
|
||||
"for associating message types with message handlers,\n",
|
||||
"so developers do not need to implement the {py:meth}`~agnext.base.Agent.on_message` method.\n",
|
||||
"\n",
|
||||
"For example, the following type-routed agent responds to `TextMessage` and `ImageMessage`\n",
|
||||
"using different message handlers:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import AgentId, MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyAgent(RoutedAgent):\n",
|
||||
" @message_handler\n",
|
||||
" async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:\n",
|
||||
" print(f\"Hello, {message.source}, you said {message.content}!\")\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def on_image_message(self, message: ImageMessage, ctx: MessageContext) -> None:\n",
|
||||
" print(f\"Hello, {message.source}, you sent me {message.url}!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create the agent runtime and register the agent type (see [Agent and Agent Runtime](agent-and-agent-runtime.ipynb)):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AgentType(type='my_agent')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\"my_agent\", lambda: MyAgent(\"My Agent\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Test this agent with `TextMessage` and `ImageMessage`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello, User, you said Hello, World!!\n",
|
||||
"Hello, User, you sent me https://example.com/image.jpg!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime.start()\n",
|
||||
"agent_id = AgentId(\"my_agent\", \"default\")\n",
|
||||
"await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"User\"), agent_id)\n",
|
||||
"await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"User\"), agent_id)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Messaging\n",
|
||||
"\n",
|
||||
"There are two types of communication in AGNext:\n",
|
||||
"\n",
|
||||
"- **Direct Messaging**: sends a direct message to another agent.\n",
|
||||
"- **Broadcast**: publishes a message to a topic.\n",
|
||||
"\n",
|
||||
"Let's first look at direct messaging.\n",
|
||||
"To send a direct message to another agent, within a message handler use\n",
|
||||
"the {py:meth}`agnext.base.BaseAgent.send_message` method,\n",
|
||||
"from the runtime use the {py:meth}`agnext.base.AgentRuntime.send_message` method.\n",
|
||||
"Awaiting calls to these methods will return the return value of the\n",
|
||||
"receiving agent's message handler.\n",
|
||||
"When the receiving agent's handler returns `None`, `None` will be returned.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"If the invoked agent raises an exception while the sender is awaiting,\n",
|
||||
"the exception will be propagated back to the sender.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Request/Response\n",
|
||||
"\n",
|
||||
"Direct messaging can be used for request/response scenarios,\n",
|
||||
"where the sender expects a response from the receiver.\n",
|
||||
"The receiver can respond to the message by returning a value from its message handler.\n",
|
||||
"You can think of this as a function call between agents.\n",
|
||||
"\n",
|
||||
"For example, consider the following agents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class InnerAgent(RoutedAgent):\n",
|
||||
" @message_handler\n",
|
||||
" async def on_my_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
" return Message(content=f\"Hello from inner, {message.content}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class OuterAgent(RoutedAgent):\n",
|
||||
" def __init__(self, description: str, inner_agent_type: str):\n",
|
||||
" super().__init__(description)\n",
|
||||
" self.inner_agent_id = AgentId(inner_agent_type, self.id.key)\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n",
|
||||
" print(f\"Received message: {message.content}\")\n",
|
||||
"        # Send a direct message to the inner agent and receives a response.\n",
|
||||
" response = await self.send_message(Message(f\"Hello from outer, {message.content}\"), self.inner_agent_id)\n",
|
||||
" print(f\"Received inner response: {response.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Upon receiving a message, the `OuterAgent` sends a direct message to the `InnerAgent` and receives\n",
|
||||
"a message in response.\n",
|
||||
"\n",
|
||||
"We can test these agents by sending a `Message` to the `OuterAgent`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received message: Hello, World!\n",
|
||||
"Received inner response: Hello from inner, Hello from outer, Hello, World!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\"inner_agent\", lambda: InnerAgent(\"InnerAgent\"))\n",
|
||||
"await runtime.register(\"outer_agent\", lambda: OuterAgent(\"OuterAgent\", \"inner_agent\"))\n",
|
||||
"runtime.start()\n",
|
||||
"outer = AgentId(\"outer_agent\", \"default\")\n",
|
||||
"await runtime.send_message(Message(content=\"Hello, World!\"), outer)\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Both outputs are produced by the `OuterAgent`'s message handler, however the second output is based on the response from the `InnerAgent`.\n",
|
||||
"\n",
|
||||
"Generally speaking, direct messaging is appropriate for scenarios when the sender and\n",
|
||||
"recipient are tightly coupled -- they are created together and the sender\n",
|
||||
"is linked to a specific instance of the recipient.\n",
|
||||
"For example, an agent executes tool calls by sending direct messages to\n",
|
||||
"an instance of {py:class}`~agnext.components.tool_agent.ToolAgent`,\n",
|
||||
"and uses the responses to form an action-observation loop."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Broadcast\n",
|
||||
"\n",
|
||||
"Broadcast is effectively the publish/subscribe model with topic and subscription.\n",
|
||||
"Read [Topic and Subscription](../core-concepts/topic-and-subscription.md)\n",
|
||||
"to learn the core concepts.\n",
|
||||
"\n",
|
||||
"The key difference between direct messaging and broadcast is that broadcast\n",
|
||||
"cannot be used for request/response scenarios.\n",
|
||||
"When an agent publishes a message it is one way only, it cannot receive a response\n",
|
||||
"from any other agent, even if a receiving agent's handler returns a value.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"If a response is given to a published message, it will be thrown away.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"If an agent publishes a message type for which it is subscribed it will not\n",
|
||||
"receive the message it published. This is to prevent infinite loops.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Publishing to Topics\n",
|
||||
"\n",
|
||||
"To publish a message from an agent's handler,\n",
|
||||
"use the {py:meth}`agnext.base.BaseAgent.publish_message` method and specify\n",
|
||||
"a {py:class}`agnext.base.TopicId`.\n",
|
||||
"This call must still be awaited to allow the runtime to schedule delivery of \n",
|
||||
"the message to all subscribers,\n",
|
||||
"but it will always return `None`.\n",
|
||||
"If an agent raises an exception while handling a published message,\n",
|
||||
"this will be logged but will not be propagated back to the publishing agent.\n",
|
||||
"\n",
|
||||
"The following example shows a `BroadcastingAgent` that publishes a message\n",
|
||||
"to a topic upon receiving a message. A `ReceivingAgent` subscribes to the topic\n",
|
||||
"and prints the message it receives."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import MessageContext, TopicId\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class BroadcastingAgent(RoutedAgent):\n",
|
||||
" @message_handler\n",
|
||||
" async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n",
|
||||
" # Publish a message to all agents in the same namespace.\n",
|
||||
" await self.publish_message(\n",
|
||||
" Message(f\"Publishing a message: {message.content}!\"),\n",
|
||||
" topic_id=TopicId(type=\"default\", source=self.id.key),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ReceivingAgent(RoutedAgent):\n",
|
||||
" @message_handler\n",
|
||||
" async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n",
|
||||
" print(f\"Received a message: {message.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`BroadcastingAgent` publishes a message to a topic with type `\"default\"`\n",
|
||||
"and source assigned to the agent instance's agent key.\n",
|
||||
"\n",
|
||||
"Next, we specify a subscription for this topic so the receiving agent\n",
|
||||
"can get messages from the broadcasting agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Registering Subscriptions\n",
|
||||
"\n",
|
||||
"Subscriptions are registered with the agent runtime, either as part of\n",
|
||||
"agent type's registration or through a separate API method.\n",
|
||||
"\n",
|
||||
"Here is how we can register {py:class}`~agnext.components.TypeSubscription`\n",
|
||||
"for the broadcasting agent and receiving agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received a message: Publishing a message: Hello, World! From the runtime!!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogen_core.components import TypeSubscription\n",
|
||||
"\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"\n",
|
||||
"# Registering subscription as part of the `register` method, by providing\n",
|
||||
"# a factory function that produces a list of subscriptions.\n",
|
||||
"await runtime.register(\n",
|
||||
" \"broadcasting_agent\",\n",
|
||||
" lambda: BroadcastingAgent(\"Broadcasting Agent\"),\n",
|
||||
" subscriptions=lambda: [TypeSubscription(topic_type=\"default\", agent_type=\"broadcasting_agent\")],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"await runtime.register(\"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n",
|
||||
"\n",
|
||||
"# Registering subscription directly with the runtime.\n",
|
||||
"await runtime.add_subscription(TypeSubscription(topic_type=\"default\", agent_type=\"receiving_agent\"))\n",
|
||||
"\n",
|
||||
"# Start the runtime and send a message to a broadcasting agent.\n",
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(\n",
|
||||
" Message(\"Hello, World! From the runtime!\"), recipient=AgentId(\"broadcasting_agent\", \"default\")\n",
|
||||
")\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"From the output, you can see the broadcasting agent published\n",
|
||||
"a message, and the message was received by the receiving agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Default Topic and Subscriptions\n",
|
||||
"\n",
|
||||
"In the above example, we used\n",
|
||||
"{py:class}`~agnext.base.TopicId` and {py:class}`~agnext.components.TypeSubscription`\n",
|
||||
"to specify the topic and subscriptions respectively.\n",
|
||||
"This is the appropriate way for many scenarios.\n",
|
||||
"However, when there is a single scope of publishing, that is, \n",
|
||||
"all agents publish and subscribe to all broadcasted messages,\n",
|
||||
"we can use the convenience classes {py:class}`~agnext.components.DefaultTopicId`\n",
|
||||
"and {py:class}`~agnext.components.DefaultSubscription` to simplify our code.\n",
|
||||
"\n",
|
||||
"{py:class}`~agnext.components.DefaultTopicId` is\n",
|
||||
"for creating a topic that uses `\"default\"` as the default value for the topic type\n",
|
||||
"and the publishing agent's key as the default value for the topic source.\n",
|
||||
"We can simplify `BroadcastingAgent` by using \n",
|
||||
"{py:class}`~agnext.components.DefaultTopicId`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.components import DefaultSubscription, DefaultTopicId\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class BroadcastingAgentDefaultTopic(RoutedAgent):\n",
|
||||
" @message_handler\n",
|
||||
" async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n",
|
||||
" # Publish a message to all agents in the same namespace.\n",
|
||||
" await self.publish_message(\n",
|
||||
" Message(f\"Publishing a message: {message.content}!\"),\n",
|
||||
" topic_id=DefaultTopicId(),\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also simplify subscription using\n",
|
||||
"{py:class}`~agnext.components.DefaultSubscription`\n",
|
||||
"when registering it as part of agent type registration.\n",
|
||||
"\n",
|
||||
"When the runtime calls {py:class}`~agnext.components.DefaultSubscription` in\n",
|
||||
"the context of the factory function, \n",
|
||||
"it creates a {py:class}`~agnext.components.TypeSubscription`\n",
|
||||
"whose topic type uses `\"default\"` as the default value and \n",
|
||||
"agent type uses the same agent type that is being registered in the same context."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received a message: Publishing a message: Hello, World! From the runtime!!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"broadcasting_agent\",\n",
|
||||
" lambda: BroadcastingAgentDefaultTopic(\"Broadcasting Agent\"),\n",
|
||||
" subscriptions=lambda: [DefaultSubscription()],\n",
|
||||
")\n",
|
||||
"await runtime.register(\"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"), lambda: [DefaultSubscription()])\n",
|
||||
"runtime.start()\n",
|
||||
"await runtime.send_message(\n",
|
||||
" Message(\"Hello, World! From the runtime!\"), recipient=AgentId(\"broadcasting_agent\", \"default\")\n",
|
||||
")\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"If your scenario allows all agents to publish and subscribe to\n",
|
||||
"all broadcasted messages, use {py:class}`~agnext.components.DefaultTopicId`\n",
|
||||
"and {py:class}`~agnext.components.DefaultSubscription` without setting its\n",
|
||||
"parameters.\n",
|
||||
"```"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "agnext",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,365 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Model Clients\n",
|
||||
"\n",
|
||||
"AGNext provides the {py:mod}`autogen_core.components.models` module with a suite of built-in\n",
|
||||
"model clients for using ChatCompletion API.\n",
|
||||
"All model clients implement the {py:class}`autogen_core.components.models.ChatCompletionClient` protocol class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Built-in Model Clients\n",
|
||||
"\n",
|
||||
"Currently there are two built-in model clients:\n",
|
||||
"{py:class}`autogen_core.components.models.OpenAIChatCompletionClient` and\n",
|
||||
"{py:class}`autogen_core.components.models.AzureOpenAIChatCompletionClient`.\n",
|
||||
"Both clients are asynchronous.\n",
|
||||
"\n",
|
||||
"To use the {py:class}`~autogen_core.components.models.OpenAIChatCompletionClient`, you need to provide the API key\n",
|
||||
"either through the environment variable `OPENAI_API_KEY` or through the `api_key` argument."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.components.models import OpenAIChatCompletionClient, UserMessage\n",
|
||||
"\n",
|
||||
"# Create an OpenAI model client.\n",
|
||||
"model_client = OpenAIChatCompletionClient(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" # api_key=\"sk-...\", # Optional if you have an API key set in the environment.\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can call the {py:meth}`~autogen_core.components.models.OpenAIChatCompletionClient.create` method to create a\n",
|
||||
"chat completion request, and await for an {py:class}`~autogen_core.components.models.CreateResult` object in return."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The capital of France is Paris.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Send a message list to the model and await the response.\n",
|
||||
"messages = [\n",
|
||||
" UserMessage(content=\"What is the capital of France?\", source=\"user\"),\n",
|
||||
"]\n",
|
||||
"response = await model_client.create(messages=messages)\n",
|
||||
"\n",
|
||||
"# Print the response\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming Response\n",
|
||||
"\n",
|
||||
"You can use the {py:meth}`~autogen_core.components.models.OpenAIChatCompletionClient.create_stream` method to create a\n",
|
||||
"chat completion request with streaming response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Streamed responses:\n",
|
||||
"In the heart of the Whispering Woods lived Ember, a small dragon with scales of shimmering gold. Unlike other dragons, Ember breathed not fire but music, each note a whisper of ancient songs. The villagers, initially fearful, soon realized her gift brought harmony and joy.\n",
|
||||
"\n",
|
||||
"One night, as darkness threatened the land, Ember's melodies summoned the stars, casting a protective glow. The villagers danced beneath the celestial orchestra, their worries dissolving like morning mist.\n",
|
||||
"\n",
|
||||
"From that day on, Ember's song became a nightly ritual, a promise that light and harmony would always prevail. The dragon of the Whispering Woods was a symbol of peace, her golden scales a testament to the magic of gentleness.\n",
|
||||
"\n",
|
||||
"------------\n",
|
||||
"\n",
|
||||
"The complete response:\n",
|
||||
"In the heart of the Whispering Woods lived Ember, a small dragon with scales of shimmering gold. Unlike other dragons, Ember breathed not fire but music, each note a whisper of ancient songs. The villagers, initially fearful, soon realized her gift brought harmony and joy.\n",
|
||||
"\n",
|
||||
"One night, as darkness threatened the land, Ember's melodies summoned the stars, casting a protective glow. The villagers danced beneath the celestial orchestra, their worries dissolving like morning mist.\n",
|
||||
"\n",
|
||||
"From that day on, Ember's song became a nightly ritual, a promise that light and harmony would always prevail. The dragon of the Whispering Woods was a symbol of peace, her golden scales a testament to the magic of gentleness.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" UserMessage(content=\"Write a very short story about a dragon.\", source=\"user\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Create a stream.\n",
|
||||
"stream = model_client.create_stream(messages=messages)\n",
|
||||
"\n",
|
||||
"# Iterate over the stream and print the responses.\n",
|
||||
"print(\"Streamed responses:\")\n",
|
||||
"async for response in stream: # type: ignore\n",
|
||||
" if isinstance(response, str):\n",
|
||||
" # A partial response is a string.\n",
|
||||
" print(response, flush=True, end=\"\")\n",
|
||||
" else:\n",
|
||||
" # The last response is a CreateResult object with the complete message.\n",
|
||||
" print(\"\\n\\n------------\\n\")\n",
|
||||
" print(\"The complete response:\", flush=True)\n",
|
||||
" print(response.content, flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"The last response in the streaming response is always the final response\n",
|
||||
"of the type {py:class}`~autogen_core.components.models.CreateResult`.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Azure OpenAI\n",
|
||||
"\n",
|
||||
"To use the {py:class}`~autogen_core.components.models.AzureOpenAIChatCompletionClient`, you need to provide\n",
|
||||
"the deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n",
|
||||
"For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n",
|
||||
"To use AAD authentication, you need to first install the `azure-identity` package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install azure-identity"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following code snippet shows how to use AAD authentication.\n",
|
||||
"The identity used must be assigned the [**Cognitive Services OpenAI User**](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_core.components.models import AzureOpenAIChatCompletionClient\n",
|
||||
"from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
|
||||
"\n",
|
||||
"# Create the token provider\n",
|
||||
"token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n",
|
||||
"\n",
|
||||
"az_model_client = AzureOpenAIChatCompletionClient(\n",
|
||||
" model=\"{your-azure-deployment}\",\n",
|
||||
" api_version=\"2024-06-01\",\n",
|
||||
" azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n",
|
||||
" azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n",
|
||||
" # api_key=\"sk-...\", # For key-based authentication.\n",
|
||||
" model_capabilities={\n",
|
||||
" \"vision\": True,\n",
|
||||
" \"function_calling\": True,\n",
|
||||
" \"json_output\": True,\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build Agent using Model Client\n",
|
||||
"\n",
|
||||
"Let's create a simple AI agent that can respond to messages using the ChatCompletion API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from autogen_core.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage, UserMessage\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class SimpleAgent(RoutedAgent):\n",
|
||||
" def __init__(self, model_client: ChatCompletionClient) -> None:\n",
|
||||
" super().__init__(\"A simple agent\")\n",
|
||||
" self._system_messages = [SystemMessage(\"You are a helpful AI assistant.\")]\n",
|
||||
" self._model_client = model_client\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
" # Prepare input to the chat completion model.\n",
|
||||
" user_message = UserMessage(content=message.content, source=\"user\")\n",
|
||||
" response = await self._model_client.create(\n",
|
||||
" self._system_messages + [user_message], cancellation_token=ctx.cancellation_token\n",
|
||||
" )\n",
|
||||
" # Return with the model's response.\n",
|
||||
" assert isinstance(response.content, str)\n",
|
||||
" return Message(content=response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `SimpleAgent` class is a subclass of the\n",
|
||||
"{py:class}`autogen_core.components.RoutedAgent` class for the convenience of automatically routing messages to the appropriate handlers.\n",
|
||||
"It has a single handler, `handle_user_message`, which handles message from the user. It uses the `ChatCompletionClient` to generate a response to the message.\n",
|
||||
"It then returns the response to the user, following the direct communication model.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"The `cancellation_token` of the type {py:class}`autogen_core.base.CancellationToken` is used to cancel\n",
|
||||
"asynchronous operations. It is linked to async calls inside the message handlers\n",
|
||||
"and can be used by the caller to cancel the handlers.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Seattle offers a wide range of activities and attractions for visitors. Here are some fun things to do in the city:\n",
|
||||
"\n",
|
||||
"1. **Space Needle**: Visit this iconic landmark for stunning panoramic views of the city and surrounding mountains.\n",
|
||||
"\n",
|
||||
"2. **Pike Place Market**: Explore this historic market where you can shop for fresh produce, local crafts, and enjoy delicious street food. Don't miss the famous fish-throwing!\n",
|
||||
"\n",
|
||||
"3. **Chihuly Garden and Glass**: Admire the breathtaking glass art installations by artist Dale Chihuly, both indoors and in the beautiful outdoor garden.\n",
|
||||
"\n",
|
||||
"4. **Museum of Pop Culture (MoPOP)**: Discover exhibits focused on music, science fiction, and pop culture, including artifacts from famous films and music legends.\n",
|
||||
"\n",
|
||||
"5. **Seattle Aquarium**: Learn about marine life native to the Pacific Northwest and see fascinating exhibits, including sea otters and jellyfish.\n",
|
||||
"\n",
|
||||
"6. **Fremont Troll**: Take a photo with this quirky public art installation, a large troll sculpture located under the Aurora Bridge.\n",
|
||||
"\n",
|
||||
"7. **Kerry Park**: Enjoy one of the best viewpoints of Seattle's skyline, especially at sunset or during the evening when the city lights up.\n",
|
||||
"\n",
|
||||
"8. **Discovery Park**: Explore this large urban park with trails, beaches, and beautiful views of Puget Sound and the Olympic Mountains.\n",
|
||||
"\n",
|
||||
"9. **Seattle Art Museum**: Browse a diverse collection of art from around the world, including contemporary and Native American art.\n",
|
||||
"\n",
|
||||
"10. **Take a Ferry Ride**: Enjoy a scenic boat ride to nearby islands like Bainbridge Island or Vashon Island. The views of the Seattle skyline from the water are stunning.\n",
|
||||
"\n",
|
||||
"11. **Underground Tour**: Learn about Seattle's history on a guided tour of the underground passageways that played a significant role in the city’s development.\n",
|
||||
"\n",
|
||||
"12. **Ballard Locks**: Visit the Hiram M. Chittenden Locks to see boats pass between Lake Washington and Puget Sound and watch salmon swim upstream in the fish ladder (seasonal).\n",
|
||||
"\n",
|
||||
"13. **Local Breweries**: Seattle is known for its craft beer scene; take a brewery tour or visit a taproom to sample local brews.\n",
|
||||
"\n",
|
||||
"14. **Attend a Sports Game**: Catch a Seattle Seahawks (NFL), Seattle Mariners (MLB), or Seattle Sounders (MLS) game, depending on the season.\n",
|
||||
"\n",
|
||||
"15. **Seattle Great Wheel**: Ride this Ferris wheel on the waterfront for beautiful views, especially at night when it’s illuminated.\n",
|
||||
"\n",
|
||||
"These activities showcase Seattle’s vibrant culture, unique attractions, and stunning natural beauty. Enjoy your visit!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create the runtime and register the agent.\n",
|
||||
"from autogen_core.base import AgentId\n",
|
||||
"\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"simple-agent\",\n",
|
||||
" lambda: SimpleAgent(\n",
|
||||
" OpenAIChatCompletionClient(\n",
|
||||
" model=\"gpt-4o-mini\",\n",
|
||||
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"# Start the runtime processing messages.\n",
|
||||
"runtime.start()\n",
|
||||
"# Send a message to the agent and get the response.\n",
|
||||
"message = Message(\"Hello, what are some fun things to do in Seattle?\")\n",
|
||||
"response = await runtime.send_message(message, AgentId(\"simple-agent\", \"default\"))\n",
|
||||
"print(response.content)\n",
|
||||
"# Stop the runtime processing messages.\n",
|
||||
"await runtime.stop()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
# Multi-Agent Design Patterns
|
||||
|
||||
Agents can work together in a variety of ways to solve problems.
|
||||
Research works like [AutoGen](https://aka.ms/autogen-paper),
|
||||
[MetaGPT](https://arxiv.org/abs/2308.00352)
|
||||
and [ChatDev](https://arxiv.org/abs/2307.07924) have shown
|
||||
multi-agent systems out-performing single agent systems at complex tasks
|
||||
like software development.
|
||||
|
||||
A multi-agent design pattern is a structure that emerges from message protocols:
|
||||
it describes how agents interact with each other to solve problems.
|
||||
For example, the [tool-equipped agent](./tools.ipynb#tool-equipped-agent) in
|
||||
the previous section employs a design pattern called ReAct,
|
||||
which involves an agent interacting with tools.
|
||||
|
||||
You can implement any multi-agent design pattern using AGNext agents.
|
||||
In the next two sections, we will discuss two common design patterns:
|
||||
group chat for task decomposition, and reflection for robustness.
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,529 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reflection\n",
|
||||
"\n",
|
||||
"Reflection is a design pattern where an LLM generation is followed by a reflection,\n",
|
||||
"which in itself is another LLM generation conditioned on the output of the first one.\n",
|
||||
"For example, given a task to write code, the first LLM can generate a code snippet,\n",
|
||||
"and the second LLM can generate a critique of the code snippet.\n",
|
||||
"\n",
|
||||
"In the context of AGNext and agents, reflection can be implemented as a pair\n",
|
||||
"of agents, where the first agent generates a message and the second agent\n",
|
||||
"generates a response to the message. The two agents continue to interact\n",
|
||||
"until they reach a stopping condition, such as a maximum number of iterations\n",
|
||||
"or an approval from the second agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's implement a simple reflection design pattern using AGNext agents.\n",
|
||||
"There will be two agents: a coder agent and a reviewer agent, the coder agent\n",
|
||||
"will generate a code snippet, and the reviewer agent will generate a critique\n",
|
||||
"of the code snippet.\n",
|
||||
"\n",
|
||||
"## Message Protocol\n",
|
||||
"\n",
|
||||
"Before we define the agents, we need to first define the message protocol for the agents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class CodeWritingTask:\n",
|
||||
" task: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class CodeWritingResult:\n",
|
||||
" task: str\n",
|
||||
" code: str\n",
|
||||
" review: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class CodeReviewTask:\n",
|
||||
" session_id: str\n",
|
||||
" code_writing_task: str\n",
|
||||
" code_writing_scratchpad: str\n",
|
||||
" code: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class CodeReviewResult:\n",
|
||||
" review: str\n",
|
||||
" session_id: str\n",
|
||||
" approved: bool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above set of messages defines the protocol for our example reflection design pattern:\n",
|
||||
"- The application sends a `CodeWritingTask` message to the coder agent\n",
|
||||
"- The coder agent generates a `CodeReviewTask` message, which is sent to the reviewer agent\n",
|
||||
"- The reviewer agent generates a `CodeReviewResult` message, which is sent back to the coder agent\n",
|
||||
"- Depending on the `CodeReviewResult` message, if the code is approved, the coder agent sends a `CodeWritingResult` message\n",
|
||||
"back to the application, otherwise, the coder agent sends another `CodeReviewTask` message to the reviewer agent,\n",
|
||||
"and the process continues.\n",
|
||||
"\n",
|
||||
"We can visualize the message protocol using a data flow diagram:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Agents\n",
|
||||
"\n",
|
||||
"Now, let's define the agents for the reflection design pattern."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import re\n",
|
||||
"import uuid\n",
|
||||
"from typing import Dict, List, Union\n",
|
||||
"\n",
|
||||
"from autogen_core.base import MessageContext, TopicId\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from autogen_core.components.models import (\n",
|
||||
" AssistantMessage,\n",
|
||||
" ChatCompletionClient,\n",
|
||||
" LLMMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" UserMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We use the [Broadcast](./message-and-communication.ipynb#broadcast) API\n",
|
||||
"to implement the design pattern. The agents implement the pub/sub model.\n",
|
||||
"The coder agent subscribes to the `CodeWritingTask` and `CodeReviewResult` messages,\n",
|
||||
"and publishes the `CodeReviewTask` and `CodeWritingResult` messages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CoderAgent(RoutedAgent):\n",
|
||||
" \"\"\"An agent that performs code writing tasks.\"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, model_client: ChatCompletionClient) -> None:\n",
|
||||
" super().__init__(\"A code writing agent.\")\n",
|
||||
" self._system_messages: List[LLMMessage] = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"\"\"You are a proficient coder. You write code to solve problems.\n",
|
||||
"Work with the reviewer to improve your code.\n",
|
||||
"Always put all finished code in a single Markdown code block.\n",
|
||||
"For example:\n",
|
||||
"```python\n",
|
||||
"def hello_world():\n",
|
||||
" print(\"Hello, World!\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Respond using the following format:\n",
|
||||
"\n",
|
||||
"Thoughts: <Your comments>\n",
|
||||
"Code: <Your code>\n",
|
||||
"\"\"\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" self._model_client = model_client\n",
|
||||
" self._session_memory: Dict[str, List[CodeWritingTask | CodeReviewTask | CodeReviewResult]] = {}\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_code_writing_task(self, message: CodeWritingTask, ctx: MessageContext) -> None:\n",
|
||||
" # Store the messages in a temporary memory for this request only.\n",
|
||||
" session_id = str(uuid.uuid4())\n",
|
||||
" self._session_memory.setdefault(session_id, []).append(message)\n",
|
||||
" # Generate a response using the chat completion API.\n",
|
||||
" response = await self._model_client.create(\n",
|
||||
" self._system_messages + [UserMessage(content=message.task, source=self.metadata[\"type\"])],\n",
|
||||
" cancellation_token=ctx.cancellation_token,\n",
|
||||
" )\n",
|
||||
" assert isinstance(response.content, str)\n",
|
||||
" # Extract the code block from the response.\n",
|
||||
" code_block = self._extract_code_block(response.content)\n",
|
||||
" if code_block is None:\n",
|
||||
" raise ValueError(\"Code block not found.\")\n",
|
||||
" # Create a code review task.\n",
|
||||
" code_review_task = CodeReviewTask(\n",
|
||||
" session_id=session_id,\n",
|
||||
" code_writing_task=message.task,\n",
|
||||
" code_writing_scratchpad=response.content,\n",
|
||||
" code=code_block,\n",
|
||||
" )\n",
|
||||
" # Store the code review task in the session memory.\n",
|
||||
" self._session_memory[session_id].append(code_review_task)\n",
|
||||
" # Publish a code review task.\n",
|
||||
" await self.publish_message(code_review_task, topic_id=TopicId(\"default\", self.id.key))\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_code_review_result(self, message: CodeReviewResult, ctx: MessageContext) -> None:\n",
|
||||
" # Store the review result in the session memory.\n",
|
||||
" self._session_memory[message.session_id].append(message)\n",
|
||||
" # Obtain the request from previous messages.\n",
|
||||
" review_request = next(\n",
|
||||
" m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, CodeReviewTask)\n",
|
||||
" )\n",
|
||||
" assert review_request is not None\n",
|
||||
" # Check if the code is approved.\n",
|
||||
" if message.approved:\n",
|
||||
" # Publish the code writing result.\n",
|
||||
" await self.publish_message(\n",
|
||||
" CodeWritingResult(\n",
|
||||
" code=review_request.code,\n",
|
||||
" task=review_request.code_writing_task,\n",
|
||||
" review=message.review,\n",
|
||||
" ),\n",
|
||||
" topic_id=TopicId(\"default\", self.id.key),\n",
|
||||
" )\n",
|
||||
" print(\"Code Writing Result:\")\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(f\"Task:\\n{review_request.code_writing_task}\")\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(f\"Code:\\n{review_request.code}\")\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(f\"Review:\\n{message.review}\")\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" else:\n",
|
||||
" # Create a list of LLM messages to send to the model.\n",
|
||||
" messages: List[LLMMessage] = [*self._system_messages]\n",
|
||||
" for m in self._session_memory[message.session_id]:\n",
|
||||
" if isinstance(m, CodeReviewResult):\n",
|
||||
" messages.append(UserMessage(content=m.review, source=\"Reviewer\"))\n",
|
||||
" elif isinstance(m, CodeReviewTask):\n",
|
||||
" messages.append(AssistantMessage(content=m.code_writing_scratchpad, source=\"Coder\"))\n",
|
||||
" elif isinstance(m, CodeWritingTask):\n",
|
||||
" messages.append(UserMessage(content=m.task, source=\"User\"))\n",
|
||||
" else:\n",
|
||||
" raise ValueError(f\"Unexpected message type: {m}\")\n",
|
||||
" # Generate a revision using the chat completion API.\n",
|
||||
" response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n",
|
||||
" assert isinstance(response.content, str)\n",
|
||||
" # Extract the code block from the response.\n",
|
||||
" code_block = self._extract_code_block(response.content)\n",
|
||||
" if code_block is None:\n",
|
||||
" raise ValueError(\"Code block not found.\")\n",
|
||||
" # Create a new code review task.\n",
|
||||
" code_review_task = CodeReviewTask(\n",
|
||||
" session_id=message.session_id,\n",
|
||||
" code_writing_task=review_request.code_writing_task,\n",
|
||||
" code_writing_scratchpad=response.content,\n",
|
||||
" code=code_block,\n",
|
||||
" )\n",
|
||||
" # Store the code review task in the session memory.\n",
|
||||
" self._session_memory[message.session_id].append(code_review_task)\n",
|
||||
" # Publish a new code review task.\n",
|
||||
" await self.publish_message(code_review_task, topic_id=TopicId(\"default\", self.id.key))\n",
|
||||
"\n",
|
||||
" def _extract_code_block(self, markdown_text: str) -> Union[str, None]:\n",
|
||||
" pattern = r\"```(\\w+)\\n(.*?)\\n```\"\n",
|
||||
" # Search for the pattern in the markdown text\n",
|
||||
" match = re.search(pattern, markdown_text, re.DOTALL)\n",
|
||||
" # Extract the language and code block if a match is found\n",
|
||||
" if match:\n",
|
||||
" return match.group(2)\n",
|
||||
" return None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"A few things to note about `CoderAgent`:\n",
|
||||
"- It uses chain-of-thought prompting in its system message.\n",
|
||||
"- It stores message histories for different `CodeWritingTask` in a dictionary,\n",
|
||||
"so each task has its own history.\n",
|
||||
"- When making an LLM inference request using its model client, it transforms\n",
|
||||
"the message history into a list of {py:class}`autogen_core.components.models.LLMMessage` objects\n",
|
||||
"to pass to the model client.\n",
|
||||
"\n",
|
||||
"The reviewer agent subscribes to the `CodeReviewTask` message and publishes the `CodeReviewResult` message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class ReviewerAgent(RoutedAgent):\n",
|
||||
" \"\"\"An agent that performs code review tasks.\"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, model_client: ChatCompletionClient) -> None:\n",
|
||||
" super().__init__(\"A code reviewer agent.\")\n",
|
||||
" self._system_messages: List[LLMMessage] = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"\"\"You are a code reviewer. You focus on correctness, efficiency and safety of the code.\n",
|
||||
"Respond using the following JSON format:\n",
|
||||
"{\n",
|
||||
" \"correctness\": \"<Your comments>\",\n",
|
||||
" \"efficiency\": \"<Your comments>\",\n",
|
||||
" \"safety\": \"<Your comments>\",\n",
|
||||
" \"approval\": \"<APPROVE or REVISE>\",\n",
|
||||
" \"suggested_changes\": \"<Your comments>\"\n",
|
||||
"}\n",
|
||||
"\"\"\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" self._session_memory: Dict[str, List[CodeReviewTask | CodeReviewResult]] = {}\n",
|
||||
" self._model_client = model_client\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_code_review_task(self, message: CodeReviewTask, ctx: MessageContext) -> None:\n",
|
||||
" # Format the prompt for the code review.\n",
|
||||
" # Gather the previous feedback if available.\n",
|
||||
" previous_feedback = \"\"\n",
|
||||
" if message.session_id in self._session_memory:\n",
|
||||
" previous_review = next(\n",
|
||||
" (m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, CodeReviewResult)),\n",
|
||||
" None,\n",
|
||||
" )\n",
|
||||
" if previous_review is not None:\n",
|
||||
" previous_feedback = previous_review.review\n",
|
||||
" # Store the messages in a temporary memory for this request only.\n",
|
||||
" self._session_memory.setdefault(message.session_id, []).append(message)\n",
|
||||
" prompt = f\"\"\"The problem statement is: {message.code_writing_task}\n",
|
||||
"The code is:\n",
|
||||
"```\n",
|
||||
"{message.code}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Previous feedback:\n",
|
||||
"{previous_feedback}\n",
|
||||
"\n",
|
||||
"Please review the code. If previous feedback was provided, see if it was addressed.\n",
|
||||
"\"\"\"\n",
|
||||
" # Generate a response using the chat completion API.\n",
|
||||
" response = await self._model_client.create(\n",
|
||||
" self._system_messages + [UserMessage(content=prompt, source=self.metadata[\"type\"])],\n",
|
||||
" cancellation_token=ctx.cancellation_token,\n",
|
||||
" json_output=True,\n",
|
||||
" )\n",
|
||||
" assert isinstance(response.content, str)\n",
|
||||
" # TODO: use structured generation library e.g. guidance to ensure the response is in the expected format.\n",
|
||||
" # Parse the response JSON.\n",
|
||||
" review = json.loads(response.content)\n",
|
||||
" # Construct the review text.\n",
|
||||
" review_text = \"Code review:\\n\" + \"\\n\".join([f\"{k}: {v}\" for k, v in review.items()])\n",
|
||||
" approved = review[\"approval\"].lower().strip() == \"approve\"\n",
|
||||
" result = CodeReviewResult(\n",
|
||||
" review=review_text,\n",
|
||||
" session_id=message.session_id,\n",
|
||||
" approved=approved,\n",
|
||||
" )\n",
|
||||
" # Store the review result in the session memory.\n",
|
||||
" self._session_memory[message.session_id].append(result)\n",
|
||||
" # Publish the review result.\n",
|
||||
" await self.publish_message(result, topic_id=TopicId(\"default\", self.id.key))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `ReviewerAgent` uses JSON-mode when making an LLM inference request, and\n",
|
||||
"also uses chain-of-thought prompting in its system message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Logging\n",
|
||||
"\n",
|
||||
"Turn on logging to see the messages exchanged between the agents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.WARNING)\n",
|
||||
"logging.getLogger(\"autogen_core\").setLevel(logging.DEBUG)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Running the Design Pattern\n",
|
||||
"\n",
|
||||
"Let's test the design pattern with a coding task."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:agnext:Publishing message of type CodeWritingTask to all subscribers: {'task': 'Write a function to find the sum of all even numbers in a list.'}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeWritingTask published by Unknown\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 101, \"completion_tokens\": 101, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: To find the sum of all even numbers in a list, I will iterate through the list and check if each number is even. If it is, I will add it to a cumulative sum. Finally, I will return this sum. This approach is straightforward and efficient.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n total = 0\\n for number in numbers:\\n if number % 2 == 0:\\n total += number\\n return total\\n```', 'code': 'def sum_of_evens(numbers):\\n total = 0\\n for number in numbers:\\n if number % 2 == 0:\\n total += number\\n return total'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 176, \"completion_tokens\": 191, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly sums all even numbers in the list as intended. There are no logical errors in the implementation.\\nefficiency: The implementation has a time complexity of O(n), where n is the number of elements in the list. This is optimal for this problem since each element has to be examined. However, using a generator expression with the sum function could potentially improve readability.\\nsafety: The function currently does not handle cases where the input 'numbers' is None or not a list. Adding input validation could enhance the safety of the function to prevent runtime errors.\\napproval: REVISE\\nsuggested_changes: Consider adding input validation to check if 'numbers' is a list. Also, you could improve readability and potentially performance by using a generator expression like 'return sum(number for number in numbers if number % 2 == 0)'.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 389, \"completion_tokens\": 97, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: I appreciate the reviewer\\'s feedback on improving the function\\'s safety and readability. I will incorporate input validation to ensure the input is a list, and I will use a generator expression to simplify the summation of even numbers. \\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n return sum(number for number in numbers if number % 2 == 0)\\n```', 'code': 'def sum_of_evens(numbers):\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n return sum(number for number in numbers if number % 2 == 0)'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 357, \"completion_tokens\": 211, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly calculates the sum of all even numbers from the provided list. The logic appears sound and there are no errors in the implementation.\\nefficiency: The time complexity remains O(n), which is optimal for the task at hand since all numbers must be examined to determine if they are even. While using a generator expression improves readability, it does not enhance performance significantly over a list comprehension for this particular case.\\nsafety: The function still raises a ValueError if the input is not a list, which is good. However, it would be beneficial to explicitly handle cases where 'numbers' is None to prevent potential runtime errors. An additional check would enhance its robustness.\\napproval: REVISE\\nsuggested_changes: Consider adding a check at the beginning of the function to handle cases where 'numbers' is None and return a meaningful error or handle it gracefully. An example could be to raise a TypeError or to explicitly check for None before continuing.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 695, \"completion_tokens\": 123, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: I appreciate the continued feedback for enhancing the robustness of the function. I will add an explicit check for `None` at the beginning of the function to raise a `TypeError` if the input is `None`. This will ensure the function is more resilient against unexpected input.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n return sum(number for number in numbers if number % 2 == 0)\\n```', 'code': 'def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n return sum(number for number in numbers if number % 2 == 0)'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 394, \"completion_tokens\": 218, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly computes the sum of all even numbers from the provided list. It raises appropriate errors for incorrect input types, which is a good practice.\\nefficiency: The time complexity of O(n) is optimal for this problem since all elements must be checked to determine if they are even. The use of a generator expression is efficient in terms of memory usage.\\nsafety: The function includes checks for None and enforces input types, which is commendable. However, you should note that if an element within the list is not an integer, it might cause a runtime error when performing the modulus operation. A check to ensure all elements are integers (or can be handled appropriately) would improve safety.\\napproval: REVISE\\nsuggested_changes: Consider adding a check to ensure all elements within the 'numbers' list are integers. If any element is not an integer, raise a ValueError with a suitable message. This will prevent potential runtime errors due to invalid operations on unsupported types.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 1034, \"completion_tokens\": 160, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: Thank you for the constructive feedback. I will add a check to ensure that all elements in the list are integers before attempting to perform the modulus operation. If any element is not an integer, I will raise a `ValueError` with an appropriate message. This will make the function more robust and prevent runtime errors.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n if not all(isinstance(number, int) for number in numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n return sum(number for number in numbers if number % 2 == 0)\\n```', 'code': 'def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n if not all(isinstance(number, int) for number in numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n return sum(number for number in numbers if number % 2 == 0)'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 430, \"completion_tokens\": 249, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly computes the sum of all even numbers from the provided list and raises appropriate errors for incorrect input types, which is a good practice. The checks for None, list type, and integer types are implemented effectively.\\nefficiency: The time complexity of O(n) is optimal, since checking every number in the list is necessary, and the use of a generator expression for summing is both efficient in terms of time and memory.\\nsafety: The function includes checks for None and enforces input types. However, while it checks if all elements are integers, it presumes that this check is sufficient to avoid runtime errors. If the list were to contain non-integer types that can be tested against 'number % 2', the modulus operation could still lead to runtime errors.\\napproval: REVISE\\nsuggested_changes: Consider implementing a specific check to ensure all elements are integers prior to performing the summation, which could raise a ValueError. This could prevent runtime errors if non-integer types somehow bypass the initial check. You could enhance safety by using a try-except block to catch any unexpected types during the modulus operation.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 1441, \"completion_tokens\": 198, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: Thank you for the thorough code review and highlighting the potential issues. I will enhance safety by implementing a try-except block around the modulus operation to catch any errors related to non-integer types during the sum computation. This will ensure that the function handles unexpected types gracefully and avoids runtime errors.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n if not all(isinstance(number, int) for number in numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n \\n total = 0\\n for number in numbers:\\n try:\\n if number % 2 == 0:\\n total += number\\n except TypeError:\\n raise ValueError(f\"Invalid element found: {number}. All elements must be integers.\")\\n \\n return total\\n```', 'code': 'def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n if not all(isinstance(number, int) for number in numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n \\n total = 0\\n for number in numbers:\\n try:\\n if number % 2 == 0:\\n total += number\\n except TypeError:\\n raise ValueError(f\"Invalid element found: {number}. All elements must be integers.\")\\n \\n return total'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 504, \"completion_tokens\": 181, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function still correctly computes the sum of all even numbers and raises appropriate errors for incorrect input types, which maintains a good level of correctness.\\nefficiency: The efficiency remains satisfactory with a time complexity of O(n). The use of a for loop is appropriate for iterating through the list.\\nsafety: While the previous feedback regarding safety was noted, the function doesn't fully mitigate potential runtime errors during the modulus operation. It's still possible for non-integer types to cause issues, which should be handled more effectively.\\napproval: REVISE\\nsuggested_changes: Consider using a try-except block that encapsulates the entire loop or implement an additional step where you filter or validate the elements before processing to ensure that only integers are considered. This will help avoid unexpected runtime errors while performing operations on list elements.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 1818, \"completion_tokens\": 186, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: Thank you for your patience and for highlighting the remaining safety concern. To address the issue of non-integer types causing potential runtime errors, I will refactor the code to filter the input list before attempting any operations. This way, we can ensure that only integers are processed, thus reducing the risk of unexpected errors.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n \\n # Filter the list to ensure only integers are processed.\\n integers = [number for number in numbers if isinstance(number, int)]\\n \\n if len(integers) != len(numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n \\n return sum(number for number in integers if number % 2 == 0)\\n```', 'code': 'def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n \\n # Filter the list to ensure only integers are processed.\\n integers = [number for number in numbers if isinstance(number, int)]\\n \\n if len(integers) != len(numbers):\\n raise ValueError(\"All elements in the list must be integers.\")\\n \\n return sum(number for number in integers if number % 2 == 0)'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 419, \"completion_tokens\": 198, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function still correctly computes the sum of all even numbers and raises appropriate errors for incorrect input types, which maintains a good level of correctness.\\nefficiency: The efficiency remains satisfactory with a time complexity of O(n). The use of list comprehensions for filtering is both clear and efficient for this type of operation.\\nsafety: The function raises errors appropriately for None and non-list inputs, but it doesn't handle scenarios where non-integer types may still exist in the list before the summation. There's still a risk of runtime errors during the modulus operation if non-integers are present.\\napproval: REVISE\\nsuggested_changes: To improve safety, consider filtering the list more robustly by checking types before performing operations. You could also implement a try-except block around the summation operation to catch potential runtime errors, although filtering out non-integers beforehand is a better solution.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 2200, \"completion_tokens\": 187, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': 'Thoughts: Thank you for your insightful feedback. To make the function more robust against non-integer types, I will use a filtering process to ensure that only integers are included in the summation. Additionally, I will provide a clear error message if non-integer types slip through. This should eliminate runtime errors related to the modulus operation.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n\\n # Filtering to ensure only integers are included\\n integers = []\\n for number in numbers:\\n if isinstance(number, int):\\n integers.append(number)\\n else:\\n raise ValueError(f\"Invalid element found: {number}. All elements must be integers.\")\\n\\n return sum(number for number in integers if number % 2 == 0)\\n```', 'code': 'def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError(\"Input cannot be None.\")\\n if not isinstance(numbers, list):\\n raise ValueError(\"Input must be a list.\")\\n\\n # Filtering to ensure only integers are included\\n integers = []\\n for number in numbers:\\n if isinstance(number, int):\\n integers.append(number)\\n else:\\n raise ValueError(f\"Invalid element found: {number}. All elements must be integers.\")\\n\\n return sum(number for number in integers if number % 2 == 0)'}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 435, \"completion_tokens\": 319, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly handles None and non-list inputs by raising appropriate exceptions. It also accurately computes the sum of even integers in the list, which maintains correctness.\\nefficiency: The time complexity remains O(n) due to the single pass needed to filter integers and another pass for summation. However, it can be slightly optimized by combining both filtering and summation in a single comprehension, which reduces the overall pass through the list.\\nsafety: The function raises errors for invalid input types, but it does not account for cases where non-integer types might appear in the list prior to the sum calculation. This could lead to runtime errors if non-integers are inadvertently included. The error handling can be improved by ensuring that all elements are integers before performing the modulus operation.\\napproval: REVISE\\nsuggested_changes: Consider using a single loop with a generator expression to both filter and sum the even integers in one pass. This would enhance both efficiency and safety by eliminating the risk of runtime errors when performing operations on non-integer values. You can adjust the code as follows:\\n\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError('Input cannot be None.')\\n if not isinstance(numbers, list):\\n raise ValueError('Input must be a list.')\\n\\n return sum(number for number in numbers if isinstance(number, int) and number % 2 == 0)\\n```\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': False}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 2690, \"completion_tokens\": 130, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewTask to all subscribers: {'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'code_writing_task': 'Write a function to find the sum of all even numbers in a list.', 'code_writing_scratchpad': \"Thoughts: I appreciate the thorough review and suggestions. By combining filtering and summation into a single iteration using a generator expression, we can enhance both efficiency and safety. This ensures that only integers are considered for summation, which minimizes the risk of runtime errors. I will implement this change.\\n\\nCode:\\n```python\\ndef sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError('Input cannot be None.')\\n if not isinstance(numbers, list):\\n raise ValueError('Input must be a list.')\\n\\n return sum(number for number in numbers if isinstance(number, int) and number % 2 == 0)\\n```\", 'code': \"def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError('Input cannot be None.')\\n if not isinstance(numbers, list):\\n raise ValueError('Input must be a list.')\\n\\n return sum(number for number in numbers if isinstance(number, int) and number % 2 == 0)\"}\n",
|
||||
"INFO:agnext:Calling message handler for ReviewerAgent with message type CodeReviewTask published by CoderAgent\n",
|
||||
"INFO:agnext.events:{\"prompt_tokens\": 495, \"completion_tokens\": 253, \"type\": \"LLMCall\"}\n",
|
||||
"INFO:agnext:Publishing message of type CodeReviewResult to all subscribers: {'review': \"Code review:\\ncorrectness: The function correctly implements the requirement of finding the sum of even numbers and raises appropriate exceptions for invalid input. It maintains accuracy in terms of results as it only sums integers that are even.\\nefficiency: While the implementation uses a generator expression to sum even integers efficiently, the previous feedback suggested combining filtering and summation into a single pass. The current approach still iterates through the list only once due to the nature of the generator expression, so it is efficient in its current form, but the suggestion for optimization is valid.\\nsafety: The function checks for None and ensures the input is a list, which increases safety. However, the filter in the generator ensures that only integers are considered for the modulus operation, preventing runtime errors from non-integer types. This aspect has been addressed adequately since the generator will exclude non-integer values.\\napproval: APPROVE\\nsuggested_changes: No further changes are necessary, but a small improvement could be adding a check to handle the situation where the list may contain only non-integers, returning 0 in such cases. Additionally, consider enhancing the documentation with a brief description of the function's behavior on edge cases.\", 'session_id': '633c4a66-2f94-472f-b4a2-09f77c164c17', 'approved': True}\n",
|
||||
"INFO:agnext:Calling message handler for CoderAgent with message type CodeReviewResult published by ReviewerAgent\n",
|
||||
"INFO:agnext:Publishing message of type CodeWritingResult to all subscribers: {'task': 'Write a function to find the sum of all even numbers in a list.', 'code': \"def sum_of_evens(numbers):\\n if numbers is None:\\n raise TypeError('Input cannot be None.')\\n if not isinstance(numbers, list):\\n raise ValueError('Input must be a list.')\\n\\n return sum(number for number in numbers if isinstance(number, int) and number % 2 == 0)\", 'review': \"Code review:\\ncorrectness: The function correctly implements the requirement of finding the sum of even numbers and raises appropriate exceptions for invalid input. It maintains accuracy in terms of results as it only sums integers that are even.\\nefficiency: While the implementation uses a generator expression to sum even integers efficiently, the previous feedback suggested combining filtering and summation into a single pass. The current approach still iterates through the list only once due to the nature of the generator expression, so it is efficient in its current form, but the suggestion for optimization is valid.\\nsafety: The function checks for None and ensures the input is a list, which increases safety. However, the filter in the generator ensures that only integers are considered for the modulus operation, preventing runtime errors from non-integer types. This aspect has been addressed adequately since the generator will exclude non-integer values.\\napproval: APPROVE\\nsuggested_changes: No further changes are necessary, but a small improvement could be adding a check to handle the situation where the list may contain only non-integers, returning 0 in such cases. Additionally, consider enhancing the documentation with a brief description of the function's behavior on edge cases.\"}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Code Writing Result:\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Task:\n",
|
||||
"Write a function to find the sum of all even numbers in a list.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Code:\n",
|
||||
"def sum_of_evens(numbers):\n",
|
||||
" if numbers is None:\n",
|
||||
" raise TypeError('Input cannot be None.')\n",
|
||||
" if not isinstance(numbers, list):\n",
|
||||
" raise ValueError('Input must be a list.')\n",
|
||||
"\n",
|
||||
" return sum(number for number in numbers if isinstance(number, int) and number % 2 == 0)\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Review:\n",
|
||||
"Code review:\n",
|
||||
"correctness: The function correctly implements the requirement of finding the sum of even numbers and raises appropriate exceptions for invalid input. It maintains accuracy in terms of results as it only sums integers that are even.\n",
|
||||
"efficiency: While the implementation uses a generator expression to sum even integers efficiently, the previous feedback suggested combining filtering and summation into a single pass. The current approach still iterates through the list only once due to the nature of the generator expression, so it is efficient in its current form, but the suggestion for optimization is valid.\n",
|
||||
"safety: The function checks for None and ensures the input is a list, which increases safety. However, the filter in the generator ensures that only integers are considered for the modulus operation, preventing runtime errors from non-integer types. This aspect has been addressed adequately since the generator will exclude non-integer values.\n",
|
||||
"approval: APPROVE\n",
|
||||
"suggested_changes: No further changes are necessary, but a small improvement could be adding a check to handle the situation where the list may contain only non-integers, returning 0 in such cases. Additionally, consider enhancing the documentation with a brief description of the function's behavior on edge cases.\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.components._type_subscription import TypeSubscription\n",
|
||||
"from autogen_core.components.models import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"await runtime.register(\n",
|
||||
" \"ReviewerAgent\",\n",
|
||||
" lambda: ReviewerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\")),\n",
|
||||
")\n",
|
||||
"await runtime.add_subscription(TypeSubscription(\"default\", \"CoderAgent\"))\n",
|
||||
"await runtime.register(\n",
|
||||
" \"CoderAgent\",\n",
|
||||
" lambda: CoderAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\")),\n",
|
||||
")\n",
|
||||
"await runtime.add_subscription(TypeSubscription(\"default\", \"ReviewerAgent\"))\n",
|
||||
"runtime.start()\n",
|
||||
"await runtime.publish_message(\n",
|
||||
" message=CodeWritingTask(task=\"Write a function to find the sum of all even numbers in a list.\"),\n",
|
||||
" topic_id=TopicId(\"default\", \"default\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Keep processing messages until idle.\n",
|
||||
"await runtime.stop_when_idle()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The log messages show the interaction between the coder and reviewer agents.\n",
|
||||
"The final output shows the code snippet generated by the coder agent and the critique generated by the reviewer agent."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "agnext",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 82 KiB |
@@ -0,0 +1,320 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tools\n",
|
||||
"\n",
|
||||
"Tools are code that can be executed by an agent to perform actions. A tool\n",
|
||||
"can be a simple function such as a calculator, or an API call to a third-party service\n",
|
||||
"such as stock price lookup and weather forecast.\n",
|
||||
"In the context of AI agents, tools are designed to be executed by agents in\n",
|
||||
"response to model-generated function calls.\n",
|
||||
"\n",
|
||||
"AGNext provides the {py:mod}`autogen_core.components.tools` module with a suite of built-in\n",
|
||||
"tools and utilities for creating and running custom tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Built-in Tools\n",
|
||||
"\n",
|
||||
"One of the built-in tools is the {py:class}`autogen_core.components.tools.PythonCodeExecutionTool`,\n",
|
||||
"which allows agents to execute Python code snippets.\n",
|
||||
"\n",
|
||||
"Here is how you create the tool and use it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello, world!\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogen_core.base import CancellationToken\n",
|
||||
"from autogen_core.components.code_executor import LocalCommandLineCodeExecutor\n",
|
||||
"from autogen_core.components.tools import PythonCodeExecutionTool\n",
|
||||
"\n",
|
||||
"# Create the tool.\n",
|
||||
"code_executor = LocalCommandLineCodeExecutor()\n",
|
||||
"code_execution_tool = PythonCodeExecutionTool(code_executor)\n",
|
||||
"cancellation_token = CancellationToken()\n",
|
||||
"\n",
|
||||
"# Use the tool directly without an agent.\n",
|
||||
"code = \"print('Hello, world!')\"\n",
|
||||
"result = await code_execution_tool.run_json({\"code\": code}, cancellation_token)\n",
|
||||
"print(code_execution_tool.return_value_as_string(result))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The {py:class}`~autogen_core.components.code_executor.LocalCommandLineCodeExecutor`\n",
|
||||
"class is a built-in code executor that runs Python code snippets in a subprocess\n",
|
||||
"in the local command line environment.\n",
|
||||
"The {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool` class wraps the code executor\n",
|
||||
"and provides a simple interface to execute Python code snippets.\n",
|
||||
"\n",
|
||||
"Other built-in tools will be added in the future."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Custom Function Tools\n",
|
||||
"\n",
|
||||
"A tool can also be a simple Python function that performs a specific action.\n",
|
||||
"To create a custom function tool, you just need to create a Python function\n",
|
||||
"and use the {py:class}`autogen_core.components.tools.FunctionTool` class to wrap it.\n",
|
||||
"\n",
|
||||
"For example, a simple tool to obtain the stock price of a company might look like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"194.71306528148511\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"\n",
|
||||
"from autogen_core.base import CancellationToken\n",
|
||||
"from autogen_core.components.tools import FunctionTool\n",
|
||||
"from typing_extensions import Annotated\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def get_stock_price(ticker: str, date: Annotated[str, \"Date in YYYY/MM/DD\"]) -> float:\n",
|
||||
" # Returns a random stock price for demonstration purposes.\n",
|
||||
" return random.uniform(10, 200)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create a function tool.\n",
|
||||
"stock_price_tool = FunctionTool(get_stock_price, description=\"Get the stock price.\")\n",
|
||||
"\n",
|
||||
"# Run the tool.\n",
|
||||
"cancellation_token = CancellationToken()\n",
|
||||
"result = await stock_price_tool.run_json({\"ticker\": \"AAPL\", \"date\": \"2021/01/01\"}, cancellation_token)\n",
|
||||
"\n",
|
||||
"# Print the result.\n",
|
||||
"print(stock_price_tool.return_value_as_string(result))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool-Equipped Agent\n",
|
||||
"\n",
|
||||
"To use tools with an agent, you can use {py:class}`autogen_core.components.tool_agent.ToolAgent`,\n",
|
||||
"by using it in a composition pattern.\n",
|
||||
"Here is an example tool-use agent that uses {py:class}`~autogen_core.components.tool_agent.ToolAgent`\n",
|
||||
"as an inner agent for executing tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dataclasses import dataclass\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from autogen_core.application import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_core.base import AgentId, AgentInstantiationContext, MessageContext\n",
|
||||
"from autogen_core.components import RoutedAgent, message_handler\n",
|
||||
"from autogen_core.components.models import (\n",
|
||||
" ChatCompletionClient,\n",
|
||||
" LLMMessage,\n",
|
||||
" OpenAIChatCompletionClient,\n",
|
||||
" SystemMessage,\n",
|
||||
" UserMessage,\n",
|
||||
")\n",
|
||||
"from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop\n",
|
||||
"from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@dataclass\n",
|
||||
"class Message:\n",
|
||||
" content: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ToolUseAgent(RoutedAgent):\n",
|
||||
" def __init__(self, model_client: ChatCompletionClient, tool_schema: List[ToolSchema], tool_agent: AgentId) -> None:\n",
|
||||
" super().__init__(\"An agent with tools\")\n",
|
||||
" self._system_messages: List[LLMMessage] = [SystemMessage(\"You are a helpful AI assistant.\")]\n",
|
||||
" self._model_client = model_client\n",
|
||||
" self._tool_schema = tool_schema\n",
|
||||
" self._tool_agent = tool_agent\n",
|
||||
"\n",
|
||||
" @message_handler\n",
|
||||
" async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
|
||||
" # Create a session of messages.\n",
|
||||
" session: List[LLMMessage] = [UserMessage(content=message.content, source=\"user\")]\n",
|
||||
" # Run the caller loop to handle tool calls.\n",
|
||||
" messages = await tool_agent_caller_loop(\n",
|
||||
" self,\n",
|
||||
" tool_agent_id=self._tool_agent,\n",
|
||||
" model_client=self._model_client,\n",
|
||||
" input_messages=session,\n",
|
||||
" tool_schema=self._tool_schema,\n",
|
||||
" cancellation_token=ctx.cancellation_token,\n",
|
||||
" )\n",
|
||||
" # Return the final response.\n",
|
||||
" assert isinstance(messages[-1].content, str)\n",
|
||||
" return Message(content=messages[-1].content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `ToolUseAgent` class uses a convenience function {py:meth}`autogen_core.components.tool_agent.tool_agent_caller_loop`, \n",
|
||||
"to handle the interaction between the model and the tool agent.\n",
|
||||
"The core idea can be described using a simple control flow graph:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The `ToolUseAgent`'s `handle_user_message` handler handles messages from the user,\n",
|
||||
"and determines whether the model has generated a tool call.\n",
|
||||
"If the model has generated tool calls, then the handler sends a function call\n",
|
||||
"message to the {py:class}`~autogen_core.components.tool_agent.ToolAgent` agent\n",
|
||||
"to execute the tools,\n",
|
||||
"and then queries the model again with the results of the tool calls.\n",
|
||||
"This process continues until the model stops generating tool calls,\n",
|
||||
"at which point the final response is returned to the user.\n",
|
||||
"\n",
|
||||
"By having the tool execution logic in a separate agent,\n",
|
||||
"we expose the model-tool interactions to the agent runtime as messages, so the tool executions\n",
|
||||
"can be observed externally and intercepted if necessary.\n",
|
||||
"\n",
|
||||
"To run the agent, we need to create a runtime and register the agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AgentType(type='tool_use_agent')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a runtime.\n",
|
||||
"runtime = SingleThreadedAgentRuntime()\n",
|
||||
"# Create the tools.\n",
|
||||
"tools: List[Tool] = [FunctionTool(get_stock_price, description=\"Get the stock price.\")]\n",
|
||||
"# Register the agents.\n",
|
||||
"await runtime.register(\n",
|
||||
" \"tool_executor_agent\",\n",
|
||||
" lambda: ToolAgent(\n",
|
||||
" description=\"Tool Executor Agent\",\n",
|
||||
" tools=tools,\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"await runtime.register(\n",
|
||||
" \"tool_use_agent\",\n",
|
||||
" lambda: ToolUseAgent(\n",
|
||||
" OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
|
||||
" tool_schema=[tool.schema for tool in tools],\n",
|
||||
" tool_agent=AgentId(\"tool_executor_agent\", AgentInstantiationContext.current_agent_id().key),\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This example uses the {py:class}`autogen_core.components.models.OpenAIChatCompletionClient`,\n",
|
||||
"for Azure OpenAI and other clients, see [Model Clients](./model-clients.ipynb).\n",
|
||||
"Let's test the agent with a question about stock price."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The stock price of NVIDIA (NVDA) on June 1, 2024, was approximately $148.86.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Start processing messages.\n",
|
||||
"runtime.start()\n",
|
||||
"# Send a direct message to the tool agent.\n",
|
||||
"tool_use_agent = AgentId(\"tool_use_agent\", \"default\")\n",
|
||||
"response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n",
|
||||
"print(response.content)\n",
|
||||
"# Stop processing messages.\n",
|
||||
"await runtime.stop()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [samples](https://github.com/microsoft/autogen_core/tree/main/python/samples#tool-use-examples)\n",
|
||||
"for more examples of using tools with agents, including how to use\n",
|
||||
"broadcast communication model for tool execution, and how to intercept tool\n",
|
||||
"execution for human-in-the-loop approval."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "autogen_core",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
97
python/packages/autogen-core/docs/src/guides/logging.md
Normal file
97
python/packages/autogen-core/docs/src/guides/logging.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# Logging
|
||||
|
||||
AGNext uses Python's built-in [`logging`](https://docs.python.org/3/library/logging.html) module.
|
||||
|
||||
There are two kinds of logging:
|
||||
|
||||
- **Trace logging**: This is used for debugging and consists of human-readable messages to indicate what is going on. This is intended for a developer to understand what is happening in the code. The content and format of these logs should not be depended on by other systems.
|
||||
- Name: {py:attr}`~autogen_core.application.logging.TRACE_LOGGER_NAME`.
|
||||
- **Structured logging**: This logger emits structured events that can be consumed by other systems. The content and format of these logs can be depended on by other systems.
|
||||
- Name: {py:attr}`~autogen_core.application.logging.EVENT_LOGGER_NAME`.
|
||||
- See the module {py:mod}`autogen_core.application.logging.events` to see the available events.
|
||||
- {py:attr}`~autogen_core.application.logging.ROOT_LOGGER` can be used to enable or disable all logs at the same time.
|
||||
|
||||
## Enabling logging output
|
||||
|
||||
To enable trace logging, you can use the following code:
|
||||
|
||||
```python
|
||||
import logging
|
||||
|
||||
from autogen_core.application.logging import TRACE_LOGGER_NAME
|
||||
|
||||
logging.basicConfig(level=logging.WARNING)
|
||||
logger = logging.getLogger(TRACE_LOGGER_NAME)
|
||||
logger.setLevel(logging.DEBUG)
|
||||
```
|
||||
|
||||
### Structured logging
|
||||
|
||||
Structured logging allows you to write handling logic that deals with the actual events including all fields rather than just a formatted string.
|
||||
|
||||
For example, if you had defined this custom event and were emitting it. Then you could write the following handler to receive it.
|
||||
|
||||
```python
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass
|
||||
class MyEvent:
|
||||
timestamp: str
|
||||
message: str
|
||||
|
||||
class MyHandler(logging.Handler):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
def emit(self, record: logging.LogRecord) -> None:
|
||||
try:
|
||||
# Use the StructuredMessage if the message is an instance of it
|
||||
if isinstance(record.msg, MyEvent):
|
||||
print(f"Timestamp: {record.msg.timestamp}, Message: {record.msg.message}")
|
||||
except Exception:
|
||||
self.handleError(record)
|
||||
```
|
||||
|
||||
And this is how you could use it:
|
||||
|
||||
```python
|
||||
logger = logging.getLogger(EVENT_LOGGER_NAME)
|
||||
logger.setLevel(logging.INFO)
|
||||
my_handler = MyHandler()
|
||||
logger.handlers = [my_handler]
|
||||
```
|
||||
|
||||
|
||||
## Emitting logs
|
||||
|
||||
These two names are the root loggers for these types. Code that emits logs should use a child logger of these loggers. For example, if you are writing a module `my_module` and you want to emit trace logs, you should use the logger named:
|
||||
|
||||
```python
|
||||
import logging
|
||||
|
||||
from autogen_core.application.logging import TRACE_LOGGER_NAME
|
||||
logger = logging.getLogger(f"{TRACE_LOGGER_NAME}.my_module")
|
||||
```
|
||||
|
||||
### Emitting structured logs
|
||||
|
||||
If your event looks like:
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass
|
||||
class MyEvent:
|
||||
timestamp: str
|
||||
message: str
|
||||
```
|
||||
|
||||
Then it could be emitted in code like this:
|
||||
|
||||
```python
|
||||
from autogen_core.application.logging import EVENT_LOGGER_NAME
|
||||
|
||||
logger = logging.getLogger(EVENT_LOGGER_NAME + ".my_module")
|
||||
logger.info(MyEvent("timestamp", "message"))
|
||||
```
|
||||
@@ -0,0 +1,52 @@
|
||||
# Agent Worker Protocol
|
||||
|
||||
## System architecture
|
||||
|
||||
The system consists of multiple processes, each being either a _service_ process or a _worker_ process.
|
||||
Worker processes host application code (agents) and connect to a service process.
|
||||
Workers advertise the agents which they support to the service, so the service can decide which worker to place agents on.
|
||||
Service processes coordinate placement of agents on worker processes and facilitate communication between agents.
|
||||
|
||||
Agent instances are identified by the tuple of `(namespace: str, name: str)`.
|
||||
Both _namespace_ and _name_ are application-defined.
|
||||
The _namespace_ has no semantics implied by the system: it is free-form, and any semantics are implemented by application code.
|
||||
The _name_ is used to route requests to a worker which supports agents with that name.
|
||||
Workers advertise the set of agent names which they are capable of hosting to the service.
|
||||
Workers activate agents in response to messages received from the service.
|
||||
The service uses the _name_ to determine where to place currently-inactive agents, maintaining a mapping from agent name to a set of workers which support that agent.
|
||||
The service maintains a _directory_ mapping active agent ids to worker processes which host the identified agent.
|
||||
|
||||
### Agent lifecycle
|
||||
|
||||
Agents are never explicitly created or destroyed. When a request is received for an agent which is not currently active, it is the responsibility of the service to select a worker which is capable of hosting that agent, and to route the request to that worker.
|
||||
|
||||
## Worker protocol flow
|
||||
|
||||
The worker protocol has three phases, following the lifetime of the worker: initiation, operation, and termination.
|
||||
|
||||
### Initialization
|
||||
|
||||
When the worker process starts, it initiates a connection to a service process, establishing a bi-directional communication channel across which messages are passed.
|
||||
Next, the worker issues zero or more `RegisterAgentType(name: str)` messages, which tell the service the names of the agents which it is able to host.
|
||||
|
||||
* TODO: What other metadata should the worker give to the service?
|
||||
* TODO: Should we give the worker a unique id which can be used to identify it for its lifetime? Should we allow this to be specified by the worker process itself?
|
||||
|
||||
### Operation
|
||||
|
||||
Once the connection is established, and the service knows which agents the worker is capable of hosting, the worker may begin receiving requests for agents which it must host.
|
||||
Placement of agents happens in response to an `Event(...)` or `RpcRequest(...)` message.
|
||||
The worker maintains a _catalog_ of locally active agents: a mapping from agent id to agent instance.
|
||||
If a message arrives for an agent which does not have a corresponding entry in the catalog, the worker activates a new instance of that agent and inserts it into the catalog.
|
||||
The worker dispatches the message to the agent:
|
||||
|
||||
* For an `Event`, the agent processes the message and no response is generated.
|
||||
* For an `RpcRequest` message, the agent processes the message and generates a response of type `RpcResponse`. The worker routes the response to the original sender.
|
||||
|
||||
The worker maintains a mapping of outstanding requests, identified by `RpcRequest.id`, to a promise for a future `RpcResponse`.
|
||||
When an `RpcResponse` is received, the worker finds the corresponding request id and fulfils the promise using that response.
|
||||
If no response is received in a specified time frame (eg, 30s), the worker breaks the promise with a timeout error.
|
||||
|
||||
### Termination
|
||||
|
||||
When the worker is ready to shutdown, it closes the connection to the service and terminates. The service de-registers the worker and all agent instances which were hosted on it.
|
||||
76
python/packages/autogen-core/docs/src/index.rst
Normal file
76
python/packages/autogen-core/docs/src/index.rst
Normal file
@@ -0,0 +1,76 @@
|
||||
AGNext
|
||||
------
|
||||
|
||||
AGNext is a framework for building multi-agent applications with AI agents.
|
||||
|
||||
At a high level, it provides a framework for inter-agent communication and a
|
||||
suite of independent components for building and managing agents.
|
||||
You can implement agents in
|
||||
different programming languages and deploy them on different machines across organizational boundaries.
|
||||
You can also implement agents using other agent frameworks and run them in AGNext.
|
||||
|
||||
To start quickly, read the `Quick Start <getting-started/quickstart.html>`_ and
|
||||
follow the tutorial sections.
|
||||
|
||||
To learn about the core concepts of AGNext, read the sections starting
|
||||
from `Agent and Multi-Agent Application <core-concepts/agent-and-multi-agent-application.html>`_.
|
||||
|
||||
.. toctree::
|
||||
:caption: Core Concepts
|
||||
:hidden:
|
||||
|
||||
core-concepts/agent-and-multi-agent-application
|
||||
core-concepts/architecture
|
||||
core-concepts/application-stack
|
||||
core-concepts/agent-identity-and-lifecycle
|
||||
core-concepts/topic-and-subscription
|
||||
|
||||
.. toctree::
|
||||
:caption: Getting started
|
||||
:hidden:
|
||||
|
||||
getting-started/installation
|
||||
getting-started/quickstart
|
||||
getting-started/agent-and-agent-runtime
|
||||
getting-started/message-and-communication
|
||||
getting-started/model-clients
|
||||
getting-started/tools
|
||||
getting-started/multi-agent-design-patterns
|
||||
getting-started/group-chat
|
||||
getting-started/reflection
|
||||
|
||||
.. toctree::
|
||||
:caption: Guides
|
||||
:hidden:
|
||||
|
||||
guides/logging
|
||||
guides/worker-protocol
|
||||
|
||||
.. toctree::
|
||||
:caption: Cookbook
|
||||
:hidden:
|
||||
|
||||
cookbook/type-routed-agent
|
||||
cookbook/azure-openai-with-aad-auth
|
||||
cookbook/termination-with-intervention
|
||||
cookbook/buffered-memory
|
||||
cookbook/extracting-results-with-an-agent
|
||||
cookbook/openai-assistant-agent
|
||||
cookbook/langgraph-agent
|
||||
cookbook/llamaindex-agent
|
||||
|
||||
|
||||
.. toctree::
|
||||
:caption: Reference
|
||||
:hidden:
|
||||
|
||||
reference/autogen_core.components
|
||||
reference/autogen_core.application
|
||||
reference/autogen_core.base
|
||||
|
||||
.. toctree::
|
||||
:caption: Other
|
||||
:hidden:
|
||||
|
||||
contributing
|
||||
|
||||
195
python/packages/autogen-core/pyproject.toml
Normal file
195
python/packages/autogen-core/pyproject.toml
Normal file
@@ -0,0 +1,195 @@
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[project]
|
||||
name = "autogen-core"
|
||||
version = "0.0.1"
|
||||
description = "A small example package"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10"
|
||||
classifiers = [
|
||||
"Programming Language :: Python :: 3",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
]
|
||||
dependencies = [
|
||||
"openai>=1.3",
|
||||
"pillow",
|
||||
"aiohttp",
|
||||
"typing-extensions",
|
||||
"pydantic>=1.10,<3",
|
||||
"types-aiofiles",
|
||||
"grpcio",
|
||||
"protobuf",
|
||||
"tiktoken",
|
||||
"azure-core"
|
||||
]
|
||||
|
||||
[tool.hatch.envs.default]
|
||||
installer = "uv"
|
||||
dependencies = [
|
||||
"azure-identity",
|
||||
"aiofiles",
|
||||
"chess",
|
||||
"colorama",
|
||||
"grpcio-tools",
|
||||
"langgraph",
|
||||
"langchain-openai",
|
||||
"llama-index-readers-web",
|
||||
"llama-index-readers-wikipedia",
|
||||
"llama-index-tools-wikipedia",
|
||||
"llama-index-embeddings-azure-openai",
|
||||
"llama-index-llms-azure-openai",
|
||||
"llama-index",
|
||||
"markdownify",
|
||||
"mypy==1.10.0",
|
||||
"pip",
|
||||
"polars",
|
||||
"pyright==1.1.368",
|
||||
"pytest-asyncio",
|
||||
"pytest-mock",
|
||||
"pytest-xdist",
|
||||
"pytest",
|
||||
"python-dotenv",
|
||||
"requests",
|
||||
"ruff==0.4.8",
|
||||
"tavily-python",
|
||||
"textual-dev",
|
||||
"textual-imageview",
|
||||
"textual",
|
||||
"tiktoken",
|
||||
"types-aiofiles",
|
||||
"types-pillow",
|
||||
"types-protobuf",
|
||||
"types-requests",
|
||||
"wikipedia",
|
||||
"nbqa"
|
||||
]
|
||||
|
||||
[tool.hatch.envs.default.extra-scripts]
|
||||
pip = "{env:HATCH_UV} pip {args}"
|
||||
|
||||
[tool.hatch.envs.default.scripts]
|
||||
fmt = "ruff format"
|
||||
lint = "ruff check"
|
||||
test = "pytest -n auto"
|
||||
check = [
|
||||
"ruff format",
|
||||
"ruff check --fix",
|
||||
"pyright",
|
||||
"mypy --non-interactive --install-types",
|
||||
"pytest -n auto",
|
||||
# Only checking notebooks with mypy for now due to the fact pyright doesn't seem to be able to ignore the top level await
|
||||
"nbqa mypy docs/src",
|
||||
]
|
||||
|
||||
[tool.hatch.envs.test-matrix]
|
||||
template = "default"
|
||||
|
||||
[[tool.hatch.envs.test-matrix.matrix]]
|
||||
python = ["3.10", "3.11", "3.12"]
|
||||
|
||||
[tool.hatch.envs.docs]
|
||||
template = "default"
|
||||
dependencies = [
|
||||
"sphinx",
|
||||
"furo",
|
||||
"sphinxcontrib-apidoc",
|
||||
"myst-nb",
|
||||
"sphinx-autobuild",
|
||||
]
|
||||
|
||||
[tool.hatch.envs.docs.scripts]
|
||||
build = "sphinx-build docs/src docs/build"
|
||||
serve = "sphinx-autobuild --watch src docs/src docs/build"
|
||||
check = [
|
||||
"sphinx-build --fail-on-warning docs/src docs/build"
|
||||
]
|
||||
|
||||
# Benchmark environments
|
||||
[tool.hatch.envs.bench-humaneval-teamone]
|
||||
installer = "uv"
|
||||
detached = true
|
||||
dependencies = [
|
||||
"autogen_core@{root:uri}",
|
||||
"agbench@{root:uri}/tools/agbench",
|
||||
"team-one@{root:uri}/teams/team-one",
|
||||
]
|
||||
|
||||
[tool.hatch.envs.bench-humaneval-twoagents]
|
||||
installer = "uv"
|
||||
detached = true
|
||||
dependencies = [
|
||||
"autogen_core@{root:uri}",
|
||||
"agbench@{root:uri}/tools/agbench",
|
||||
]
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 120
|
||||
fix = true
|
||||
exclude = ["build", "dist", "src/autogen_core/application/protos"]
|
||||
target-version = "py310"
|
||||
include = ["src/**", "samples/*.py", "docs/**/*.ipynb"]
|
||||
|
||||
[tool.ruff.format]
|
||||
docstring-code-format = true
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "F", "W", "B", "Q", "I", "ASYNC"]
|
||||
ignore = ["F401", "E501"]
|
||||
|
||||
[tool.ruff.lint.flake8-tidy-imports]
|
||||
[tool.ruff.lint.flake8-tidy-imports.banned-api]
|
||||
"unittest".msg = "Use `pytest` instead."
|
||||
|
||||
[tool.mypy]
|
||||
files = ["src", "samples", "tests"]
|
||||
exclude = ["src/autogen_core/application/protos"]
|
||||
|
||||
strict = true
|
||||
python_version = "3.10"
|
||||
ignore_missing_imports = true
|
||||
|
||||
# from https://blog.wolt.com/engineering/2021/09/30/professional-grade-mypy-configuration/
|
||||
disallow_untyped_defs = true
|
||||
no_implicit_optional = true
|
||||
check_untyped_defs = true
|
||||
warn_return_any = true
|
||||
show_error_codes = true
|
||||
warn_unused_ignores = false
|
||||
|
||||
disallow_incomplete_defs = true
|
||||
disallow_untyped_decorators = true
|
||||
disallow_any_unimported = true
|
||||
|
||||
[tool.pyright]
|
||||
include = ["src", "tests", "samples"]
|
||||
typeCheckingMode = "strict"
|
||||
reportUnnecessaryIsInstance = false
|
||||
reportMissingTypeStubs = false
|
||||
exclude = ["src/autogen_core/application/protos"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
minversion = "6.0"
|
||||
testpaths = ["tests"]
|
||||
|
||||
[tool.hatch.build.hooks.protobuf]
|
||||
dependencies = ["hatch-protobuf", "mypy-protobuf~=3.0"]
|
||||
generate_pyi = false
|
||||
proto_paths = ["../protos"]
|
||||
output_path = "src/autogen_core/application/protos"
|
||||
|
||||
|
||||
[[tool.hatch.build.hooks.protobuf.generators]]
|
||||
name = "mypy"
|
||||
outputs = ["{proto_path}/{proto_name}_pb2.pyi"]
|
||||
|
||||
[[tool.hatch.build.hooks.protobuf.generators]]
|
||||
name = "mypy_grpc"
|
||||
outputs = ["{proto_path}/{proto_name}_pb2_grpc.pyi"]
|
||||
|
||||
[tool.nbqa.addopts]
|
||||
mypy = [
|
||||
"--disable-error-code=top-level-await"
|
||||
]
|
||||
113
python/packages/autogen-core/samples/README.md
Normal file
113
python/packages/autogen-core/samples/README.md
Normal file
@@ -0,0 +1,113 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples and demos of how to use AGNext.
|
||||
|
||||
- `common`: Contains common implementations and utilities used by the examples.
|
||||
- `core`: Contains examples that illustrate the core concepts of AGNext.
|
||||
- `tool-use`: Contains examples that illustrate tool use in AGNext.
|
||||
- `patterns`: Contains examples that illustrate how multi-agent patterns can be implemented in AGNext.
|
||||
- `demos`: Contains interactive demos that showcase applications that can be built using AGNext.
|
||||
|
||||
See [Running the examples](#running-the-examples) for instructions on how to run the examples.
|
||||
|
||||
## Core examples
|
||||
|
||||
We provide examples to illustrate the core concepts of AGNext: agents, runtime, and message passing.
|
||||
|
||||
- [`one_agent_direct.py`](core/one_agent_direct.py): A simple example of how to create a single agent powered by ChatCompletion model client. Communicate with the agent using direct communication.
|
||||
- [`inner_outer_direct.py`](core/inner_outer_direct.py): A simple example of how to create an agent that calls an inner agent using direct communication.
|
||||
- [`two_agents_pub_sub.py`](core/two_agents_pub_sub.py): An example of how to create two agents that communicate using broadcast communication (i.e., pub/sub).
|
||||
|
||||
## Tool use examples
|
||||
|
||||
We provide examples to illustrate how to use tools in AGNext:
|
||||
|
||||
- [`coding_direct.py`](tool-use/coding_direct.py): a code execution example with one agent that calls and executes tools to demonstrate tool use and reflection on tool use. This example uses direct communication.
|
||||
- [`coding_pub_sub.py`](tool-use/coding_pub_sub.py): a code execution example with two agents, one for calling tool and one for executing the tool, to demonstrate tool use and reflection on tool use. This example uses broadcast communication.
|
||||
- [`custom_tool_direct.py`](tool-use/custom_tool_direct.py): a custom function tool example with one agent that calls and executes tools to demonstrate tool use and reflection on tool use. This example uses direct communication.
|
||||
- [`coding_direct_with_intercept.py`](tool-use/coding_direct_with_intercept.py): an example showing human-in-the-loop for approving or denying tool execution.
|
||||
|
||||
## Pattern examples
|
||||
|
||||
We provide examples to illustrate how multi-agent patterns can be implemented in AGNext:
|
||||
|
||||
- [`coder_executor.py`](patterns/coder_executor.py): An example of how to create a coder-executor reflection pattern. This example creates a plot of stock prices using the Yahoo Finance API.
|
||||
- [`coder_reviewer.py`](patterns/coder_reviewer.py): An example of how to create a coder-reviewer reflection pattern.
|
||||
- [`group_chat.py`](patterns/group_chat.py): An example of how to create a round-robin group chat among three agents.
|
||||
- [`mixture_of_agents.py`](patterns/mixture_of_agents.py): An example of how to create a [mixture of agents](https://github.com/togethercomputer/moa).
|
||||
- [`multi_agent_debate.py`](patterns/multi_agent_debate.py): An example of how to create a [sparse multi-agent debate](https://arxiv.org/abs/2406.11776) pattern.
|
||||
|
||||
## Demos
|
||||
|
||||
We provide interactive demos that showcase applications that can be built using AGNext:
|
||||
|
||||
- [`assistant.py`](demos/assistant.py): a demonstration of how to use the OpenAI Assistant API to create
|
||||
a ChatGPT agent.
|
||||
- [`chat_room.py`](demos/chat_room.py): An example of how to create a chat room of custom agents without
|
||||
a centralized orchestrator.
|
||||
- [`illustrator_critics.py`](demos/illustrator_critics.py): a demo that uses an illustrator, critics and descriptor agent
|
||||
to implement the reflection pattern for image generation.
|
||||
- [`software_consultancy.py`](demos/software_consultancy.py): a demonstration of multi-agent interaction using
|
||||
the group chat pattern.
|
||||
- [`chess_game.py`](demos/chess_game.py): an example with two chess player agents that execute their own tools to demonstrate tool use and reflection on tool use.
|
||||
|
||||
## Bring Your Own Agent
|
||||
|
||||
We provide examples on how to integrate other agents with the platform:
|
||||
|
||||
- [`llamaindex_agent.py`](byoa/llamaindex_agent.py): An example that shows how to consume a LlamaIndex agent.
|
||||
- [`langgraph_agent.py`](byoa/langgraph_agent.py): An example that shows how to consume a LangGraph agent.
|
||||
|
||||
## Running the examples
|
||||
|
||||
### Prerequisites
|
||||
|
||||
First, you need a shell with AGNext and required dependencies installed.
|
||||
To do this, in the samples directory, run:
|
||||
|
||||
```bash
|
||||
hatch shell
|
||||
```
|
||||
|
||||
### Using Azure OpenAI API
|
||||
|
||||
For Azure OpenAI API, you need to set the following environment variables:
|
||||
|
||||
```bash
|
||||
export OPENAI_API_TYPE=azure
|
||||
export AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint
|
||||
export AZURE_OPENAI_API_VERSION=your_azure_openai_api_version
|
||||
```
|
||||
|
||||
By default, we use Azure Active Directory (AAD) for authentication.
|
||||
You need to run `az login` first to authenticate with Azure.
|
||||
You can also
|
||||
use API key authentication by setting the following environment variables:
|
||||
|
||||
```bash
|
||||
export AZURE_OPENAI_API_KEY=your_azure_openai_api_key
|
||||
```
|
||||
|
||||
### Using OpenAI API
|
||||
|
||||
For OpenAI API, you need to set the following environment variables.
|
||||
|
||||
```bash
|
||||
export OPENAI_API_TYPE=openai
|
||||
export OPENAI_API_KEY=your_openai_api_key
|
||||
```
|
||||
|
||||
### Running
|
||||
|
||||
To run an example, just run the corresponding Python script. For example:
|
||||
|
||||
```bash
|
||||
hatch shell
|
||||
python core/one_agent_direct.py
|
||||
```
|
||||
|
||||
Or simply:
|
||||
|
||||
```bash
|
||||
hatch run python core/one_agent_direct.py
|
||||
```
|
||||
136
python/packages/autogen-core/samples/byoa/langgraph_agent.py
Normal file
136
python/packages/autogen-core/samples/byoa/langgraph_agent.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""
|
||||
This example demonstrates how to create an AI agent using LangGraph.
|
||||
Based on the example in the LangGraph documentation:
|
||||
https://langchain-ai.github.io/langgraph/
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Callable, List, Literal
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, MessageContext
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from langchain_core.tools import tool # pyright: ignore
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langgraph.graph import END, MessagesState, StateGraph
|
||||
from langgraph.prebuilt import ToolNode
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A plain text message exchanged with the agent."""

    content: str  # the text payload of the message
|
||||
|
||||
|
||||
# Define the tools for the agent to use
|
||||
@tool  # pyright: ignore
def get_weather(location: str) -> str:
    """Get the current weather for the given location."""
    # NOTE: the docstring above is surfaced to the LLM as the tool description
    # by the @tool decorator, so it must describe what the tool actually does
    # (the previous "Call to surf the web." text was copied from a search-tool
    # example and misdescribed this weather tool).
    # This is a placeholder, but don't tell the LLM that...
    place = location.lower()  # lowercase once instead of per comparison
    if "sf" in place or "san francisco" in place:
        return "It's 60 degrees and foggy."
    return "It's 90 degrees and sunny."
|
||||
|
||||
|
||||
# Define the tool-use agent using LangGraph.
|
||||
class LangGraphToolUseAgent(RoutedAgent):
    """A routed agent that delegates each user message to a LangGraph tool-use graph.

    The compiled graph alternates between an ``agent`` node (the tool-bound chat
    model) and a ``tools`` node (executes the requested tool calls) until the
    model replies without any tool calls.
    """

    def __init__(self, description: str, model: ChatOpenAI, tools: List[Callable[..., Any]]) -> None:  # pyright: ignore
        """Build and compile the LangGraph workflow.

        Args:
            description: Human-readable description passed to the base agent.
            model: Chat model; the given tools are bound to it for tool calling.
            tools: Callables the model is allowed to invoke.
        """
        super().__init__(description)
        self._model = model.bind_tools(tools)  # pyright: ignore

        # Routing predicate: keep cycling to the "tools" node while the model
        # emits tool calls; otherwise end the run.
        def should_continue(state: MessagesState) -> Literal["tools", END]:  # type: ignore
            messages = state["messages"]
            last_message = messages[-1]
            # If the LLM makes a tool call, then we route to the "tools" node
            if last_message.tool_calls:  # type: ignore
                return "tools"
            # Otherwise, we stop (reply to the user)
            return END

        # Graph node that invokes the bound model on the accumulated messages.
        async def call_model(state: MessagesState):  # type: ignore
            messages = state["messages"]
            response = await self._model.ainvoke(messages)
            # We return a list, because this will get added to the existing list
            return {"messages": [response]}

        tool_node = ToolNode(tools)  # pyright: ignore

        # Define a new graph
        self._workflow = StateGraph(MessagesState)

        # Define the two nodes we will cycle between
        self._workflow.add_node("agent", call_model)  # pyright: ignore
        self._workflow.add_node("tools", tool_node)  # pyright: ignore

        # Set the entrypoint as `agent`
        # This means that this node is the first one called
        self._workflow.set_entry_point("agent")

        # We now add a conditional edge
        self._workflow.add_conditional_edges(
            # First, we define the start node. We use `agent`.
            # This means these are the edges taken after the `agent` node is called.
            "agent",
            # Next, we pass in the function that will determine which node is called next.
            should_continue,  # type: ignore
        )

        # We now add a normal edge from `tools` to `agent`.
        # This means that after `tools` is called, `agent` node is called next.
        self._workflow.add_edge("tools", "agent")

        # Finally, we compile it!
        # This compiles it into a LangChain Runnable,
        # meaning you can use it as you would any other runnable.
        # (A checkpointer/memory could optionally be passed to compile() here,
        # but this example compiles the graph without one.)
        self._app = self._workflow.compile()

    @message_handler
    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:
        """Run the compiled graph on the user's message and return the final reply."""
        # Use the Runnable
        final_state = await self._app.ainvoke(
            {
                "messages": [
                    SystemMessage(
                        content="You are a helpful AI assistant. You can use tools to help answer questions."
                    ),
                    HumanMessage(content=message.content),
                ]
            },
            config={"configurable": {"thread_id": 42}},
        )
        # The last message in the final state is the model's answer.
        response = Message(content=final_state["messages"][-1].content)
        return response
|
||||
|
||||
|
||||
async def main() -> None:
    """Run a one-shot weather query against the LangGraph-backed agent."""
    runtime = SingleThreadedAgentRuntime()

    # Factory used by the runtime to instantiate the agent on demand.
    def agent_factory() -> LangGraphToolUseAgent:
        return LangGraphToolUseAgent(
            "Tool use agent",
            ChatOpenAI(model="gpt-4o-mini"),
            [get_weather],
        )

    await runtime.register("langgraph_tool_use_agent", agent_factory)
    agent_id = AgentId("langgraph_tool_use_agent", key="default")

    # Process messages until we stop the runtime below.
    runtime.start()
    reply = await runtime.send_message(Message("What's the weather in SF?"), agent_id)
    print(reply.content)
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep third-party noise at WARNING, but show detailed autogen_core logs.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
148
python/packages/autogen-core/samples/byoa/llamaindex_agent.py
Normal file
148
python/packages/autogen-core/samples/byoa/llamaindex_agent.py
Normal file
@@ -0,0 +1,148 @@
|
||||
"""
|
||||
This example shows how to integrate a LlamaIndex agent.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, MessageContext
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from llama_index.core import Settings
|
||||
from llama_index.core.agent import ReActAgent
|
||||
from llama_index.core.agent.runner.base import AgentRunner
|
||||
from llama_index.core.base.llms.types import (
|
||||
ChatMessage,
|
||||
MessageRole,
|
||||
)
|
||||
from llama_index.core.chat_engine.types import AgentChatResponse
|
||||
from llama_index.core.memory import ChatSummaryMemoryBuffer
|
||||
from llama_index.core.memory.types import BaseMemory
|
||||
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
|
||||
from llama_index.llms.azure_openai import AzureOpenAI
|
||||
from llama_index.tools.wikipedia import WikipediaToolSpec
|
||||
|
||||
|
||||
@dataclass
class Resource:
    """A source consulted while answering: a retrieved node or a tool call."""

    content: str  # text of the retrieved node or the tool output
    node_id: str  # identifier of the source node (tool name for tool calls)
    score: Optional[float] = None  # retrieval relevance score, when available
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A chat message, optionally annotated with the sources used to produce it."""

    content: str  # the message text
    sources: Optional[List[Resource]] = None  # sources consulted when answering
|
||||
|
||||
|
||||
class LlamaIndexAgent(RoutedAgent):
    """Adapter that exposes a LlamaIndex ``AgentRunner`` as a routed agent.

    Optionally maintains a LlamaIndex chat memory: when provided, prior turns
    are fetched before each call, and the new user/assistant turns are stored
    after a successful response.
    """

    def __init__(self, description: str, llama_index_agent: AgentRunner, memory: BaseMemory | None = None) -> None:
        """Wrap the given LlamaIndex agent and (optionally) its chat memory."""
        super().__init__(description)

        self._llama_index_agent = llama_index_agent
        self._memory = memory

    @message_handler
    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:
        """Answer a user message with the wrapped agent, attaching its sources."""
        # Retrieve history messages from memory (stays empty when no memory is set).
        history_messages: List[ChatMessage] = []

        response: AgentChatResponse  # pyright: ignore
        if self._memory is not None:
            history_messages = self._memory.get(input=message.content)

            response = await self._llama_index_agent.achat(message=message.content, history_messages=history_messages)  # pyright: ignore
        else:
            response = await self._llama_index_agent.achat(message=message.content)  # pyright: ignore

        if isinstance(response, AgentChatResponse):
            if self._memory is not None:
                # Persist both sides of this turn for future calls.
                self._memory.put(ChatMessage(role=MessageRole.USER, content=message.content))
                self._memory.put(ChatMessage(role=MessageRole.ASSISTANT, content=response.response))

            assert isinstance(response.response, str)

            # Retrieved nodes consulted while answering.
            resources: List[Resource] = [
                Resource(content=source_node.get_text(), score=source_node.score, node_id=source_node.id_)
                for source_node in response.source_nodes
            ]

            # Tool invocations also count as sources (node_id carries the tool name).
            tools: List[Resource] = [
                Resource(content=source.content, node_id=source.tool_name) for source in response.sources
            ]

            resources.extend(tools)
            return Message(content=response.response, sources=resources)
        else:
            # Unexpected response type from the wrapped agent: fail gracefully.
            return Message(content="I'm sorry, I don't have an answer for you.")
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire a LlamaIndex ReAct agent into the runtime and ask it one question."""
    runtime = SingleThreadedAgentRuntime()

    def _env(name: str) -> str:
        # Missing variables fall back to "" so client construction never raises here.
        return os.environ.get(name, "")

    # Configure the Azure OpenAI chat model used by the LlamaIndex agent.
    azure_llm = AzureOpenAI(
        deployment_name=_env("AZURE_OPENAI_MODEL"),
        temperature=0.0,
        api_key=_env("AZURE_OPENAI_KEY"),
        azure_endpoint=_env("AZURE_OPENAI_ENDPOINT"),
        api_version=_env("AZURE_OPENAI_API_VERSION"),
    )

    # Configure the matching embedding model.
    azure_embedding = AzureOpenAIEmbedding(
        deployment_name=_env("AZURE_OPENAI_EMBEDDING_MODEL"),
        temperature=0.0,
        api_key=_env("AZURE_OPENAI_KEY"),
        azure_endpoint=_env("AZURE_OPENAI_ENDPOINT"),
        api_version=_env("AZURE_OPENAI_API_VERSION"),
    )

    # Make both models the LlamaIndex-wide defaults.
    Settings.llm = azure_llm
    Settings.embed_model = azure_embedding

    # Pick the Wikipedia search tool out of the tool spec.
    # NOTE(review): index 1 assumes a fixed ordering of to_tool_list() — confirm.
    wikipedia_spec = WikipediaToolSpec()
    wikipedia_tool = wikipedia_spec.to_tool_list()[1]

    # Summarizing chat memory keeps the conversation under the token budget.
    summary_memory = ChatSummaryMemoryBuffer(llm=azure_llm, token_limit=16000)

    # Build the ReAct-style LlamaIndex agent around the tool and memory.
    react_agent = ReActAgent.from_tools(
        tools=[wikipedia_tool], llm=azure_llm, max_iterations=8, memory=summary_memory, verbose=True
    )

    await runtime.register(
        "chat_agent",
        lambda: LlamaIndexAgent("Chat agent", llama_index_agent=react_agent),
    )
    agent_id = AgentId("chat_agent", key="default")

    runtime.start()

    # Ask one question and print the agent's answer.
    query = Message(content="What are the best movies from studio Ghibli?")
    answer = await runtime.send_message(query, agent_id)
    assert isinstance(answer, Message)
    print(answer.content)

    # Also print the sources the agent consulted, when it reports any.
    if answer.sources is not None:
        for src in answer.sources:
            print(src.content)

    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep third-party noise at WARNING, but show detailed autogen_core logs.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,11 @@
|
||||
# Re-export the agent implementations so callers can import them from the
# package root instead of the private submodules.
from ._chat_completion_agent import ChatCompletionAgent
from ._image_generation_agent import ImageGenerationAgent
from ._oai_assistant import OpenAIAssistantAgent
from ._user_proxy import UserProxyAgent

# Public API of this package.
__all__ = [
    "ChatCompletionAgent",
    "OpenAIAssistantAgent",
    "UserProxyAgent",
    "ImageGenerationAgent",
]
|
||||
@@ -0,0 +1,262 @@
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Any, Coroutine, Dict, List, Mapping, Sequence, Tuple
|
||||
|
||||
from autogen_core.base import AgentId, CancellationToken, MessageContext
|
||||
from autogen_core.components import (
|
||||
DefaultTopicId,
|
||||
FunctionCall,
|
||||
RoutedAgent,
|
||||
message_handler,
|
||||
)
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import (
|
||||
ChatCompletionClient,
|
||||
FunctionExecutionResult,
|
||||
FunctionExecutionResultMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
from autogen_core.components.tools import Tool
|
||||
|
||||
from ..types import (
|
||||
FunctionCallMessage,
|
||||
Message,
|
||||
MultiModalMessage,
|
||||
PublishNow,
|
||||
Reset,
|
||||
RespondNow,
|
||||
ResponseFormat,
|
||||
TextMessage,
|
||||
ToolApprovalRequest,
|
||||
ToolApprovalResponse,
|
||||
)
|
||||
from ..utils import convert_messages_to_llm_messages
|
||||
|
||||
|
||||
class ChatCompletionAgent(RoutedAgent):
    """An agent implementation that uses the ChatCompletion API to generate
    responses and execute tools.

    Args:
        description (str): The description of the agent.
        system_messages (List[SystemMessage]): The system messages to use for
            the ChatCompletion API.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        model_client (ChatCompletionClient): The client to use for the
            ChatCompletion API.
        tools (Sequence[Tool], optional): The tools used by the agent. Defaults
            to an empty sequence. If no tools are provided, the agent cannot
            handle tool calls. If tools are provided, and the response from the
            model is a list of tool calls, the agent will call itself with the
            tool calls until it gets a response that is not a list of tool
            calls, and then use that response as the final response.
        tool_approver (AgentId | None, optional): The agent that approves tool
            calls. Defaults to None. If no tool approver is provided, the agent
            will execute the tools without approval. If a tool approver is
            provided, the agent will send a request to the tool approver before
            executing the tools.
    """

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        memory: ChatMemory[Message],
        model_client: ChatCompletionClient,
        # Immutable default avoids the shared-mutable-default-argument pitfall.
        tools: Sequence[Tool] = (),
        tool_approver: AgentId | None = None,
    ) -> None:
        super().__init__(description)
        self._description = description
        self._system_messages = system_messages
        self._client = model_client
        self._memory = memory
        self._tools = tools
        self._tool_approver = tool_approver

    @message_handler()
    async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:
        """Handle a text message. This method adds the message to the memory and
        does not generate any message."""
        await self._memory.add_message(message)

    @message_handler()
    async def on_multi_modal_message(self, message: MultiModalMessage, ctx: MessageContext) -> None:
        """Handle a multimodal message. This method adds the message to the memory
        and does not generate any message."""
        await self._memory.add_message(message)

    @message_handler()
    async def on_reset(self, message: Reset, ctx: MessageContext) -> None:
        """Handle a reset message. This method clears the memory."""
        await self._memory.clear()

    @message_handler()
    async def on_respond_now(self, message: RespondNow, ctx: MessageContext) -> TextMessage | FunctionCallMessage:
        """Handle a respond now message. This method generates a response and
        returns it to the sender."""
        return await self._generate_response(message.response_format, ctx)

    @message_handler()
    async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None:
        """Handle a publish now message. This method generates a response and
        publishes it."""
        response = await self._generate_response(message.response_format, ctx)
        await self.publish_message(response, topic_id=DefaultTopicId())

    @message_handler()
    async def on_tool_call_message(
        self, message: FunctionCallMessage, ctx: MessageContext
    ) -> FunctionExecutionResultMessage:
        """Handle a tool call message. This method executes the tools and
        returns the results."""
        if len(self._tools) == 0:
            raise ValueError("No tools available")

        # Record the tool call message before executing anything.
        await self._memory.add_message(message)

        # Execute the tool calls.
        results: List[FunctionExecutionResult] = []
        execution_futures: List[Coroutine[Any, Any, Tuple[str, str]]] = []
        for function_call in message.content:
            # Parse the arguments; report malformed JSON as a result instead of
            # failing the whole batch.
            try:
                arguments = json.loads(function_call.arguments)
            except json.JSONDecodeError:
                results.append(
                    FunctionExecutionResult(
                        content=f"Error: Could not parse arguments for function {function_call.name}.",
                        call_id=function_call.id,
                    )
                )
                continue
            # Schedule the function execution; all calls run concurrently below.
            future = self._execute_function(
                function_call.name,
                arguments,
                function_call.id,
                cancellation_token=ctx.cancellation_token,
            )
            execution_futures.append(future)
        if execution_futures:
            # Wait for all async results.
            execution_results = await asyncio.gather(*execution_futures)
            for execution_result, call_id in execution_results:
                results.append(FunctionExecutionResult(content=execution_result, call_id=call_id))

        # Create, record, and return the tool call result message.
        tool_call_result_msg = FunctionExecutionResultMessage(content=results)
        await self._memory.add_message(tool_call_result_msg)
        return tool_call_result_msg

    async def _call_model(self, response_format: ResponseFormat):
        """Send the system messages plus the current chat history to the model
        and return the raw model response."""
        historical_messages = await self._memory.get_messages()
        return await self._client.create(
            self._system_messages + convert_messages_to_llm_messages(historical_messages, self.metadata["type"]),
            tools=self._tools,
            json_output=response_format == ResponseFormat.json_object,
        )

    async def _generate_response(
        self,
        response_format: ResponseFormat,
        ctx: MessageContext,
    ) -> TextMessage | FunctionCallMessage:
        """Generate a response with the model, executing any tool calls (via a
        message to self) until the model returns a non-tool-call response.

        The final response is stored in memory before being returned.
        """
        response = await self._call_model(response_format)

        # If the agent has tools, and the response is a list of tool calls,
        # iterate with itself until we get a response that is not a list of
        # tool calls.
        while (
            len(self._tools) > 0
            and isinstance(response.content, list)
            and all(isinstance(x, FunctionCall) for x in response.content)
        ):
            # Send a function call message to itself to execute the tools.
            response = await self.send_message(
                message=FunctionCallMessage(content=response.content, source=self.metadata["type"]),
                recipient=self.id,
                cancellation_token=ctx.cancellation_token,
            )
            # Query the model again, now with the tool results in memory.
            response = await self._call_model(response_format)

        final_response: Message
        if isinstance(response.content, str):
            # If the response is a string, return a text message.
            final_response = TextMessage(content=response.content, source=self.metadata["type"])
        elif isinstance(response.content, list) and all(isinstance(x, FunctionCall) for x in response.content):
            # If the response is a list of function calls, return a function call message.
            final_response = FunctionCallMessage(content=response.content, source=self.metadata["type"])
        else:
            raise ValueError(f"Unexpected response: {response.content}")

        # Add the response to the chat messages.
        await self._memory.add_message(final_response)
        return final_response

    async def _execute_function(
        self,
        name: str,
        args: Dict[str, Any],
        call_id: str,
        cancellation_token: CancellationToken,
    ) -> Tuple[str, str]:
        """Execute a single tool call and return ``(result_text, call_id)``.

        Errors (unknown tool, rejected approval, tool exceptions) are returned
        as text so one failing tool does not abort the whole batch.
        """
        # Find the tool by name.
        tool = next((t for t in self._tools if t.name == name), None)
        if tool is None:
            return (f"Error: tool {name} not found.", call_id)

        # Check if the tool call needs approval before execution.
        if self._tool_approver is not None:
            # Send a tool approval request.
            approval_request = ToolApprovalRequest(
                tool_call=FunctionCall(id=call_id, arguments=json.dumps(args), name=name)
            )
            approval_response = await self.send_message(
                message=approval_request,
                recipient=self._tool_approver,
                cancellation_token=cancellation_token,
            )
            if not isinstance(approval_response, ToolApprovalResponse):
                raise ValueError(f"Expecting {ToolApprovalResponse.__name__}, received: {type(approval_response)}")
            if not approval_response.approved:
                # Fixed: the original message said "approved" for a rejected call.
                return (f"Error: tool {name} not approved, reason: {approval_response.reason}", call_id)

        try:
            result = await tool.run_json(args, cancellation_token)
            result_as_str = tool.return_value_as_string(result)
        except Exception as e:
            result_as_str = f"Error: {str(e)}"
        return (result_as_str, call_id)

    def save_state(self) -> Mapping[str, Any]:
        """Serialize the memory and system messages for later restoration."""
        return {
            "memory": self._memory.save_state(),
            "system_messages": self._system_messages,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore state produced by :meth:`save_state`."""
        self._memory.load_state(state["memory"])
        self._system_messages = state["system_messages"]
|
||||
@@ -0,0 +1,79 @@
|
||||
from typing import Literal
|
||||
|
||||
import openai
|
||||
from autogen_core.base import CancellationToken, MessageContext
|
||||
from autogen_core.components import (
|
||||
DefaultTopicId,
|
||||
Image,
|
||||
RoutedAgent,
|
||||
message_handler,
|
||||
)
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
|
||||
from ..types import (
|
||||
Message,
|
||||
MultiModalMessage,
|
||||
PublishNow,
|
||||
Reset,
|
||||
TextMessage,
|
||||
)
|
||||
|
||||
|
||||
class ImageGenerationAgent(RoutedAgent):
    """An agent that generates images using DALL-E models. It publishes the
    generated images as MultiModalMessage.

    Args:
        description (str): The description of the agent.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        client (openai.AsyncClient): The client to use for the OpenAI API.
        model (Literal["dall-e-2", "dall-e-3"], optional): The DALL-E model to use. Defaults to "dall-e-2".
    """

    def __init__(
        self,
        description: str,
        memory: ChatMemory[Message],
        client: openai.AsyncClient,
        model: Literal["dall-e-2", "dall-e-3"] = "dall-e-2",
    ):
        super().__init__(description)
        self._client = client
        self._model = model
        self._memory = memory

    @message_handler
    async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:
        """Handle a text message. This method adds the message to the memory."""
        await self._memory.add_message(message)

    @message_handler
    async def on_reset(self, message: Reset, ctx: MessageContext) -> None:
        """Handle a reset message. This method clears the memory."""
        await self._memory.clear()

    @message_handler
    async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None:
        """Handle a publish now message. This method generates an image using a DALL-E model with
        a prompt. The prompt is a concatenation of all TextMessages in the memory. The generated
        image is published as a MultiModalMessage."""
        response = await self._generate_response(ctx.cancellation_token)
        await self.publish_message(response, topic_id=DefaultTopicId())

    async def _generate_response(self, cancellation_token: CancellationToken) -> MultiModalMessage:
        """Build a prompt from all TextMessages in memory and request one image
        from the model, returned base64-encoded and wrapped in a MultiModalMessage."""
        messages = await self._memory.get_messages()
        if len(messages) == 0:
            # Nothing to prompt with; reply with a request for more information.
            return MultiModalMessage(
                content=["I need more information to generate an image."], source=self.metadata["type"]
            )
        prompt = ""
        for m in messages:
            # Only text messages are supported as prompt material.
            assert isinstance(m, TextMessage)
            prompt += m.content + "\n"
        # Fixed: the original called prompt.strip() and discarded the result,
        # leaving a trailing newline in the prompt.
        prompt = prompt.strip()
        response = await self._client.images.generate(model=self._model, prompt=prompt, response_format="b64_json")
        assert len(response.data) > 0 and response.data[0].b64_json is not None
        # Create a MultiModalMessage with the image.
        image = Image.from_base64(response.data[0].b64_json)
        multi_modal_message = MultiModalMessage(content=[image], source=self.metadata["type"])
        return multi_modal_message
|
||||
@@ -0,0 +1,137 @@
|
||||
from typing import Any, Callable, List, Mapping
|
||||
|
||||
import openai
|
||||
from autogen_core.base import (
|
||||
CancellationToken,
|
||||
MessageContext, # type: ignore
|
||||
)
|
||||
from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler
|
||||
from openai import AsyncAssistantEventHandler
|
||||
from openai.types import ResponseFormatJSONObject, ResponseFormatText
|
||||
|
||||
from ..types import PublishNow, Reset, RespondNow, ResponseFormat, TextMessage
|
||||
|
||||
|
||||
class OpenAIAssistantAgent(RoutedAgent):
    """An agent implementation that uses the OpenAI Assistant API to generate
    responses.

    Args:
        description (str): The description of the agent.
        client (openai.AsyncClient): The client to use for the OpenAI API.
        assistant_id (str): The assistant ID to use for the OpenAI API.
        thread_id (str): The thread ID to use for the OpenAI API.
        assistant_event_handler_factory (Callable[[], AsyncAssistantEventHandler], optional):
            A factory function to create an async assistant event handler. Defaults to None.
            If provided, the agent will use the streaming mode with the event handler.
            If not provided, the agent will use the blocking mode to generate responses.
    """

    def __init__(
        self,
        description: str,
        client: openai.AsyncClient,
        assistant_id: str,
        thread_id: str,
        assistant_event_handler_factory: (Callable[[], AsyncAssistantEventHandler] | None) = None,
    ) -> None:
        super().__init__(description)
        self._client = client
        self._assistant_id = assistant_id
        self._thread_id = thread_id
        self._assistant_event_handler_factory = assistant_event_handler_factory

    @message_handler()
    async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:
        """Handle a text message. This method adds the message to the thread."""
        # Persist the incoming message on the assistant thread, recording the
        # original sender in the message metadata.
        _ = await self._client.beta.threads.messages.create(
            thread_id=self._thread_id,
            content=message.content,
            role="user",
            metadata={"sender": message.source},
        )

    @message_handler()
    async def on_reset(self, message: Reset, ctx: MessageContext) -> None:
        """Handle a reset message. This method deletes all messages in the thread."""
        # Page through the thread, collecting every message id; the last
        # collected id is used as the cursor for the next page.
        collected_ids: List[str] = []
        while True:
            if collected_ids:
                page = await self._client.beta.threads.messages.list(self._thread_id, after=collected_ids[-1])
            else:
                page = await self._client.beta.threads.messages.list(self._thread_id)
            collected_ids.extend(item.id for item in page.data)
            if not page.has_next_page():
                break
        # Delete every collected message.
        for message_id in collected_ids:
            status = await self._client.beta.threads.messages.delete(message_id=message_id, thread_id=self._thread_id)
            assert status.deleted is True

    @message_handler()
    async def on_respond_now(self, message: RespondNow, ctx: MessageContext) -> TextMessage:
        """Handle a respond now message. This method generates a response and returns it to the sender."""
        return await self._generate_response(message.response_format, ctx.cancellation_token)

    @message_handler()
    async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None:
        """Handle a publish now message. This method generates a response and publishes it."""
        reply = await self._generate_response(message.response_format, ctx.cancellation_token)
        await self.publish_message(reply, DefaultTopicId())

    async def _generate_response(
        self,
        requested_response_format: ResponseFormat,
        cancellation_token: CancellationToken,
    ) -> TextMessage:
        """Run the assistant on the thread and return its last text message."""
        # Map the requested format onto the OpenAI response-format objects.
        if requested_response_format == ResponseFormat.json_object:
            response_format = ResponseFormatJSONObject(type="json_object")  # type: ignore
        else:
            response_format = ResponseFormatText(type="text")  # type: ignore

        if self._assistant_event_handler_factory is None:
            # Blocking mode.
            run = await self._client.beta.threads.runs.create(
                thread_id=self._thread_id,
                assistant_id=self._assistant_id,
                response_format=response_format,  # type: ignore
            )
        else:
            # Streaming mode: forward events to a fresh handler, then take the
            # final run from the stream.
            async with self._client.beta.threads.runs.stream(
                thread_id=self._thread_id,
                assistant_id=self._assistant_id,
                event_handler=self._assistant_event_handler_factory(),
                response_format=response_format,  # type: ignore
            ) as stream:
                run = await stream.get_final_run()

        if run.status != "completed":
            # TODO: handle other statuses.
            raise ValueError(f"Run did not complete successfully: {run}")

        # Fetch only the newest message produced by this run.
        newest = await self._client.beta.threads.messages.list(self._thread_id, run_id=run.id, order="desc", limit=1)
        last_message_content = newest.data[0].content

        # TODO: handle array of content.
        text_parts = [part for part in last_message_content if part.type == "text"]
        if not text_parts:
            raise ValueError(f"Expected text content in the last message: {last_message_content}")

        # TODO: handle multiple text content.
        return TextMessage(content=text_parts[0].text.value, source=self.metadata["type"])

    def save_state(self) -> Mapping[str, Any]:
        """Serialize the assistant and thread identifiers."""
        return {
            "assistant_id": self._assistant_id,
            "thread_id": self._thread_id,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore identifiers produced by :meth:`save_state`."""
        self._assistant_id = state["assistant_id"]
        self._thread_id = state["thread_id"]
|
||||
@@ -0,0 +1,33 @@
|
||||
import asyncio
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler
|
||||
|
||||
from ..types import PublishNow, TextMessage
|
||||
|
||||
|
||||
class UserProxyAgent(RoutedAgent):
    """An agent that proxies user input from the console. Override the `get_user_input`
    method to customize how user input is retrieved.

    Args:
        description (str): The description of the agent.
        user_input_prompt (str): The console prompt to show to the user when asking for input.
    """

    def __init__(self, description: str, user_input_prompt: str) -> None:
        super().__init__(description)
        self._user_input_prompt = user_input_prompt

    @message_handler()
    async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None:
        """Handle a publish now message. This method prompts the user for input, then publishes it."""
        text = await self.get_user_input(self._user_input_prompt)
        reply = TextMessage(content=text, source=self.metadata["type"])
        await self.publish_message(reply, topic_id=DefaultTopicId())

    async def get_user_input(self, prompt: str) -> str:
        """Get user input from the console. Override this method to customize how user input is retrieved."""
        # input() blocks, so run it in the default executor to keep the event
        # loop responsive.
        event_loop = asyncio.get_event_loop()
        return await event_loop.run_in_executor(None, input, prompt)
|
||||
@@ -0,0 +1,4 @@
|
||||
from ._buffered import BufferedChatMemory
|
||||
from ._head_and_tail import HeadAndTailChatMemory
|
||||
|
||||
__all__ = ["BufferedChatMemory", "HeadAndTailChatMemory"]
|
||||
@@ -0,0 +1,47 @@
|
||||
from typing import Any, List, Mapping
|
||||
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import FunctionExecutionResultMessage
|
||||
|
||||
from ..types import Message
|
||||
|
||||
|
||||
class BufferedChatMemory(ChatMemory[Message]):
    """A buffered chat memory that keeps a view of the last n messages,
    where n is the buffer size. The buffer size is set at initialization.

    Args:
        buffer_size (int): The size of the buffer.
    """

    def __init__(self, buffer_size: int) -> None:
        self._messages: List[Message] = []
        self._buffer_size = buffer_size

    async def add_message(self, message: Message) -> None:
        """Add a message to the memory."""
        self._messages.append(message)

    async def get_messages(self) -> List[Message]:
        """Get at most `buffer_size` recent messages."""
        # Fixed: with buffer_size == 0 the slice [-0:] returned the entire
        # history instead of an empty view.
        if self._buffer_size <= 0:
            return []
        messages = self._messages[-self._buffer_size :]
        # Drop a leading function execution result message: without its
        # matching call it would be an invalid start for the conversation.
        if messages and isinstance(messages[0], FunctionExecutionResultMessage):
            messages = messages[1:]
        return messages

    async def clear(self) -> None:
        """Clear the message memory."""
        self._messages = []

    def save_state(self) -> Mapping[str, Any]:
        """Serialize the memory; returns a shallow copy of the message list."""
        return {
            "messages": list(self._messages),
            "buffer_size": self._buffer_size,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore state produced by :meth:`save_state`."""
        self._messages = state["messages"]
        self._buffer_size = state["buffer_size"]
|
||||
@@ -0,0 +1,67 @@
|
||||
from typing import Any, List, Mapping
|
||||
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import FunctionExecutionResultMessage
|
||||
|
||||
from ..types import FunctionCallMessage, Message, TextMessage
|
||||
|
||||
|
||||
class HeadAndTailChatMemory(ChatMemory[Message]):
    """A chat memory that keeps a view of the first n and last m messages,
    where n is the head size and m is the tail size. The head and tail sizes
    are set at initialization.

    Args:
        head_size (int): The size of the head.
        tail_size (int): The size of the tail.
    """

    def __init__(self, head_size: int, tail_size: int) -> None:
        self._messages: List[Message] = []
        self._head_size = head_size
        self._tail_size = tail_size

    async def add_message(self, message: Message) -> None:
        """Add a message to the memory."""
        self._messages.append(message)

    async def get_messages(self) -> List[Message]:
        """Get at most the first `head_size` and the last `tail_size` messages,
        with a placeholder message in between noting how many were skipped."""
        head_messages = self._messages[: self._head_size]
        # Drop a trailing function call from the head: its matching result
        # message would be missing from the view.
        if head_messages and isinstance(head_messages[-1], FunctionCallMessage):
            head_messages = head_messages[:-1]

        # Fixed: with tail_size == 0 the slice [-0:] returned the entire
        # history instead of an empty tail.
        tail_messages = self._messages[-self._tail_size :] if self._tail_size > 0 else []
        # Drop a leading function execution result from the tail: its matching
        # call message would be missing from the view.
        if tail_messages and isinstance(tail_messages[0], FunctionExecutionResultMessage):
            tail_messages = tail_messages[1:]

        num_skipped = len(self._messages) - self._head_size - self._tail_size
        if num_skipped <= 0:
            # Not enough messages to overflow the head and tail; return all.
            return self._messages

        placeholder_messages = [TextMessage(content=f"Skipped {num_skipped} messages.", source="System")]
        return head_messages + placeholder_messages + tail_messages

    async def clear(self) -> None:
        """Clear the message memory."""
        self._messages = []

    def save_state(self) -> Mapping[str, Any]:
        """Serialize the memory; returns a shallow copy of the message list."""
        # Fixed: the original also saved self._placeholder_message, an
        # attribute that is never initialized, so save_state always raised
        # AttributeError on a freshly constructed instance.
        return {
            "messages": list(self._messages),
            "head_size": self._head_size,
            "tail_size": self._tail_size,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore state produced by :meth:`save_state`."""
        self._messages = state["messages"]
        self._head_size = state["head_size"]
        self._tail_size = state["tail_size"]
|
||||
@@ -0,0 +1,4 @@
|
||||
from ._group_chat_manager import GroupChatManager
|
||||
from ._orchestrator_chat import OrchestratorChat
|
||||
|
||||
__all__ = ["GroupChatManager", "OrchestratorChat"]
|
||||
@@ -0,0 +1,153 @@
|
||||
import logging
|
||||
from typing import Any, Callable, List, Mapping
|
||||
|
||||
from autogen_core.base import AgentId, AgentProxy, MessageContext
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import ChatCompletionClient
|
||||
|
||||
from ..types import (
|
||||
Message,
|
||||
MultiModalMessage,
|
||||
PublishNow,
|
||||
Reset,
|
||||
TextMessage,
|
||||
)
|
||||
from ._group_chat_utils import select_speaker
|
||||
|
||||
logger = logging.getLogger("autogen_core.events")
|
||||
|
||||
|
||||
class GroupChatManager(RoutedAgent):
    """An agent that manages a group chat through event-driven orchestration.

    Args:
        description (str): The description of the agent.
        participants (List[AgentId]): The list of participants in the group chat.
        memory (ChatMemory[Message]): The memory to store and retrieve messages.
        model_client (ChatCompletionClient, optional): The client to use for the model.
            If provided, the agent will use the model to select the next speaker.
            If not provided, the agent will select the next speaker from the list of participants
            according to the order given.
        termination_word (str, optional): The word that terminates the group chat. Defaults to "TERMINATE".
        transitions (Mapping[AgentId, List[AgentId]], optional): The transitions between agents.
            Keys are the agents, and values are the list of agents that can follow the key agent. Defaults to {}.
            If provided, the group chat manager will use the transitions to select the next speaker.
            If a transition is not provided for an agent, the choices fallback to all participants.
            If no model client is provided, a transition must have a single value.
        on_message_received (Callable[[TextMessage | MultiModalMessage], None], optional):
            A custom handler to call when a message is received. Defaults to None.
    """

    def __init__(
        self,
        description: str,
        participants: List[AgentId],
        memory: ChatMemory[Message],
        model_client: ChatCompletionClient | None = None,
        termination_word: str = "TERMINATE",
        transitions: Mapping[AgentId, List[AgentId]] = {},
        on_message_received: Callable[[TextMessage | MultiModalMessage], None] | None = None,
    ):
        super().__init__(description)
        self._memory = memory
        self._client = model_client
        self._participants = participants
        self._participant_proxies = {p: AgentProxy(p, self.runtime) for p in participants}
        self._termination_word = termination_word
        # Validate the transition table up front so misconfiguration fails fast.
        for key, value in transitions.items():
            if not value:
                # Make sure no empty transitions are provided.
                raise ValueError(f"Empty transition list provided for {key.type}.")
            if key not in participants:
                # Make sure all keys are in the list of participants.
                raise ValueError(f"Transition key {key.type} not found in participants.")
            for v in value:
                if v not in participants:
                    # Make sure all values are in the list of participants.
                    raise ValueError(f"Transition value {v.type} not found in participants.")
            if self._client is None:
                # Without a model there is no way to choose among multiple successors.
                if len(value) > 1:
                    raise ValueError(f"Multiple transitions provided for {key.type} but no model client is provided.")
        # Fixed: the attribute was previously misspelled "_tranistions".
        self._transitions = transitions
        self._on_message_received = on_message_received

    @message_handler()
    async def on_reset(self, message: Reset, ctx: MessageContext) -> None:
        """Handle a reset message. This method clears the memory."""
        await self._memory.clear()

    @message_handler()
    async def on_new_message(self, message: TextMessage | MultiModalMessage, ctx: MessageContext) -> None:
        """Handle a message. This method adds the message to the memory, selects the next speaker,
        and sends a message to the selected speaker to publish a response."""
        # Call the custom on_message_received handler if provided.
        if self._on_message_received is not None:
            self._on_message_received(message)

        # Check if the message contains the termination word.
        if isinstance(message, TextMessage) and self._termination_word in message.content:
            # Terminate the group chat by not selecting the next speaker.
            return

        # Save the message to chat memory.
        await self._memory.add_message(message)

        # Get the last speaker.
        last_speaker_name = message.source
        last_speaker_index = next((i for i, p in enumerate(self._participants) if p.type == last_speaker_name), None)

        # Get the candidates for the next speaker.
        if last_speaker_index is not None:
            logger.debug(f"Last speaker: {last_speaker_name}")
            last_speaker = self._participants[last_speaker_index]
            if self._transitions.get(last_speaker) is not None:
                # Restrict candidates to the configured successors, preserving
                # the participant order.
                candidates = [c for c in self._participants if c in self._transitions[last_speaker]]
            else:
                candidates = self._participants
        else:
            candidates = self._participants
        logger.debug(f"Group chat manager next speaker candidates: {[c.type for c in candidates]}")

        # Select speaker.
        if len(candidates) == 0:
            speaker = None
        elif len(candidates) == 1:
            speaker = candidates[0]
        else:
            # More than one candidate, select the next speaker.
            if self._client is None:
                # If no model client is provided, candidates must be the list of participants.
                assert candidates == self._participants
                # Round-robin: pick the participant after the last speaker.
                if last_speaker_index is not None:
                    next_speaker_index = (last_speaker_index + 1) % len(self._participants)
                    speaker = self._participants[next_speaker_index]
                else:
                    # If no last speaker, select the first speaker.
                    speaker = candidates[0]
            else:
                # If a model client is provided, select the speaker based on the transitions and the model.
                speaker_index = await select_speaker(
                    self._memory, self._client, [self._participant_proxies[c] for c in candidates]
                )
                speaker = candidates[speaker_index]

        logger.debug(f"Group chat manager selected speaker: {speaker.type if speaker is not None else None}")

        if speaker is not None:
            # Send the message to the selected speaker to ask it to publish a response.
            await self.send_message(PublishNow(), speaker)

    def save_state(self) -> Mapping[str, Any]:
        """Serialize the memory and the termination word."""
        return {
            "memory": self._memory.save_state(),
            "termination_word": self._termination_word,
        }

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore state produced by :meth:`save_state`."""
        self._memory.load_state(state["memory"])
        self._termination_word = state["termination_word"]
|
||||
@@ -0,0 +1,90 @@
|
||||
"""Credit to the original authors: https://github.com/microsoft/autogen/blob/main/autogen/agentchat/groupchat.py"""
|
||||
|
||||
import re
|
||||
from typing import Dict, List
|
||||
|
||||
from autogen_core.base import AgentProxy
|
||||
from autogen_core.components.memory import ChatMemory
|
||||
from autogen_core.components.models import ChatCompletionClient, SystemMessage
|
||||
|
||||
from ..types import Message, TextMessage
|
||||
|
||||
|
||||
async def select_speaker(memory: ChatMemory[Message], client: ChatCompletionClient, agents: List[AgentProxy]) -> int:
    """Select the next speaker in a group chat using a ChatCompletion client.

    Builds a role-play prompt from the agents' metadata and the chat history,
    asks the model to name exactly one role, and returns the index of that
    agent within ``agents``.

    Raises:
        ValueError: if the model response does not mention exactly one agent,
            or the mentioned name does not match any candidate agent.
    """
    # TODO: Handle multi-modal messages.

    # Fetch each agent's metadata once up front; it is needed several times
    # below (previously it was awaited twice per agent while building roles).
    metadata = [await agent.metadata for agent in agents]

    # Construct formatted current message history.
    history_messages: List[str] = []
    for msg in await memory.get_messages():
        assert isinstance(msg, TextMessage)
        history_messages.append(f"{msg.source}: {msg.content}")
    history = "\n".join(history_messages)

    # Construct agent roles, one "type: description" entry per line.
    roles = "\n".join([f"{md['type']}: {md['description']}".strip() for md in metadata])

    # Construct agent list.
    participants = str([md["type"] for md in metadata])

    # Ask the model to select the next speaker.
    select_speaker_prompt = f"""You are in a role play game. The following roles are available:
{roles}.
Read the following conversation. Then select the next role from {participants} to play. Only return the role.

{history}

Read the above conversation. Then select the next role from {participants} to play. Only return the role.
"""
    select_speaker_messages = [SystemMessage(select_speaker_prompt)]
    response = await client.create(messages=select_speaker_messages)
    assert isinstance(response.content, str)
    mentions = await mentioned_agents(response.content, agents)
    if len(mentions) != 1:
        raise ValueError(f"Expected exactly one agent to be mentioned, but got {mentions}")
    agent_name = list(mentions.keys())[0]

    # Map the selected name back to its index. Fixed: the original initialized
    # the index to 0 with a vacuous `assert agent_index is not None`, silently
    # selecting the first agent if the name failed to match; fail loudly instead.
    for i, md in enumerate(metadata):
        if md["type"] == agent_name:
            return i
    raise ValueError(f"Mentioned agent {agent_name!r} is not among the candidates {participants}")
|
||||
|
||||
|
||||
async def mentioned_agents(message_content: str, agents: List[AgentProxy]) -> Dict[str, int]:
    """Count how many times each agent is mentioned in ``message_content``.

    A mention matches (case-sensitively) any of:
    - the agent's exact type name;
    - the name with underscores replaced by spaces
      (e.g. 'Story_writer' == 'Story writer');
    - the name with underscores escaped as ``\\_``
      (e.g. 'Story_writer' == 'Story\\_writer').

    Args:
        message_content: The text to scan for mentions.
        agents: Candidate agents; each one's metadata ``type`` is searched for.

    Returns:
        Dict: a counter for mentioned agents (agents with zero mentions are
        omitted).
    """
    mentions: Dict[str, int] = {}
    # Pad with spaces so the \W lookarounds can match at the string boundaries.
    padded_content = f" {message_content} "
    for proxy in agents:
        name = (await proxy.metadata)["type"]
        # Accept the exact name, underscores-as-spaces, and escaped underscores,
        # each bounded by non-word characters.
        alternatives = "|".join(
            (
                re.escape(name),
                re.escape(name.replace("_", " ")),
                re.escape(name.replace("_", r"\_")),
            )
        )
        pattern = r"(?<=\W)(" + alternatives + r")(?=\W)"
        count = len(re.findall(pattern, padded_content))
        if count > 0:
            mentions[name] = count
    return mentions
|
||||
@@ -0,0 +1,406 @@
|
||||
import json
from typing import Any, Optional, Sequence, Tuple

from autogen_core.base import AgentId, AgentRuntime, MessageContext
from autogen_core.components import RoutedAgent, message_handler

from ..types import Reset, RespondNow, ResponseFormat, TextMessage
|
||||
|
||||
__all__ = ["OrchestratorChat"]
|
||||
|
||||
|
||||
class OrchestratorChat(RoutedAgent):
    """An orchestrator that drives a team of specialist agents through a task.

    On receiving a task (as a ``TextMessage``), the orchestrator asks the
    planner for relevant facts and a plan, briefs the team, and then
    repeatedly asks the orchestrator agent to reflect on progress, pick the
    next speaker, and issue an instruction -- until the task is satisfied, an
    educated guess is produced, or ``max_turns`` is exhausted.
    """

    def __init__(
        self,
        description: str,
        runtime: AgentRuntime,
        orchestrator: AgentId,
        planner: AgentId,
        specialists: Sequence[AgentId],
        max_turns: int = 30,
        max_stalled_turns_before_retry: int = 2,
        max_retry_attempts: int = 1,
    ) -> None:
        """Initialize the orchestrator chat.

        Args:
            description: Human-readable description of this agent.
            runtime: Unused here; kept for interface compatibility (the base
                class exposes ``self.runtime``).
            orchestrator: Agent that reflects on progress and picks speakers.
            planner: Agent used to gather facts and devise plans.
            specialists: Worker agents that carry out instructions.
            max_turns: Hard cap on total conversation turns.
            max_stalled_turns_before_retry: Stalled turns tolerated before the
                facts and plan are rewritten and the outer loop restarts.
            max_retry_attempts: Retries allowed before resorting to an
                educated guess.
        """
        super().__init__(description)
        self._orchestrator = orchestrator
        self._planner = planner
        self._specialists = specialists
        self._max_turns = max_turns
        self._max_stalled_turns_before_retry = max_stalled_turns_before_retry
        self._max_retry_attempts_before_educated_guess = max_retry_attempts

    @property
    def children(self) -> Sequence[AgentId]:
        """All agents managed by this orchestrator."""
        return list(self._specialists) + [self._orchestrator, self._planner]

    @message_handler()
    async def on_text_message(
        self,
        message: TextMessage,
        ctx: MessageContext,
    ) -> TextMessage:
        """Run the orchestration loop for the task carried by ``message``.

        Returns a ``TextMessage`` describing success, an educated guess, or
        exhaustion of the turn budget.
        """
        # A task is received.
        task = message.content

        # Prepare the task: team roster, names, facts, and an initial plan.
        team, names, facts, plan = await self._prepare_task(task, message.source)

        # Outer loop: each iteration re-briefs the team with (possibly
        # rewritten) facts and plan.
        total_turns = 0
        retry_attempts = 0
        while total_turns < self._max_turns:
            # Reset all agents.
            for agent in [*self._specialists, self._orchestrator]:
                await (await self.send_message(Reset(), agent))

            # Create the task specs.
            task_specs = f"""
We are working to address the following user request:

{task}


To answer this request we have assembled the following team:

{team}

Some additional points to consider:

{facts}

{plan}
""".strip()

            # Send the task specs to the orchestrator and specialists.
            for agent in [*self._specialists, self._orchestrator]:
                await (await self.send_message(TextMessage(content=task_specs, source=self.metadata["type"]), agent))

            # Inner loop: reflect, dispatch, and track stalling.
            stalled_turns = 0
            while total_turns < self._max_turns:
                # Reflect on the task.
                data = await self._reflect_on_task(task, team, names, message.source)

                # Check if the request is satisfied.
                if data["is_request_satisfied"]["answer"]:
                    return TextMessage(
                        content=f"The task has been successfully addressed. {data['is_request_satisfied']['reason']}",
                        source=self.metadata["type"],
                    )

                # Update stalled turns: progress decays the counter, lack of
                # progress grows it.
                if data["is_progress_being_made"]["answer"]:
                    stalled_turns = max(0, stalled_turns - 1)
                else:
                    stalled_turns += 1

                # Handle retry.
                if stalled_turns > self._max_stalled_turns_before_retry:
                    # In a retry, we need to rewrite the facts and the plan.

                    # Rewrite the facts.
                    facts = await self._rewrite_facts(facts, message.source)

                    # Increment the retry attempts.
                    retry_attempts += 1

                    # Check if we should just guess.
                    if retry_attempts > self._max_retry_attempts_before_educated_guess:
                        # Make an educated guess.
                        educated_guess = await self._educated_guess(facts, message.source)
                        if educated_guess["has_educated_guesses"]["answer"]:
                            return TextMessage(
                                content=f"The task is addressed with an educated guess. {educated_guess['has_educated_guesses']['reason']}",
                                source=self.metadata["type"],
                            )

                    # Come up with a new plan.
                    plan = await self._rewrite_plan(team, message.source)

                    # Exit the inner loop to re-brief the team.
                    break

                # Get the subtask instruction for the next speaker.
                subtask = data["instruction_or_question"]["answer"]
                if subtask is None:
                    subtask = ""

                # Broadcast the instruction to all agents.
                for agent in [*self._specialists, self._orchestrator]:
                    await (
                        await self.send_message(
                            TextMessage(content=subtask, source=self.metadata["type"]),
                            agent,
                        )
                    )

                # Find the speaker chosen by the reflection step.
                try:
                    speaker = next(agent for agent in self._specialists if agent.type == data["next_speaker"]["answer"])
                except StopIteration as e:
                    raise ValueError(f"Invalid next speaker: {data['next_speaker']['answer']}") from e

                # Ask speaker to speak.
                speaker_response = await (await self.send_message(RespondNow(), speaker))
                assert speaker_response is not None

                # Update all other agents with the speaker's response.
                for agent in [agent for agent in self._specialists if agent != speaker] + [self._orchestrator]:
                    await (
                        await self.send_message(
                            TextMessage(
                                content=speaker_response.content,
                                source=speaker_response.source,
                            ),
                            agent,
                        )
                    )

                # Increment the total turns.
                total_turns += 1

        return TextMessage(
            content="The task was not addressed. The maximum number of turns was reached.",
            source=self.metadata["type"],
        )

    async def _prepare_task(self, task: str, sender: str) -> Tuple[str, str, str, str]:
        """Ask the planner for facts and a plan.

        Returns a tuple of (team description, comma-joined names, fact sheet,
        plan) used to brief the team.
        """
        # Reset planner.
        await (await self.send_message(Reset(), self._planner))

        # A reusable description of the team.
        team = "\n".join(
            [
                agent.type + ": " + (await self.runtime.agent_metadata(agent))["description"]
                for agent in self._specialists
            ]
        )
        names = ", ".join([agent.type for agent in self._specialists])

        # A place to store relevant facts.
        facts = ""

        # A place to store the plan.
        plan = ""

        # Start by writing what we know
        closed_book_prompt = f"""Below I will present you a request. Before we begin addressing the request, please answer the following pre-survey to the best of your ability. Keep in mind that you are Ken Jennings-level with trivia, and Mensa-level with puzzles, so there should be a deep well to draw from.

Here is the request:

{task}

Here is the pre-survey:

1. Please list any specific facts or figures that are GIVEN in the request itself. It is possible that there are none.
2. Please list any facts that may need to be looked up, and WHERE SPECIFICALLY they might be found. In some cases, authoritative sources are mentioned in the request itself.
3. Please list any facts that may need to be derived (e.g., via logical deduction, simulation, or computation)
4. Please list any facts that are recalled from memory, hunches, well-reasoned guesses, etc.

When answering this survey, keep in mind that "facts" will typically be specific names, dates, statistics, etc. Your answer should use headings:

1. GIVEN OR VERIFIED FACTS
2. FACTS TO LOOK UP
3. FACTS TO DERIVE
4. EDUCATED GUESSES
""".strip()

        # Ask the planner to obtain prior knowledge about facts.
        await (await self.send_message(TextMessage(content=closed_book_prompt, source=sender), self._planner))
        facts_response = await (await self.send_message(RespondNow(), self._planner))

        facts = str(facts_response.content)

        # Make an initial plan
        plan_prompt = f"""Fantastic. To address this request we have assembled the following team:

{team}

Based on the team composition, and known and unknown facts, please devise a short bullet-point plan for addressing the original request. Remember, there is no requirement to involve all team members -- a team member's particular expertise may not be needed for this task.""".strip()

        # Send the second message to the planner. Fixed: the inner future was
        # not awaited here, unlike every other send in this class, so the
        # RespondNow below could be issued before the prompt was processed.
        await (await self.send_message(TextMessage(content=plan_prompt, source=sender), self._planner))
        plan_response = await (await self.send_message(RespondNow(), self._planner))
        plan = str(plan_response.content)

        return team, names, facts, plan

    async def _reflect_on_task(
        self,
        task: str,
        team: str,
        names: str,
        sender: str,
    ) -> Any:
        """Ask the orchestrator agent for a JSON progress reflection.

        Retries with a corrective message until the response parses as JSON
        and matches the expected schema (see ``_validate_reflection``).
        """
        step_prompt = f"""
Recall we are working on the following request:

{task}

And we have assembled the following team:

{team}

To make progress on the request, please answer the following questions, including necessary reasoning:

- Is the request fully satisfied? (True if complete, or False if the original request has yet to be SUCCESSFULLY addressed)
- Are we making forward progress? (True if just starting, or recent messages are adding value. False if recent messages show evidence of being stuck in a reasoning or action loop, or there is evidence of significant barriers to success such as the inability to read from a required file)
- Who should speak next? (select from: {names})
- What instruction or question would you give this team member? (Phrase as if speaking directly to them, and include any specific information they may need)

Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA:

{{
    "is_request_satisfied": {{
        "reason": string,
        "answer": boolean
    }},
    "is_progress_being_made": {{
        "reason": string,
        "answer": boolean
    }},
    "next_speaker": {{
        "reason": string,
        "answer": string (select from: {names})
    }},
    "instruction_or_question": {{
        "reason": string,
        "answer": string
    }}
}}
""".strip()
        request = step_prompt
        while True:
            # Send the prompt (or a corrective note) to the orchestrator.
            await (await self.send_message(TextMessage(content=request, source=sender), self._orchestrator))
            # Request a JSON response.
            step_response = await (
                await self.send_message(
                    RespondNow(response_format=ResponseFormat.json_object),
                    self._orchestrator,
                )
            )
            # TODO: use typed dictionary.
            try:
                result = json.loads(str(step_response.content))
            except json.JSONDecodeError as e:
                request = f"Invalid JSON: {str(e)}"
                continue
            error = self._validate_reflection(result, names)
            if error is not None:
                # Feed the schema violation back to the model and retry.
                request = error
                continue
            return result

    @staticmethod
    def _validate_reflection(result: Any, names: str) -> Optional[str]:
        """Check a reflection JSON object against the expected schema.

        Returns a corrective message describing the first violation found, or
        None when ``result`` is valid. The error strings are kept identical to
        the original inline checks, since they are sent back to the model.
        """
        for key in ("is_request_satisfied", "is_progress_being_made", "next_speaker", "instruction_or_question"):
            if key not in result:
                return f"Missing key: {key}"
            value = result[key]
            if not isinstance(value, dict) or "answer" not in value or "reason" not in value:
                return f"Invalid value for key: {key}, expected 'answer' and 'reason'"
            # NOTE: substring containment against the comma-joined name list,
            # preserved from the original inline check.
            if key == "next_speaker" and value["answer"] not in names:
                return f"Invalid value for key: next_speaker, expected 'answer' in {names}"
        return None

    async def _rewrite_facts(self, facts: str, sender: str) -> str:
        """Ask the orchestrator agent to refresh the fact sheet after a stall."""
        new_facts_prompt = f"""It's clear we aren't making as much progress as we would like, but we may have learned something new. Please rewrite the following fact sheet, updating it to include anything new we have learned. This is also a good time to update educated guesses (please add or update at least one educated guess or hunch, and explain your reasoning).

{facts}
""".strip()
        # Send a message to the orchestrator.
        await (await self.send_message(TextMessage(content=new_facts_prompt, source=sender), self._orchestrator))
        # Request a response.
        new_facts_response = await (await self.send_message(RespondNow(), self._orchestrator))
        return str(new_facts_response.content)

    async def _educated_guess(self, facts: str, sender: str) -> Any:
        """Ask the orchestrator whether the facts support an educated guess.

        Returns the parsed JSON object with a "has_educated_guesses" entry.
        """
        educated_guess_prompt = f"""Given the following information

{facts}

Please answer the following question, including necessary reasoning:
- Do you have two or more congruent pieces of information that will allow you to make an educated guess for the original request? The educated guess MUST answer the question.
Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA:

{{
    "has_educated_guesses": {{
        "reason": string,
        "answer": boolean
    }}
}}
""".strip()
        request = educated_guess_prompt
        while True:
            # Send a message to the orchestrator.
            await (
                await self.send_message(
                    TextMessage(content=request, source=sender),
                    self._orchestrator,
                )
            )
            # Request a response.
            response = await (
                await self.send_message(
                    RespondNow(response_format=ResponseFormat.json_object),
                    self._orchestrator,
                )
            )
            try:
                result = json.loads(str(response.content))
            except json.JSONDecodeError as e:
                request = f"Invalid JSON: {str(e)}"
                continue
            # TODO: use typed dictionary.
            if "has_educated_guesses" not in result:
                request = "Missing key: has_educated_guesses"
                continue
            if (
                not isinstance(result["has_educated_guesses"], dict)
                or "answer" not in result["has_educated_guesses"]
                or "reason" not in result["has_educated_guesses"]
            ):
                request = "Invalid value for key: has_educated_guesses, expected 'answer' and 'reason'"
                continue
            return result

    async def _rewrite_plan(self, team: str, sender: str) -> str:
        """Ask the orchestrator agent for a fresh plan after a stall."""
        new_plan_prompt = f"""Please come up with a new plan expressed in bullet points. Keep in mind the following team composition, and do not involve any other outside people in the plan -- we cannot contact anyone else.

Team membership:
{team}
""".strip()
        # Send a message to the orchestrator.
        await (await self.send_message(TextMessage(content=new_plan_prompt, source=sender), self._orchestrator))
        # Request a response.
        new_plan_response = await (await self.send_message(RespondNow(), self._orchestrator))
        return str(new_plan_response.content)
|
||||
75
python/packages/autogen-core/samples/common/types.py
Normal file
75
python/packages/autogen-core/samples/common/types.py
Normal file
@@ -0,0 +1,75 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import List, Union
|
||||
|
||||
from autogen_core.components import FunctionCall, Image
|
||||
from autogen_core.components.models import FunctionExecutionResultMessage
|
||||
|
||||
|
||||
@dataclass(kw_only=True)
class BaseMessage:
    """Base class for chat messages exchanged between sample agents."""

    # Name of the agent that sent this message
    source: str


@dataclass
class TextMessage(BaseMessage):
    """A plain-text chat message."""

    # The text body of the message.
    content: str


@dataclass
class MultiModalMessage(BaseMessage):
    """A message whose content mixes text fragments and images."""

    # Ordered sequence of text parts and Image objects.
    content: List[Union[str, Image]]


@dataclass
class FunctionCallMessage(BaseMessage):
    """A message carrying one or more function calls requested by a model."""

    # The function calls to be executed.
    content: List[FunctionCall]


# Union of all message types exchanged between agents in these samples.
Message = Union[TextMessage, MultiModalMessage, FunctionCallMessage, FunctionExecutionResultMessage]
|
||||
|
||||
|
||||
class ResponseFormat(Enum):
    """Output format a responder should use: free text or a JSON object."""

    text = "text"
    json_object = "json_object"


@dataclass
class RespondNow:
    """A message to request a response from the addressed agent. The sender
    expects a response upon sending and waits for it synchronously."""

    # Format the addressed agent should respond in.
    response_format: ResponseFormat = field(default=ResponseFormat.text)


@dataclass
class PublishNow:
    """A message to request an event to be published to the addressed agent.
    Unlike RespondNow, the sender does not expect a response upon sending."""

    # Format the published response should use.
    response_format: ResponseFormat = field(default=ResponseFormat.text)
||||
|
||||
|
||||
# A marker message asking the addressed agent to clear its conversation state.
@dataclass
class Reset: ...


@dataclass
class ToolApprovalRequest:
    """A message to request approval for a tool call. The sender expects a
    response upon sending and waits for it synchronously."""

    # The function call awaiting approval.
    tool_call: FunctionCall


@dataclass
class ToolApprovalResponse:
    """A message to respond to a tool approval request. The response is sent
    synchronously."""

    # Identifier of the tool call this decision applies to.
    tool_call_id: str
    # Whether the call may proceed.
    approved: bool
    # Human-readable justification for the decision.
    reason: str
|
||||
142
python/packages/autogen-core/samples/common/utils.py
Normal file
142
python/packages/autogen-core/samples/common/utils.py
Normal file
@@ -0,0 +1,142 @@
|
||||
import os
|
||||
from typing import Any, List, Optional, Union
|
||||
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
AzureOpenAIChatCompletionClient,
|
||||
ChatCompletionClient,
|
||||
FunctionExecutionResult,
|
||||
FunctionExecutionResultMessage,
|
||||
LLMMessage,
|
||||
OpenAIChatCompletionClient,
|
||||
UserMessage,
|
||||
)
|
||||
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
|
||||
from typing_extensions import Literal
|
||||
|
||||
from .types import (
|
||||
FunctionCallMessage,
|
||||
Message,
|
||||
MultiModalMessage,
|
||||
TextMessage,
|
||||
)
|
||||
|
||||
|
||||
def convert_content_message_to_assistant_message(
    message: Union[TextMessage, MultiModalMessage, FunctionCallMessage],
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> Optional[AssistantMessage]:
    """Convert a content message into an ``AssistantMessage``.

    Text and function-call messages convert directly. Multimodal messages
    cannot be fully represented; ``handle_unrepresentable`` chooses whether to
    raise ("error"), drop the message ("ignore", returns None), or keep only
    the textual parts ("try_slice").
    """
    if isinstance(message, (TextMessage, FunctionCallMessage)):
        return AssistantMessage(content=message.content, source=message.source)
    if isinstance(message, MultiModalMessage):
        if handle_unrepresentable == "error":
            raise ValueError("Cannot represent multimodal message as AssistantMessage")
        if handle_unrepresentable == "ignore":
            return None
        if handle_unrepresentable == "try_slice":
            # Keep only the string parts of the multimodal payload.
            text_only = "".join(part for part in message.content if isinstance(part, str))
            return AssistantMessage(content=text_only, source=message.source)
    return None
|
||||
|
||||
|
||||
def convert_content_message_to_user_message(
|
||||
message: Union[TextMessage, MultiModalMessage, FunctionCallMessage],
|
||||
handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
|
||||
) -> Optional[UserMessage]:
|
||||
match message:
|
||||
case TextMessage() | MultiModalMessage():
|
||||
return UserMessage(content=message.content, source=message.source)
|
||||
case FunctionCallMessage():
|
||||
if handle_unrepresentable == "error":
|
||||
raise ValueError("Cannot represent multimodal message as UserMessage")
|
||||
elif handle_unrepresentable == "ignore":
|
||||
return None
|
||||
elif handle_unrepresentable == "try_slice":
|
||||
# TODO: what is a sliced function call?
|
||||
raise NotImplementedError("Sliced function calls not yet implemented")
|
||||
|
||||
|
||||
def convert_tool_call_response_message(
    message: FunctionExecutionResultMessage,
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> Optional[FunctionExecutionResultMessage]:
    """Rebuild a ``FunctionExecutionResultMessage`` with copied results.

    ``handle_unrepresentable`` is accepted for signature symmetry with the
    other converters but is not consulted here.
    """
    if isinstance(message, FunctionExecutionResultMessage):
        copied_results = [
            FunctionExecutionResult(content=item.content, call_id=item.call_id) for item in message.content
        ]
        return FunctionExecutionResultMessage(content=copied_results)
    return None
|
||||
|
||||
|
||||
def convert_messages_to_llm_messages(
    messages: List[Message],
    self_name: str,
    handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error",
) -> List[LLMMessage]:
    """Convert sample message types into LLM client messages.

    Messages authored by ``self_name`` become assistant messages, messages
    from anyone else become user messages, and tool-call result messages are
    passed through. Conversions that yield None (per ``handle_unrepresentable``)
    are dropped from the output.
    """
    result: List[LLMMessage] = []
    for message in messages:
        if isinstance(message, (TextMessage, MultiModalMessage, FunctionCallMessage)):
            if message.source == self_name:
                converted = convert_content_message_to_assistant_message(message, handle_unrepresentable)
            else:
                converted = convert_content_message_to_user_message(message, handle_unrepresentable)
        elif isinstance(message, FunctionExecutionResultMessage):
            converted = convert_tool_call_response_message(message, handle_unrepresentable)
        else:
            raise AssertionError("unreachable")
        if converted is not None:
            result.append(converted)
    return result
|
||||
|
||||
|
||||
def get_chat_completion_client_from_envs(**kwargs: Any) -> ChatCompletionClient:
    """Build a ChatCompletionClient from environment variables.

    ``OPENAI_API_TYPE`` selects the backend ("openai", the default, or
    "azure"). For OpenAI, ``OPENAI_API_KEY`` is required. For Azure,
    ``AZURE_OPENAI_API_KEY`` is used when present, otherwise an Azure AD
    bearer token; ``AZURE_OPENAI_API_ENDPOINT`` is required and
    ``AZURE_OPENAI_API_VERSION`` defaults to "2024-06-01".

    Raises:
        ValueError: when a required variable is missing or the API type is
            not recognized.
    """
    api_type = os.getenv("OPENAI_API_TYPE", "openai")
    if api_type == "openai":
        openai_key = os.getenv("OPENAI_API_KEY")
        if openai_key is None:
            raise ValueError("OPENAI_API_KEY is not set")
        kwargs["api_key"] = openai_key
        return OpenAIChatCompletionClient(**kwargs)
    if api_type == "azure":
        azure_key = os.getenv("AZURE_OPENAI_API_KEY")
        if azure_key is not None:
            kwargs["api_key"] = azure_key
        else:
            # No key present: fall back to Azure AD auth (e.g. a signed-in
            # Azure CLI session).
            kwargs["azure_ad_token_provider"] = get_bearer_token_provider(
                DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
            )
        endpoint = os.getenv("AZURE_OPENAI_API_ENDPOINT")
        if endpoint is None:
            raise ValueError("AZURE_OPENAI_API_ENDPOINT is not set")
        kwargs["azure_endpoint"] = endpoint
        kwargs["api_version"] = os.getenv("AZURE_OPENAI_API_VERSION", "2024-06-01")
        # Assume full model capabilities unless the caller specified otherwise.
        if kwargs.get("model_capabilities") is None:
            kwargs["model_capabilities"] = {
                "vision": True,
                "function_calling": True,
                "json_output": True,
            }
        return AzureOpenAIChatCompletionClient(**kwargs)  # type: ignore
    raise ValueError(f"Unknown API type: {api_type}")
|
||||
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
This example shows how to use direct messaging to implement
|
||||
a simple interaction between an inner and an outer agent.
|
||||
1. The outer agent receives a message, sends a message to the inner agent.
|
||||
2. The inner agent receives the message, processes it, and sends a response to the outer agent.
|
||||
3. The outer agent receives the response and processes it, and returns the final response.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentInstantiationContext, MessageContext
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
|
||||
|
||||
@dataclass
class MessageType:
    """Payload exchanged between the outer and inner example agents."""

    # The message text.
    body: str
    # Name/type of the sending agent.
    sender: str
|
||||
|
||||
|
||||
class Inner(RoutedAgent):
    """The inner agent: echoes each message back with an "Inner: " prefix."""

    def __init__(self) -> None:
        super().__init__("The inner agent")

    @message_handler()
    async def on_new_message(self, message: MessageType, ctx: MessageContext) -> MessageType:
        # Echo the body back, tagged with this agent's type as sender.
        reply_body = "Inner: " + message.body
        return MessageType(body=reply_body, sender=self.metadata["type"])
|
||||
|
||||
|
||||
class Outer(RoutedAgent):
    """The outer agent: forwards to the inner agent and wraps its reply."""

    def __init__(self, inner: AgentId) -> None:
        super().__init__("The outer agent")
        self._inner = inner

    @message_handler()
    async def on_new_message(self, message: MessageType, ctx: MessageContext) -> MessageType:
        # Relay the message to the inner agent and wait for its reply.
        inner_message = await self.send_message(message, self._inner)
        assert isinstance(inner_message, MessageType)
        return MessageType(body=f"Outer: {inner_message.body}", sender=self.metadata["type"])
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire up the runtime, run one outer->inner round trip, and shut down."""
    runtime = SingleThreadedAgentRuntime()
    await runtime.register("inner", Inner)
    # The outer agent must point at the *inner* agent (same key) as its peer.
    # Fixed: this previously constructed AgentId("outer", ...), which would
    # make the outer agent send messages to itself instead of the inner agent.
    await runtime.register("outer", lambda: Outer(AgentId("inner", AgentInstantiationContext.current_agent_id().key)))
    outer = AgentId("outer", "default")

    # Start message processing before sending.
    runtime.start()

    response = await runtime.send_message(MessageType(body="Hello", sender="external"), outer)
    print(response)
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: enable debug logging for autogen_core and run the demo.
    # (Removed a redundant local `import logging`; the module already imports
    # logging at the top of the file.)
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,71 @@
|
||||
"""
|
||||
This example shows how to use direct messaging to implement
|
||||
a simple chat completion agent.
|
||||
The agent receives a message from the main function, sends it to the
|
||||
chat completion model, and returns the response to the main function.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId
|
||||
from autogen_core.components import RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
ChatCompletionClient,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A simple text payload exchanged with the chat completion agent."""

    # The message text.
    content: str
|
||||
|
||||
|
||||
class ChatCompletionAgent(RoutedAgent):
    """A direct-messaging agent that answers via a chat completion model."""

    def __init__(self, description: str, model_client: ChatCompletionClient) -> None:
        super().__init__(description)
        self._system_messages = [SystemMessage("You are a helpful AI assistant.")]
        self._model_client = model_client

    @message_handler
    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:
        # Wrap the incoming text as a user message and query the model.
        prompt = self._system_messages + [UserMessage(content=message.content, source="User")]
        response = await self._model_client.create(prompt)
        assert isinstance(response.content, str)
        return Message(content=response.content)
|
||||
|
||||
|
||||
async def main() -> None:
    """Register a chat completion agent, send it one message, print the reply."""
    runtime = SingleThreadedAgentRuntime()

    # Register the agent type with a factory; an instance is created on demand.
    await runtime.register(
        "chat_agent",
        lambda: ChatCompletionAgent("Chat agent", get_chat_completion_client_from_envs(model="gpt-4o-mini")),
    )
    chat_agent_id = AgentId("chat_agent", "default")

    runtime.start()

    # Direct messaging: await the agent's response to a single request.
    reply = await runtime.send_message(
        Message(content="Hello, what are some fun things to do in Seattle?"), chat_agent_id
    )
    assert isinstance(reply, Message)
    print(reply.content)

    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Warnings only by default, but surface debug output from the
    # autogen_core library so the agent message flow is visible.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
125
python/packages/autogen-core/samples/core/two_agents_pub_sub.py
Normal file
125
python/packages/autogen-core/samples/core/two_agents_pub_sub.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""
|
||||
This example shows how to use publish-subscribe to implement a simple
|
||||
interaction between two agents that use a chat completion model to respond to messages.
|
||||
|
||||
1. The main function sends a message to Jack to start the conversation.
|
||||
2. The Jack agent receives the message, generates a response using a chat completion model,
|
||||
and publishes the response.
|
||||
3. The Cathy agent receives the message, generates a response using a chat completion model,
|
||||
and publishes the response.
|
||||
4. The conversation continues until a message with termination word is received by any agent.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A chat message published between the two agents."""

    # Name of the sender (an agent type or "User").
    source: str
    # The plain-text body of the message.
    content: str
|
||||
|
||||
|
||||
class ChatCompletionAgent(RoutedAgent):
    """An agent that uses a chat completion model to respond to messages.
    It keeps a memory of the conversation and uses it to generate responses.
    It publishes a termination message when the termination word is mentioned."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
        termination_word: str,
    ) -> None:
        super().__init__(description)
        self._system_messages = system_messages
        self._model_client = model_client
        # Full conversation history, oldest first.
        self._memory: List[Message] = []
        # Seeing this word in an incoming message ends the conversation.
        self._termination_word = termination_word

    @message_handler
    async def handle_message(self, message: Message, ctx: MessageContext) -> None:
        """Record the message and, unless it terminates the chat, publish a model reply."""
        self._memory.append(message)
        if self._termination_word in message.content:
            return
        # Replay up to the last ten messages, attributing our own past turns to
        # the assistant role and everything else to the user role.
        own_type = self.metadata["type"]
        history: List[LLMMessage] = [
            AssistantMessage(content=past.content, source=own_type)
            if past.source == own_type
            else UserMessage(content=past.content, source=past.source)
            for past in self._memory[-10:]
        ]
        completion = await self._model_client.create(self._system_messages + history)
        assert isinstance(completion.content, str)

        # Only publish when the message arrived via a topic.
        if ctx.topic_id is not None:
            await self.publish_message(
                Message(content=completion.content, source=own_type), topic_id=DefaultTopicId()
            )
|
||||
|
||||
|
||||
async def main() -> None:
    """Run a two-agent publish-subscribe conversation between Jack and Cathy.

    Registers both agents with a default subscription so each receives the
    other's published messages, kicks the conversation off with a direct
    message to Jack, and runs until no more messages are in flight.
    """
    # Create the runtime.
    runtime = SingleThreadedAgentRuntime()

    # Register the agents. Each subscribes to the default topic so it sees
    # every message the other publishes.
    await runtime.register(
        "Jack",
        lambda: ChatCompletionAgent(
            description="Jack a comedian",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            system_messages=[
                SystemMessage("You are a comedian likes to make jokes. " "When you are done talking, say 'TERMINATE'.")
            ],
            termination_word="TERMINATE",
        ),
        lambda: [DefaultSubscription()],
    )
    await runtime.register(
        "Cathy",
        lambda: ChatCompletionAgent(
            description="Cathy a poet",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            system_messages=[
                SystemMessage("You are a poet likes to write poems. " "When you are done talking, say 'TERMINATE'.")
            ],
            termination_word="TERMINATE",
        ),
        lambda: [DefaultSubscription()],
    )

    runtime.start()

    # Send a message to Jack to start the conversation.
    # BUG FIX: the agent type must match the registered name exactly --
    # this previously addressed "jack" while the agent was registered
    # as "Jack", so the kickoff message had no recipient.
    message = Message(content="Can you tell me something fun about SF?", source="User")
    await runtime.send_message(message, AgentId("Jack", "default"))

    # Process messages until the conversation goes idle.
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Warnings only by default, but surface debug output from the
    # autogen_core library so the agent message flow is visible.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
252
python/packages/autogen-core/samples/demos/assistant.py
Normal file
252
python/packages/autogen-core/samples/demos/assistant.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""This is an example of a terminal-based ChatGPT clone
|
||||
using an OpenAIAssistantAgent and event-based orchestration."""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from typing import List
|
||||
|
||||
import aiofiles
|
||||
import openai
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentRuntime, MessageContext
|
||||
from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler
|
||||
from openai import AsyncAssistantEventHandler
|
||||
from openai.types.beta.thread import ToolResources
|
||||
from openai.types.beta.threads import Message, Text, TextDelta
|
||||
from openai.types.beta.threads.runs import RunStep, RunStepDelta
|
||||
from typing_extensions import override
|
||||
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from autogen_core.base import AgentInstantiationContext
|
||||
from common.agents import OpenAIAssistantAgent
|
||||
from common.memory import BufferedChatMemory
|
||||
from common.patterns._group_chat_manager import GroupChatManager
|
||||
from common.types import PublishNow, TextMessage
|
||||
|
||||
sep = "-" * 50
|
||||
|
||||
|
||||
class UserProxyAgent(RoutedAgent):
    """A terminal-backed human user agent.

    Reads console input when asked to publish. Supports an
    '[upload code_interpreter | file_search filename]' command that attaches
    a local file to the assistant thread's code interpreter resources or to
    the shared vector store; plain text input is published to the default
    topic; 'exit' leaves the input loop.
    """

    def __init__(  # type: ignore
        self,
        client: openai.AsyncClient,  # type: ignore
        assistant_id: str,
        thread_id: str,
        vector_store_id: str,
    ) -> None:  # type: ignore
        super().__init__(
            description="A human user",
        )  # type: ignore
        self._client = client
        self._assistant_id = assistant_id
        self._thread_id = thread_id
        self._vector_store_id = vector_store_id

    @message_handler()  # type: ignore
    async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:
        """Ignore broadcast text; the assistant's output is streamed elsewhere."""
        # TODO: render image if message has image.
        # print(f"{message.source}: {message.content}")
        pass

    async def _get_user_input(self, prompt: str) -> str:
        """Read a line from stdin without blocking the event loop."""
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, input, prompt)

    @message_handler()  # type: ignore
    async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None:
        """Prompt the user until they publish a message, upload a file, or exit."""
        while True:
            user_input = await self._get_user_input(f"\n{sep}\nYou: ")
            # Parse upload file command '[upload code_interpreter | file_search filename]'.
            match = re.search(r"\[upload\s+(code_interpreter|file_search)\s+(.+)\]", user_input)
            if match:
                # Purpose of the file.
                purpose = match.group(1)
                # Extract file path.
                file_path = match.group(2)
                if not os.path.exists(file_path):
                    print(f"File not found: {file_path}")
                    continue
                # Filename.
                file_name = os.path.basename(file_path)
                # Read file content.
                async with aiofiles.open(file_path, "rb") as f:
                    file_content = await f.read()
                if purpose == "code_interpreter":
                    # Upload the file, then attach it to the thread's code
                    # interpreter resources.
                    file = await self._client.files.create(file=(file_name, file_content), purpose="assistants")
                    # Get existing file ids from tool resources.
                    thread = await self._client.beta.threads.retrieve(thread_id=self._thread_id)
                    tool_resources: ToolResources = thread.tool_resources if thread.tool_resources else ToolResources()
                    # BUG FIX: previously the new file id was dropped whenever
                    # the thread already had files attached, and a fresh thread
                    # (code_interpreter resources absent) failed an assertion.
                    # Append the new file to whatever is already attached.
                    existing_ids: List[str] = []
                    if tool_resources.code_interpreter is not None and tool_resources.code_interpreter.file_ids:
                        existing_ids = list(tool_resources.code_interpreter.file_ids)
                    file_ids = existing_ids + [file.id]
                    # Update thread with new file.
                    await self._client.beta.threads.update(
                        thread_id=self._thread_id,
                        tool_resources={"code_interpreter": {"file_ids": file_ids}},
                    )
                elif purpose == "file_search":
                    # Upload file to vector store.
                    file_batch = await self._client.beta.vector_stores.file_batches.upload_and_poll(
                        vector_store_id=self._vector_store_id,
                        files=[(file_name, file_content)],
                    )
                    assert file_batch.status == "completed"
                print(f"Uploaded file: {file_name}")
                continue
            elif user_input.startswith("[upload"):
                print("Invalid upload command. Please use '[upload code_interpreter | file_search filename]'.")
                continue
            elif user_input.strip().lower() == "exit":
                # Exit handler.
                return
            else:
                # Publish user input and exit handler.
                await self.publish_message(
                    TextMessage(content=user_input, source=self.metadata["type"]), topic_id=DefaultTopicId()
                )
                return
|
||||
|
||||
|
||||
class EventHandler(AsyncAssistantEventHandler):
    """Streams assistant output to the terminal as OpenAI run events arrive.

    Prints text deltas as they stream, wraps code-interpreter tool calls in a
    markdown code fence, and prints file-search citations after each message.
    """

    @override
    async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
        # Stream generated text to the terminal as it arrives.
        print(delta.value, end="", flush=True)

    @override
    async def on_run_step_created(self, run_step: RunStep) -> None:
        # Open a code fence when the assistant starts writing interpreter code.
        details = run_step.step_details
        if details.type == "tool_calls":
            for tool in details.tool_calls:
                if tool.type == "code_interpreter":
                    print("\nGenerating code to interpret:\n\n```python")

    @override
    async def on_run_step_done(self, run_step: RunStep) -> None:
        # Close the code fence once the interpreter code is complete.
        details = run_step.step_details
        if details.type == "tool_calls":
            for tool in details.tool_calls:
                if tool.type == "code_interpreter":
                    print("\n```\nExecuting code...")

    @override
    async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
        # Stream the interpreter code itself as it is generated.
        details = delta.step_details
        if details is not None and details.type == "tool_calls":
            for tool in details.tool_calls or []:
                if tool.type == "code_interpreter" and tool.code_interpreter and tool.code_interpreter.input:
                    print(tool.code_interpreter.input, end="", flush=True)

    @override
    async def on_message_created(self, message: Message) -> None:
        # Separator plus speaker label before each assistant message.
        print(f"{sep}\nAssistant:\n")

    @override
    async def on_message_done(self, message: Message) -> None:
        # print a citation to the file searched
        if not message.content:
            return
        content = message.content[0]
        if not content.type == "text":
            return
        text_content = content.text
        annotations = text_content.annotations
        citations: List[str] = []
        for index, annotation in enumerate(annotations):
            # Replace the inline annotation marker with a numbered reference.
            text_content.value = text_content.value.replace(annotation.text, f"[{index}]")
            if file_citation := getattr(annotation, "file_citation", None):
                client = openai.AsyncClient()
                cited_file = await client.files.retrieve(file_citation.file_id)
                citations.append(f"[{index}] {cited_file.filename}")
        if citations:
            print("\n".join(citations))
|
||||
|
||||
|
||||
async def assistant_chat(runtime: AgentRuntime) -> str:
    """Set up the assistant, user proxy, and group chat manager agents.

    Creates the OpenAI assistant, vector store, and thread, registers the
    three agents on the given runtime, and returns the agent type name of
    the user agent so the caller can address it.

    NOTE(review): the OpenAI resources are created with the synchronous
    client inside an async function, which briefly blocks the event loop;
    acceptable here since nothing else is running during setup.
    """
    oai_assistant = openai.beta.assistants.create(
        model="gpt-4-turbo",
        description="An AI assistant that helps with everyday tasks.",
        instructions="Help the user with their task.",
        tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
    )
    vector_store = openai.beta.vector_stores.create()
    # The thread's file_search tool reads from the vector store created above.
    thread = openai.beta.threads.create(
        tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
    )
    await runtime.register(
        "Assistant",
        lambda: OpenAIAssistantAgent(
            description="An AI assistant that helps with everyday tasks.",
            client=openai.AsyncClient(),
            assistant_id=oai_assistant.id,
            thread_id=thread.id,
            assistant_event_handler_factory=lambda: EventHandler(),
        ),
    )

    await runtime.register(
        "User",
        lambda: UserProxyAgent(
            client=openai.AsyncClient(),
            assistant_id=oai_assistant.id,
            thread_id=thread.id,
            vector_store_id=vector_store.id,
        ),
    )
    # Create a group chat manager to facilitate a turn-based conversation.
    await runtime.register(
        "GroupChatManager",
        lambda: GroupChatManager(
            description="A group chat manager.",
            memory=BufferedChatMemory(buffer_size=10),
            participants=[
                AgentId("Assistant", AgentInstantiationContext.current_agent_id().key),
                AgentId("User", AgentInstantiationContext.current_agent_id().key),
            ],
        ),
    )
    return "User"
|
||||
|
||||
|
||||
async def main() -> None:
    """Start the runtime, print usage, and hand control to the user agent."""
    usage = """Chat with an AI assistant backed by OpenAI Assistant API.
You can upload files to the assistant using the command:

[upload code_interpreter | file_search filename]

where 'code_interpreter' or 'file_search' is the purpose of the file and
'filename' is the path to the file. For example:

[upload code_interpreter data.csv]

This will upload data.csv to the assistant for use with the code interpreter tool.

Type "exit" to exit the chat.
"""
    runtime = SingleThreadedAgentRuntime()
    user = await assistant_chat(runtime)
    runtime.start()
    print(usage)
    # Request the user to start the conversation.
    await runtime.send_message(PublishNow(), AgentId(user, "default"))

    # TODO: have a way to exit the loop.
|
||||
|
||||
# TODO: have a way to exit the loop.
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Chat with an AI assistant.")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
    args = parser.parse_args()
    if args.verbose:
        # Verbose mode: debug-level autogen_core logs, mirrored to a file.
        logging.basicConfig(level=logging.WARNING)
        logging.getLogger("autogen_core").setLevel(logging.DEBUG)
        handler = logging.FileHandler("assistant.log")
        logging.getLogger("autogen_core").addHandler(handler)
    asyncio.run(main())
|
||||
231
python/packages/autogen-core/samples/demos/chess_game.py
Normal file
231
python/packages/autogen-core/samples/demos/chess_game.py
Normal file
@@ -0,0 +1,231 @@
|
||||
"""This is an example of simulating a chess game with two agents
|
||||
that play against each other, using tools to reason about the game state
|
||||
and make moves, and using a group chat manager to orchestrate the conversation."""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from typing import Annotated, Literal
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentInstantiationContext, AgentRuntime
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId
|
||||
from autogen_core.components.models import SystemMessage
|
||||
from autogen_core.components.tools import FunctionTool
|
||||
from chess import BLACK, SQUARE_NAMES, WHITE, Board, Move
|
||||
from chess import piece_name as get_piece_name
|
||||
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from autogen_core.base import AgentId
|
||||
from common.agents._chat_completion_agent import ChatCompletionAgent
|
||||
from common.memory import BufferedChatMemory
|
||||
from common.patterns._group_chat_manager import GroupChatManager
|
||||
from common.types import TextMessage
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
def validate_turn(board: Board, player: Literal["white", "black"]) -> None:
    """Validate that it is the player's turn to move.

    Raises ValueError when the last move on the board was made by the same
    color, or when black tries to move before white has opened the game.
    """
    if not board.move_stack:
        # Nobody has moved yet: white must open the game.
        if player != "white":
            raise ValueError("It is not your turn to move. Wait for white to move first.")
        return
    mover_color = board.color_at(board.peek().to_square)
    if player == "white" and mover_color == WHITE:
        raise ValueError("It is not your turn to move. Wait for black to move.")
    if player == "black" and mover_color == BLACK:
        raise ValueError("It is not your turn to move. Wait for white to move.")
|
||||
|
||||
|
||||
def get_legal_moves(
    board: Board, player: Literal["white", "black"]
) -> Annotated[str, "A list of legal moves in UCI format."]:
    """Get legal moves for the given player."""
    validate_turn(board, player)
    if player == "white":
        own_color = WHITE
    elif player == "black":
        own_color = BLACK
    else:
        raise ValueError("Invalid player, must be either 'black' or 'white'.")
    # Keep only the moves made by this player's pieces.
    moves = [m for m in board.legal_moves if board.color_at(m.from_square) == own_color]
    if not moves:
        return "No legal moves. The game is over."

    return "Possible moves are: " + ", ".join([m.uci() for m in moves])
|
||||
|
||||
|
||||
def get_board(board: Board) -> str:
    """Return the plain-text rendering of the current board position."""
    return str(board)
|
||||
|
||||
|
||||
def make_move(
    board: Board,
    player: Literal["white", "black"],
    thinking: Annotated[str, "Thinking for the move."],
    move: Annotated[str, "A move in UCI format."],
) -> Annotated[str, "Result of the move."]:
    """Make a move on the board and return a human-readable description."""
    validate_turn(board, player)
    chosen_move = Move.from_uci(move)
    board.push(chosen_move)

    # Print the move.
    print("-" * 50)
    print("Player:", player)
    print("Move:", chosen_move.uci())
    print("Thinking:", thinking)
    print("Board:")
    print(board.unicode(borders=True))

    # Describe the piece that just moved; white pieces get a capitalized name.
    moved_piece = board.piece_at(chosen_move.to_square)
    assert moved_piece is not None
    symbol = moved_piece.unicode_symbol()
    name = get_piece_name(moved_piece.piece_type)
    if symbol.isupper():
        name = name.capitalize()
    return f"Moved {name} ({symbol}) from {SQUARE_NAMES[chosen_move.from_square]} to {SQUARE_NAMES[chosen_move.to_square]}."
|
||||
|
||||
|
||||
async def chess_game(runtime: AgentRuntime) -> None:  # type: ignore
    """Create agents for a chess game and return the group chat."""

    # Create the board.
    # The board is shared mutable state: both players' tools close over it.
    board = Board()

    # Create tools for each player.
    # Each closure binds the board and the player's color so the FunctionTool
    # signatures stay simple for the model.
    # @functools.wraps(get_legal_moves)
    def get_legal_moves_black() -> str:
        # Legal moves for black on the shared board.
        return get_legal_moves(board, "black")

    # @functools.wraps(get_legal_moves)
    def get_legal_moves_white() -> str:
        # Legal moves for white on the shared board.
        return get_legal_moves(board, "white")

    # @functools.wraps(make_move)
    def make_move_black(
        thinking: Annotated[str, "Thinking for the move"],
        move: Annotated[str, "A move in UCI format"],
    ) -> str:
        # Apply black's move to the shared board.
        return make_move(board, "black", thinking, move)

    # @functools.wraps(make_move)
    def make_move_white(
        thinking: Annotated[str, "Thinking for the move"],
        move: Annotated[str, "A move in UCI format"],
    ) -> str:
        # Apply white's move to the shared board.
        return make_move(board, "white", thinking, move)

    def get_board_text() -> Annotated[str, "The current board state"]:
        # Plain-text rendering of the shared board.
        return get_board(board)

    black_tools = [
        FunctionTool(
            get_legal_moves_black,
            name="get_legal_moves",
            description="Get legal moves.",
        ),
        FunctionTool(
            make_move_black,
            name="make_move",
            description="Make a move.",
        ),
        FunctionTool(
            get_board_text,
            name="get_board",
            description="Get the current board state.",
        ),
    ]

    white_tools = [
        FunctionTool(
            get_legal_moves_white,
            name="get_legal_moves",
            description="Get legal moves.",
        ),
        FunctionTool(
            make_move_white,
            name="make_move",
            description="Make a move.",
        ),
        FunctionTool(
            get_board_text,
            name="get_board",
            description="Get the current board state.",
        ),
    ]

    await runtime.register(
        "PlayerBlack",
        lambda: ChatCompletionAgent(
            description="Player playing black.",
            system_messages=[
                SystemMessage(
                    content="You are a chess player and you play as black. "
                    "Use get_legal_moves() to get list of legal moves. "
                    "Use get_board() to get the current board state. "
                    "Think about your strategy and call make_move(thinking, move) to make a move."
                ),
            ],
            memory=BufferedChatMemory(buffer_size=10),
            model_client=get_chat_completion_client_from_envs(model="gpt-4o"),
            tools=black_tools,
        ),
        lambda: [DefaultSubscription()],
    )
    await runtime.register(
        "PlayerWhite",
        lambda: ChatCompletionAgent(
            description="Player playing white.",
            system_messages=[
                SystemMessage(
                    content="You are a chess player and you play as white. "
                    "Use get_legal_moves() to get list of legal moves. "
                    "Use get_board() to get the current board state. "
                    "Think about your strategy and call make_move(thinking, move) to make a move."
                ),
            ],
            memory=BufferedChatMemory(buffer_size=10),
            model_client=get_chat_completion_client_from_envs(model="gpt-4o"),
            tools=white_tools,
        ),
        lambda: [DefaultSubscription()],
    )
    # Create a group chat manager for the chess game to orchestrate a turn-based
    # conversation between the two agents.
    await runtime.register(
        "ChessGame",
        lambda: GroupChatManager(
            description="A chess game between two agents.",
            memory=BufferedChatMemory(buffer_size=10),
            participants=[
                AgentId("PlayerWhite", AgentInstantiationContext.current_agent_id().key),
                AgentId("PlayerBlack", AgentInstantiationContext.current_agent_id().key),
            ],  # white goes first
        ),
    )
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire up the chess agents and run the game until the runtime goes idle."""
    runtime = SingleThreadedAgentRuntime()
    await chess_game(runtime)
    runtime.start()
    # Publish an initial message to trigger the group chat manager to start orchestration.
    await runtime.publish_message(TextMessage(content="Game started.", source="System"), topic_id=DefaultTopicId())
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a chess game between two agents.")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
    args = parser.parse_args()
    if args.verbose:
        # Verbose mode: debug-level autogen_core logs, mirrored to a file.
        logging.basicConfig(level=logging.WARNING)
        logging.getLogger("autogen_core").setLevel(logging.DEBUG)
        handler = logging.FileHandler("chess_game.log")
        logging.getLogger("autogen_core").addHandler(handler)

    asyncio.run(main())
|
||||
190
python/packages/autogen-core/samples/demos/utils.py
Normal file
190
python/packages/autogen-core/samples/demos/utils.py
Normal file
@@ -0,0 +1,190 @@
|
||||
import asyncio
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
from asyncio import Future
|
||||
|
||||
from autogen_core.base import AgentRuntime, CancellationToken
|
||||
from autogen_core.components import DefaultTopicId, Image, RoutedAgent, message_handler
|
||||
from textual.app import App, ComposeResult
|
||||
from textual.containers import ScrollableContainer
|
||||
from textual.widgets import Button, Footer, Header, Input, Markdown, Static
|
||||
from textual_imageview.viewer import ImageViewer
|
||||
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from common.types import (
|
||||
MultiModalMessage,
|
||||
PublishNow,
|
||||
RespondNow,
|
||||
TextMessage,
|
||||
ToolApprovalRequest,
|
||||
ToolApprovalResponse,
|
||||
)
|
||||
|
||||
|
||||
class ChatAppMessage(Static):
    """A chat-list widget rendering one text or multi-modal message."""

    def __init__(self, message: TextMessage | MultiModalMessage) -> None:  # type: ignore
        self._message = message
        super().__init__()

    def on_mount(self) -> None:
        # Visual framing for each message bubble.
        self.styles.margin = 1
        self.styles.padding = 1
        self.styles.border = ("solid", "blue")

    def compose(self) -> ComposeResult:
        # Always show the sender, then the content: markdown for text,
        # an inline image viewer for images.
        if isinstance(self._message, TextMessage):
            yield Markdown(f"{self._message.source}:")
            yield Markdown(self._message.content)
        else:
            yield Markdown(f"{self._message.source}:")
            for content in self._message.content:
                if isinstance(content, str):
                    yield Markdown(content)
                elif isinstance(content, Image):
                    viewer = ImageViewer(content.image)
                    viewer.styles.min_width = 50
                    viewer.styles.min_height = 50
                    yield viewer
|
||||
|
||||
|
||||
class WelcomeMessage(Static):
    """A framed static widget for the app's welcome notice."""

    def on_mount(self) -> None:
        self.styles.margin = 1
        self.styles.padding = 1
        self.styles.border = ("solid", "blue")
|
||||
|
||||
|
||||
class ChatInput(Input):
    """The chat text input: focused on mount, cleared after each submit."""

    def on_mount(self) -> None:
        self.focus()

    def on_input_submitted(self, event: Input.Submitted) -> None:
        # The submitted value is handled by the app; just reset the field.
        self.clear()
|
||||
|
||||
|
||||
class ToolApprovalRequestNotice(Static):
    """A widget asking the user to approve or deny one tool call.

    Resolves the given future with a ToolApprovalResponse when a button
    is pressed, then removes itself from the message list.
    """

    def __init__(self, request: ToolApprovalRequest, response_future: Future[ToolApprovalResponse]) -> None:  # type: ignore
        self._tool_call = request.tool_call
        self._future = response_future
        super().__init__()

    def compose(self) -> ComposeResult:
        # Show the call name plus a preview of its arguments (truncated to 50 chars).
        yield Static(f"Tool call: {self._tool_call.name}, arguments: {self._tool_call.arguments[:50]}")
        yield Button("Approve", id="approve", variant="warning")
        yield Button("Deny", id="deny", variant="default")

    def on_mount(self) -> None:
        self.styles.margin = 1
        self.styles.padding = 1
        self.styles.border = ("solid", "red")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Resolve the pending approval future according to which button was pressed.
        button_id = event.button.id
        assert button_id is not None
        if button_id == "approve":
            self._future.set_result(ToolApprovalResponse(tool_call_id=self._tool_call.id, approved=True, reason=""))
        else:
            self._future.set_result(ToolApprovalResponse(tool_call_id=self._tool_call.id, approved=False, reason=""))
        self.remove()
|
||||
|
||||
|
||||
class TextualChatApp(App):  # type: ignore
    """A Textual app for a chat interface.

    Bridges the terminal UI and the agent runtime: user input is published
    to the default topic, and agents post their output back into the
    scrolling message list via post_runtime_message().
    """

    def __init__(self, runtime: AgentRuntime, welcoming_notice: str | None = None, user_name: str = "User") -> None:  # type: ignore
        self._runtime = runtime
        # Optional notice shown at the top of the chat; may be set later
        # via the welcoming_notice property before the app mounts.
        self._welcoming_notice = welcoming_notice
        # Source name attached to messages the user publishes.
        self._user_name = user_name
        super().__init__()

    def compose(self) -> ComposeResult:
        yield Header()
        yield Footer()
        yield ScrollableContainer(id="chat-messages")
        yield ChatInput()

    def on_mount(self) -> None:
        if self._welcoming_notice is not None:
            chat_messages = self.query_one("#chat-messages")
            notice = WelcomeMessage(self._welcoming_notice, id="welcome")
            chat_messages.mount(notice)

    @property
    def welcoming_notice(self) -> str | None:
        return self._welcoming_notice

    @welcoming_notice.setter
    def welcoming_notice(self, value: str) -> None:
        self._welcoming_notice = value

    async def on_input_submitted(self, event: Input.Submitted) -> None:
        """Publish each submitted input line to the runtime."""
        user_input = event.value
        await self.publish_user_message(user_input)

    async def post_request_user_input_notice(self) -> None:
        """Show a 'please type' notice; removed on the next user message."""
        chat_messages = self.query_one("#chat-messages")
        notice = Static("Please enter your input.", id="typing")
        chat_messages.mount(notice)
        notice.scroll_visible()

    async def publish_user_message(self, user_input: str) -> None:
        """Publish the user's text to the default topic on the runtime."""
        chat_messages = self.query_one("#chat-messages")
        # Remove all typing messages.
        chat_messages.query("#typing").remove()
        # Publish the user message to the runtime.
        await self._runtime.publish_message(
            TextMessage(source=self._user_name, content=user_input), topic_id=DefaultTopicId()
        )

    async def post_runtime_message(self, message: TextMessage | MultiModalMessage) -> None:  # type: ignore
        """Post a message from the agent runtime to the message list."""
        chat_messages = self.query_one("#chat-messages")
        msg = ChatAppMessage(message)
        chat_messages.mount(msg)
        msg.scroll_visible()

    async def handle_tool_approval_request(self, message: ToolApprovalRequest) -> ToolApprovalResponse:  # type: ignore
        """Show an approval notice and wait for the user's button press."""
        chat_messages = self.query_one("#chat-messages")
        future: Future[ToolApprovalResponse] = asyncio.get_event_loop().create_future()  # type: ignore
        tool_call_approval_notice = ToolApprovalRequestNotice(message, future)
        chat_messages.mount(tool_call_approval_notice)
        tool_call_approval_notice.scroll_visible()
        # Resolved by ToolApprovalRequestNotice.on_button_pressed.
        return await future
|
||||
|
||||
|
||||
class TextualUserAgent(RoutedAgent):  # type: ignore
    """An agent that is used to receive messages from the runtime.

    Forwards chat messages into the Textual app, prompts for input on
    RespondNow/PublishNow, and delegates tool approval to the app's UI.
    """

    def __init__(self, description: str, app: TextualChatApp) -> None:  # type: ignore
        super().__init__(description)
        self._app = app

    @message_handler  # type: ignore
    async def on_text_message(self, message: TextMessage, cancellation_token: CancellationToken) -> None:  # type: ignore
        await self._app.post_runtime_message(message)

    @message_handler  # type: ignore
    async def on_multi_modal_message(self, message: MultiModalMessage, cancellation_token: CancellationToken) -> None:  # type: ignore
        # Save the message to file.
        # Generate a random file name.
        for content in message.content:
            if isinstance(content, Image):
                filename = f"{self.metadata['type']}_{message.source}_{random.randbytes(16).hex()}.png"
                content.image.save(filename)
        await self._app.post_runtime_message(message)

    @message_handler  # type: ignore
    async def on_respond_now(self, message: RespondNow, cancellation_token: CancellationToken) -> None:  # type: ignore
        await self._app.post_request_user_input_notice()

    @message_handler  # type: ignore
    async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:  # type: ignore
        await self._app.post_request_user_input_notice()

    @message_handler  # type: ignore
    async def on_tool_approval_request(
        self, message: ToolApprovalRequest, cancellation_token: CancellationToken
    ) -> ToolApprovalResponse:
        return await self._app.handle_tool_approval_request(message)
|
||||
217
python/packages/autogen-core/samples/patterns/coder_executor.py
Normal file
217
python/packages/autogen-core/samples/patterns/coder_executor.py
Normal file
@@ -0,0 +1,217 @@
|
||||
"""
|
||||
This example shows how to use publish-subscribe to implement
|
||||
a simple interaction between a coder and an executor agent.
|
||||
1. The coder agent receives a task message, generates a code block,
|
||||
and publishes a code execution
|
||||
task message.
|
||||
2. The executor agent receives the code execution task message,
|
||||
executes the code block, and publishes a code execution task result message.
|
||||
3. The coder agent receives the code execution task result message, depending
|
||||
on the result: if the task is completed, it publishes a task completion message;
|
||||
otherwise, it generates a new code block and publishes a code execution task message.
|
||||
4. The process continues until the coder agent publishes a task completion message.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.code_executor import CodeBlock, CodeExecutor, LocalCommandLineCodeExecutor
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class TaskMessage:
    """A user task for the coder agent to solve."""

    content: str
|
||||
|
||||
|
||||
@dataclass
class TaskCompletion:
    """Published when the coder declares the task finished."""

    content: str
|
||||
|
||||
|
||||
@dataclass
class CodeExecutionTask:
    """A request to execute the code blocks embedded in ``content``."""

    session_id: str
    content: str
|
||||
|
||||
|
||||
@dataclass
class CodeExecutionTaskResult:
    """The outcome of running a ``CodeExecutionTask``."""

    session_id: str
    output: str
    exit_code: int
|
||||
|
||||
|
||||
class Coder(RoutedAgent):
    """An agent that writes code.

    On a ``TaskMessage`` it opens a new session, asks the model for a solution,
    and publishes a ``CodeExecutionTask``. On each ``CodeExecutionTaskResult``
    it reflects on the output and either publishes a ``TaskCompletion`` (when
    the model replies TERMINATE) or a new ``CodeExecutionTask``.
    """

    def __init__(
        self,
        model_client: ChatCompletionClient,
    ) -> None:
        super().__init__(description="A Python coder assistant.")
        self._model_client = model_client
        self._system_messages = [
            SystemMessage(
                """You are a helpful AI assistant.
Solve tasks using your coding and language skills.
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when everything is done."""
            )
        ]
        # A dictionary to store the messages for each task session.
        self._session_memory: Dict[str, List[LLMMessage]] = {}

    @message_handler
    async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:
        """Start a new session for the task and publish the first code block."""
        # Create a new session.
        session_id = str(uuid.uuid4())
        self._session_memory.setdefault(session_id, []).append(UserMessage(content=message.content, source="user"))

        # Make an inference to the model.
        response = await self._model_client.create(self._system_messages + self._session_memory[session_id])
        assert isinstance(response.content, str)
        self._session_memory[session_id].append(
            AssistantMessage(content=response.content, source=self.metadata["type"])
        )

        # Publish the code execution task.
        await self.publish_message(
            CodeExecutionTask(content=response.content, session_id=session_id),
            cancellation_token=ctx.cancellation_token,
            topic_id=DefaultTopicId(),
        )

    @message_handler
    async def handle_code_execution_result(self, message: CodeExecutionTaskResult, ctx: MessageContext) -> None:
        """Reflect on the execution output; finish or iterate with new code."""
        # Store the code execution output.
        self._session_memory[message.session_id].append(UserMessage(content=message.output, source="user"))

        # Make an inference to the model -- reflection on the code execution output happens here.
        response = await self._model_client.create(self._system_messages + self._session_memory[message.session_id])
        assert isinstance(response.content, str)
        self._session_memory[message.session_id].append(
            AssistantMessage(content=response.content, source=self.metadata["type"])
        )

        if "TERMINATE" in response.content:
            # If the task is completed, publish a message with the completion content.
            await self.publish_message(
                TaskCompletion(content=response.content),
                cancellation_token=ctx.cancellation_token,
                topic_id=DefaultTopicId(),
            )
            print("--------------------")
            print("Task completed:")
            print(response.content)
            # Fix: drop the finished session so the per-session history does not
            # grow without bound across tasks.
            self._session_memory.pop(message.session_id, None)
            return

        # Publish the code execution task.
        await self.publish_message(
            CodeExecutionTask(content=response.content, session_id=message.session_id),
            cancellation_token=ctx.cancellation_token,
            topic_id=DefaultTopicId(),
        )
|
||||
|
||||
|
||||
class Executor(RoutedAgent):
    """An agent that executes the Markdown code blocks it receives."""

    def __init__(self, executor: CodeExecutor) -> None:
        super().__init__(description="A code executor agent.")
        self._executor = executor

    @message_handler
    async def handle_code_execution(self, message: CodeExecutionTask, ctx: MessageContext) -> None:
        """Run the code blocks found in the task and publish the result."""
        blocks = self._extract_code_blocks(message.content)
        if not blocks:
            # No executable content: report an error result instead of running.
            await self.publish_message(
                CodeExecutionTaskResult(
                    output="Error: no Markdown code block found.", exit_code=1, session_id=message.session_id
                ),
                cancellation_token=ctx.cancellation_token,
                topic_id=DefaultTopicId(),
            )
            return
        # Execute the extracted blocks with the configured code executor.
        outcome = await self._executor.execute_code_blocks(
            code_blocks=blocks, cancellation_token=ctx.cancellation_token
        )
        # Publish the execution outcome back onto the topic.
        await self.publish_message(
            CodeExecutionTaskResult(output=outcome.output, exit_code=outcome.exit_code, session_id=message.session_id),
            cancellation_token=ctx.cancellation_token,
            topic_id=DefaultTopicId(),
        )

    def _extract_code_blocks(self, markdown_text: str) -> List[CodeBlock]:
        """Pull every fenced code block (with optional language tag) out of the text."""
        fence = re.compile(r"```(?:\s*([\w\+\-]+))?\n([\s\S]*?)```")
        blocks: List[CodeBlock] = []
        for lang, body in fence.findall(markdown_text):
            blocks.append(CodeBlock(code=body, language=lang.strip() if lang else ""))
        return blocks
|
||||
|
||||
|
||||
async def main(task: str, temp_dir: str) -> None:
    """Wire up the coder/executor pair, publish the task, and run until idle."""
    runtime = SingleThreadedAgentRuntime()

    # Register both agents with default subscriptions so each sees the
    # other's published messages.
    await runtime.register(
        "coder",
        lambda: Coder(model_client=get_chat_completion_client_from_envs(model="gpt-4-turbo")),
        lambda: [DefaultSubscription()],
    )
    await runtime.register(
        "executor",
        lambda: Executor(executor=LocalCommandLineCodeExecutor(work_dir=temp_dir)),
        lambda: [DefaultSubscription()],
    )
    runtime.start()

    # Kick off the interaction, then wait for all message traffic to finish.
    await runtime.publish_message(TaskMessage(content=task), topic_id=DefaultTopicId())
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging
    from datetime import datetime

    # Surface autogen-core debug logs while keeping everything else quiet.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)

    task = f"Today is {datetime.today()}, create a plot of NVDA and TSLA stock prices YTD using yfinance."
    asyncio.run(main(task, "."))
|
||||
295
python/packages/autogen-core/samples/patterns/coder_reviewer.py
Normal file
295
python/packages/autogen-core/samples/patterns/coder_reviewer.py
Normal file
@@ -0,0 +1,295 @@
|
||||
"""
|
||||
This example shows how to use publish-subscribe to implement
|
||||
a simple interaction between a coder and a reviewer agent.
|
||||
1. The coder agent receives a code writing task message, generates a code block,
|
||||
and publishes a code review task message.
|
||||
2. The reviewer agent receives the code review task message, reviews the code block,
|
||||
and publishes a code review result message.
|
||||
3. The coder agent receives the code review result message, depending on the result:
|
||||
if the code is approved, it publishes a code writing result message; otherwise, it generates
|
||||
a new code block and publishes a code review task message.
|
||||
4. The process continues until the coder agent publishes a code writing result message.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Union
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class CodeWritingTask:
    """A request for the coder agent to write code for ``task``."""

    task: str
|
||||
|
||||
|
||||
@dataclass
class CodeWritingResult:
    """The final, approved code together with its task and review."""

    task: str
    code: str
    review: str
|
||||
|
||||
|
||||
@dataclass
class CodeReviewTask:
    """A request for the reviewer agent to review ``code``."""

    session_id: str
    code_writing_task: str
    code_writing_scratchpad: str
    code: str
|
||||
|
||||
|
||||
@dataclass
class CodeReviewResult:
    """The reviewer's verdict on a ``CodeReviewTask``."""

    review: str
    session_id: str
    approved: bool
|
||||
|
||||
|
||||
class ReviewerAgent(RoutedAgent):
    """An agent that performs code review tasks."""

    def __init__(
        self,
        description: str,
        model_client: ChatCompletionClient,
    ) -> None:
        super().__init__(description)
        self._system_messages = [
            SystemMessage(
                content="""You are a code reviewer. You focus on correctness, efficiency and safety of the code.
Respond using the following JSON format:
{
    "correctness": "<Your comments>",
    "efficiency": "<Your comments>",
    "safety": "<Your comments>",
    "approval": "<APPROVE or REVISE>",
    "suggested_changes": "<Your comments>"
}
""",
            )
        ]
        self._model_client = model_client

    @message_handler
    async def handle_code_review_task(self, message: CodeReviewTask, ctx: MessageContext) -> None:
        """Review the submitted code and publish an approval/revision verdict."""
        prompt = f"""The problem statement is: {message.code_writing_task}
The code is:
```
{message.code}
```
Please review the code and provide feedback.
"""
        # Ask the model for a JSON-formatted review.
        completion = await self._model_client.create(
            self._system_messages + [UserMessage(content=prompt, source=self.metadata["type"])]
        )
        assert isinstance(completion.content, str)
        # TODO: use structured generation library e.g. guidance to ensure the response is in the expected format.
        verdict = json.loads(completion.content)
        # Flatten the JSON fields into a human-readable review text.
        review_text = "Code review:\n" + "\n".join([f"{k}: {v}" for k, v in verdict.items()])
        is_approved = verdict["approval"].lower().strip() == "approve"
        await self.publish_message(
            CodeReviewResult(
                review=review_text,
                approved=is_approved,
                session_id=message.session_id,
            ),
            topic_id=DefaultTopicId(),
        )
|
||||
|
||||
|
||||
class CoderAgent(RoutedAgent):
    """An agent that performs code writing tasks.

    On a ``CodeWritingTask`` it drafts code and publishes a ``CodeReviewTask``.
    On each ``CodeReviewResult`` it either publishes the final
    ``CodeWritingResult`` (approved) or revises the code and re-submits it for
    review.
    """

    def __init__(
        self,
        description: str,
        model_client: ChatCompletionClient,
    ) -> None:
        super().__init__(
            description,
        )
        self._system_messages = [
            SystemMessage(
                content="""You are a proficient coder. You write code to solve problems.
Work with the reviewer to improve your code.
Always put all finished code in a single Markdown code block.
For example:
```python
def hello_world():
    print("Hello, World!")
```

Respond using the following format:

Thoughts: <Your comments>
Code: <Your code>
""",
            )
        ]
        self._model_client = model_client
        # Per-session history of the task, review requests, and review results.
        self._session_memory: Dict[str, List[CodeWritingTask | CodeReviewTask | CodeReviewResult]] = {}

    @message_handler
    async def handle_code_writing_task(
        self,
        message: CodeWritingTask,
        ctx: MessageContext,
    ) -> None:
        """Write a first draft of the code and send it out for review."""
        # Store the messages in a temporary memory for this request only.
        session_id = str(uuid.uuid4())
        self._session_memory.setdefault(session_id, []).append(message)
        # Generate a response using the chat completion API.
        response = await self._model_client.create(
            self._system_messages + [UserMessage(content=message.task, source=self.metadata["type"])]
        )
        assert isinstance(response.content, str)
        # Extract the code block from the response.
        code_block = self._extract_code_block(response.content)
        if code_block is None:
            raise ValueError("Code block not found.")
        # Create a code review task.
        code_review_task = CodeReviewTask(
            session_id=session_id,
            code_writing_task=message.task,
            code_writing_scratchpad=response.content,
            code=code_block,
        )
        # Store the code review task in the session memory.
        self._session_memory[session_id].append(code_review_task)
        # Publish a code review task.
        await self.publish_message(
            code_review_task,
            topic_id=DefaultTopicId(),
        )

    @message_handler
    async def handle_code_review_result(self, message: CodeReviewResult, ctx: MessageContext) -> None:
        """On approval publish the final result; otherwise revise and re-submit."""
        # Store the review result in the session memory.
        self._session_memory[message.session_id].append(message)
        # Obtain the most recent review request from previous messages.
        review_request = next(
            m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, CodeReviewTask)
        )
        assert review_request is not None
        # Check if the code is approved.
        if message.approved:
            # Publish the code writing result.
            await self.publish_message(
                CodeWritingResult(
                    code=review_request.code,
                    task=review_request.code_writing_task,
                    review=message.review,
                ),
                topic_id=DefaultTopicId(),
            )
            print("Code Writing Result:")
            print("-" * 80)
            print(f"Task:\n{review_request.code_writing_task}")
            print("-" * 80)
            print(f"Code:\n{review_request.code}")
            print("-" * 80)
            print(f"Review:\n{message.review}")
            print("-" * 80)
            # Fix: release the finished session so the per-session history does
            # not accumulate forever.
            self._session_memory.pop(message.session_id, None)
        else:
            # Replay the whole session as LLM messages to request a revision.
            messages: List[LLMMessage] = [*self._system_messages]
            for m in self._session_memory[message.session_id]:
                if isinstance(m, CodeReviewResult):
                    messages.append(UserMessage(content=m.review, source="Reviewer"))
                elif isinstance(m, CodeReviewTask):
                    messages.append(AssistantMessage(content=m.code_writing_scratchpad, source="Coder"))
                elif isinstance(m, CodeWritingTask):
                    messages.append(UserMessage(content=m.task, source="User"))
                else:
                    raise ValueError(f"Unexpected message type: {m}")
            # Generate a revision using the chat completion API.
            response = await self._model_client.create(messages)
            assert isinstance(response.content, str)
            # Extract the code block from the response.
            code_block = self._extract_code_block(response.content)
            if code_block is None:
                raise ValueError("Code block not found.")
            # Create a new code review task.
            code_review_task = CodeReviewTask(
                session_id=message.session_id,
                code_writing_task=review_request.code_writing_task,
                code_writing_scratchpad=response.content,
                code=code_block,
            )
            # Store the code review task in the session memory.
            self._session_memory[message.session_id].append(code_review_task)
            # Publish a new code review task.
            await self.publish_message(
                code_review_task,
                topic_id=DefaultTopicId(),
            )

    def _extract_code_block(self, markdown_text: str) -> Union[str, None]:
        """Return the body of the first fenced code block, or None if absent."""
        pattern = r"```(\w+)\n(.*?)\n```"
        # Search for the pattern in the markdown text.
        match = re.search(pattern, markdown_text, re.DOTALL)
        if match:
            return match.group(2)
        return None
|
||||
|
||||
|
||||
async def main() -> None:
    """Register the coder/reviewer pair, publish a task, and run until idle."""
    runtime = SingleThreadedAgentRuntime()
    await runtime.register(
        "ReviewerAgent",
        lambda: ReviewerAgent(
            description="Code Reviewer",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
        ),
        lambda: [DefaultSubscription()],
    )
    await runtime.register(
        "CoderAgent",
        lambda: CoderAgent(
            description="Coder",
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
        ),
        lambda: [DefaultSubscription()],
    )
    runtime.start()
    # Seed the interaction with the initial code writing task.
    await runtime.publish_message(
        message=CodeWritingTask(
            task="Write a function to find the directory with the largest number of files using multi-processing."
        ),
        topic_id=DefaultTopicId(),
    )

    # Keep processing messages until idle.
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Surface autogen-core debug logs while keeping everything else quiet.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
171
python/packages/autogen-core/samples/patterns/group_chat.py
Normal file
171
python/packages/autogen-core/samples/patterns/group_chat.py
Normal file
@@ -0,0 +1,171 @@
|
||||
"""
|
||||
This example shows how to use publish-subscribe to implement
|
||||
a simple round-robin group chat among multiple agents:
|
||||
each agent in the group chat takes turns speaking in a round-robin fashion.
|
||||
The conversation ends after a specified number of rounds.
|
||||
|
||||
1. Upon receiving a message, the group chat manager selects the next speaker
|
||||
in a round-robin fashion and sends a request to speak message to the selected speaker.
|
||||
2. Upon receiving a request to speak message, the speaker generates a response
|
||||
to the last message in the memory and publishes the response.
|
||||
3. The conversation continues until the specified number of rounds is reached.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentInstantiationContext
|
||||
from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A group-chat message: who said it and what was said."""

    source: str
    content: str
|
||||
|
||||
|
||||
@dataclass
class RequestToSpeak:
    """Sent by the manager to tell a participant it is their turn."""

    pass
|
||||
|
||||
|
||||
@dataclass
class Termination:
    """Published by the manager when the conversation is over."""

    pass
|
||||
|
||||
|
||||
class RoundRobinGroupChatManager(RoutedAgent):
    """Nominates speakers in round-robin order until the round budget is spent."""

    def __init__(
        self,
        description: str,
        participants: List[AgentId],
        num_rounds: int,
    ) -> None:
        super().__init__(description)
        self._participants = participants
        self._num_rounds = num_rounds
        self._round_count = 0

    @message_handler
    async def handle_message(self, message: Message, ctx: MessageContext) -> None:
        """React to each group-chat message by nominating the next speaker."""
        # Round-robin: cycle through the participant list.
        next_speaker = self._participants[self._round_count % len(self._participants)]
        self._round_count += 1
        if self._round_count > self._num_rounds * len(self._participants):
            # Budget exhausted: announce the end of the conversation.
            await self.publish_message(Termination(), DefaultTopicId())
            return
        # Invite the selected participant to speak.
        await self.send_message(RequestToSpeak(), next_speaker)
|
||||
|
||||
|
||||
class GroupChatParticipant(RoutedAgent):
    """A chat participant that remembers the conversation and speaks when asked."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
    ) -> None:
        super().__init__(description)
        self._system_messages = system_messages
        self._model_client = model_client
        self._memory: List[Message] = []

    @message_handler
    async def handle_message(self, message: Message, ctx: MessageContext) -> None:
        # Remember everything said in the group chat.
        self._memory.append(message)

    @message_handler
    async def handle_request_to_speak(self, message: RequestToSpeak, ctx: MessageContext) -> None:
        """Generate and publish a reply based on the recent conversation."""
        if not self._memory:
            return
        # Convert the last 10 remembered messages into LLM messages, attributing
        # this agent's own past turns to the assistant role.
        history: List[LLMMessage] = []
        for past in self._memory[-10:]:
            if past.source == self.metadata["type"]:
                history.append(AssistantMessage(content=past.content, source=self.metadata["type"]))
            else:
                history.append(UserMessage(content=past.content, source=past.source))
        completion = await self._model_client.create(self._system_messages + history)
        assert isinstance(completion.content, str)
        reply = Message(content=completion.content, source=self.metadata["type"])
        # Record our own speech before publishing it to the group.
        self._memory.append(reply)
        await self.publish_message(reply, topic_id=DefaultTopicId())
|
||||
|
||||
|
||||
async def main() -> None:
    """Register three participants plus a manager and run a short group chat."""
    runtime = SingleThreadedAgentRuntime()

    # Register the participants.
    await runtime.register(
        "DataScientist",
        lambda: GroupChatParticipant(
            description="A data scientist",
            system_messages=[SystemMessage("You are a data scientist.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
        ),
    )
    await runtime.register(
        "Engineer",
        lambda: GroupChatParticipant(
            description="An engineer",
            system_messages=[SystemMessage("You are an engineer.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
        ),
    )
    await runtime.register(
        "Artist",
        lambda: GroupChatParticipant(
            description="An artist",
            system_messages=[SystemMessage("You are an artist.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
        ),
    )

    # Register the group chat manager, pointing it at the participants within
    # the same agent key namespace.
    await runtime.register(
        "GroupChatManager",
        lambda: RoundRobinGroupChatManager(
            description="A group chat manager",
            participants=[
                AgentId("DataScientist", AgentInstantiationContext.current_agent_id().key),
                AgentId("Engineer", AgentInstantiationContext.current_agent_id().key),
                AgentId("Artist", AgentInstantiationContext.current_agent_id().key),
            ],
            num_rounds=3,
        ),
    )

    # Start the runtime and open the conversation.
    runtime.start()
    await runtime.publish_message(Message(content="Hello, everyone!", source="Moderator"), topic_id=DefaultTopicId())
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Surface autogen-core debug logs while keeping everything else quiet.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,168 @@
|
||||
"""
|
||||
This example demonstrates the mixture of agents implemented using pub/sub.
|
||||
Mixture of agents: https://github.com/togethercomputer/moa
|
||||
|
||||
The example consists of two types of agents: reference agents and an aggregator agent.
|
||||
The aggregator agent distributes tasks to reference agents and aggregates the results.
|
||||
The reference agents handle each task independently and return the results to the aggregator agent.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import MessageContext
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class ReferenceAgentTask:
    """A task fanned out to a reference agent within a session."""

    session_id: str
    task: str
|
||||
|
||||
|
||||
@dataclass
class ReferenceAgentTaskResult:
    """A reference agent's answer, tagged with its session."""

    session_id: str
    result: str
|
||||
|
||||
|
||||
@dataclass
class AggregatorTask:
    """A top-level task for the aggregator agent to distribute."""

    task: str
|
||||
|
||||
|
||||
@dataclass
class AggregatorTaskResult:
    """The aggregator's final, combined answer."""

    result: str
|
||||
|
||||
|
||||
class ReferenceAgent(RoutedAgent):
    """The reference agent that handles each task independently."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
    ) -> None:
        super().__init__(description)
        self._system_messages = system_messages
        self._model_client = model_client

    @message_handler
    async def handle_task(self, message: ReferenceAgentTask, ctx: MessageContext) -> None:
        """Answer the task with the model and publish the result."""
        prompt = UserMessage(content=message.task, source=self.metadata["type"])
        completion = await self._model_client.create(self._system_messages + [prompt])
        assert isinstance(completion.content, str)
        # Tag the answer with the session so the aggregator can collate it.
        await self.publish_message(
            ReferenceAgentTaskResult(session_id=message.session_id, result=completion.content),
            topic_id=DefaultTopicId(),
        )
|
||||
|
||||
|
||||
class AggregatorAgent(RoutedAgent):
    """The aggregator agent that distributes tasks to reference agents and
    aggregates their results."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
        num_references: int,
    ) -> None:
        super().__init__(description)
        self._system_messages = system_messages
        self._model_client = model_client
        self._num_references = num_references
        # Collected reference results, keyed by session id.
        self._session_results: Dict[str, List[ReferenceAgentTaskResult]] = {}

    @message_handler
    async def handle_task(self, message: AggregatorTask, ctx: MessageContext) -> None:
        """Fan the task out to the reference agents under a fresh session id."""
        session_id = str(uuid.uuid4())
        await self.publish_message(
            ReferenceAgentTask(session_id=session_id, task=message.task),
            topic_id=DefaultTopicId(),
        )

    @message_handler
    async def handle_result(self, message: ReferenceAgentTaskResult, ctx: MessageContext) -> None:
        """Collect reference results; aggregate and publish once all arrive."""
        self._session_results.setdefault(message.session_id, []).append(message)
        if len(self._session_results[message.session_id]) == self._num_references:
            # All references have answered: merge their outputs and ask the
            # model for the final aggregate.
            combined = "\n\n".join([r.result for r in self._session_results[message.session_id]])
            completion = await self._model_client.create(
                self._system_messages + [UserMessage(content=combined, source=self.metadata["type"])]
            )
            assert isinstance(completion.content, str)
            await self.publish_message(AggregatorTaskResult(result=completion.content), topic_id=DefaultTopicId())
            # The session is complete: release its collected results.
            self._session_results.pop(message.session_id)
            print(f"Aggregator result: {completion.content}")
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire up three reference agents and one aggregator, then run a sample task."""
    runtime = SingleThreadedAgentRuntime()
    # TODO: use different models for each agent.
    # The three reference registrations differed only by index and temperature,
    # so register them in a loop. The loop variables are bound as lambda
    # defaults to avoid Python's late-binding-closure pitfall.
    for index, temperature in enumerate((0.1, 0.5, 1.0), start=1):
        await runtime.register(
            f"ReferenceAgent{index}",
            lambda index=index, temperature=temperature: ReferenceAgent(
                description=f"Reference Agent {index}",
                system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")],
                model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini", temperature=temperature),
            ),
            lambda: [DefaultSubscription()],
        )
    await runtime.register(
        "AggregatorAgent",
        lambda: AggregatorAgent(
            description="Aggregator Agent",
            system_messages=[
                SystemMessage(
                    "...synthesize these responses into a single, high-quality response... Responses from models:"
                )
            ],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            num_references=3,
        ),
        lambda: [DefaultSubscription()],
    )
    runtime.start()
    await runtime.publish_message(AggregatorTask(task="What are something fun to do in SF?"), topic_id=DefaultTopicId())

    # Keep processing messages.
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Quiet everything except the framework's own debug output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,280 @@
|
||||
"""
|
||||
This example shows an implementation of the multi-agent debate pattern
|
||||
for solving math problems from GSM8K benchmark (https://huggingface.co/datasets/openai/gsm8k).
|
||||
|
||||
The example consists of two types of agents: solver agents and an aggregator agent.
|
||||
The solver agents are connected in a sparse manner following the technique described in
|
||||
"Improving Multi-Agent Debate with Sparse Communication Topology" (https://arxiv.org/abs/2406.11776).
|
||||
|
||||
For example, consider the following connection pattern:
|
||||
|
||||
A --- B
|
||||
| |
|
||||
| |
|
||||
C --- D
|
||||
|
||||
In this pattern, each solver agent is connected to two other solver agents.
|
||||
|
||||
The pattern works as follows:
|
||||
1. The main function sends a math problem to the aggregator agent.
|
||||
2. The aggregator agent distributes the problem to the solver agents.
|
||||
3. Each solver agent processes the problem, and publishes a response to all other solver agents.
|
||||
4. Each solver agent again use the responses from other agents to refine its response, publish a new response.
|
||||
5. Repeat step 4 for a fixed number of rounds. In the final round, each solver agent publish a final response.
|
||||
6. The aggregator agent use majority voting to aggregate the final responses from all solver agents to get the final answer, and publishes the answer.
|
||||
|
||||
To make the connection dense, modify SolverAgent's handle_response method
|
||||
to consider all neighbors' responses to use.
|
||||
|
||||
To make the connection probabilistic, modify SolverAgent's handle_response method
|
||||
to sample a random number of neighbors' responses to use.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
@dataclass
class Question:
    """A math problem sent to the aggregator to kick off a debate."""

    content: str
|
||||
|
||||
|
||||
@dataclass
class Answer:
    """The final, majority-voted answer published by the aggregator."""

    content: str
|
||||
|
||||
|
||||
@dataclass
class SolverRequest:
    """A prompt for one solver, tagged with the debate session and original question."""

    session_id: str
    content: str
    question: str
|
||||
|
||||
|
||||
@dataclass
class IntermediateSolverResponse:
    """A solver's per-round answer, broadcast so its neighbors can refine theirs."""

    session_id: str
    content: str
    solver_name: str
    answer: str
    round: int
|
||||
|
||||
|
||||
@dataclass
class FinalSolverResponse:
    """A solver's last-round answer, consumed by the aggregator for majority voting."""

    session_id: str
    answer: str
|
||||
|
||||
|
||||
class MathSolver(RoutedAgent):
    """One debate participant: solves the problem and exchanges answers with its neighbors."""

    def __init__(self, model_client: ChatCompletionClient, neighbor_names: List[str], max_round: int) -> None:
        super().__init__("A debator.")
        self._model_client = model_client
        if self.metadata["type"] in neighbor_names:
            raise ValueError("The agent's name cannot be in the list of neighbor names.")
        # Names of the solvers whose responses this agent listens to.
        self._neighbor_names = neighbor_names
        # Conversation history per debate session.
        self._memory: Dict[str, List[LLMMessage]] = {}
        # Neighbor responses buffered per (session, round) until all have arrived.
        self._buffer: Dict[Tuple[str, int], List[IntermediateSolverResponse]] = {}
        # Original question text per session.
        self._questions: Dict[str, str] = {}
        self._system_messages = [
            SystemMessage(
                (
                    "You are a helpful assistant with expertise in mathematics and reasoning. "
                    "Your task is to assist in solving a math reasoning problem by providing "
                    "a clear and detailed solution. Limit your output within 100 words, "
                    "and your final answer should be a single numerical number, "
                    "in the form of {{answer}}, at the end of your response. "
                    "For example, 'The answer is {{42}}.'"
                )
            )
        ]
        # Rounds completed so far, per session.
        self._counters: Dict[str, int] = {}
        self._max_round = max_round

    @message_handler
    async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:
        """Buffer a neighbor's answer; once every neighbor replied, start the next round."""
        if message.solver_name not in self._neighbor_names:
            # Only neighbors' responses are considered (sparse topology).
            return
        key = (message.session_id, message.round)
        pending = self._buffer.setdefault(key, [])
        pending.append(message)
        # Wait until every neighbor has responded for this round.
        if len(pending) < len(self._neighbor_names):
            return
        question = self._questions[message.session_id]
        # Build the refinement prompt from the neighbors' solutions.
        prompt = "These are the solutions to the problem from other agents:\n"
        for neighbor_response in pending:
            prompt += f"One agent solution: {neighbor_response.content}\n"
        prompt += (
            "Using the solutions from other agents as additional information, "
            "can you provide your answer to the math problem? "
            f"The original math problem is {question}. "
            "Your final answer should be a single numerical number, "
            "in the form of {{answer}}, at the end of your response."
        )
        # Send the refinement request to this agent itself to run another round.
        await self.send_message(
            SolverRequest(content=prompt, session_id=message.session_id, question=question), self.id
        )
        # This round's buffer is consumed.
        self._buffer.pop(key)

    @message_handler
    async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:
        """Run one model round and publish either an intermediate or a final response."""
        # Remember the question and record the new prompt in this session's history.
        self._questions[message.session_id] = message.question
        history = self._memory.setdefault(message.session_id, [])
        history.append(UserMessage(content=message.content, source="Host"))
        # Query the model with the full session history.
        response = await self._model_client.create(self._system_messages + history)
        assert isinstance(response.content, str)
        history.append(AssistantMessage(content=response.content, source=self.metadata["type"]))
        logger.debug(f"Solver {self.metadata['type']} response: {response.content}")
        # Pull the {{answer}} marker out of the response.
        match = re.search(r"\{\{(\-?\d+(\.\d+)?)\}\}", response.content)
        if match is None:
            raise ValueError("The model response does not contain the answer.")
        answer = match.group(1)
        # One more round completed for this session.
        round_number = self._counters.get(message.session_id, 0) + 1
        self._counters[message.session_id] = round_number
        if round_number == self._max_round:
            # Final round: publish the definitive answer for aggregation.
            await self.publish_message(
                FinalSolverResponse(answer=answer, session_id=message.session_id), topic_id=DefaultTopicId()
            )
        else:
            # Otherwise share the intermediate answer with the neighbors.
            await self.publish_message(
                IntermediateSolverResponse(
                    content=response.content,
                    solver_name=self.metadata["type"],
                    answer=answer,
                    session_id=message.session_id,
                    round=round_number,
                ),
                topic_id=DefaultTopicId(),
            )
|
||||
|
||||
|
||||
class MathAggregator(RoutedAgent):
    """Distributes a question to all solvers and majority-votes their final answers."""

    def __init__(self, num_solvers: int) -> None:
        super().__init__("Math Aggregator")
        # How many FinalSolverResponse messages complete a session.
        self._num_solvers = num_solvers
        # Final responses buffered per session id.
        self._responses: Dict[str, List[FinalSolverResponse]] = {}

    @message_handler
    async def handle_question(self, message: Question, ctx: MessageContext) -> None:
        """Broadcast the question to the solvers under a fresh session id."""
        prompt = (
            f"Can you solve the following math problem?\n{message.content}\n"
            "Explain your reasoning. Your final answer should be a single numerical number, "
            "in the form of {{answer}}, at the end of your response."
        )
        session_id = str(uuid.uuid4())
        await self.publish_message(
            SolverRequest(content=prompt, session_id=session_id, question=message.content), topic_id=DefaultTopicId()
        )

    @message_handler
    async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:
        """Collect final answers; once all solvers have reported, publish the majority vote."""
        collected = self._responses.setdefault(message.session_id, [])
        collected.append(message)
        if len(collected) != self._num_solvers:
            return
        # All solvers reported: take the most frequent answer.
        answers = [final.answer for final in collected]
        majority_answer = max(set(answers), key=answers.count)
        # Publish the aggregated response.
        await self.publish_message(Answer(content=majority_answer), topic_id=DefaultTopicId())
        # This session is done; drop its buffer.
        self._responses.pop(message.session_id)
        print(f"Aggregated answer: {majority_answer}")
|
||||
|
||||
|
||||
async def main(question: str) -> None:
    """Set up a sparse 4-solver debate ring plus an aggregator and ask one question."""
    # Create the runtime.
    runtime = SingleThreadedAgentRuntime()
    # Register the solver agents.
    # Create a sparse connection: each solver agent has two neighbors.
    # NOTE: to create a dense connection, each solver agent should be connected
    # to all other solver agents. The four registrations differed only in the
    # neighbor list, so drive them from a topology table; the neighbor list is
    # bound as a lambda default to avoid the late-binding-closure pitfall.
    topology = {
        "MathSolver1": ["MathSolver2", "MathSolver4"],
        "MathSolver2": ["MathSolver1", "MathSolver3"],
        "MathSolver3": ["MathSolver2", "MathSolver4"],
        "MathSolver4": ["MathSolver1", "MathSolver3"],
    }
    for name, neighbors in topology.items():
        await runtime.register(
            name,
            lambda neighbors=neighbors: MathSolver(
                get_chat_completion_client_from_envs(model="gpt-4o-mini"),
                neighbor_names=neighbors,
                max_round=3,
            ),
            lambda: [DefaultSubscription()],
        )
    # Register the aggregator agent. Unlike the original, it is given a
    # DefaultSubscription like every other pub/sub agent in these samples:
    # it receives Question and FinalSolverResponse via publish_message, so it
    # must subscribe to the default topic.
    await runtime.register(
        "MathAggregator",
        lambda: MathAggregator(num_solvers=4),
        lambda: [DefaultSubscription()],
    )

    runtime.start()

    # Send a math problem to the aggregator agent.
    await runtime.publish_message(Question(content=question), topic_id=DefaultTopicId())

    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Quiet everything except the framework's own debug output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    question = (
        "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?"
    )
    asyncio.run(main(question))
    # Expected output: 72
|
||||
145
python/packages/autogen-core/samples/tool-use/coding_direct.py
Normal file
145
python/packages/autogen-core/samples/tool-use/coding_direct.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""
|
||||
This example implements a tool-enabled agent that uses tools to perform tasks.
|
||||
1. The tool use agent receives a user message, and makes an inference using a model.
|
||||
If the response is a list of function calls, the tool use agent executes the tools by
|
||||
sending tool execution task to a tool executor agent.
|
||||
2. The tool executor agent executes the tools and sends the results back to the
|
||||
tool use agent, who makes an inference using the model again.
|
||||
3. The agents keep executing the tools until the inference response is not a
|
||||
list of function calls.
|
||||
4. The tool use agent returns the final response to the user.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentInstantiationContext
|
||||
from autogen_core.components import FunctionCall, RoutedAgent, message_handler
|
||||
from autogen_core.components.code_executor import LocalCommandLineCodeExecutor
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
FunctionExecutionResult,
|
||||
FunctionExecutionResultMessage,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from autogen_core.components.tool_agent import ToolAgent, ToolException
|
||||
from autogen_core.components.tools import PythonCodeExecutionTool, Tool, ToolSchema
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A plain-text message exchanged between the user and the tool-use agent."""

    content: str
|
||||
|
||||
|
||||
class ToolUseAgent(RoutedAgent):
    """An agent that answers user messages with a model, delegating any
    requested function calls to a tool-executor agent via direct messages
    until the model returns plain text."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
        tool_schema: List[ToolSchema],
        tool_agent: AgentId,
    ) -> None:
        super().__init__(description)
        self._model_client = model_client
        self._system_messages = system_messages
        # Schemas advertised to the model so it can emit function calls.
        self._tool_schema = tool_schema
        # Identity of the agent that actually runs the tools.
        self._tool_agent = tool_agent

    @message_handler
    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:
        """Handle a user message, execute the model and tools, and return the response."""
        session: List[LLMMessage] = [UserMessage(content=message.content, source="User")]
        response = await self._model_client.create(self._system_messages + session, tools=self._tool_schema)
        session.append(AssistantMessage(content=response.content, source=self.metadata["type"]))

        # Keep executing tools until the response is not a list of function calls.
        while isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content):
            # Run every requested call concurrently on the tool-executor agent.
            outcomes: List[FunctionExecutionResult | BaseException] = await asyncio.gather(
                *(
                    self.send_message(call, self._tool_agent, cancellation_token=ctx.cancellation_token)
                    for call in response.content
                ),
                return_exceptions=True,
            )
            # Fold the outcomes into one result message: tool errors become
            # error strings, any other exception is re-raised.
            function_results: List[FunctionExecutionResult] = []
            for outcome in outcomes:
                if isinstance(outcome, FunctionExecutionResult):
                    function_results.append(outcome)
                elif isinstance(outcome, ToolException):
                    function_results.append(
                        FunctionExecutionResult(content=f"Error: {outcome}", call_id=outcome.call_id)
                    )
                elif isinstance(outcome, BaseException):
                    raise outcome
            session.append(FunctionExecutionResultMessage(content=function_results))
            # Query the model again with the tool results appended.
            response = await self._model_client.create(self._system_messages + session, tools=self._tool_schema)
            session.append(AssistantMessage(content=response.content, source=self.metadata["type"]))

        assert isinstance(response.content, str)
        return Message(content=response.content)
|
||||
|
||||
|
||||
async def main() -> None:
    """Register a tool executor and a tool-use agent, then run one coding task."""
    # Create the runtime.
    runtime = SingleThreadedAgentRuntime()
    # A single tool that runs Python code via the local command line.
    tools: List[Tool] = [
        PythonCodeExecutionTool(
            LocalCommandLineCodeExecutor(),
        )
    ]
    # Register agents.
    await runtime.register(
        "tool_executor_agent",
        lambda: ToolAgent(
            description="Tool Executor Agent",
            tools=tools,
        ),
    )
    await runtime.register(
        "tool_enabled_agent",
        lambda: ToolUseAgent(
            description="Tool Use Agent",
            system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            tool_schema=[tool.schema for tool in tools],
            # Pair this agent with the executor instance that shares its key.
            tool_agent=AgentId("tool_executor_agent", AgentInstantiationContext.current_agent_id().key),
        ),
    )

    runtime.start()

    # Send a task to the tool user and print its reply.
    reply = await runtime.send_message(
        Message("Run the following Python code: print('Hello, World!')"), AgentId("tool_enabled_agent", "default")
    )
    print(reply.content)

    # Shut down once the task is completed.
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Surface the framework's internal debug logging while the sample runs.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
This example show case how to intercept the tool execution using
|
||||
intervention hanlder.
|
||||
The intervention handler is used to intercept the FunctionCall message
|
||||
before it is sent out, and prompt the user for permission to execute the tool.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from typing import Any, List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentId, AgentInstantiationContext
|
||||
from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage
|
||||
from autogen_core.components import FunctionCall
|
||||
from autogen_core.components.code_executor import LocalCommandLineCodeExecutor
|
||||
from autogen_core.components.models import SystemMessage
|
||||
from autogen_core.components.tool_agent import ToolAgent, ToolException
|
||||
from autogen_core.components.tools import PythonCodeExecutionTool, Tool
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from coding_direct import Message, ToolUseAgent
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
class ToolInterventionHandler(DefaultInterventionHandler):
    """Asks the user to approve every outgoing FunctionCall before it runs."""

    async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]:
        if not isinstance(message, FunctionCall):
            # Everything except tool calls passes through untouched.
            return message
        # Ask the user whether this tool call may run.
        user_input = input(
            f"Function call: {message.name}\nArguments: {message.arguments}\nDo you want to execute the tool? (y/n): "
        )
        if user_input.strip().lower() != "y":
            # A denial surfaces as a ToolException carried back to the caller.
            raise ToolException(content="User denied tool execution.", call_id=message.id)
        return message
|
||||
|
||||
|
||||
async def main() -> None:
    """Run the coding sample with a handler that gates every tool execution."""
    # Create the runtime with the intervention handler installed.
    runtime = SingleThreadedAgentRuntime(intervention_handler=ToolInterventionHandler())
    # A single tool that runs Python code via the local command line.
    tools: List[Tool] = [
        PythonCodeExecutionTool(
            LocalCommandLineCodeExecutor(),
        )
    ]
    # Register agents.
    await runtime.register(
        "tool_executor_agent",
        lambda: ToolAgent(
            description="Tool Executor Agent",
            tools=tools,
        ),
    )
    await runtime.register(
        "tool_enabled_agent",
        lambda: ToolUseAgent(
            description="Tool Use Agent",
            system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            tool_schema=[tool.schema for tool in tools],
            # Pair this agent with the executor instance that shares its key.
            tool_agent=AgentId("tool_executor_agent", AgentInstantiationContext.current_agent_id().key),
        ),
    )

    runtime.start()

    # Send a task to the tool user and print its reply.
    reply = await runtime.send_message(
        Message("Run the following Python code: print('Hello, World!')"), AgentId("tool_enabled_agent", "default")
    )
    print(reply.content)

    # Shut down once the task is completed.
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep general logging quiet but show the framework's debug output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
223
python/packages/autogen-core/samples/tool-use/coding_pub_sub.py
Normal file
223
python/packages/autogen-core/samples/tool-use/coding_pub_sub.py
Normal file
@@ -0,0 +1,223 @@
|
||||
"""
|
||||
This example shows how to use pub/sub to implement
|
||||
a simple interaction between a tool executor agent and a tool use agent.
|
||||
1. The tool use agent receives a user message, and makes an inference using a model.
|
||||
If the response is a list of function calls, the agent publishes the function calls
|
||||
to the tool executor agent.
|
||||
2. The tool executor agent receives the function calls, executes the tools, and publishes
|
||||
the results back to the tool use agent.
|
||||
3. The tool use agent receives the tool results, and makes an inference using the model again.
|
||||
4. The process continues until the inference response is not a list of function calls.
|
||||
5. The tool use agent publishes a final response to the user.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, FunctionCall, RoutedAgent, message_handler
|
||||
from autogen_core.components.code_executor import LocalCommandLineCodeExecutor
|
||||
from autogen_core.components.models import (
|
||||
AssistantMessage,
|
||||
ChatCompletionClient,
|
||||
FunctionExecutionResult,
|
||||
FunctionExecutionResultMessage,
|
||||
LLMMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from autogen_core.components.tools import PythonCodeExecutionTool, Tool
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import MessageContext
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
@dataclass
class ToolExecutionTask:
    """A single function call to be run by the executor, tagged with its session."""

    session_id: str
    function_call: FunctionCall
|
||||
|
||||
|
||||
@dataclass
class ToolExecutionTaskResult:
    """The outcome of one tool execution, published back to the requesting session."""

    session_id: str
    result: FunctionExecutionResult
|
||||
|
||||
|
||||
@dataclass
class UserRequest:
    """A task submitted by the user over the default topic."""

    content: str
|
||||
|
||||
|
||||
@dataclass
class AgentResponse:
    """The agent's final text answer, published once all tool work is done."""

    content: str
|
||||
|
||||
|
||||
class ToolExecutorAgent(RoutedAgent):
    """An agent that executes tools on request and publishes their results."""

    def __init__(self, description: str, tools: List[Tool]) -> None:
        super().__init__(description)
        # Tools available for execution, looked up by name.
        self._tools = tools

    @message_handler
    async def handle_tool_call(self, message: ToolExecutionTask, ctx: MessageContext) -> None:
        """Run the requested tool and publish a ToolExecutionTaskResult.

        Lookup failures, malformed JSON arguments, and tool errors are all
        reported as error strings rather than raised, so the requesting agent
        always receives a result.
        """
        # Find the tool by the name in the function call.
        tool = next((candidate for candidate in self._tools if candidate.name == message.function_call.name), None)
        if tool is None:
            result_as_str = f"Error: Tool not found: {message.function_call.name}"
        else:
            try:
                arguments = json.loads(message.function_call.arguments)
                run_result = await tool.run_json(args=arguments, cancellation_token=ctx.cancellation_token)
                result_as_str = tool.return_value_as_string(run_result)
            except json.JSONDecodeError:
                result_as_str = f"Error: Invalid arguments: {message.function_call.arguments}"
            except Exception as e:
                result_as_str = f"Error: {e}"
        await self.publish_message(
            ToolExecutionTaskResult(
                session_id=message.session_id,
                result=FunctionExecutionResult(content=result_as_str, call_id=message.function_call.id),
            ),
            topic_id=DefaultTopicId(),
        )
|
||||
|
||||
|
||||
class ToolUseAgent(RoutedAgent):
    """An agent that uses tools to perform tasks. It doesn't execute the tools
    itself; it publishes each function call and collects the results published
    back by the ToolExecutorAgent via the pub/sub mechanism."""

    def __init__(
        self,
        description: str,
        system_messages: List[SystemMessage],
        model_client: ChatCompletionClient,
        tools: List[Tool],
    ) -> None:
        super().__init__(description)
        self._model_client = model_client
        self._system_messages = system_messages
        self._tools = tools
        # Conversation history per session.
        self._sessions: Dict[str, List[LLMMessage]] = {}
        # Tool results collected per session.
        self._tool_results: Dict[str, List[ToolExecutionTaskResult]] = {}
        # Number of outstanding tool executions per session.
        self._tool_counter: Dict[str, int] = {}

    async def _publish_calls(self, session_id: str, calls: List[FunctionCall]) -> None:
        """Publish each function call for execution, tracking the outstanding count."""
        for function_call in calls:
            self._tool_counter[session_id] += 1
            await self.publish_message(
                ToolExecutionTask(session_id=session_id, function_call=function_call),
                topic_id=DefaultTopicId(),
            )

    @message_handler
    async def handle_user_message(self, message: UserRequest, ctx: MessageContext) -> None:
        """Start a new session: query the model, then either publish the text
        response or fan the requested function calls out to the executor."""
        session_id = str(uuid.uuid4())
        history = self._sessions.setdefault(session_id, [])
        history.append(UserMessage(content=message.content, source="User"))
        response = await self._model_client.create(self._system_messages + history, tools=self._tools)
        history.append(AssistantMessage(content=response.content, source=self.metadata["type"]))

        if isinstance(response.content, str):
            # Plain text: this session needed no tools.
            await self.publish_message(AgentResponse(content=response.content), topic_id=DefaultTopicId())
            print(f"AI Response: {response.content}")
            return

        # Otherwise the model asked for tool executions.
        assert isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content)
        self._tool_results.setdefault(session_id, [])
        self._tool_counter.setdefault(session_id, 0)
        await self._publish_calls(session_id, response.content)

    @message_handler
    async def handle_tool_result(self, message: ToolExecutionTaskResult, ctx: MessageContext) -> None:
        """Collect one tool result; when the last outstanding one arrives, query
        the model again and either publish the text answer or dispatch more calls."""
        self._tool_results[message.session_id].append(message)
        self._tool_counter[message.session_id] -= 1
        if self._tool_counter[message.session_id] > 0:
            # Not all tools have finished execution yet.
            return
        # Aggregate all tool results into a single LLM message and clear the buffer.
        result = FunctionExecutionResultMessage(content=[r.result for r in self._tool_results[message.session_id]])
        self._tool_results[message.session_id].clear()
        # Query the model again with the results appended.
        history = self._sessions[message.session_id]
        history.append(result)
        response = await self._model_client.create(self._system_messages + history, tools=self._tools)
        history.append(AssistantMessage(content=response.content, source=self.metadata["type"]))
        if isinstance(response.content, str):
            # Final text answer: publish it and retire the session's tool state.
            await self.publish_message(AgentResponse(content=response.content), topic_id=DefaultTopicId())
            self._tool_results.pop(message.session_id)
            self._tool_counter.pop(message.session_id)
            print(f"AI Response: {response.content}")
            return
        # The model asked for more tool executions.
        assert isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content)
        await self._publish_calls(message.session_id, response.content)
|
||||
|
||||
|
||||
async def main() -> None:
    """Wire a tool-executor agent and a tool-use agent onto a local runtime,
    publish one code-execution task, and run until no messages remain."""
    runtime = SingleThreadedAgentRuntime()
    # Define the tools.
    tools: List[Tool] = [
        PythonCodeExecutionTool(
            LocalCommandLineCodeExecutor(),
        )
    ]
    # Register agents.
    await runtime.register(
        "tool_executor", lambda: ToolExecutorAgent("Tool Executor", tools), lambda: [DefaultSubscription()]
    )
    await runtime.register(
        "tool_use_agent",
        lambda: ToolUseAgent(
            description="Tool Use Agent",
            system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            tools=tools,
        ),
        lambda: [DefaultSubscription()],
    )

    runtime.start()

    # Publish a task.
    await runtime.publish_message(
        UserRequest("Run the following Python code: print('Hello, World!')"), topic_id=DefaultTopicId()
    )

    # Returns once the runtime has no unprocessed messages or in-flight tasks.
    await runtime.stop_when_idle()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep third-party logging quiet, but surface autogen_core's DEBUG output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,83 @@
|
||||
"""
|
||||
This example shows how to use custom function tools with a tool-enabled
|
||||
agent.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
from typing import List
|
||||
|
||||
from autogen_core.application import SingleThreadedAgentRuntime
|
||||
from autogen_core.base import AgentInstantiationContext
|
||||
from autogen_core.components.models import (
|
||||
SystemMessage,
|
||||
)
|
||||
from autogen_core.components.tool_agent import ToolAgent
|
||||
from autogen_core.components.tools import FunctionTool, Tool
|
||||
from typing_extensions import Annotated
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from autogen_core.base import AgentId
|
||||
from coding_direct import Message, ToolUseAgent
|
||||
from common.utils import get_chat_completion_client_from_envs
|
||||
|
||||
|
||||
async def get_stock_price(ticker: str, date: Annotated[str, "The date in YYYY/MM/DD format."]) -> float:
    """Get the stock price of a company.

    Placeholder implementation for the demo: the ticker and date are ignored
    and a price is drawn uniformly at random from [10, 100].
    """
    price = random.uniform(10, 100)
    return price
|
||||
|
||||
|
||||
async def main() -> None:
    """Register a ToolAgent (executor) and a ToolUseAgent (caller), then send a
    single direct stock-price question and print the reply."""
    # Create the runtime.
    runtime = SingleThreadedAgentRuntime()
    tools: List[Tool] = [
        # A tool that gets the stock price.
        FunctionTool(
            get_stock_price,
            description="Get the stock price of a company given the ticker and date.",
            name="get_stock_price",
        )
    ]
    # Register agents.
    await runtime.register(
        "tool_executor_agent",
        lambda: ToolAgent(
            description="Tool Executor Agent",
            tools=tools,
        ),
    )
    await runtime.register(
        "tool_enabled_agent",
        lambda: ToolUseAgent(
            description="Tool Use Agent",
            system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
            model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
            tool_schema=[tool.schema for tool in tools],
            # Point at an executor instance sharing this agent's key, so each
            # tool-use agent talks to its own executor.
            tool_agent=AgentId("tool_executor_agent", AgentInstantiationContext.current_agent_id().key),
        ),
    )
    tool_use_agent = AgentId("tool_enabled_agent", "default")

    runtime.start()

    # Send a task to the tool user.
    response = await runtime.send_message(Message("What is the stock price of NVDA on 2024/06/01"), tool_use_agent)
    # Print the result.
    assert isinstance(response, Message)
    print(response.content)

    # Run the runtime until the task is completed.
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep third-party logging quiet, but surface autogen_core's DEBUG output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
53
python/packages/autogen-core/samples/worker/run_host.py
Normal file
53
python/packages/autogen-core/samples/worker/run_host.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import asyncio
|
||||
import signal
|
||||
|
||||
import grpc
|
||||
from autogen_core.application import HostRuntimeServicer
|
||||
from autogen_core.application.protos import agent_worker_pb2_grpc
|
||||
|
||||
|
||||
async def serve(server: grpc.aio.Server) -> None:  # type: ignore
    """Start the given gRPC server and block until it is terminated."""
    await server.start()
    print("Server started")
    await server.wait_for_termination()
|
||||
|
||||
|
||||
async def main() -> None:
    """Serve the host runtime over gRPC on port 50051 until SIGINT/SIGTERM."""
    server = grpc.aio.server()
    agent_worker_pb2_grpc.add_AgentRpcServicer_to_server(HostRuntimeServicer(), server)
    # Listen on all interfaces (IPv4 and IPv6), without TLS.
    server.add_insecure_port("[::]:50051")

    # Set up signal handling for graceful shutdown
    loop = asyncio.get_running_loop()

    shutdown_event = asyncio.Event()

    def signal_handler() -> None:
        # Runs on the event loop thread; just flag the shutdown event.
        print("Received exit signal, shutting down gracefully...")
        shutdown_event.set()

    loop.add_signal_handler(signal.SIGINT, signal_handler)
    loop.add_signal_handler(signal.SIGTERM, signal_handler)

    # Start server in background task
    serve_task = asyncio.create_task(serve(server))

    # Wait for the signal to trigger the shutdown event
    await shutdown_event.wait()

    # Graceful shutdown: stop() lets in-flight RPCs finish within the grace
    # period, which also unblocks wait_for_termination() inside serve().
    await server.stop(5)  # 5 second grace period
    await serve_task
    print("Server stopped")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import logging

    # Keep third-party logging quiet, but surface autogen_core's DEBUG output.
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("autogen_core").setLevel(logging.DEBUG)

    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C before the signal handlers are installed lands here.
        print("Server shutdown interrupted.")
|
||||
@@ -0,0 +1,94 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, NoReturn
|
||||
|
||||
from autogen_core.application import WorkerAgentRuntime
|
||||
from autogen_core.base import MESSAGE_TYPE_REGISTRY, MessageContext
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
|
||||
|
||||
@dataclass
class AskToGreet:
    """Request for the greeter to produce a greeting for `content`."""

    content: str


@dataclass
class Greeting:
    """A greeting published by the greeter agent."""

    content: str


@dataclass
class ReturnedGreeting:
    """Acknowledgement published by the receiver in reply to a Greeting."""

    content: str


@dataclass
class Feedback:
    """Feedback published by the greeter about a returned greeting."""

    content: str


@dataclass
class ReturnedFeedback:
    """Acknowledgement published by the receiver in reply to Feedback."""

    content: str
|
||||
|
||||
|
||||
class ReceiveAgent(RoutedAgent):
    """Echoes greetings and feedback back onto the default topic."""

    def __init__(self) -> None:
        super().__init__("Receive Agent")

    @message_handler
    async def on_greet(self, message: Greeting, ctx: MessageContext) -> None:
        """Reply to a published Greeting with a ReturnedGreeting."""
        await self.publish_message(ReturnedGreeting(f"Returned greeting: {message.content}"), topic_id=DefaultTopicId())

    @message_handler
    async def on_feedback(self, message: Feedback, ctx: MessageContext) -> None:
        """Reply to published Feedback with a ReturnedFeedback."""
        await self.publish_message(ReturnedFeedback(f"Returned feedback: {message.content}"), topic_id=DefaultTopicId())

    async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> NoReturn:  # type: ignore
        """Log any message type that has no registered handler."""
        print(f"Unhandled message: {message}")
|
||||
|
||||
|
||||
class GreeterAgent(RoutedAgent):
    """Publishes greetings on request and feedback on returned greetings."""

    def __init__(self) -> None:
        super().__init__("Greeter Agent")

    @message_handler
    async def on_ask(self, message: AskToGreet, ctx: MessageContext) -> None:
        """Answer an AskToGreet by publishing a Greeting."""
        await self.publish_message(Greeting(f"Hello, {message.content}!"), topic_id=DefaultTopicId())

    @message_handler
    async def on_returned_greet(self, message: ReturnedGreeting, ctx: MessageContext) -> None:
        """Publish Feedback once the receiver acknowledges the greeting."""
        await self.publish_message(Feedback(f"Feedback: {message.content}"), topic_id=DefaultTopicId())

    async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> NoReturn:  # type: ignore
        """Log any message type that has no registered handler."""
        print(f"Unhandled message: {message}")
|
||||
|
||||
|
||||
async def main() -> None:
    """Connect a worker runtime to the host at localhost:50051 and run the
    pub/sub greeting demo until interrupted."""
    runtime = WorkerAgentRuntime()
    # Register every message type so it can be serialized across the wire.
    MESSAGE_TYPE_REGISTRY.add_type(Greeting)
    MESSAGE_TYPE_REGISTRY.add_type(AskToGreet)
    MESSAGE_TYPE_REGISTRY.add_type(Feedback)
    MESSAGE_TYPE_REGISTRY.add_type(ReturnedGreeting)
    MESSAGE_TYPE_REGISTRY.add_type(ReturnedFeedback)
    await runtime.start(host_connection_string="localhost:50051")

    await runtime.register("receiver", ReceiveAgent, lambda: [DefaultSubscription()])
    await runtime.register("greeter", GreeterAgent, lambda: [DefaultSubscription()])

    # Kick off the conversation with an initial publish.
    await runtime.publish_message(AskToGreet("Hello World!"), topic_id=DefaultTopicId())

    # Just to keep the runtime running
    try:
        await asyncio.sleep(1000000)
    except KeyboardInterrupt:
        pass
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Full DEBUG logging for this demo, including autogen_core internals.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("autogen_core")
    logger.setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,83 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, NoReturn
|
||||
|
||||
from autogen_core.application import WorkerAgentRuntime
|
||||
from autogen_core.base import MESSAGE_TYPE_REGISTRY, AgentId, AgentInstantiationContext, MessageContext
|
||||
from autogen_core.components import DefaultSubscription, DefaultTopicId, RoutedAgent, message_handler
|
||||
|
||||
|
||||
@dataclass
class AskToGreet:
    """Request for the greeter to produce a greeting for `content`."""

    content: str


@dataclass
class Greeting:
    """A greeting, sent directly (RPC-style) between agents."""

    content: str


@dataclass
class Feedback:
    """Feedback published by the greeter after receiving a reply."""

    content: str
|
||||
|
||||
|
||||
class ReceiveAgent(RoutedAgent):
    """Answers direct Greeting messages and prints published Feedback."""

    def __init__(self) -> None:
        super().__init__("Receive Agent")

    @message_handler
    async def on_greet(self, message: Greeting, ctx: MessageContext) -> Greeting:
        """Answer a direct Greeting with an acknowledging Greeting (RPC reply)."""
        return Greeting(content=f"Received: {message.content}")

    @message_handler
    async def on_feedback(self, message: Feedback, ctx: MessageContext) -> None:
        """Print published Feedback to stdout."""
        print(f"Feedback received: {message.content}")

    async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> NoReturn:  # type: ignore
        """Log any message type that has no registered handler."""
        print(f"Unhandled message: {message}")
|
||||
|
||||
|
||||
class GreeterAgent(RoutedAgent):
    """Sends a direct greeting to a fixed receiver and publishes the reply as feedback."""

    def __init__(self, receive_agent_id: AgentId) -> None:
        super().__init__("Greeter Agent")
        # Id of the agent that will receive the direct Greeting.
        self._receive_agent_id = receive_agent_id

    @message_handler
    async def on_ask(self, message: AskToGreet, ctx: MessageContext) -> None:
        """Send a Greeting directly to the receiver, then publish its reply as Feedback."""
        response = await self.send_message(Greeting(f"Hello, {message.content}!"), recipient=self._receive_agent_id)
        await self.publish_message(Feedback(f"Feedback: {response.content}"), topic_id=DefaultTopicId())

    async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> NoReturn:  # type: ignore
        """Log any message type that has no registered handler."""
        print(f"Unhandled message: {message}")
|
||||
|
||||
|
||||
async def main() -> None:
    """Connect a worker runtime to the host at localhost:50051 and run the
    direct-messaging greeting demo until interrupted."""
    runtime = WorkerAgentRuntime()
    # Register every message type so it can be serialized across the wire.
    MESSAGE_TYPE_REGISTRY.add_type(Greeting)
    MESSAGE_TYPE_REGISTRY.add_type(AskToGreet)
    MESSAGE_TYPE_REGISTRY.add_type(Feedback)
    await runtime.start(host_connection_string="localhost:50051")

    await runtime.register("receiver", lambda: ReceiveAgent(), lambda: [DefaultSubscription()])
    await runtime.register(
        "greeter",
        # Each greeter targets the receiver instance that shares its key.
        lambda: GreeterAgent(AgentId("receiver", AgentInstantiationContext.current_agent_id().key)),
        lambda: [DefaultSubscription()],
    )
    await runtime.publish_message(AskToGreet("Hello World!"), topic_id=DefaultTopicId())

    # Just to keep the runtime running
    try:
        await asyncio.sleep(1000000)
    except KeyboardInterrupt:
        pass
    await runtime.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Full DEBUG logging for this demo, including autogen_core internals.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("autogen_core")
    logger.setLevel(logging.DEBUG)
    asyncio.run(main())
|
||||
@@ -0,0 +1,9 @@
|
||||
"""
|
||||
The :mod:`autogen_core.application` module provides implementations of core components that are used to compose an application.
|
||||
"""
|
||||
|
||||
from ._host_runtime_servicer import HostRuntimeServicer
|
||||
from ._single_threaded_agent_runtime import SingleThreadedAgentRuntime
|
||||
from ._worker_runtime import WorkerAgentRuntime
|
||||
|
||||
__all__ = ["SingleThreadedAgentRuntime", "WorkerAgentRuntime", "HostRuntimeServicer"]
|
||||
@@ -0,0 +1,79 @@
|
||||
from collections import defaultdict
|
||||
from typing import Awaitable, Callable, DefaultDict, List, Set
|
||||
|
||||
from ..base._agent import Agent
|
||||
from ..base._agent_id import AgentId
|
||||
from ..base._agent_type import AgentType
|
||||
from ..base._subscription import Subscription
|
||||
from ..base._topic import TopicId
|
||||
|
||||
|
||||
async def get_impl(
    *,
    id_or_type: AgentId | AgentType | str,
    key: str,
    lazy: bool,
    instance_getter: Callable[[AgentId], Awaitable[Agent]],
) -> AgentId:
    """Resolve `id_or_type` to a concrete AgentId, optionally forcing instantiation.

    A full AgentId is returned unchanged. An AgentType or bare type string is
    combined with `key` to form the id. Unless `lazy` is true, the agent is
    instantiated via `instance_getter` before the id is returned.
    """
    if isinstance(id_or_type, AgentId):
        resolved = id_or_type
    else:
        agent_type = id_or_type if isinstance(id_or_type, str) else id_or_type.type
        resolved = AgentId(agent_type, key)

    if not lazy:
        await instance_getter(resolved)

    return resolved
|
||||
|
||||
|
||||
class SubscriptionManager:
    """Tracks subscriptions and resolves which agents receive a given topic.

    Recipient lists are computed lazily per topic and cached; the cache is
    rebuilt from scratch whenever a subscription is removed.
    """

    def __init__(self) -> None:
        # All registered subscriptions, in registration order.
        self._subscriptions: List[Subscription] = []
        # Topics for which the recipient cache has been populated.
        self._seen_topics: Set[TopicId] = set()
        # Cached topic -> recipients mapping derived from the subscriptions.
        self._subscribed_recipients: DefaultDict[TopicId, List[AgentId]] = defaultdict(list)

    async def add_subscription(self, subscription: Subscription) -> None:
        """Add a new subscription.

        Raises:
            ValueError: If a subscription with the same id already exists.
            NotImplementedError: If any topic has already been seen; adding
                subscriptions after that point is not yet supported.
        """
        # Check if the subscription already exists
        if any(sub.id == subscription.id for sub in self._subscriptions):
            raise ValueError("Subscription already exists")

        if len(self._seen_topics) > 0:
            raise NotImplementedError("Cannot add subscription after topics have been seen yet")

        self._subscriptions.append(subscription)

    async def remove_subscription(self, id: str) -> None:
        """Remove the subscription with the given id and rebuild the cache.

        Raises:
            ValueError: If no subscription with the given id exists.
        """
        # Check if the subscription exists
        if not any(sub.id == id for sub in self._subscriptions):
            raise ValueError("Subscription does not exist")

        self._subscriptions = [sub for sub in self._subscriptions if sub.id != id]

        # Rebuild the subscriptions
        self._rebuild_subscriptions(self._seen_topics)

    async def get_subscribed_recipients(self, topic: TopicId) -> List[AgentId]:
        """Return the agents subscribed to `topic`, computing the list on first use."""
        if topic not in self._seen_topics:
            self._build_for_new_topic(topic)
        return self._subscribed_recipients[topic]

    # TODO: optimize this...
    def _rebuild_subscriptions(self, topics: Set[TopicId]) -> None:
        """Recompute the recipient cache for `topics` from scratch."""
        self._subscribed_recipients.clear()
        # BUG FIX: `_seen_topics` must also be reset, otherwise
        # _build_for_new_topic early-returns on every already-seen topic and
        # the cleared cache is never repopulated. `topics` may alias
        # self._seen_topics, so snapshot it before clearing.
        snapshot = list(topics)
        self._seen_topics.clear()
        for topic in snapshot:
            self._build_for_new_topic(topic)

    def _build_for_new_topic(self, topic: TopicId) -> None:
        """Populate the recipient cache for a topic not seen before."""
        if topic in self._seen_topics:
            return

        self._seen_topics.add(topic)
        for subscription in self._subscriptions:
            if subscription.is_match(topic):
                self._subscribed_recipients[topic].append(subscription.map_to_agent(topic))
|
||||
@@ -0,0 +1,206 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from _collections_abc import AsyncIterator, Iterator
|
||||
from asyncio import Future, Task
|
||||
from typing import Any, Dict, Set
|
||||
|
||||
import grpc
|
||||
|
||||
from ..base import TopicId
|
||||
from ..components import TypeSubscription
|
||||
from ._helpers import SubscriptionManager
|
||||
from .protos import agent_worker_pb2, agent_worker_pb2_grpc
|
||||
|
||||
logger = logging.getLogger("autogen_core")
|
||||
event_logger = logging.getLogger("autogen_core.events")
|
||||
|
||||
|
||||
class HostRuntimeServicer(agent_worker_pb2_grpc.AgentRpcServicer):
    """A gRPC servicer that hosts message delivery service for agents.

    Each connected worker gets a numeric client id and a dedicated send queue;
    requests, responses, events, registrations, and subscriptions arriving on
    the bidirectional stream are dispatched to background tasks.
    """

    def __init__(self) -> None:
        # Monotonically increasing id handed to each newly connected client.
        self._client_id = 0
        self._client_id_lock = asyncio.Lock()
        # client id -> queue of messages waiting to be streamed to that client.
        self._send_queues: Dict[int, asyncio.Queue[agent_worker_pb2.Message]] = {}
        self._agent_type_to_client_id_lock = asyncio.Lock()
        # agent type name -> client id that registered it.
        self._agent_type_to_client_id: Dict[str, int] = {}
        # client id -> {request id -> future resolved by that client's response}.
        self._pending_requests: Dict[int, Dict[str, Future[Any]]] = {}
        # Strong references so background tasks are not garbage-collected early.
        self._background_tasks: Set[Task[Any]] = set()
        self._subscription_manager = SubscriptionManager()

    async def OpenChannel(  # type: ignore
        self,
        request_iterator: AsyncIterator[agent_worker_pb2.Message],
        context: grpc.aio.ServicerContext[agent_worker_pb2.Message, agent_worker_pb2.Message],
    ) -> Iterator[agent_worker_pb2.Message] | AsyncIterator[agent_worker_pb2.Message]:  # type: ignore
        """Handle one worker's bidirectional stream for its whole lifetime."""
        # Acquire the lock to get a new client id.
        async with self._client_id_lock:
            self._client_id += 1
            client_id = self._client_id

        # Register the client with the server and create a send queue for the client.
        send_queue: asyncio.Queue[agent_worker_pb2.Message] = asyncio.Queue()
        self._send_queues[client_id] = send_queue
        logger.info(f"Client {client_id} connected.")

        try:
            # Concurrently handle receiving messages from the client and sending messages to the client.
            # This task will receive messages from the client.
            receiving_task = asyncio.create_task(self._receive_messages(client_id, request_iterator))

            # Return an async generator that will yield messages from the send queue to the client.
            while True:
                message = await send_queue.get()
                # Yield the message to the client.
                try:
                    yield message
                except Exception as e:
                    logger.error(f"Failed to send message to client {client_id}: {e}", exc_info=True)
                    break
                logger.info(f"Sent message to client {client_id}: {message}")
            # Wait for the receiving task to finish.
            # NOTE: only reached after the send loop breaks on a failed yield.
            await receiving_task

        finally:
            # Clean up the client connection.
            del self._send_queues[client_id]
            # Cancel pending requests sent to this client.
            for future in self._pending_requests.pop(client_id, {}).values():
                future.cancel()
            # Remove the client id from the agent type to client id mapping.
            async with self._agent_type_to_client_id_lock:
                agent_types = [
                    agent_type for agent_type, id_ in self._agent_type_to_client_id.items() if id_ == client_id
                ]
                for agent_type in agent_types:
                    del self._agent_type_to_client_id[agent_type]
            logger.info(f"Client {client_id} disconnected.")

    def _raise_on_exception(self, task: Task[Any]) -> None:
        """Done-callback that surfaces any exception raised by a background task."""
        exception = task.exception()
        if exception is not None:
            raise exception

    async def _receive_messages(
        self, client_id: int, request_iterator: AsyncIterator[agent_worker_pb2.Message]
    ) -> None:
        """Dispatch each message arriving from `client_id` to a background task."""
        # Receive messages from the client and process them.
        async for message in request_iterator:
            logger.info(f"Received message from client {client_id}: {message}")
            oneofcase = message.WhichOneof("message")
            match oneofcase:
                case "request":
                    request: agent_worker_pb2.RpcRequest = message.request
                    task = asyncio.create_task(self._process_request(request, client_id))
                    self._background_tasks.add(task)
                    task.add_done_callback(self._raise_on_exception)
                    task.add_done_callback(self._background_tasks.discard)
                case "response":
                    response: agent_worker_pb2.RpcResponse = message.response
                    task = asyncio.create_task(self._process_response(response, client_id))
                    self._background_tasks.add(task)
                    task.add_done_callback(self._raise_on_exception)
                    task.add_done_callback(self._background_tasks.discard)
                case "event":
                    event: agent_worker_pb2.Event = message.event
                    task = asyncio.create_task(self._process_event(event))
                    self._background_tasks.add(task)
                    task.add_done_callback(self._raise_on_exception)
                    task.add_done_callback(self._background_tasks.discard)
                case "registerAgentType":
                    register_agent_type: agent_worker_pb2.RegisterAgentType = message.registerAgentType
                    task = asyncio.create_task(self._process_register_agent_type(register_agent_type, client_id))
                    self._background_tasks.add(task)
                    task.add_done_callback(self._raise_on_exception)
                    task.add_done_callback(self._background_tasks.discard)
                case "addSubscription":
                    add_subscription: agent_worker_pb2.AddSubscription = message.addSubscription
                    task = asyncio.create_task(self._process_add_subscription(add_subscription))
                    self._background_tasks.add(task)
                    task.add_done_callback(self._raise_on_exception)
                    task.add_done_callback(self._background_tasks.discard)
                case None:
                    logger.warning("Received empty message")

    async def _process_request(self, request: agent_worker_pb2.RpcRequest, client_id: int) -> None:
        """Forward a request to the client hosting the target agent type and
        arrange for the eventual response to be sent back to the requester."""
        # Deliver the message to a client given the target agent type.
        async with self._agent_type_to_client_id_lock:
            target_client_id = self._agent_type_to_client_id.get(request.target.name)
        if target_client_id is None:
            logger.error(f"Agent {request.target.name} not found, failed to deliver message.")
            return
        target_send_queue = self._send_queues.get(target_client_id)
        if target_send_queue is None:
            logger.error(f"Client {target_client_id} not found, failed to deliver message.")
            return
        await target_send_queue.put(agent_worker_pb2.Message(request=request))

        # Create a future to wait for the response.
        future = asyncio.get_event_loop().create_future()
        self._pending_requests.setdefault(client_id, {})[request.request_id] = future

        # Create a task to wait for the response and send it back to the client.
        send_response_task = asyncio.create_task(self._wait_and_send_response(future, client_id))
        self._background_tasks.add(send_response_task)
        send_response_task.add_done_callback(self._raise_on_exception)
        send_response_task.add_done_callback(self._background_tasks.discard)

    async def _wait_and_send_response(self, future: Future[agent_worker_pb2.RpcResponse], client_id: int) -> None:
        """Wait for a response future and queue it for delivery to `client_id`."""
        response = await future
        message = agent_worker_pb2.Message(response=response)
        send_queue = self._send_queues.get(client_id)
        if send_queue is None:
            logger.error(f"Client {client_id} not found, failed to send response message.")
            return
        await send_queue.put(message)

    async def _process_response(self, response: agent_worker_pb2.RpcResponse, client_id: int) -> None:
        """Complete the pending future matching this response's request id."""
        # Setting the result of the future will send the response back to the original sender.
        future = self._pending_requests[client_id].pop(response.request_id)
        future.set_result(response)

    async def _process_event(self, event: agent_worker_pb2.Event) -> None:
        """Fan an event out to every client hosting a subscribed agent type."""
        topic_id = TopicId(type=event.topic_type, source=event.topic_source)
        recipients = await self._subscription_manager.get_subscribed_recipients(topic_id)
        # Get the client ids of the recipients.
        async with self._agent_type_to_client_id_lock:
            client_ids: Set[int] = set()
            for recipient in recipients:
                client_id = self._agent_type_to_client_id.get(recipient.type)
                if client_id is not None:
                    client_ids.add(client_id)
                else:
                    logger.error(f"Agent {recipient.type} and its client not found for topic {topic_id}.")
        # Deliver the event to clients.
        for client_id in client_ids:
            await self._send_queues[client_id].put(agent_worker_pb2.Message(event=event))

    async def _process_register_agent_type(
        self, register_agent_type: agent_worker_pb2.RegisterAgentType, client_id: int
    ) -> None:
        """Record that `client_id` hosts the given agent type; first writer wins."""
        # Register the agent type with the host runtime.
        async with self._agent_type_to_client_id_lock:
            if register_agent_type.type in self._agent_type_to_client_id:
                existing_client_id = self._agent_type_to_client_id[register_agent_type.type]
                logger.error(
                    f"Agent type {register_agent_type.type} already registered with client {existing_client_id}."
                )
                # TODO: send an error response back to the client.
            else:
                self._agent_type_to_client_id[register_agent_type.type] = client_id
                # TODO: send a success response back to the client.

    async def _process_add_subscription(self, add_subscription: agent_worker_pb2.AddSubscription) -> None:
        """Translate a protobuf subscription into a TypeSubscription and register it."""
        oneofcase = add_subscription.subscription.WhichOneof("subscription")
        match oneofcase:
            case "typeSubscription":
                type_subscription_msg: agent_worker_pb2.TypeSubscription = (
                    add_subscription.subscription.typeSubscription
                )
                type_subscription = TypeSubscription(
                    topic_type=type_subscription_msg.topic_type, agent_type=type_subscription_msg.agent_type
                )
                await self._subscription_manager.add_subscription(type_subscription)
                # TODO: send a success response back to the client.
            case None:
                logger.warning("Received empty subscription message")
|
||||
@@ -0,0 +1,553 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import inspect
|
||||
import logging
|
||||
import threading
|
||||
import warnings
|
||||
from asyncio import CancelledError, Future, Task
|
||||
from collections.abc import Sequence
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Any, Awaitable, Callable, Dict, List, Mapping, ParamSpec, Set, Type, TypeVar, cast
|
||||
|
||||
from ..base import (
|
||||
Agent,
|
||||
AgentId,
|
||||
AgentInstantiationContext,
|
||||
AgentMetadata,
|
||||
AgentRuntime,
|
||||
AgentType,
|
||||
CancellationToken,
|
||||
MessageContext,
|
||||
MessageHandlerContext,
|
||||
Subscription,
|
||||
SubscriptionInstantiationContext,
|
||||
TopicId,
|
||||
)
|
||||
from ..base.exceptions import MessageDroppedException
|
||||
from ..base.intervention import DropMessage, InterventionHandler
|
||||
from ._helpers import SubscriptionManager, get_impl
|
||||
|
||||
logger = logging.getLogger("autogen_core")
|
||||
event_logger = logging.getLogger("autogen_core.events")
|
||||
|
||||
|
||||
@dataclass(kw_only=True)
class PublishMessageEnvelope:
    """A message envelope for publishing messages to all agents that can handle
    the message of the type T."""

    # The payload being published.
    message: Any
    # Token the sender can use to cancel delivery.
    cancellation_token: CancellationToken
    # Publishing agent, or None when published from outside any agent.
    sender: AgentId | None
    # Topic the message is published to.
    topic_id: TopicId


@dataclass(kw_only=True)
class SendMessageEnvelope:
    """A message envelope for sending a message to a specific agent that can handle
    the message of the type T."""

    # The payload being sent.
    message: Any
    sender: AgentId | None
    recipient: AgentId
    # Future resolved with the recipient handler's return value.
    future: Future[Any]
    cancellation_token: CancellationToken


@dataclass(kw_only=True)
class ResponseMessageEnvelope:
    """A message envelope for sending a response to a message."""

    # The response payload.
    message: Any
    # Future awaited by the original sender.
    future: Future[Any]
    sender: AgentId
    recipient: AgentId | None
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T", bound=Agent)
|
||||
|
||||
|
||||
class Counter:
    """A thread-safe integer counter.

    Increments and decrements are serialized with a lock; `get` is lock-free,
    which is safe because reading an int is a single atomic operation under
    the GIL.
    """

    def __init__(self) -> None:
        self._count: int = 0
        self.threadLock = threading.Lock()

    def increment(self) -> None:
        """Atomically add one to the counter."""
        # `with` releases the lock even if the critical section raises,
        # unlike a bare acquire()/release() pair.
        with self.threadLock:
            self._count += 1

    def get(self) -> int:
        """Return the current count."""
        return self._count

    def decrement(self) -> None:
        """Atomically subtract one from the counter."""
        with self.threadLock:
            self._count -= 1
|
||||
|
||||
|
||||
class RunContext:
    """Drives a SingleThreadedAgentRuntime's processing loop as a background task."""

    class RunState(Enum):
        # Keep processing messages indefinitely.
        RUNNING = 0
        # Stop as soon as the current process_next completes.
        CANCELLED = 1
        # Stop once the runtime reports itself idle.
        UNTIL_IDLE = 2

    def __init__(self, runtime: SingleThreadedAgentRuntime) -> None:
        self._runtime = runtime
        self._run_state = RunContext.RunState.RUNNING
        # Start processing immediately on the current event loop.
        self._run_task = asyncio.create_task(self._run())
        # Serializes state transitions against the loop's state checks.
        self._lock = asyncio.Lock()

    async def _run(self) -> None:
        """Process messages until cancelled or (in UNTIL_IDLE mode) until idle."""
        while True:
            async with self._lock:
                if self._run_state == RunContext.RunState.CANCELLED:
                    return
                elif self._run_state == RunContext.RunState.UNTIL_IDLE:
                    if self._runtime.idle:
                        return

            # Processed outside the lock so stop()/stop_when_idle() can run.
            await self._runtime.process_next()

    async def stop(self) -> None:
        """Request cancellation and wait for the processing loop to exit."""
        async with self._lock:
            self._run_state = RunContext.RunState.CANCELLED
        await self._run_task

    async def stop_when_idle(self) -> None:
        """Switch to run-until-idle mode and wait for the loop to drain and exit."""
        async with self._lock:
            self._run_state = RunContext.RunState.UNTIL_IDLE
        await self._run_task
||||
|
||||
|
||||
class SingleThreadedAgentRuntime(AgentRuntime):
|
||||
    def __init__(self, *, intervention_handler: InterventionHandler | None = None) -> None:
        """Create a runtime that processes messages on the current event loop.

        Args:
            intervention_handler: Optional hook applied to messages before
                delivery — presumably it may drop them (`DropMessage` is
                imported for this); confirm against the processing loop.
        """
        # FIFO queue of envelopes awaiting processing.
        self._message_queue: List[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope] = []
        # (namespace, type) -> List[AgentId]
        self._agent_factories: Dict[
            str, Callable[[], Agent | Awaitable[Agent]] | Callable[[AgentRuntime, AgentId], Agent | Awaitable[Agent]]
        ] = {}
        # Lazily-instantiated agents, keyed by id.
        self._instantiated_agents: Dict[AgentId, Agent] = {}
        self._intervention_handler = intervention_handler
        # Thread-safe count of handler tasks still in flight.
        self._outstanding_tasks = Counter()
        # Strong references so background tasks are not garbage-collected early.
        self._background_tasks: Set[Task[Any]] = set()
        self._subscription_manager = SubscriptionManager()
        # Set while the runtime is started; None when stopped.
        self._run_context: RunContext | None = None
|
||||
|
||||
    @property
    def unprocessed_messages(
        self,
    ) -> Sequence[PublishMessageEnvelope | SendMessageEnvelope | ResponseMessageEnvelope]:
        """The envelopes currently queued and not yet processed."""
        return self._message_queue

    @property
    def outstanding_tasks(self) -> int:
        """Number of handler tasks still in flight."""
        return self._outstanding_tasks.get()

    @property
    def _known_agent_names(self) -> Set[str]:
        """The set of registered agent type names."""
        return set(self._agent_factories.keys())
|
||||
|
||||
    # Returns the response of the message
    async def send_message(
        self,
        message: Any,
        recipient: AgentId,
        *,
        sender: AgentId | None = None,
        cancellation_token: CancellationToken | None = None,
    ) -> Any:
        """Queue `message` for direct delivery to `recipient` and await its response.

        Raises:
            Exception: "Recipient not found" when the recipient's type is not
                registered; otherwise whatever the recipient's handler raises.
        """
        if cancellation_token is None:
            cancellation_token = CancellationToken()

        # event_logger.info(
        #     MessageEvent(
        #         payload=message,
        #         sender=sender,
        #         receiver=recipient,
        #         kind=MessageKind.DIRECT,
        #         delivery_stage=DeliveryStage.SEND,
        #     )
        # )

        future = asyncio.get_event_loop().create_future()
        if recipient.type not in self._known_agent_names:
            # NOTE(review): the exception is set on the future, yet the envelope
            # is still enqueued below — confirm the processing loop tolerates a
            # future whose exception is already set.
            future.set_exception(Exception("Recipient not found"))

        content = message.__dict__ if hasattr(message, "__dict__") else message
        logger.info(f"Sending message of type {type(message).__name__} to {recipient.type}: {content}")

        self._message_queue.append(
            SendMessageEnvelope(
                message=message,
                recipient=recipient,
                future=future,
                cancellation_token=cancellation_token,
                sender=sender,
            )
        )

        # Cancelling the token cancels the awaited future as well.
        cancellation_token.link_future(future)

        return await future
|
||||
|
||||
async def publish_message(
    self,
    message: Any,
    topic_id: TopicId,
    *,
    sender: AgentId | None = None,
    cancellation_token: CancellationToken | None = None,
) -> None:
    """Fire-and-forget publish of *message* to all subscribers of *topic_id*.

    Unlike ``send_message`` this does not await delivery: the envelope is
    only appended to the queue and handled by a later ``process_next`` call.
    """
    if cancellation_token is None:
        cancellation_token = CancellationToken()
    content = message.__dict__ if hasattr(message, "__dict__") else message
    logger.info(f"Publishing message of type {type(message).__name__} to all subscribers: {content}")

    # event_logger.info(MessageEvent(..., kind=PUBLISH, stage=SEND)) — disabled.

    self._message_queue.append(
        PublishMessageEnvelope(
            message=message, cancellation_token=cancellation_token, sender=sender, topic_id=topic_id
        )
    )
|
||||
|
||||
async def save_state(self) -> Mapping[str, Any]:
    """Collect per-agent state for every agent instantiated so far.

    Keys are ``str(agent_id)``; agents never instantiated contribute nothing.
    NOTE(review): assumes ``Agent.save_state()`` is synchronous here — confirm
    against the Agent protocol.
    """
    state: Dict[str, Dict[str, Any]] = {}
    for agent_id in self._instantiated_agents:
        state[str(agent_id)] = dict((await self._get_agent(agent_id)).save_state())
    return state
|
||||
|
||||
async def load_state(self, state: Mapping[str, Any]) -> None:
    """Restore agent state previously produced by ``save_state``.

    Entries whose agent type is not registered with this runtime are
    silently skipped.
    """
    for agent_id_str in state:
        agent_id = AgentId.from_str(agent_id_str)
        if agent_id.type in self._known_agent_names:
            # Instantiates the agent if needed, then hands it its saved state.
            (await self._get_agent(agent_id)).load_state(state[str(agent_id)])
|
||||
|
||||
async def _process_send(self, message_envelope: SendMessageEnvelope) -> None:
    """Deliver one direct (RPC) envelope to its recipient agent.

    On success a ResponseMessageEnvelope is queued so the caller's future is
    resolved by a later ``_process_response``; on any exception the future is
    failed immediately.  The outstanding-task counter is decremented on every
    exit path exactly once.
    """
    recipient = message_envelope.recipient
    # todo: check if recipient is in the known namespaces
    # assert recipient in self._agents

    try:
        # TODO use id
        sender_name = message_envelope.sender.type if message_envelope.sender is not None else "Unknown"
        logger.info(
            f"Calling message handler for {recipient} with message type {type(message_envelope.message).__name__} sent by {sender_name}"
        )
        # event_logger.info(MessageEvent(..., kind=DIRECT, stage=DELIVER)) — disabled.
        recipient_agent = await self._get_agent(recipient)
        message_context = MessageContext(
            sender=message_envelope.sender,
            topic_id=None,  # direct sends have no topic
            is_rpc=True,
            cancellation_token=message_envelope.cancellation_token,
        )
        with MessageHandlerContext.populate_context(recipient_agent.id):
            response = await recipient_agent.on_message(
                message_envelope.message,
                ctx=message_context,
            )
    except BaseException as e:
        # Propagate any failure (including _get_agent lookup errors) to the
        # original caller via the envelope's future.
        message_envelope.future.set_exception(e)
        self._outstanding_tasks.decrement()
        return

    # Queue the response; sender/recipient are swapped for the reply leg.
    self._message_queue.append(
        ResponseMessageEnvelope(
            message=response,
            future=message_envelope.future,
            sender=message_envelope.recipient,
            recipient=message_envelope.sender,
        )
    )
    self._outstanding_tasks.decrement()
|
||||
|
||||
async def _process_publish(self, message_envelope: PublishMessageEnvelope) -> None:
    """Deliver a published envelope to every subscriber of its topic.

    Handlers run concurrently via ``asyncio.gather``; responses to published
    messages are currently ignored.  The outstanding-task counter is
    decremented exactly once, in ``finally``.
    """
    responses: List[Awaitable[Any]] = []
    recipients = await self._subscription_manager.get_subscribed_recipients(message_envelope.topic_id)
    for agent_id in recipients:
        # Avoid sending the message back to the sender.
        if message_envelope.sender is not None and agent_id == message_envelope.sender:
            continue

        sender_agent = (
            await self._get_agent(message_envelope.sender) if message_envelope.sender is not None else None
        )
        sender_name = str(sender_agent.id) if sender_agent is not None else "Unknown"
        logger.info(
            f"Calling message handler for {agent_id.type} with message type {type(message_envelope.message).__name__} published by {sender_name}"
        )
        # event_logger.info(MessageEvent(..., kind=PUBLISH, stage=DELIVER)) — disabled.
        message_context = MessageContext(
            sender=message_envelope.sender,
            topic_id=message_envelope.topic_id,
            is_rpc=False,
            cancellation_token=message_envelope.cancellation_token,
        )
        agent = await self._get_agent(agent_id)
        with MessageHandlerContext.populate_context(agent.id):
            future = agent.on_message(
                message_envelope.message,
                ctx=message_context,
            )
        responses.append(future)

    try:
        _all_responses = await asyncio.gather(*responses)
    except CancelledError:
        # Cancellation is expected during shutdown — keep it out of the logs.
        # BUGFIX: the original also decremented the counter here before
        # returning; ``finally`` still ran afterwards, double-decrementing
        # and corrupting the idle/outstanding-task accounting.
        return
    except BaseException:
        logger.error("Error processing publish message", exc_info=True)
    finally:
        self._outstanding_tasks.decrement()
    # TODO if responses are given for a publish
||||
|
||||
async def _process_response(self, message_envelope: ResponseMessageEnvelope) -> None:
    """Complete an RPC by resolving the caller's future with the response.

    This is the final leg of a send: the awaiting ``send_message`` call
    wakes up once ``set_result`` runs.
    """
    content = (
        message_envelope.message.__dict__
        if hasattr(message_envelope.message, "__dict__")
        else message_envelope.message
    )
    logger.info(
        f"Resolving response with message type {type(message_envelope.message).__name__} for recipient {message_envelope.recipient} from {message_envelope.sender.type}: {content}"
    )
    # event_logger.info(MessageEvent(..., kind=RESPOND, stage=DELIVER)) — disabled.
    self._outstanding_tasks.decrement()
    message_envelope.future.set_result(message_envelope.message)
|
||||
|
||||
async def process_next(self) -> None:
    """Process the next message in the queue.

    Pops one envelope, routes it through the optional intervention handler
    (which may rewrite or drop it), then dispatches processing to a
    background task.  Yields to the event loop before returning so spawned
    tasks get a chance to run.
    """

    if len(self._message_queue) == 0:
        # Yield control to the event loop to allow other tasks to run
        await asyncio.sleep(0)
        return

    # FIFO: oldest envelope first.
    message_envelope = self._message_queue.pop(0)

    match message_envelope:
        case SendMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future):
            if self._intervention_handler is not None:
                try:
                    temp_message = await self._intervention_handler.on_send(
                        message, sender=sender, recipient=recipient
                    )
                except BaseException as e:
                    # Intervention failure fails the caller's future directly.
                    future.set_exception(e)
                    return
                if temp_message is DropMessage or isinstance(temp_message, DropMessage):
                    future.set_exception(MessageDroppedException())
                    return

                # Handler may have rewritten the message.
                message_envelope.message = temp_message
            self._outstanding_tasks.increment()
            task = asyncio.create_task(self._process_send(message_envelope))
            # Keep a strong reference so the task isn't garbage-collected mid-flight.
            self._background_tasks.add(task)
            task.add_done_callback(self._background_tasks.discard)
        case PublishMessageEnvelope(
            message=message,
            sender=sender,
        ):
            if self._intervention_handler is not None:
                try:
                    temp_message = await self._intervention_handler.on_publish(message, sender=sender)
                except BaseException as e:
                    # TODO: we should raise the intervention exception to the publisher.
                    logger.error(f"Exception raised in in intervention handler: {e}", exc_info=True)
                    return
                if temp_message is DropMessage or isinstance(temp_message, DropMessage):
                    # TODO log message dropped
                    return

                message_envelope.message = temp_message
            self._outstanding_tasks.increment()
            task = asyncio.create_task(self._process_publish(message_envelope))
            self._background_tasks.add(task)
            task.add_done_callback(self._background_tasks.discard)
        case ResponseMessageEnvelope(message=message, sender=sender, recipient=recipient, future=future):
            if self._intervention_handler is not None:
                try:
                    temp_message = await self._intervention_handler.on_response(
                        message, sender=sender, recipient=recipient
                    )
                except BaseException as e:
                    # TODO: should we raise the exception to sender of the response instead?
                    future.set_exception(e)
                    return
                if temp_message is DropMessage or isinstance(temp_message, DropMessage):
                    future.set_exception(MessageDroppedException())
                    return

                message_envelope.message = temp_message
            self._outstanding_tasks.increment()
            task = asyncio.create_task(self._process_response(message_envelope))
            self._background_tasks.add(task)
            task.add_done_callback(self._background_tasks.discard)

    # Yield control to the message loop to allow other tasks to run
    await asyncio.sleep(0)
|
||||
|
||||
@property
def idle(self) -> bool:
    """True when nothing is queued and no handler task is still in flight."""
    queue_empty = len(self._message_queue) == 0
    no_outstanding = self._outstanding_tasks.get() == 0
    return queue_empty and no_outstanding
|
||||
|
||||
def start(self) -> None:
    """Start the runtime message processing loop."""
    if self._run_context is None:
        self._run_context = RunContext(self)
    else:
        raise RuntimeError("Runtime is already started")
|
||||
|
||||
async def stop(self) -> None:
    """Stop the runtime message processing loop."""
    run_context = self._run_context
    if run_context is None:
        raise RuntimeError("Runtime is not started")
    await run_context.stop()
    self._run_context = None
|
||||
|
||||
async def stop_when_idle(self) -> None:
    """Stop the runtime message processing loop when there is
    no outstanding message being processed or queued."""
    if self._run_context is None:
        raise RuntimeError("Runtime is not started")
    await self._run_context.stop_when_idle()
    # Allow a later start() to create a fresh run context.
    self._run_context = None
|
||||
|
||||
async def agent_metadata(self, agent: AgentId) -> AgentMetadata:
    """Return the metadata of *agent*, instantiating it if necessary."""
    return (await self._get_agent(agent)).metadata


async def agent_save_state(self, agent: AgentId) -> Mapping[str, Any]:
    """Return *agent*'s saved state (instantiates the agent if needed)."""
    return (await self._get_agent(agent)).save_state()


async def agent_load_state(self, agent: AgentId, state: Mapping[str, Any]) -> None:
    """Restore *state* into *agent* (instantiates the agent if needed)."""
    (await self._get_agent(agent)).load_state(state)
|
||||
|
||||
async def register(
    self,
    type: str,
    agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]],
    subscriptions: Callable[[], list[Subscription] | Awaitable[list[Subscription]]]
    | list[Subscription]
    | None = None,
) -> AgentType:
    """Register an agent factory under *type* and add its subscriptions.

    *subscriptions* may be a list or a (possibly async) zero-arg callable
    evaluated inside a SubscriptionInstantiationContext for this agent type.

    Raises:
        ValueError: if *type* is already registered.
    """
    if type in self._agent_factories:
        raise ValueError(f"Agent with type {type} already exists.")

    if subscriptions is not None:
        if callable(subscriptions):
            with SubscriptionInstantiationContext.populate_context(AgentType(type)):
                subscriptions_list_result = subscriptions()
                if inspect.isawaitable(subscriptions_list_result):
                    subscriptions_list = await subscriptions_list_result
                else:
                    subscriptions_list = subscriptions_list_result
        else:
            subscriptions_list = subscriptions

        for subscription in subscriptions_list:
            await self.add_subscription(subscription)

    # Factory is stored only after subscriptions were added successfully.
    self._agent_factories[type] = agent_factory
    return AgentType(type)
|
||||
|
||||
async def _invoke_agent_factory(
    self,
    agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]],
    agent_id: AgentId,
) -> T:
    """Instantiate an agent via its factory inside an AgentInstantiationContext.

    Supports zero-argument factories (preferred) and deprecated two-argument
    ``(runtime, agent_id)`` factories; either may return an awaitable.

    Raises:
        ValueError: if the factory takes a number of arguments other than 0 or 2.
    """
    with AgentInstantiationContext.populate_context((self, agent_id)):
        # Inspect the signature once instead of once per branch.
        param_count = len(inspect.signature(agent_factory).parameters)
        if param_count == 0:
            factory_one = cast(Callable[[], T], agent_factory)
            agent = factory_one()
        elif param_count == 2:
            warnings.warn(
                "Agent factories that take two arguments are deprecated. Use AgentInstantiationContext instead. Two arg factories will be removed in a future version.",
                stacklevel=2,
            )
            factory_two = cast(Callable[[AgentRuntime, AgentId], T], agent_factory)
            agent = factory_two(self, agent_id)
        else:
            raise ValueError("Agent factory must take 0 or 2 arguments.")

        if inspect.isawaitable(agent):
            return cast(T, await agent)

    return agent
|
||||
|
||||
async def _get_agent(self, agent_id: AgentId) -> Agent:
    """Return the cached agent for *agent_id*, instantiating it on first use.

    Raises:
        LookupError: if no factory is registered for the agent's type.
    """
    cached = self._instantiated_agents.get(agent_id)
    if cached is not None:
        return cached

    try:
        agent_factory = self._agent_factories[agent_id.type]
    except KeyError:
        raise LookupError(f"Agent with name {agent_id.type} not found.") from None

    agent = await self._invoke_agent_factory(agent_factory, agent_id)
    self._instantiated_agents[agent_id] = agent
    return agent
|
||||
|
||||
# TODO: uncomment out the following type ignore when this is fixed in mypy: https://github.com/python/mypy/issues/3737
async def try_get_underlying_agent_instance(self, id: AgentId, type: Type[T] = Agent) -> T:  # type: ignore[assignment]
    """Return the concrete in-process agent instance for *id*, checked against *type*.

    Raises:
        LookupError: if the agent type is not registered.
        TypeError: if the instance is not of the requested *type*.
    """
    if id.type not in self._agent_factories:
        raise LookupError(f"Agent with name {id.type} not found.")

    # TODO: check if remote
    agent_instance = await self._get_agent(id)

    if not isinstance(agent_instance, type):
        raise TypeError(f"Agent with name {id.type} is not of type {type.__name__}")

    return agent_instance
|
||||
|
||||
async def add_subscription(self, subscription: Subscription) -> None:
    """Register *subscription* with the runtime's subscription manager."""
    await self._subscription_manager.add_subscription(subscription)


async def remove_subscription(self, id: str) -> None:
    """Remove the subscription identified by *id*."""
    await self._subscription_manager.remove_subscription(id)
|
||||
|
||||
async def get(
    self, id_or_type: AgentId | AgentType | str, /, key: str = "default", *, lazy: bool = True
) -> AgentId:
    """Resolve *id_or_type* (+ *key*) to an AgentId, instantiating it unless *lazy*."""
    return await get_impl(
        id_or_type=id_or_type,
        key=key,
        lazy=lazy,
        # Non-lazy resolution forces instantiation through _get_agent.
        instance_getter=self._get_agent,
    )
|
||||
@@ -0,0 +1,494 @@
|
||||
import asyncio
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
import warnings
|
||||
from asyncio import Future, Task
|
||||
from collections import defaultdict
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
AsyncIterable,
|
||||
AsyncIterator,
|
||||
Awaitable,
|
||||
Callable,
|
||||
ClassVar,
|
||||
DefaultDict,
|
||||
Dict,
|
||||
List,
|
||||
Mapping,
|
||||
ParamSpec,
|
||||
Set,
|
||||
Type,
|
||||
TypeVar,
|
||||
cast,
|
||||
)
|
||||
|
||||
import grpc
|
||||
from grpc.aio import StreamStreamCall
|
||||
from typing_extensions import Self
|
||||
|
||||
from ..base import (
|
||||
MESSAGE_TYPE_REGISTRY,
|
||||
Agent,
|
||||
AgentId,
|
||||
AgentInstantiationContext,
|
||||
AgentMetadata,
|
||||
AgentRuntime,
|
||||
AgentType,
|
||||
CancellationToken,
|
||||
MessageContext,
|
||||
MessageHandlerContext,
|
||||
Subscription,
|
||||
SubscriptionInstantiationContext,
|
||||
TopicId,
|
||||
)
|
||||
from ..components import TypeSubscription
|
||||
from ._helpers import SubscriptionManager, get_impl
|
||||
from .protos import agent_worker_pb2, agent_worker_pb2_grpc
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .protos.agent_worker_pb2_grpc import AgentRpcAsyncStub
|
||||
|
||||
logger = logging.getLogger("autogen_core")
|
||||
event_logger = logging.getLogger("autogen_core.events")
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T", bound=Agent)
|
||||
|
||||
|
||||
class QueueAsyncIterable(AsyncIterator[Any], AsyncIterable[Any]):
    """Adapt an ``asyncio.Queue`` into an async iterator.

    Each ``__anext__`` awaits the next queued item.  The stream never raises
    ``StopAsyncIteration`` — it is unbounded by design (a live gRPC channel).
    """

    def __init__(self, queue: asyncio.Queue[Any]) -> None:
        self._queue = queue

    async def __anext__(self) -> Any:
        item = await self._queue.get()
        return item

    def __aiter__(self) -> AsyncIterator[Any]:
        return self
|
||||
|
||||
|
||||
class HostConnection:
    """A bidirectional gRPC stream connection to the agent host.

    Outgoing protobuf ``Message``s are pushed onto ``_send_queue`` (drained by
    the stream via :class:`QueueAsyncIterable`); incoming messages are read in
    a background task and buffered in ``_recv_queue``.
    """

    # Client-side retry policy applied to every method on the channel.
    DEFAULT_GRPC_CONFIG: ClassVar[Mapping[str, Any]] = {
        "methodConfig": [
            {
                "name": [{}],
                "retryPolicy": {
                    "maxAttempts": 3,
                    "initialBackoff": "0.01s",
                    "maxBackoff": "5s",
                    "backoffMultiplier": 2,
                    "retryableStatusCodes": ["UNAVAILABLE"],
                },
            }
        ],
    }

    def __init__(self, channel: grpc.aio.Channel) -> None:  # type: ignore
        self._channel = channel
        self._send_queue = asyncio.Queue[agent_worker_pb2.Message]()
        self._recv_queue = asyncio.Queue[agent_worker_pb2.Message]()
        self._connection_task: Task[None] | None = None

    @classmethod
    async def from_connection_string(
        cls, connection_string: str, grpc_config: Mapping[str, Any] = DEFAULT_GRPC_CONFIG
    ) -> Self:
        """Open an insecure channel to *connection_string* and start the read loop."""
        logger.info("Connecting to %s", connection_string)
        channel = grpc.aio.insecure_channel(
            connection_string, options=[("grpc.service_config", json.dumps(grpc_config))]
        )
        instance = cls(channel)
        instance._connection_task = asyncio.create_task(
            instance._connect(channel, instance._send_queue, instance._recv_queue)
        )
        return instance

    async def close(self) -> None:
        """Close the channel and wait for the background connection task."""
        await self._channel.close()
        if self._connection_task is not None:
            await self._connection_task

    @staticmethod
    async def _connect(  # type: ignore
        channel: grpc.aio.Channel,
        send_queue: asyncio.Queue[agent_worker_pb2.Message],
        receive_queue: asyncio.Queue[agent_worker_pb2.Message],
    ) -> None:
        """Pump the bidirectional stream until EOF, reopening it on read errors."""
        stub: AgentRpcAsyncStub = agent_worker_pb2_grpc.AgentRpcStub(channel)  # type: ignore

        # TODO: where do exceptions from reading the iterable go? How do we recover from those?
        recv_stream: StreamStreamCall[agent_worker_pb2.Message, agent_worker_pb2.Message] = stub.OpenChannel(  # type: ignore
            QueueAsyncIterable(send_queue)
        )  # type: ignore

        while True:
            try:
                logger.info("Waiting for message from host")
                message = await recv_stream.read()  # type: ignore
                if message == grpc.aio.EOF:  # type: ignore
                    logger.info("EOF")
                    break
                message = cast(agent_worker_pb2.Message, message)
                logger.info(f"Received a message from host: {message}")
                await receive_queue.put(message)
                logger.info("Put message in receive queue")
            except Exception:
                # BUGFIX: was a trio of bare print() calls left over from
                # debugging; report through the module logger instead.
                logger.error("Error reading from host connection; reopening channel", exc_info=True)
                del recv_stream
                recv_stream = stub.OpenChannel(QueueAsyncIterable(send_queue))  # type: ignore

    async def send(self, message: agent_worker_pb2.Message) -> None:
        """Queue *message* for transmission to the host."""
        logger.info(f"Send message to host: {message}")
        await self._send_queue.put(message)
        logger.info("Put message in send queue")

    async def recv(self) -> agent_worker_pb2.Message:
        """Await and return the next message received from the host."""
        logger.info("Getting message from queue")
        return await self._recv_queue.get()
|
||||
|
||||
|
||||
class WorkerAgentRuntime(AgentRuntime):
|
||||
def __init__(self) -> None:
    """Initialize an unconnected worker runtime; call ``start`` to connect to a host."""
    # (topic_type, topic_source) -> subscribed agent ids.
    self._per_type_subscribers: DefaultDict[tuple[str, str], Set[AgentId]] = defaultdict(set)
    # Agent type name -> factory used to lazily instantiate agents.
    self._agent_factories: Dict[
        str, Callable[[], Agent | Awaitable[Agent]] | Callable[[AgentRuntime, AgentId], Agent | Awaitable[Agent]]
    ] = {}
    # Cache of already-created agent instances.
    self._instantiated_agents: Dict[AgentId, Agent] = {}
    self._known_namespaces: set[str] = set()
    # Background task draining messages from the host connection.
    self._read_task: None | Task[None] = None
    self._running = False
    # Outstanding RPC futures keyed by request id (string).
    self._pending_requests: Dict[str, Future[Any]] = {}
    self._pending_requests_lock = asyncio.Lock()
    self._next_request_id = 0
    self._host_connection: HostConnection | None = None
    # Strong references to spawned tasks so they aren't garbage-collected.
    self._background_tasks: Set[Task[Any]] = set()
    self._subscription_manager = SubscriptionManager()
|
||||
|
||||
async def start(self, host_connection_string: str) -> None:
    """Connect to the host at *host_connection_string* and start the read loop.

    Raises:
        ValueError: if the runtime is already running.
    """
    if self._running:
        raise ValueError("Runtime is already running.")
    logger.info(f"Connecting to host: {host_connection_string}")
    self._host_connection = await HostConnection.from_connection_string(host_connection_string)
    logger.info("connection")
    if self._read_task is None:
        self._read_task = asyncio.create_task(self._run_read_loop())
    self._running = True
|
||||
|
||||
def _raise_on_exception(self, task: Task[Any]) -> None:
    """Done-callback helper: re-raise any exception captured by *task*."""
    exc = task.exception()
    if exc is None:
        return
    raise exc
|
||||
|
||||
async def _run_read_loop(self) -> None:
|
||||
logger.info("Starting read loop")
|
||||
# TODO: catch exceptions and reconnect
|
||||
while self._running:
|
||||
try:
|
||||
message = await self._host_connection.recv() # type: ignore
|
||||
oneofcase = agent_worker_pb2.Message.WhichOneof(message, "message")
|
||||
match oneofcase:
|
||||
case "registerAgentType" | "addSubscription":
|
||||
logger.warn(f"Cant handle {oneofcase}, skipping.")
|
||||
case "request":
|
||||
request: agent_worker_pb2.RpcRequest = message.request
|
||||
task = asyncio.create_task(self._process_request(request))
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(self._raise_on_exception)
|
||||
task.add_done_callback(self._background_tasks.discard)
|
||||
case "response":
|
||||
response: agent_worker_pb2.RpcResponse = message.response
|
||||
task = asyncio.create_task(self._process_response(response))
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(self._raise_on_exception)
|
||||
task.add_done_callback(self._background_tasks.discard)
|
||||
case "event":
|
||||
event: agent_worker_pb2.Event = message.event
|
||||
task = asyncio.create_task(self._process_event(event))
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(self._raise_on_exception)
|
||||
task.add_done_callback(self._background_tasks.discard)
|
||||
case None:
|
||||
logger.warn("No message")
|
||||
except Exception as e:
|
||||
logger.error("Error in read loop", exc_info=e)
|
||||
|
||||
async def stop(self) -> None:
    """Stop the runtime: close the host connection and join the read loop.

    Clearing ``_running`` first lets the read loop exit its ``while`` check.
    """
    self._running = False
    if self._host_connection is not None:
        await self._host_connection.close()
    if self._read_task is not None:
        await self._read_task
|
||||
|
||||
@property
def _known_agent_names(self) -> Set[str]:
    """All agent type names registered with this worker."""
    return {name for name in self._agent_factories}
|
||||
|
||||
async def send_message(
    self,
    message: Any,
    recipient: AgentId,
    *,
    sender: AgentId | None = None,
    cancellation_token: CancellationToken | None = None,
) -> Any:
    """Send an RPC request to *recipient* via the host and await the response.

    A unique request id is allocated under the pending-requests lock; the
    matching future is resolved later by ``_process_response``.

    Raises:
        ValueError: if the runtime is not running.
        RuntimeError: if no host connection has been established.
    """
    if not self._running:
        raise ValueError("Runtime must be running when sending message.")
    if self._host_connection is None:
        raise RuntimeError("Host connection is not set.")
    # create a new future for the result
    future = asyncio.get_event_loop().create_future()
    async with self._pending_requests_lock:
        self._next_request_id += 1
        request_id = self._next_request_id
    request_id_str = str(request_id)
    self._pending_requests[request_id_str] = future
    data_type = MESSAGE_TYPE_REGISTRY.type_name(message)
    serialized_message = MESSAGE_TYPE_REGISTRY.serialize(message, type_name=data_type)
    runtime_message = agent_worker_pb2.Message(
        request=agent_worker_pb2.RpcRequest(
            request_id=request_id_str,
            target=agent_worker_pb2.AgentId(name=recipient.type, namespace=recipient.key),
            # BUGFIX: *sender* defaults to None; the original cast it to
            # AgentId and dereferenced it, raising AttributeError for
            # sender-less sends.  Leave the proto field unset instead.
            source=agent_worker_pb2.AgentId(name=sender.type, namespace=sender.key)
            if sender is not None
            else None,
            data_type=data_type,
            data=serialized_message,
        )
    )
    # TODO: Find a way to handle timeouts/errors
    task = asyncio.create_task(self._host_connection.send(runtime_message))
    self._background_tasks.add(task)
    task.add_done_callback(self._raise_on_exception)
    task.add_done_callback(self._background_tasks.discard)
    return await future
|
||||
|
||||
async def publish_message(
    self,
    message: Any,
    topic_id: TopicId,
    *,
    sender: AgentId | None = None,
    cancellation_token: CancellationToken | None = None,
) -> None:
    """Publish *message* to *topic_id* via the host as a fire-and-forget Event.

    Raises:
        ValueError: if the runtime is not running.
        RuntimeError: if no host connection has been established.
    """
    if not self._running:
        raise ValueError("Runtime must be running when publishing message.")
    if self._host_connection is None:
        raise RuntimeError("Host connection is not set.")
    message_type = MESSAGE_TYPE_REGISTRY.type_name(message)
    serialized_message = MESSAGE_TYPE_REGISTRY.serialize(message, type_name=message_type)
    runtime_message = agent_worker_pb2.Message(
        event=agent_worker_pb2.Event(
            topic_type=topic_id.type, topic_source=topic_id.source, data_type=message_type, data=serialized_message
        )
    )
    # Send in the background; _raise_on_exception surfaces send failures.
    task = asyncio.create_task(self._host_connection.send(runtime_message))
    self._background_tasks.add(task)
    task.add_done_callback(self._raise_on_exception)
    task.add_done_callback(self._background_tasks.discard)
|
||||
|
||||
# State management is not yet supported over the worker protocol; each of
# these raises until the corresponding host-side support exists.
async def save_state(self) -> Mapping[str, Any]:
    raise NotImplementedError("Saving state is not yet implemented.")


async def load_state(self, state: Mapping[str, Any]) -> None:
    raise NotImplementedError("Loading state is not yet implemented.")


async def agent_metadata(self, agent: AgentId) -> AgentMetadata:
    raise NotImplementedError("Agent metadata is not yet implemented.")


async def agent_save_state(self, agent: AgentId) -> Mapping[str, Any]:
    raise NotImplementedError("Agent save_state is not yet implemented.")


async def agent_load_state(self, agent: AgentId, state: Mapping[str, Any]) -> None:
    raise NotImplementedError("Agent load_state is not yet implemented.")
|
||||
|
||||
async def _process_request(self, request: agent_worker_pb2.RpcRequest) -> None:
    """Handle an inbound RPC from the host: invoke the target agent and reply.

    Handler exceptions are serialized back to the host as an error response
    rather than propagated.
    """
    assert self._host_connection is not None
    target = AgentId(request.target.name, request.target.namespace)
    source = AgentId(request.source.name, request.source.namespace)

    logging.info(f"Processing request from {source} to {target}")

    # Deserialize the message.
    message = MESSAGE_TYPE_REGISTRY.deserialize(request.data, type_name=request.data_type)

    # Get the target agent and prepare the message context.
    target_agent = await self._get_agent(target)
    message_context = MessageContext(
        sender=source,
        topic_id=None,
        is_rpc=True,
        cancellation_token=CancellationToken(),
    )

    # Call the target agent.
    try:
        with MessageHandlerContext.populate_context(target_agent.id):
            result = await target_agent.on_message(message, ctx=message_context)
    except BaseException as e:
        response_message = agent_worker_pb2.Message(
            response=agent_worker_pb2.RpcResponse(
                request_id=request.request_id,
                error=str(e),
            )
        )
        # Send the error response.
        await self._host_connection.send(response_message)
        return

    # Serialize the result.
    result_type = MESSAGE_TYPE_REGISTRY.type_name(result)
    serialized_result = MESSAGE_TYPE_REGISTRY.serialize(result, type_name=result_type)

    # Create the response message.
    response_message = agent_worker_pb2.Message(
        response=agent_worker_pb2.RpcResponse(
            request_id=request.request_id,
            result_type=result_type,
            result=serialized_result,
        )
    )

    # Send the response.
    await self._host_connection.send(response_message)
|
||||
|
||||
async def _process_response(self, response: agent_worker_pb2.RpcResponse) -> None:
    """Resolve the pending request future matching *response*.

    Error responses fail the future with an ``Exception`` carrying the host's
    error string; success responses deserialize and deliver the payload.
    """
    # Get the future waiting on this request id.
    future = self._pending_requests.pop(response.request_id)
    if len(response.error) > 0:
        # BUGFIX: error responses carry no payload, so don't deserialize —
        # the original always deserialized before checking the error field.
        future.set_exception(Exception(response.error))
    else:
        result = MESSAGE_TYPE_REGISTRY.deserialize(response.result, type_name=response.result_type)
        future.set_result(result)
|
||||
|
||||
async def _process_event(self, event: agent_worker_pb2.Event) -> None:
    """Deliver a host-published event to all locally subscribed agents.

    Handlers run concurrently; errors are logged, not propagated to the host.
    """
    message = MESSAGE_TYPE_REGISTRY.deserialize(event.data, type_name=event.data_type)
    topic_id = TopicId(event.topic_type, event.topic_source)
    # Get the recipients for the topic.
    recipients = await self._subscription_manager.get_subscribed_recipients(topic_id)
    # Send the message to each recipient.
    responses: List[Awaitable[Any]] = []
    for agent_id in recipients:
        # TODO: avoid sending to the sender.
        message_context = MessageContext(
            sender=None,  # sender identity is not carried on the wire for events
            topic_id=topic_id,
            is_rpc=False,
            cancellation_token=CancellationToken(),
        )
        agent = await self._get_agent(agent_id)
        with MessageHandlerContext.populate_context(agent.id):
            future = agent.on_message(message, ctx=message_context)
        responses.append(future)
    # Wait for all responses.
    try:
        await asyncio.gather(*responses)
    except BaseException as e:
        logger.error("Error handling event", exc_info=e)
|
||||
|
||||
async def register(
    self,
    type: str,
    agent_factory: Callable[[], T | Awaitable[T]],
    subscriptions: Callable[[], list[Subscription] | Awaitable[list[Subscription]]]
    | list[Subscription]
    | None = None,
) -> AgentType:
    """Register an agent factory locally and announce the type to the host.

    Raises:
        ValueError: if *type* is already registered.
        RuntimeError: if no host connection has been established.
    """
    if type in self._agent_factories:
        raise ValueError(f"Agent with type {type} already exists.")
    self._agent_factories[type] = agent_factory

    if self._host_connection is None:
        raise RuntimeError("Host connection is not set.")
    message = agent_worker_pb2.Message(registerAgentType=agent_worker_pb2.RegisterAgentType(type=type))
    await self._host_connection.send(message)

    if subscriptions is not None:
        if callable(subscriptions):
            # Evaluate (possibly async) subscription factory inside the
            # instantiation context for this agent type.
            with SubscriptionInstantiationContext.populate_context(AgentType(type)):
                subscriptions_list_result = subscriptions()
                if inspect.isawaitable(subscriptions_list_result):
                    subscriptions_list = await subscriptions_list_result
                else:
                    subscriptions_list = subscriptions_list_result
        else:
            subscriptions_list = subscriptions

        for subscription in subscriptions_list:
            await self.add_subscription(subscription)

    return AgentType(type)
|
||||
|
||||
async def _invoke_agent_factory(
    self,
    agent_factory: Callable[[], T | Awaitable[T]] | Callable[[AgentRuntime, AgentId], T | Awaitable[T]],
    agent_id: AgentId,
) -> T:
    """Instantiate an agent via its factory inside an AgentInstantiationContext.

    Supports zero-argument factories (preferred) and deprecated two-argument
    ``(runtime, agent_id)`` factories; either may return an awaitable.

    Raises:
        ValueError: if the factory takes a number of arguments other than 0 or 2.
    """
    with AgentInstantiationContext.populate_context((self, agent_id)):
        # Inspect the signature once instead of once per branch.
        param_count = len(inspect.signature(agent_factory).parameters)
        if param_count == 0:
            factory_one = cast(Callable[[], T], agent_factory)
            agent = factory_one()
        elif param_count == 2:
            warnings.warn(
                "Agent factories that take two arguments are deprecated. Use AgentInstantiationContext instead. Two arg factories will be removed in a future version.",
                stacklevel=2,
            )
            factory_two = cast(Callable[[AgentRuntime, AgentId], T], agent_factory)
            agent = factory_two(self, agent_id)
        else:
            raise ValueError("Agent factory must take 0 or 2 arguments.")

        if inspect.isawaitable(agent):
            return cast(T, await agent)

    return agent
|
||||
|
||||
async def _get_agent(self, agent_id: AgentId) -> Agent:
|
||||
if agent_id in self._instantiated_agents:
|
||||
return self._instantiated_agents[agent_id]
|
||||
|
||||
if agent_id.type not in self._agent_factories:
|
||||
raise ValueError(f"Agent with name {agent_id.type} not found.")
|
||||
|
||||
agent_factory = self._agent_factories[agent_id.type]
|
||||
agent = await self._invoke_agent_factory(agent_factory, agent_id)
|
||||
self._instantiated_agents[agent_id] = agent
|
||||
return agent
|
||||
|
||||
# TODO: uncomment out the following type ignore when this is fixed in mypy: https://github.com/python/mypy/issues/3737
|
||||
async def try_get_underlying_agent_instance(self, id: AgentId, type: Type[T] = Agent) -> T: # type: ignore[assignment]
|
||||
raise NotImplementedError("try_get_underlying_agent_instance is not yet implemented.")
|
||||
|
||||
async def add_subscription(self, subscription: Subscription) -> None:
|
||||
if self._host_connection is None:
|
||||
raise RuntimeError("Host connection is not set.")
|
||||
if not isinstance(subscription, TypeSubscription):
|
||||
raise ValueError("Only TypeSubscription is supported.")
|
||||
# Add to local subscription manager.
|
||||
await self._subscription_manager.add_subscription(subscription)
|
||||
# Send the subscription to the host.
|
||||
message = agent_worker_pb2.Message(
|
||||
addSubscription=agent_worker_pb2.AddSubscription(
|
||||
subscription=agent_worker_pb2.Subscription(
|
||||
typeSubscription=agent_worker_pb2.TypeSubscription(
|
||||
topic_type=subscription.topic_type, agent_type=subscription.agent_type
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
await self._host_connection.send(message)
|
||||
|
||||
async def remove_subscription(self, id: str) -> None:
|
||||
raise NotImplementedError("Subscriptions are not yet implemented.")
|
||||
|
||||
async def get(
|
||||
self, id_or_type: AgentId | AgentType | str, /, key: str = "default", *, lazy: bool = True
|
||||
) -> AgentId:
|
||||
return await get_impl(
|
||||
id_or_type=id_or_type,
|
||||
key=key,
|
||||
lazy=lazy,
|
||||
instance_getter=self._get_agent,
|
||||
)
|
||||
@@ -0,0 +1,18 @@
|
||||
from ._llm_usage import LLMUsageTracker
|
||||
|
||||
ROOT_LOGGER_NAME = "autogen_core"
|
||||
"""str: Logger name used for structured event logging"""
|
||||
|
||||
EVENT_LOGGER_NAME = "autogen_core.events"
|
||||
"""str: Logger name used for structured event logging"""
|
||||
|
||||
|
||||
TRACE_LOGGER_NAME = "autogen_core.trace"
|
||||
"""str: Logger name used for developer intended trace logging. The content and format of this log should not be depended upon."""
|
||||
|
||||
__all__ = [
|
||||
"ROOT_LOGGER_NAME",
|
||||
"EVENT_LOGGER_NAME",
|
||||
"TRACE_LOGGER_NAME",
|
||||
"LLMUsageTracker",
|
||||
]
|
||||
@@ -0,0 +1,57 @@
|
||||
import logging
|
||||
|
||||
from .events import LLMCallEvent
|
||||
|
||||
|
||||
class LLMUsageTracker(logging.Handler):
|
||||
def __init__(self) -> None:
|
||||
"""Logging handler that tracks the number of tokens used in the prompt and completion.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from autogen_core.application.logging import LLMUsageTracker, EVENT_LOGGER_NAME
|
||||
|
||||
# Set up the logging configuration to use the custom handler
|
||||
logger = logging.getLogger(EVENT_LOGGER_NAME)
|
||||
logger.setLevel(logging.INFO)
|
||||
llm_usage = LLMUsageTracker()
|
||||
logger.handlers = [llm_usage]
|
||||
|
||||
# ...
|
||||
|
||||
print(llm_usage.prompt_tokens)
|
||||
print(llm_usage.completion_tokens)
|
||||
|
||||
"""
|
||||
super().__init__()
|
||||
self._prompt_tokens = 0
|
||||
self._completion_tokens = 0
|
||||
|
||||
@property
|
||||
def tokens(self) -> int:
|
||||
return self._prompt_tokens + self._completion_tokens
|
||||
|
||||
@property
|
||||
def prompt_tokens(self) -> int:
|
||||
return self._prompt_tokens
|
||||
|
||||
@property
|
||||
def completion_tokens(self) -> int:
|
||||
return self._completion_tokens
|
||||
|
||||
def reset(self) -> None:
|
||||
self._prompt_tokens = 0
|
||||
self._completion_tokens = 0
|
||||
|
||||
def emit(self, record: logging.LogRecord) -> None:
|
||||
"""Emit the log record. To be used by the logging module."""
|
||||
try:
|
||||
# Use the StructuredMessage if the message is an instance of it
|
||||
if isinstance(record.msg, LLMCallEvent):
|
||||
event = record.msg
|
||||
self._prompt_tokens += event.prompt_tokens
|
||||
self._completion_tokens += event.completion_tokens
|
||||
except Exception:
|
||||
self.handleError(record)
|
||||
@@ -0,0 +1,84 @@
|
||||
import json
|
||||
from enum import Enum
|
||||
from typing import Any, cast
|
||||
|
||||
from autogen_core.base import AgentId
|
||||
|
||||
|
||||
class LLMCallEvent:
|
||||
def __init__(self, *, prompt_tokens: int, completion_tokens: int, **kwargs: Any) -> None:
|
||||
"""To be used by model clients to log the call to the LLM.
|
||||
|
||||
Args:
|
||||
prompt_tokens (int): Number of tokens used in the prompt.
|
||||
completion_tokens (int): Number of tokens used in the completion.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from autogen_core.application.logging import LLMCallEvent, EVENT_LOGGER_NAME
|
||||
|
||||
logger = logging.getLogger(EVENT_LOGGER_NAME)
|
||||
logger.info(LLMCallEvent(prompt_tokens=10, completion_tokens=20))
|
||||
|
||||
"""
|
||||
self.kwargs = kwargs
|
||||
self.kwargs["prompt_tokens"] = prompt_tokens
|
||||
self.kwargs["completion_tokens"] = completion_tokens
|
||||
self.kwargs["type"] = "LLMCall"
|
||||
|
||||
@property
|
||||
def prompt_tokens(self) -> int:
|
||||
return cast(int, self.kwargs["prompt_tokens"])
|
||||
|
||||
@property
|
||||
def completion_tokens(self) -> int:
|
||||
return cast(int, self.kwargs["completion_tokens"])
|
||||
|
||||
# This must output the event in a json serializable format
|
||||
def __str__(self) -> str:
|
||||
return json.dumps(self.kwargs)
|
||||
|
||||
|
||||
class MessageKind(Enum):
|
||||
DIRECT = 1
|
||||
PUBLISH = 2
|
||||
RESPOND = 3
|
||||
|
||||
|
||||
class DeliveryStage(Enum):
|
||||
SEND = 1
|
||||
DELIVER = 2
|
||||
|
||||
|
||||
class MessageEvent:
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
payload: Any,
|
||||
sender: AgentId | None,
|
||||
receiver: AgentId | None,
|
||||
kind: MessageKind,
|
||||
delivery_stage: DeliveryStage,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
self.kwargs = kwargs
|
||||
self.kwargs["payload"] = payload
|
||||
self.kwargs["sender"] = None if sender is None else str(sender)
|
||||
self.kwargs["receiver"] = None if receiver is None else str(receiver)
|
||||
self.kwargs["kind"] = kind
|
||||
self.kwargs["delivery_stage"] = delivery_stage
|
||||
self.kwargs["type"] = "Message"
|
||||
|
||||
@property
|
||||
def prompt_tokens(self) -> int:
|
||||
return cast(int, self.kwargs["prompt_tokens"])
|
||||
|
||||
@property
|
||||
def completion_tokens(self) -> int:
|
||||
return cast(int, self.kwargs["completion_tokens"])
|
||||
|
||||
# This must output the event in a json serializable format
|
||||
def __str__(self) -> str:
|
||||
return json.dumps(self.kwargs)
|
||||
@@ -0,0 +1,29 @@
|
||||
"""
|
||||
The :mod:`autogen_core.worker.protos` module provides Google Protobuf classes for agent-worker communication
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .agent_worker_pb2 import AgentId, Event, Message, RegisterAgentType, RpcRequest, RpcResponse
|
||||
from .agent_worker_pb2_grpc import AgentRpcServicer, AgentRpcStub, add_AgentRpcServicer_to_server
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .agent_worker_pb2_grpc import AgentRpcAsyncStub
|
||||
|
||||
__all__ = [
|
||||
"RpcRequest",
|
||||
"RpcResponse",
|
||||
"Event",
|
||||
"RegisterAgentType",
|
||||
"AgentRpcAsyncStub",
|
||||
"AgentRpcStub",
|
||||
"Message",
|
||||
"AgentId",
|
||||
]
|
||||
else:
|
||||
__all__ = ["RpcRequest", "RpcResponse", "Event", "RegisterAgentType", "AgentRpcStub", "Message", "AgentId"]
|
||||
@@ -0,0 +1,66 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# NO CHECKED-IN PROTOBUF GENCODE
|
||||
# source: agent_worker.proto
|
||||
# Protobuf Python Version: 5.27.2
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import runtime_version as _runtime_version
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
from google.protobuf.internal import builder as _builder
|
||||
_runtime_version.ValidateProtobufRuntimeVersion(
|
||||
_runtime_version.Domain.PUBLIC,
|
||||
5,
|
||||
27,
|
||||
2,
|
||||
'',
|
||||
'agent_worker.proto'
|
||||
)
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x61gent_worker.proto\x12\x06\x61gents\"*\n\x07\x41gentId\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\xf8\x01\n\nRpcRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x1f\n\x06source\x18\x02 \x01(\x0b\x32\x0f.agents.AgentId\x12\x1f\n\x06target\x18\x03 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0e\n\x06method\x18\x04 \x01(\t\x12\x11\n\tdata_type\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\t\x12\x32\n\x08metadata\x18\x07 \x03(\x0b\x32 .agents.RpcRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xbb\x01\n\x0bRpcResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x13\n\x0bresult_type\x18\x02 \x01(\t\x12\x0e\n\x06result\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\x12\x33\n\x08metadata\x18\x05 \x03(\x0b\x32!.agents.RpcResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc5\x01\n\x05\x45vent\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x12\n\ntopic_type\x18\x02 \x01(\t\x12\x14\n\x0ctopic_source\x18\x03 \x01(\t\x12\x11\n\tdata_type\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\t\x12-\n\x08metadata\x18\x06 \x03(\x0b\x32\x1b.agents.Event.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"!\n\x11RegisterAgentType\x12\x0c\n\x04type\x18\x01 \x01(\t\":\n\x10TypeSubscription\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"T\n\x0cSubscription\x12\x34\n\x10typeSubscription\x18\x01 \x01(\x0b\x32\x18.agents.TypeSubscriptionH\x00\x42\x0e\n\x0csubscription\"=\n\x0f\x41\x64\x64Subscription\x12*\n\x0csubscription\x18\x01 \x01(\x0b\x32\x14.agents.Subscription\"\xf0\x01\n\x07Message\x12%\n\x07request\x18\x01 \x01(\x0b\x32\x12.agents.RpcRequestH\x00\x12\'\n\x08response\x18\x02 
\x01(\x0b\x32\x13.agents.RpcResponseH\x00\x12\x1e\n\x05\x65vent\x18\x03 \x01(\x0b\x32\r.agents.EventH\x00\x12\x36\n\x11registerAgentType\x18\x04 \x01(\x0b\x32\x19.agents.RegisterAgentTypeH\x00\x12\x32\n\x0f\x61\x64\x64Subscription\x18\x05 \x01(\x0b\x32\x17.agents.AddSubscriptionH\x00\x42\t\n\x07message2?\n\x08\x41gentRpc\x12\x33\n\x0bOpenChannel\x12\x0f.agents.Message\x1a\x0f.agents.Message(\x01\x30\x01\x62\x06proto3')
|
||||
|
||||
_globals = globals()
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agent_worker_pb2', _globals)
|
||||
if not _descriptor._USE_C_DESCRIPTORS:
|
||||
DESCRIPTOR._loaded_options = None
|
||||
_globals['_RPCREQUEST_METADATAENTRY']._loaded_options = None
|
||||
_globals['_RPCREQUEST_METADATAENTRY']._serialized_options = b'8\001'
|
||||
_globals['_RPCRESPONSE_METADATAENTRY']._loaded_options = None
|
||||
_globals['_RPCRESPONSE_METADATAENTRY']._serialized_options = b'8\001'
|
||||
_globals['_EVENT_METADATAENTRY']._loaded_options = None
|
||||
_globals['_EVENT_METADATAENTRY']._serialized_options = b'8\001'
|
||||
_globals['_AGENTID']._serialized_start=30
|
||||
_globals['_AGENTID']._serialized_end=72
|
||||
_globals['_RPCREQUEST']._serialized_start=75
|
||||
_globals['_RPCREQUEST']._serialized_end=323
|
||||
_globals['_RPCREQUEST_METADATAENTRY']._serialized_start=276
|
||||
_globals['_RPCREQUEST_METADATAENTRY']._serialized_end=323
|
||||
_globals['_RPCRESPONSE']._serialized_start=326
|
||||
_globals['_RPCRESPONSE']._serialized_end=513
|
||||
_globals['_RPCRESPONSE_METADATAENTRY']._serialized_start=276
|
||||
_globals['_RPCRESPONSE_METADATAENTRY']._serialized_end=323
|
||||
_globals['_EVENT']._serialized_start=516
|
||||
_globals['_EVENT']._serialized_end=713
|
||||
_globals['_EVENT_METADATAENTRY']._serialized_start=276
|
||||
_globals['_EVENT_METADATAENTRY']._serialized_end=323
|
||||
_globals['_REGISTERAGENTTYPE']._serialized_start=715
|
||||
_globals['_REGISTERAGENTTYPE']._serialized_end=748
|
||||
_globals['_TYPESUBSCRIPTION']._serialized_start=750
|
||||
_globals['_TYPESUBSCRIPTION']._serialized_end=808
|
||||
_globals['_SUBSCRIPTION']._serialized_start=810
|
||||
_globals['_SUBSCRIPTION']._serialized_end=894
|
||||
_globals['_ADDSUBSCRIPTION']._serialized_start=896
|
||||
_globals['_ADDSUBSCRIPTION']._serialized_end=957
|
||||
_globals['_MESSAGE']._serialized_start=960
|
||||
_globals['_MESSAGE']._serialized_end=1200
|
||||
_globals['_AGENTRPC']._serialized_start=1202
|
||||
_globals['_AGENTRPC']._serialized_end=1265
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
@@ -0,0 +1,277 @@
|
||||
"""
|
||||
@generated by mypy-protobuf. Do not edit manually!
|
||||
isort:skip_file
|
||||
"""
|
||||
|
||||
import builtins
|
||||
import collections.abc
|
||||
import google.protobuf.descriptor
|
||||
import google.protobuf.internal.containers
|
||||
import google.protobuf.message
|
||||
import typing
|
||||
|
||||
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
|
||||
|
||||
@typing.final
|
||||
class AgentId(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
NAME_FIELD_NUMBER: builtins.int
|
||||
NAMESPACE_FIELD_NUMBER: builtins.int
|
||||
name: builtins.str
|
||||
namespace: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
name: builtins.str = ...,
|
||||
namespace: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["name", b"name", "namespace", b"namespace"]) -> None: ...
|
||||
|
||||
global___AgentId = AgentId
|
||||
|
||||
@typing.final
|
||||
class RpcRequest(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
@typing.final
|
||||
class MetadataEntry(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
KEY_FIELD_NUMBER: builtins.int
|
||||
VALUE_FIELD_NUMBER: builtins.int
|
||||
key: builtins.str
|
||||
value: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
key: builtins.str = ...,
|
||||
value: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ...
|
||||
|
||||
REQUEST_ID_FIELD_NUMBER: builtins.int
|
||||
SOURCE_FIELD_NUMBER: builtins.int
|
||||
TARGET_FIELD_NUMBER: builtins.int
|
||||
METHOD_FIELD_NUMBER: builtins.int
|
||||
DATA_TYPE_FIELD_NUMBER: builtins.int
|
||||
DATA_FIELD_NUMBER: builtins.int
|
||||
METADATA_FIELD_NUMBER: builtins.int
|
||||
request_id: builtins.str
|
||||
method: builtins.str
|
||||
data_type: builtins.str
|
||||
data: builtins.str
|
||||
@property
|
||||
def source(self) -> global___AgentId: ...
|
||||
@property
|
||||
def target(self) -> global___AgentId: ...
|
||||
@property
|
||||
def metadata(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
request_id: builtins.str = ...,
|
||||
source: global___AgentId | None = ...,
|
||||
target: global___AgentId | None = ...,
|
||||
method: builtins.str = ...,
|
||||
data_type: builtins.str = ...,
|
||||
data: builtins.str = ...,
|
||||
metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
|
||||
) -> None: ...
|
||||
def HasField(self, field_name: typing.Literal["source", b"source", "target", b"target"]) -> builtins.bool: ...
|
||||
def ClearField(self, field_name: typing.Literal["data", b"data", "data_type", b"data_type", "metadata", b"metadata", "method", b"method", "request_id", b"request_id", "source", b"source", "target", b"target"]) -> None: ...
|
||||
|
||||
global___RpcRequest = RpcRequest
|
||||
|
||||
@typing.final
|
||||
class RpcResponse(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
@typing.final
|
||||
class MetadataEntry(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
KEY_FIELD_NUMBER: builtins.int
|
||||
VALUE_FIELD_NUMBER: builtins.int
|
||||
key: builtins.str
|
||||
value: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
key: builtins.str = ...,
|
||||
value: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ...
|
||||
|
||||
REQUEST_ID_FIELD_NUMBER: builtins.int
|
||||
RESULT_TYPE_FIELD_NUMBER: builtins.int
|
||||
RESULT_FIELD_NUMBER: builtins.int
|
||||
ERROR_FIELD_NUMBER: builtins.int
|
||||
METADATA_FIELD_NUMBER: builtins.int
|
||||
request_id: builtins.str
|
||||
result_type: builtins.str
|
||||
result: builtins.str
|
||||
error: builtins.str
|
||||
@property
|
||||
def metadata(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
request_id: builtins.str = ...,
|
||||
result_type: builtins.str = ...,
|
||||
result: builtins.str = ...,
|
||||
error: builtins.str = ...,
|
||||
metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["error", b"error", "metadata", b"metadata", "request_id", b"request_id", "result", b"result", "result_type", b"result_type"]) -> None: ...
|
||||
|
||||
global___RpcResponse = RpcResponse
|
||||
|
||||
@typing.final
|
||||
class Event(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
@typing.final
|
||||
class MetadataEntry(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
KEY_FIELD_NUMBER: builtins.int
|
||||
VALUE_FIELD_NUMBER: builtins.int
|
||||
key: builtins.str
|
||||
value: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
key: builtins.str = ...,
|
||||
value: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ...
|
||||
|
||||
NAMESPACE_FIELD_NUMBER: builtins.int
|
||||
TOPIC_TYPE_FIELD_NUMBER: builtins.int
|
||||
TOPIC_SOURCE_FIELD_NUMBER: builtins.int
|
||||
DATA_TYPE_FIELD_NUMBER: builtins.int
|
||||
DATA_FIELD_NUMBER: builtins.int
|
||||
METADATA_FIELD_NUMBER: builtins.int
|
||||
namespace: builtins.str
|
||||
topic_type: builtins.str
|
||||
topic_source: builtins.str
|
||||
data_type: builtins.str
|
||||
data: builtins.str
|
||||
@property
|
||||
def metadata(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
namespace: builtins.str = ...,
|
||||
topic_type: builtins.str = ...,
|
||||
topic_source: builtins.str = ...,
|
||||
data_type: builtins.str = ...,
|
||||
data: builtins.str = ...,
|
||||
metadata: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["data", b"data", "data_type", b"data_type", "metadata", b"metadata", "namespace", b"namespace", "topic_source", b"topic_source", "topic_type", b"topic_type"]) -> None: ...
|
||||
|
||||
global___Event = Event
|
||||
|
||||
@typing.final
|
||||
class RegisterAgentType(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
TYPE_FIELD_NUMBER: builtins.int
|
||||
type: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
type: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["type", b"type"]) -> None: ...
|
||||
|
||||
global___RegisterAgentType = RegisterAgentType
|
||||
|
||||
@typing.final
|
||||
class TypeSubscription(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
TOPIC_TYPE_FIELD_NUMBER: builtins.int
|
||||
AGENT_TYPE_FIELD_NUMBER: builtins.int
|
||||
topic_type: builtins.str
|
||||
agent_type: builtins.str
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
topic_type: builtins.str = ...,
|
||||
agent_type: builtins.str = ...,
|
||||
) -> None: ...
|
||||
def ClearField(self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type", b"topic_type"]) -> None: ...
|
||||
|
||||
global___TypeSubscription = TypeSubscription
|
||||
|
||||
@typing.final
|
||||
class Subscription(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
TYPESUBSCRIPTION_FIELD_NUMBER: builtins.int
|
||||
@property
|
||||
def typeSubscription(self) -> global___TypeSubscription: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
typeSubscription: global___TypeSubscription | None = ...,
|
||||
) -> None: ...
|
||||
def HasField(self, field_name: typing.Literal["subscription", b"subscription", "typeSubscription", b"typeSubscription"]) -> builtins.bool: ...
|
||||
def ClearField(self, field_name: typing.Literal["subscription", b"subscription", "typeSubscription", b"typeSubscription"]) -> None: ...
|
||||
def WhichOneof(self, oneof_group: typing.Literal["subscription", b"subscription"]) -> typing.Literal["typeSubscription"] | None: ...
|
||||
|
||||
global___Subscription = Subscription
|
||||
|
||||
@typing.final
|
||||
class AddSubscription(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
SUBSCRIPTION_FIELD_NUMBER: builtins.int
|
||||
@property
|
||||
def subscription(self) -> global___Subscription: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
subscription: global___Subscription | None = ...,
|
||||
) -> None: ...
|
||||
def HasField(self, field_name: typing.Literal["subscription", b"subscription"]) -> builtins.bool: ...
|
||||
def ClearField(self, field_name: typing.Literal["subscription", b"subscription"]) -> None: ...
|
||||
|
||||
global___AddSubscription = AddSubscription
|
||||
|
||||
@typing.final
|
||||
class Message(google.protobuf.message.Message):
|
||||
DESCRIPTOR: google.protobuf.descriptor.Descriptor
|
||||
|
||||
REQUEST_FIELD_NUMBER: builtins.int
|
||||
RESPONSE_FIELD_NUMBER: builtins.int
|
||||
EVENT_FIELD_NUMBER: builtins.int
|
||||
REGISTERAGENTTYPE_FIELD_NUMBER: builtins.int
|
||||
ADDSUBSCRIPTION_FIELD_NUMBER: builtins.int
|
||||
@property
|
||||
def request(self) -> global___RpcRequest: ...
|
||||
@property
|
||||
def response(self) -> global___RpcResponse: ...
|
||||
@property
|
||||
def event(self) -> global___Event: ...
|
||||
@property
|
||||
def registerAgentType(self) -> global___RegisterAgentType: ...
|
||||
@property
|
||||
def addSubscription(self) -> global___AddSubscription: ...
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
request: global___RpcRequest | None = ...,
|
||||
response: global___RpcResponse | None = ...,
|
||||
event: global___Event | None = ...,
|
||||
registerAgentType: global___RegisterAgentType | None = ...,
|
||||
addSubscription: global___AddSubscription | None = ...,
|
||||
) -> None: ...
|
||||
def HasField(self, field_name: typing.Literal["addSubscription", b"addSubscription", "event", b"event", "message", b"message", "registerAgentType", b"registerAgentType", "request", b"request", "response", b"response"]) -> builtins.bool: ...
|
||||
def ClearField(self, field_name: typing.Literal["addSubscription", b"addSubscription", "event", b"event", "message", b"message", "registerAgentType", b"registerAgentType", "request", b"request", "response", b"response"]) -> None: ...
|
||||
def WhichOneof(self, oneof_group: typing.Literal["message", b"message"]) -> typing.Literal["request", "response", "event", "registerAgentType", "addSubscription"] | None: ...
|
||||
|
||||
global___Message = Message
|
||||
@@ -0,0 +1,97 @@
|
||||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
import warnings
|
||||
|
||||
import agent_worker_pb2 as agent__worker__pb2
|
||||
|
||||
GRPC_GENERATED_VERSION = '1.66.0'
|
||||
GRPC_VERSION = grpc.__version__
|
||||
_version_not_supported = False
|
||||
|
||||
try:
|
||||
from grpc._utilities import first_version_is_lower
|
||||
_version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
|
||||
except ImportError:
|
||||
_version_not_supported = True
|
||||
|
||||
if _version_not_supported:
|
||||
raise RuntimeError(
|
||||
f'The grpc package installed is at version {GRPC_VERSION},'
|
||||
+ f' but the generated code in agent_worker_pb2_grpc.py depends on'
|
||||
+ f' grpcio>={GRPC_GENERATED_VERSION}.'
|
||||
+ f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
|
||||
+ f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
|
||||
)
|
||||
|
||||
|
||||
class AgentRpcStub(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def __init__(self, channel):
|
||||
"""Constructor.
|
||||
|
||||
Args:
|
||||
channel: A grpc.Channel.
|
||||
"""
|
||||
self.OpenChannel = channel.stream_stream(
|
||||
'/agents.AgentRpc/OpenChannel',
|
||||
request_serializer=agent__worker__pb2.Message.SerializeToString,
|
||||
response_deserializer=agent__worker__pb2.Message.FromString,
|
||||
_registered_method=True)
|
||||
|
||||
|
||||
class AgentRpcServicer(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def OpenChannel(self, request_iterator, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
|
||||
def add_AgentRpcServicer_to_server(servicer, server):
|
||||
rpc_method_handlers = {
|
||||
'OpenChannel': grpc.stream_stream_rpc_method_handler(
|
||||
servicer.OpenChannel,
|
||||
request_deserializer=agent__worker__pb2.Message.FromString,
|
||||
response_serializer=agent__worker__pb2.Message.SerializeToString,
|
||||
),
|
||||
}
|
||||
generic_handler = grpc.method_handlers_generic_handler(
|
||||
'agents.AgentRpc', rpc_method_handlers)
|
||||
server.add_generic_rpc_handlers((generic_handler,))
|
||||
server.add_registered_method_handlers('agents.AgentRpc', rpc_method_handlers)
|
||||
|
||||
|
||||
# This class is part of an EXPERIMENTAL API.
|
||||
class AgentRpc(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
@staticmethod
|
||||
def OpenChannel(request_iterator,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.stream_stream(
|
||||
request_iterator,
|
||||
target,
|
||||
'/agents.AgentRpc/OpenChannel',
|
||||
agent__worker__pb2.Message.SerializeToString,
|
||||
agent__worker__pb2.Message.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
insecure,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
_registered_method=True)
|
||||
@@ -0,0 +1,41 @@
|
||||
"""
|
||||
@generated by mypy-protobuf. Do not edit manually!
|
||||
isort:skip_file
|
||||
"""
|
||||
|
||||
import abc
|
||||
import agent_worker_pb2
|
||||
import collections.abc
|
||||
import grpc
|
||||
import grpc.aio
|
||||
import typing
|
||||
|
||||
_T = typing.TypeVar("_T")
|
||||
|
||||
class _MaybeAsyncIterator(collections.abc.AsyncIterator[_T], collections.abc.Iterator[_T], metaclass=abc.ABCMeta): ...
|
||||
|
||||
class _ServicerContext(grpc.ServicerContext, grpc.aio.ServicerContext): # type: ignore[misc, type-arg]
|
||||
...
|
||||
|
||||
class AgentRpcStub:
|
||||
def __init__(self, channel: typing.Union[grpc.Channel, grpc.aio.Channel]) -> None: ...
|
||||
OpenChannel: grpc.StreamStreamMultiCallable[
|
||||
agent_worker_pb2.Message,
|
||||
agent_worker_pb2.Message,
|
||||
]
|
||||
|
||||
class AgentRpcAsyncStub:
|
||||
OpenChannel: grpc.aio.StreamStreamMultiCallable[
|
||||
agent_worker_pb2.Message,
|
||||
agent_worker_pb2.Message,
|
||||
]
|
||||
|
||||
class AgentRpcServicer(metaclass=abc.ABCMeta):
|
||||
@abc.abstractmethod
|
||||
def OpenChannel(
|
||||
self,
|
||||
request_iterator: _MaybeAsyncIterator[agent_worker_pb2.Message],
|
||||
context: _ServicerContext,
|
||||
) -> typing.Union[collections.abc.Iterator[agent_worker_pb2.Message], collections.abc.AsyncIterator[agent_worker_pb2.Message]]: ...
|
||||
|
||||
def add_AgentRpcServicer_to_server(servicer: AgentRpcServicer, server: typing.Union[grpc.Server, grpc.aio.Server]) -> None: ...
|
||||
@@ -0,0 +1,42 @@
|
||||
"""
|
||||
The :mod:`autogen_core.base` module provides the foundational generic interfaces upon which all else is built. This module must not depend on any other module.
|
||||
"""
|
||||
|
||||
from ._agent import Agent
|
||||
from ._agent_id import AgentId
|
||||
from ._agent_instantiation import AgentInstantiationContext
|
||||
from ._agent_metadata import AgentMetadata
|
||||
from ._agent_props import AgentChildren
|
||||
from ._agent_proxy import AgentProxy
|
||||
from ._agent_runtime import AgentRuntime
|
||||
from ._agent_type import AgentType
|
||||
from ._base_agent import BaseAgent
|
||||
from ._cancellation_token import CancellationToken
|
||||
from ._message_context import MessageContext
|
||||
from ._message_handler_context import MessageHandlerContext
|
||||
from ._serialization import MESSAGE_TYPE_REGISTRY, Serialization, TypeDeserializer, TypeSerializer
|
||||
from ._subscription import Subscription
|
||||
from ._subscription_context import SubscriptionInstantiationContext
|
||||
from ._topic import TopicId
|
||||
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"AgentId",
|
||||
"AgentProxy",
|
||||
"AgentMetadata",
|
||||
"AgentRuntime",
|
||||
"BaseAgent",
|
||||
"CancellationToken",
|
||||
"AgentChildren",
|
||||
"AgentInstantiationContext",
|
||||
"MESSAGE_TYPE_REGISTRY",
|
||||
"TypeSerializer",
|
||||
"TypeDeserializer",
|
||||
"TopicId",
|
||||
"Subscription",
|
||||
"MessageContext",
|
||||
"Serialization",
|
||||
"AgentType",
|
||||
"SubscriptionInstantiationContext",
|
||||
"MessageHandlerContext",
|
||||
]
|
||||
47
python/packages/autogen-core/src/autogen_core/base/_agent.py
Normal file
47
python/packages/autogen-core/src/autogen_core/base/_agent.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from typing import Any, Mapping, Protocol, runtime_checkable
|
||||
|
||||
from ._agent_id import AgentId
|
||||
from ._agent_metadata import AgentMetadata
|
||||
from ._message_context import MessageContext
|
||||
|
||||
|
||||
@runtime_checkable
class Agent(Protocol):
    """Structural interface that every agent implementation must satisfy.

    This is a runtime-checkable :class:`typing.Protocol`: any object that
    exposes these members is treated as an ``Agent`` by ``isinstance`` checks.
    """

    @property
    def metadata(self) -> AgentMetadata:
        """The agent's metadata."""

    @property
    def id(self) -> AgentId:
        """The agent's identifier."""

    async def on_message(self, message: Any, ctx: MessageContext) -> Any:
        """Handle one incoming message.

        Intended to be invoked by the runtime only, never directly by other
        agents.

        Args:
            message: The received message; its type is one of the types in
                ``subscriptions``.
            ctx: Context associated with the message.

        Returns:
            The response to the message, which may be ``None``.

        Raises:
            asyncio.CancelledError: If handling of the message was cancelled.
            CantHandleException: If this agent cannot process the message.
        """
        ...

    def save_state(self) -> Mapping[str, Any]:
        """Export the agent's state; the result must be JSON serializable."""
        ...

    def load_state(self, state: Mapping[str, Any]) -> None:
        """Restore state previously produced by :meth:`save_state`.

        Args:
            state: JSON-serializable mapping returned by ``save_state``.
        """
        ...
|
||||
@@ -0,0 +1,45 @@
|
||||
from typing_extensions import Self
|
||||
|
||||
from ._agent_type import AgentType
|
||||
|
||||
|
||||
class AgentId:
    """Identifier for an agent instance.

    An ``AgentId`` pairs an agent *type* (which must be a valid Python
    identifier) with a *key* distinguishing instances of that type. Its
    string form is ``"<type>:<key>"``.
    """

    def __init__(self, type: str | AgentType, key: str) -> None:
        if isinstance(type, AgentType):
            type = type.type

        # The type participates in lookup/registration, so it is restricted
        # to a valid identifier; keys are unrestricted and may contain ":".
        if not type.isidentifier():
            raise ValueError(f"Invalid type: {type}")

        self._type = type
        self._key = key

    def __hash__(self) -> int:
        # Hash must agree with __eq__, which compares (type, key).
        return hash((self._type, self._key))

    def __str__(self) -> str:
        return f"{self._type}:{self._key}"

    def __repr__(self) -> str:
        return f'AgentId(type="{self._type}", key="{self._key}")'

    def __eq__(self, value: object) -> bool:
        if not isinstance(value, AgentId):
            return False
        return self._type == value.type and self._key == value.key

    @classmethod
    def from_str(cls, agent_id: str) -> Self:
        """Parse an ``AgentId`` from its ``"<type>:<key>"`` string form.

        Only the first ``":"`` separates type from key, so keys may contain
        colons (round-trips with ``str(agent_id)``).

        Raises:
            ValueError: If the string contains no ``":"`` separator.
        """
        items = agent_id.split(":", maxsplit=1)
        if len(items) != 2:
            raise ValueError(f"Invalid agent id: {agent_id}")
        type, key = items
        return cls(type, key)

    @property
    def type(self) -> str:
        return self._type

    @property
    def key(self) -> str:
        return self._key
|
||||
@@ -0,0 +1,44 @@
|
||||
from contextlib import contextmanager
|
||||
from contextvars import ContextVar
|
||||
from typing import Any, ClassVar, Generator
|
||||
|
||||
from ._agent_id import AgentId
|
||||
from ._agent_runtime import AgentRuntime
|
||||
|
||||
|
||||
class AgentInstantiationContext:
    """Static holder for the ``(runtime, agent id)`` pair in effect while an
    agent is being instantiated.

    The runtime wraps agent construction in :meth:`populate_context` so that
    code running during instantiation can discover which runtime is creating
    it and under which id, without either being passed explicitly.
    """

    def __init__(self) -> None:
        # Namespace-only class: instantiation is a usage error.
        raise RuntimeError(
            "AgentInstantiationContext cannot be instantiated. It is a static class that provides context management for agent instantiation."
        )

    # Holds the (runtime, agent id) pair for the current instantiation;
    # unset outside of populate_context().
    AGENT_INSTANTIATION_CONTEXT_VAR: ClassVar[ContextVar[tuple[AgentRuntime, AgentId]]] = ContextVar(
        "AGENT_INSTANTIATION_CONTEXT_VAR"
    )

    @classmethod
    @contextmanager
    def populate_context(cls, ctx: tuple[AgentRuntime, AgentId]) -> Generator[None, Any, None]:
        """Make *ctx* the current instantiation context for the duration of
        the ``with`` block, restoring the previous state afterwards."""
        token = cls.AGENT_INSTANTIATION_CONTEXT_VAR.set(ctx)
        try:
            yield
        finally:
            cls.AGENT_INSTANTIATION_CONTEXT_VAR.reset(token)

    @classmethod
    def current_runtime(cls) -> AgentRuntime:
        """Return the runtime instantiating the current agent.

        Raises:
            RuntimeError: If called outside of an instantiation context.
        """
        try:
            return cls.AGENT_INSTANTIATION_CONTEXT_VAR.get()[0]
        except LookupError as e:
            # BUG FIX: the message previously referred to a non-existent
            # method name ("runtime()") and misspelled "Most likely".
            raise RuntimeError(
                "AgentInstantiationContext.current_runtime() must be called within an instantiation context such as when the AgentRuntime is instantiating an agent. Most likely this was caused by directly instantiating an agent instead of using the AgentRuntime to do so."
            ) from e

    @classmethod
    def current_agent_id(cls) -> AgentId:
        """Return the id of the agent currently being instantiated.

        Raises:
            RuntimeError: If called outside of an instantiation context.
        """
        try:
            return cls.AGENT_INSTANTIATION_CONTEXT_VAR.get()[1]
        except LookupError as e:
            # BUG FIX: the message previously referred to a non-existent
            # method name ("agent_id()") and misspelled "Most likely".
            raise RuntimeError(
                "AgentInstantiationContext.current_agent_id() must be called within an instantiation context such as when the AgentRuntime is instantiating an agent. Most likely this was caused by directly instantiating an agent instead of using the AgentRuntime to do so."
            ) from e
|
||||
@@ -0,0 +1,7 @@
|
||||
from typing import TypedDict
|
||||
|
||||
|
||||
# Descriptive information attached to an agent: its type name, its instance
# key within that type, and a human-readable description.
AgentMetadata = TypedDict(
    "AgentMetadata",
    {
        "type": str,
        "key": str,
        "description": str,
    },
)
|
||||
@@ -0,0 +1,11 @@
|
||||
from typing import Protocol, Sequence, runtime_checkable
|
||||
|
||||
from ._agent_id import AgentId
|
||||
|
||||
|
||||
@runtime_checkable
class AgentChildren(Protocol):
    """Structural interface for agents that expose child agents.

    Runtime-checkable: any object with a ``children`` member satisfies an
    ``isinstance`` check against this protocol.
    """

    @property
    def children(self) -> Sequence[AgentId]:
        """The ids of this agent's children."""
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user