Mirror of https://github.com/crewAIInc/crewAI-examples.git, synced 2026-01-10 06:17:58 -05:00
adding notebooks
File diff suppressed because it is too large
337
Notebooks/CrewAI Flows & Langgraph/QA Agent/crewai.ipynb
Normal file
@@ -0,0 +1,337 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Install dependencies"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U --quiet 'crewai[tools]' aisuite"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Set environment variables"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"import getpass\n",
"import time\n",
"initial_time = time.time()\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")\n",
"\n",
"# Apply a patch to allow nested asyncio loops in Jupyter\n",
"import nest_asyncio\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create Crew"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/joaomoura/.pyenv/versions/3.11.7/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
"  from .autonotebook import tqdm as notebook_tqdm\n",
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.47s/it]\n",
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:00<00:00, 1.18it/s]\n",
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.65s/it]\n"
]
}
],
"source": [
"# Importing Crew related components\n",
"# Importing CrewAI Flow related components\n",
"# Importing CrewAI Tools\n",
"from crewai import Agent, Task, Crew\n",
"from crewai.flow.flow import Flow, listen, start\n",
"from crewai_tools import WebsiteSearchTool\n",
"\n",
"# Importing AI Suite for adhoc LLM calls and Pydantic\n",
"from pydantic import BaseModel\n",
"import aisuite as ai\n",
"\n",
"urls = [\n",
"    \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
"    \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n",
"    \"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/\",\n",
"]\n",
"\n",
"research_agent = Agent(\n",
"    role=\"You are a helpful assistant that can answer questions about the web.\",\n",
"    goal=\"Answer the user's question.\",\n",
"    backstory=\"You have access to a vast knowledge base of information from the web.\",\n",
"    tools=[\n",
"        WebsiteSearchTool(website=urls[0]),\n",
"        WebsiteSearchTool(website=urls[1]),\n",
"        WebsiteSearchTool(website=urls[2]),\n",
"    ],\n",
"    llm=\"gpt-4o-mini\",\n",
")\n",
"\n",
"task = Task(\n",
"    description=\"Answer the following question: {question}\",\n",
"    expected_output=\"A detailed and accurate answer to the user's question.\",\n",
"    agent=research_agent,\n",
")\n",
"\n",
"crew = Crew(\n",
"    agents=[research_agent],\n",
"    tasks=[task],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Creating State"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"class QAState(BaseModel):\n",
"    \"\"\"\n",
"    State for the documentation flow\n",
"    \"\"\"\n",
"    question: str = \"What does Lilian Weng say about the types of agent memory?\"\n",
"    improved_question: str = \"\"\n",
"    answer: str = \"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Creating Flow"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"class QAFlow(Flow[QAState]):\n",
"    @start()\n",
"    def rewrite_question(self):\n",
"        print(f\"# Rewriting question: {self.state.question}\")\n",
"        client = ai.Client()\n",
"        messages = [\n",
"            {\n",
"                \"role\": \"system\",\n",
"                \"content\": f\"\"\"Look at the input and try to reason about the underlying semantic intent / meaning.\n",
"                Here is the initial question:\n",
"                -------\n",
"                {self.state.question}\n",
"                -------\n",
"                Formulate an improved question:\"\"\"\n",
"            }\n",
"        ]\n",
"\n",
"        response = client.chat.completions.create(\n",
"            model=\"openai:gpt-4o-mini\",\n",
"            messages=messages,\n",
"            temperature=0.3\n",
"        )\n",
"\n",
"        print(response)\n",
"\n",
"        improved_question = response.choices[0].message.content\n",
"        self.state.improved_question = improved_question\n",
"\n",
"    @listen(rewrite_question)\n",
"    def answer_question(self):\n",
"        print(f\"# Answering question: {self.state.improved_question}\")\n",
"        result = crew.kickoff(inputs={'question': self.state.improved_question})\n",
"        self.state.answer = result.raw\n",
"        return result\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Plotting Flow"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Plot saved as crewai_flow.html\n"
]
},
{
"data": {
"text/html": [
"\n",
"        <iframe\n",
"            width=\"100%\"\n",
"            height=\"600\"\n",
"            src=\"crewai_flow.html\"\n",
"            frameborder=\"0\"\n",
"            allowfullscreen\n",
"            \n",
"        ></iframe>\n",
"        "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x10350b310>"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"flow = QAFlow()\n",
"flow.plot()\n",
"\n",
"# Display the flow visualization using HTML\n",
"from IPython.display import IFrame\n",
"IFrame(src='crewai_flow.html', width='100%', height=600)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Kicking off Flow"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Rewriting question: What does Lilian Weng say about the types of agent memory?\n",
"ChatCompletion(id='chatcmpl-Aeo4gBp6YJNqtm6QW3RVqcSoIvcBo', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='What insights does Lilian Weng provide regarding the different types of agent memory in her work?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1734288970, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier=None, system_fingerprint='fp_6fc10e10eb', usage=CompletionUsage(completion_tokens=19, prompt_tokens=56, total_tokens=75, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
"# Answering question: What insights does Lilian Weng provide regarding the different types of agent memory in her work?\n",
"==========\n",
"In her work, Lilian Weng provides insights into the different types of memory used in LLM-powered autonomous agents. She categorizes memory into several types, drawing parallels to the functioning of human memory:\n",
"\n",
"1. **Short-term Memory**: This is associated with in-context learning utilized by the model, which allows it to learn and process information temporarily.\n",
"\n",
"2. **Long-term Memory**: This type enables the agent to retain and recall information over extended periods. It often utilizes an external vector store for fast retrieval, thus facilitating the storage of infinite information.\n",
"\n",
"In relation to human memory, Weng elaborates on the following categories:\n",
"\n",
"- **Sensory Memory**: The initial stage of memory retaining sensory impressions after stimuli have ended, lasting only a few seconds. Subcategories include:\n",
"  - **Iconic Memory** (visual)\n",
"  - **Echoic Memory** (auditory)\n",
"  - **Haptic Memory** (touch)\n",
"\n",
"- **Short-Term Memory (STM) or Working Memory**: This type holds information currently in awareness, which is essential for cognitive tasks like learning and reasoning. STM is believed to hold about 7 items for approximately 20-30 seconds.\n",
"\n",
"- **Long-Term Memory (LTM)**: This can retain information for prolonged periods, ranging from days to decades, with virtually unlimited capacity. It is divided into:\n",
"  - **Explicit/Declarative Memory**: Conscious recollections of facts and experiences, including:\n",
"    - **Episodic Memory**: Events and experiences\n",
"    - **Semantic Memory**: Facts and concepts\n",
"  - **Implicit/Procedural Memory**: Unconscious memory for skills and routines performed automatically, such as riding a bike or typing.\n",
"\n",
"Weng suggests that sensory memory can be viewed as the process of learning embedding representations for raw inputs, while short-term and long-term memories serve to organize, retain, and retrieve complex information and experiences that enhance an agent's performance as it interacts with environments.\n"
]
}
],
"source": [
"result = flow.kickoff()\n",
"print(\"=\" * 10)\n",
"print(result)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total execution time: 158.21 seconds\n"
]
}
],
"source": [
"import time\n",
"final_time = time.time()\n",
"print(f\"Total execution time: {final_time - initial_time:.2f} seconds\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
202
Notebooks/CrewAI Flows & Langgraph/QA Agent/crewai_flow.html
Normal file
File diff suppressed because one or more lines are too long
BIN
Notebooks/CrewAI Flows & Langgraph/QA Agent/db/chroma.sqlite3
Normal file
Binary file not shown.
542
Notebooks/CrewAI Flows & Langgraph/QA Agent/laggraph.ipynb
Normal file
File diff suppressed because one or more lines are too long
72
Notebooks/Flows/Landing Page Flow/landing_page_flow.ipynb
Normal file
@@ -0,0 +1,72 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Install dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U --quiet 'crewai[tools]' aisuite"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Set environment variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")\n",
"\n",
"# Apply a patch to allow nested asyncio loops in Jupyter\n",
"import nest_asyncio\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
202
Notebooks/Flows/Simple QA Crew + Flow/crewai_flow.html
Normal file
File diff suppressed because one or more lines are too long
Binary file not shown.
BIN
Notebooks/Flows/Simple QA Crew + Flow/db/chroma.sqlite3
Normal file
Binary file not shown.
@@ -0,0 +1,297 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Install dependencies"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U --quiet 'crewai[tools]' aisuite"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Set environment variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")\n",
"\n",
"# Apply a patch to allow nested asyncio loops in Jupyter\n",
"import nest_asyncio\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create Crew"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.36s/it]\n",
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.12s/it]\n",
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.19s/it]\n"
]
}
],
"source": [
"# Importing Crew related components\n",
"# Importing CrewAI Flow related components\n",
"# Importing CrewAI Tools\n",
"from crewai import Agent, Task, Crew\n",
"from crewai.flow.flow import Flow, listen, start\n",
"from crewai_tools import WebsiteSearchTool\n",
"\n",
"# Importing AI Suite for adhoc LLM calls and Pydantic\n",
"from pydantic import BaseModel\n",
"import aisuite as ai\n",
"\n",
"urls = [\n",
"    \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
"    \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n",
"    \"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/\",\n",
"]\n",
"\n",
"research_agent = Agent(\n",
"    role=\"You are a helpful assistant that can answer questions about the web.\",\n",
"    goal=\"Answer the user's question.\",\n",
"    backstory=\"You have access to a vast knowledge base of information from the web.\",\n",
"    tools=[\n",
"        WebsiteSearchTool(website=urls[0]),\n",
"        WebsiteSearchTool(website=urls[1]),\n",
"        WebsiteSearchTool(website=urls[2]),\n",
"    ],\n",
"    llm=\"gpt-4o-mini\",\n",
")\n",
"\n",
"task = Task(\n",
"    description=\"Answer the following question: {question}\",\n",
"    expected_output=\"A detailed and accurate answer to the user's question.\",\n",
"    agent=research_agent,\n",
")\n",
"\n",
"crew = Crew(\n",
"    agents=[research_agent],\n",
"    tasks=[task],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Creating State"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"class QAState(BaseModel):\n",
"    \"\"\"\n",
"    State for the documentation flow\n",
"    \"\"\"\n",
"    question: str = \"What does Lilian Weng say about the types of agent memory?\"\n",
"    improved_question: str = \"\"\n",
"    answer: str = \"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Creating Flow"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"class QAFlow(Flow[QAState]):\n",
"    @start()\n",
"    def rewrite_question(self):\n",
"        print(f\"# Rewriting question: {self.state.question}\")\n",
"        client = ai.Client()\n",
"        messages = [\n",
"            {\n",
"                \"role\": \"system\",\n",
"                \"content\": f\"\"\"Look at the input and try to reason about the underlying semantic intent / meaning.\n",
"                Here is the initial question:\n",
"                -------\n",
"                {self.state.question}\n",
"                -------\n",
"                Formulate an improved question:\"\"\"\n",
"            }\n",
"        ]\n",
"\n",
"        response = client.chat.completions.create(\n",
"            model=\"openai:gpt-4o-mini\",\n",
"            messages=messages,\n",
"            temperature=0.3\n",
"        )\n",
"\n",
"        print(response)\n",
"\n",
"        improved_question = response.choices[0].message.content\n",
"        self.state.improved_question = improved_question\n",
"\n",
"    @listen(rewrite_question)\n",
"    def answer_question(self):\n",
"        print(f\"# Answering question: {self.state.improved_question}\")\n",
"        result = crew.kickoff(inputs={'question': self.state.improved_question})\n",
"        self.state.answer = result.raw\n",
"        return result\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Plotting Flow"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Plot saved as crewai_flow.html\n"
]
},
{
"data": {
"text/html": [
"\n",
"        <iframe\n",
"            width=\"100%\"\n",
"            height=\"400\"\n",
"            src=\"crewai_flow.html\"\n",
"            frameborder=\"0\"\n",
"            allowfullscreen\n",
"            \n",
"        ></iframe>\n",
"        "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x2a8496cd0>"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"flow = QAFlow()\n",
"flow.plot()\n",
"\n",
"# Display the flow visualization using HTML\n",
"from IPython.display import IFrame\n",
"IFrame(src='crewai_flow.html', width='100%', height=400)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Kicking off Flow"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Rewriting question: What does Lilian Weng say about the types of agent memory?\n",
"ChatCompletion(id='chatcmpl-AemJZZeKKjqXr91KZO8zO8a1xcPZl', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='What insights does Lilian Weng provide regarding the different types of agent memory?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1734282205, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier=None, system_fingerprint='fp_6fc10e10eb', usage=CompletionUsage(completion_tokens=16, prompt_tokens=56, total_tokens=72, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
"# Answering question: What insights does Lilian Weng provide regarding the different types of agent memory?\n",
"==========\n",
"Lilian Weng provides insights on different types of agent memory in her work on LLM-powered autonomous agents. She categorizes memory into three main types:\n",
"\n",
"1. **Short-term Memory**: This refers to the agent's ability to utilize in-context learning. It allows the agent to process and retain information temporarily for immediate tasks.\n",
"\n",
"2. **Long-term Memory**: This memory type allows the agent to retain and recall information over extended periods. It often employs external vector stores for infinite retention and quick retrieval of data.\n",
"\n",
"Weng elaborates on the general concept of memory, drawing parallels with human memory systems. She delineates various categories found in human cognitive science:\n",
"\n",
"- **Sensory Memory**: The ability to retain impressions of sensory information after stimuli have ceased, typically lasting a few seconds. It includes subcategories like iconic (visual) and echoic (auditory) memory.\n",
"  \n",
"- **Short-Term Memory (STM)** or **Working Memory**: This stores information necessary for immediate cognitive tasks, with a capacity of about seven items and a duration of 20-30 seconds.\n",
"  \n",
"- **Long-Term Memory (LTM)**: Capable of holding vast amounts of information for long periods ranging from days to decades, it is divided into:\n",
"  - **Explicit/Declarative Memory**: Recallable memories of facts and events (episodic and semantic).\n",
"  - **Implicit/Procedural Memory**: Unconscious memory for skills and routines.\n",
"\n",
"In analogy, sensory memory can be likened to the initial processing of raw input data, while other forms of memory facilitate more complex actions and decision-making in agents, such as retaining key operational experiences and making informed choices based on past interactions.\n"
]
}
],
"source": [
"result = flow.kickoff()\n",
"print(\"=\" * 10)\n",
"print(result)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}