docs: make quickstart async-first (#421)

This commit is contained in:
Twisha Bansal
2025-04-10 17:00:48 +05:30
committed by GitHub
parent e9bd41aa18
commit f232e5387e
2 changed files with 27 additions and 24 deletions

View File

@@ -694,23 +694,26 @@
" \"My check in dates would be from April 10, 2024 to April 19, 2024.\",\n",
"]\n",
"\n",
"# Create an LLM to bind with the agent.\n",
"# TODO(developer): replace this with another model if needed\n",
"model = ChatVertexAI(model_name=\"gemini-1.5-pro\", project=project_id)\n",
"# model = ChatGoogleGenerativeAI(model=\"gemini-1.5-pro\")\n",
"# model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\")\n",
"async def run_application():\n",
" # Create an LLM to bind with the agent.\n",
" # TODO(developer): replace this with another model if needed\n",
" model = ChatVertexAI(model_name=\"gemini-1.5-pro\", project=project_id)\n",
" # model = ChatGoogleGenerativeAI(model=\"gemini-1.5-pro\")\n",
" # model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\")\n",
"\n",
"# Load the tools from the Toolbox server\n",
"client = ToolboxClient(\"http://127.0.0.1:5000\")\n",
"tools = client.load_toolset()\n",
" # Load the tools from the Toolbox server\n",
" client = ToolboxClient(\"http://127.0.0.1:5000\")\n",
" tools = await client.aload_toolset()\n",
"\n",
"# Create a Langraph agent\n",
"agent = create_react_agent(model, tools, checkpointer=MemorySaver())\n",
"config = {\"configurable\": {\"thread_id\": \"thread-1\"}}\n",
"for query in queries:\n",
" inputs = {\"messages\": [(\"user\", prompt + query)]}\n",
" response = agent.invoke(inputs, stream_mode=\"values\", config=config)\n",
" print(response[\"messages\"][-1].content)"
"    # Create a LangGraph agent\n",
" agent = create_react_agent(model, tools, checkpointer=MemorySaver())\n",
" config = {\"configurable\": {\"thread_id\": \"thread-1\"}}\n",
" for query in queries:\n",
" inputs = {\"messages\": [(\"user\", prompt + query)]}\n",
" response = agent.invoke(inputs, stream_mode=\"values\", config=config)\n",
" print(response[\"messages\"][-1].content)\n",
"\n",
"await run_application()"
]
},
{
@@ -786,7 +789,7 @@
" \"My check in dates would be from April 10, 2024 to April 19, 2024.\",\n",
"]\n",
"\n",
"async def run_agent():\n",
"async def run_application():\n",
" # Create an LLM to bind with the agent.\n",
" # TODO(developer): replace this with another model if needed\n",
" llm = GoogleGenAI(\n",
@@ -804,7 +807,7 @@
"\n",
" # Load the tools from the Toolbox server\n",
" client = ToolboxClient(\"http://127.0.0.1:5000\")\n",
" tools = client.load_toolset()\n",
" tools = await client.aload_toolset()\n",
"\n",
" # Create a LlamaIndex agent\n",
" agent = AgentWorkflow.from_tools_or_functions(\n",
@@ -820,7 +823,7 @@
" print(f\"---- {query} ----\")\n",
" print(str(response))\n",
"\n",
"await run_agent()"
"await run_application()"
]
},
{

View File

@@ -409,7 +409,7 @@ queries = [
"My check in dates would be from April 10, 2024 to April 19, 2024.",
]
def main():
async def run_application():
# TODO(developer): replace this with another model if needed
model = ChatVertexAI(model_name="gemini-1.5-pro")
# model = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
@@ -417,7 +417,7 @@ def main():
# Load the tools from the Toolbox server
client = ToolboxClient("http://127.0.0.1:5000")
tools = client.load_toolset()
tools = await client.aload_toolset()
agent = create_react_agent(model, tools, checkpointer=MemorySaver())
@@ -427,7 +427,7 @@ def main():
response = agent.invoke(inputs, stream_mode="values", config=config)
print(response["messages"][-1].content)
main()
asyncio.run(run_application())
{{< /tab >}}
{{< tab header="LlamaIndex" lang="python" >}}
import asyncio
@@ -460,7 +460,7 @@ queries = [
"My check in dates would be from April 10, 2024 to April 19, 2024.",
]
async def main():
async def run_application():
# TODO(developer): replace this with another model if needed
llm = GoogleGenAI(
model="gemini-1.5-pro",
@@ -477,7 +477,7 @@ async def main():
# Load the tools from the Toolbox server
client = ToolboxClient("http://127.0.0.1:5000")
tools = client.load_toolset()
tools = await client.aload_toolset()
agent = AgentWorkflow.from_tools_or_functions(
tools,
@@ -490,7 +490,7 @@ async def main():
print(f"---- {query} ----")
print(str(response))
asyncio.run(main())
asyncio.run(run_application())
{{< /tab >}}
{{< /tabpane >}}