20250912
272  test/langgraph/quickstart_graph_api.ipynb  Normal file
File diff suppressed because one or more lines are too long

144  test/langgraph/quickstart_graph_api.py  Normal file
@@ -0,0 +1,144 @@
# Step 0: Define tools and model

from langchain_core.tools import tool
from langchain_community.chat_models.tongyi import ChatTongyi

llm = ChatTongyi(
    model="qwen-max",  # qwen-max is used here as an example; swap in another model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    streaming=True,
    # other params...
)
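
# (Editor's note, not part of the committed file: ChatTongyi authenticates via
# the DASHSCOPE_API_KEY environment variable — the companion notebook below sets
# it with os.environ before constructing the model.)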

# Define tools
@tool
def multiply(a: int, b: int) -> int:
    """Multiply a and b.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Adds a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def divide(a: int, b: int) -> float:
    """Divide a and b.

    Args:
        a: first int
        b: second int
    """
    return a / b


# Augment the LLM with tools
tools = [add, multiply, divide]
tools_by_name = {tool.name: tool for tool in tools}
llm_with_tools = llm.bind_tools(tools)
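
# (Editor's sketch, not part of the committed file: @tool wraps each function in
# a LangChain StructuredTool, so it can be sanity-checked locally with a dict of
# arguments before any model is involved.)
assert multiply.invoke({"a": 3, "b": 4}) == 12
assert tools_by_name["divide"].invoke({"a": 7, "b": 2}) == 3.5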

# Step 1: Define state

from langchain_core.messages import AnyMessage
from typing_extensions import TypedDict, Annotated
import operator


class MessagesState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]
    llm_calls: int
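
# (Editor's note, not part of the committed file: the Annotated reducer means
# node updates to "messages" are concatenated rather than overwritten, e.g.
# operator.add([msg_a], [msg_b]) == [msg_a, msg_b]. Plain keys like llm_calls
# have no reducer, so the most recent value simply replaces the old one.)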

# Step 2: Define model node

from langchain_core.messages import SystemMessage


def llm_call(state: dict):
    """LLM decides whether to call a tool or not"""

    return {
        "messages": [
            llm_with_tools.invoke(
                [
                    SystemMessage(
                        content="You are a helpful assistant tasked with performing arithmetic on a set of inputs."
                    )
                ]
                + state["messages"]
            )
        ],
        "llm_calls": state.get("llm_calls", 0) + 1,
    }
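
# (Editor's note: a node returns a partial state update, not the whole state.
# LangGraph merges {"messages": [...]} via the operator.add reducer declared
# above and overwrites "llm_calls" with the incremented count.)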

# Step 3: Define tool node


from langchain_core.messages import ToolMessage


def tool_node(state: dict):
    """Performs the tool call"""

    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_by_name[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
    return {"messages": result}
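
# (Editor's note: each entry in .tool_calls is a dict shaped like
# {"name": "add", "args": {"a": 3, "b": 4}, "id": "call_..."}, so
# tool_call["name"] selects the tool and tool_call["args"] feeds .invoke().)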

# Step 4: Define logic to determine whether to end

from typing import Literal
from langgraph.graph import StateGraph, START, END


# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call
def should_continue(state: MessagesState) -> Literal["tool_node", END]:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""

    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM makes a tool call, then perform an action
    if last_message.tool_calls:
        return "tool_node"
    # Otherwise, we stop (reply to the user)
    return END
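
# (Editor's note: END is the string sentinel "__end__", which is why it can be
# returned as a route and listed as a target in add_conditional_edges below;
# static type checkers may flag Literal["tool_node", END], but it works at runtime.)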

# Step 5: Build agent

# Build workflow
agent_builder = StateGraph(MessagesState)

# Add nodes
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)

# Add edges to connect nodes
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    ["tool_node", END],
)
agent_builder.add_edge("tool_node", "llm_call")

# Compile the agent
agent = agent_builder.compile()


from IPython.display import Image, display

# Show the agent
display(Image(agent.get_graph(xray=True).draw_mermaid_png()))

# Invoke
# from langchain_core.messages import HumanMessage
# messages = [HumanMessage(content="Add 3 and 4.")]
# messages = agent.invoke({"messages": messages})
# for m in messages["messages"]:
#     m.pretty_print()
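
# (Editor's sketch, not part of the committed file: the compiled graph also
# supports streaming, emitting one update per executed node — mirroring the
# functional-API notebook below.)
# for chunk in agent.stream({"messages": [HumanMessage(content="Add 3 and 4.")]},
#                           stream_mode="updates"):
#     print(chunk)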
205  test/langgraph/quistart_functional_api.ipynb  Normal file
@@ -0,0 +1,205 @@
{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-12T15:52:56.909385Z",
     "start_time": "2025-09-12T15:52:56.417088Z"
    }
   },
   "source": [
    "import os\n",
    "\n",
    "os.environ['DASHSCOPE_API_KEY'] = 'sk-e2a05bbcfac84e53b73f98acef15a009'\n",
    "\n",
    "# Step 0: Define tools and model\n",
    "\n",
    "from langchain_core.tools import tool\n",
    "from langchain_community.chat_models.tongyi import ChatTongyi\n",
    "\n",
    "llm = ChatTongyi(\n",
    "    model=\"qwen-max\",  # qwen-max is used here as an example; swap in another model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models\n",
    "    streaming=True,\n",
    "    # other params...\n",
    ")"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-12T15:54:08.425580Z",
     "start_time": "2025-09-12T15:54:08.374623Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\n",
    "\n",
    "# Define tools\n",
    "@tool\n",
    "def multiply(a: int, b: int) -> int:\n",
    "    \"\"\"Multiply a and b.\n",
    "\n",
    "    Args:\n",
    "        a: first int\n",
    "        b: second int\n",
    "    \"\"\"\n",
    "    return a * b\n",
    "\n",
    "\n",
    "@tool\n",
    "def add(a: int, b: int) -> int:\n",
    "    \"\"\"Adds a and b.\n",
    "\n",
    "    Args:\n",
    "        a: first int\n",
    "        b: second int\n",
    "    \"\"\"\n",
    "    return a + b\n",
    "\n",
    "\n",
    "@tool\n",
    "def divide(a: int, b: int) -> float:\n",
    "    \"\"\"Divide a and b.\n",
    "\n",
    "    Args:\n",
    "        a: first int\n",
    "        b: second int\n",
    "    \"\"\"\n",
    "    return a / b\n",
    "\n",
    "\n",
    "# Augment the LLM with tools\n",
    "tools = [add, multiply, divide]\n",
    "tools_by_name = {tool.name: tool for tool in tools}\n",
    "llm_with_tools = llm.bind_tools(tools)\n",
    "\n",
    "from langgraph.graph import add_messages\n",
    "from langchain_core.messages import (\n",
    "    SystemMessage,\n",
    "    HumanMessage,\n",
    "    BaseMessage,\n",
    "    ToolCall,\n",
    ")\n",
    "from langgraph.func import entrypoint, task\n",
    "\n",
    "# Step 1: define model node\n",
    "@task\n",
    "def call_llm(messages: list[BaseMessage]):\n",
    "    \"\"\"LLM decides whether to call a tool or not\"\"\"\n",
    "    return llm_with_tools.invoke(\n",
    "        [\n",
    "            SystemMessage(\n",
    "                content=\"You are a helpful assistant tasked with performing arithmetic on a set of inputs.\"\n",
    "            )\n",
    "        ]\n",
    "        + messages\n",
    "    )\n",
    "\n",
    "\n",
    "# Step 2: define tool node\n",
    "@task\n",
    "def call_tool(tool_call: ToolCall):\n",
    "    \"\"\"Performs the tool call\"\"\"\n",
    "    tool = tools_by_name[tool_call[\"name\"]]\n",
    "    return tool.invoke(tool_call)\n",
    "\n",
    "\n",
    "# Step 3: define agent\n",
    "@entrypoint()\n",
    "def agent(messages: list[BaseMessage]):\n",
    "    llm_response = call_llm(messages).result()\n",
    "\n",
    "    while True:\n",
    "        if not llm_response.tool_calls:\n",
    "            break\n",
    "\n",
    "        # Execute tools\n",
    "        tool_result_futures = [\n",
    "            call_tool(tool_call) for tool_call in llm_response.tool_calls\n",
    "        ]\n",
    "        tool_results = [fut.result() for fut in tool_result_futures]\n",
    "        messages = add_messages(messages, [llm_response, *tool_results])\n",
    "        llm_response = call_llm(messages).result()\n",
    "\n",
    "    messages = add_messages(messages, llm_response)\n",
    "    return messages"
   ],
   "id": "8a77a9b24ee9616d",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-12T15:54:11.693756Z",
     "start_time": "2025-09-12T15:54:10.101700Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\n",
    "# Invoke\n",
    "messages = [HumanMessage(content=\"Add 3 and 4.\")]\n",
    "for chunk in agent.stream(messages, stream_mode=\"updates\"):\n",
    "    print(chunk)\n",
    "    print(\"\\n\")"
   ],
   "id": "7c4b06da8200b106",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'call_llm': AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'function', 'function': {'name': 'add', 'arguments': '{\"a\": 3, \"b\": 4}'}}]}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'tool_calls', 'request_id': '6d8c6555-1a67-4cc9-a93f-57e94bc20842', 'token_usage': {'input_tokens': 354, 'output_tokens': 22, 'total_tokens': 376, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--afcea3de-940e-45c6-ba96-bbd7e41fa115-0', tool_calls=[{'name': 'add', 'args': {'a': 3, 'b': 4}, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'tool_call'}], chunk_position=None)}\n",
      "\n",
      "\n",
      "{'call_tool': ToolMessage(content='7', name='add', id='aeaf3d29-254b-48ab-a933-814e9ea72394', tool_call_id='call_ef8c897dd4f84afbbf0927')}\n",
      "\n",
      "\n",
      "{'call_llm': AIMessage(content='The sum of 3 and 4 is 7.', additional_kwargs={}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'stop', 'request_id': '310102ab-48dc-4e80-bc57-ca8814239a65', 'token_usage': {'input_tokens': 386, 'output_tokens': 13, 'total_tokens': 399, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--b3dffae8-42c2-492e-a1f4-e659eba6a879-0', chunk_position=None)}\n",
      "\n",
      "\n",
      "{'agent': [HumanMessage(content='Add 3 and 4.', additional_kwargs={}, response_metadata={}, id='40fc3758-a8ab-4302-aff9-8dfbdec16fa0'), AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'function', 'function': {'name': 'add', 'arguments': '{\"a\": 3, \"b\": 4}'}}]}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'tool_calls', 'request_id': '6d8c6555-1a67-4cc9-a93f-57e94bc20842', 'token_usage': {'input_tokens': 354, 'output_tokens': 22, 'total_tokens': 376, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--afcea3de-940e-45c6-ba96-bbd7e41fa115-0', tool_calls=[{'name': 'add', 'args': {'a': 3, 'b': 4}, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'tool_call'}], chunk_position=None), ToolMessage(content='7', name='add', id='aeaf3d29-254b-48ab-a933-814e9ea72394', tool_call_id='call_ef8c897dd4f84afbbf0927'), AIMessage(content='The sum of 3 and 4 is 7.', additional_kwargs={}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'stop', 'request_id': '310102ab-48dc-4e80-bc57-ca8814239a65', 'token_usage': {'input_tokens': 386, 'output_tokens': 13, 'total_tokens': 399, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--b3dffae8-42c2-492e-a1f4-e659eba6a879-0', chunk_position=None)]}\n",
      "\n",
      "\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "7e55492ae0289f06"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
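
A note on the add_messages helper used in the entrypoint above (an editor's sketch, not part of the commit): unlike plain list concatenation, add_messages appends and also de-duplicates/updates by message id, which is why the loop can call it repeatedly without growing duplicates. A minimal stand-alone illustration, with hypothetical ids:

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import add_messages

history = [HumanMessage(content="Add 3 and 4.", id="h1")]
# Appends the new message to the list.
history = add_messages(history, [AIMessage(content="The sum is 7.", id="a1")])
# Re-merging a message with the same id updates it in place instead of appending.
history = add_messages(history, [AIMessage(content="3 + 4 = 7.", id="a1")])
assert [m.content for m in history] == ["Add 3 and 4.", "3 + 4 = 7."]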