From 0 to 1: Quickly Hand-Rolling a Simple Agent with langchain/langgraph
This article is a quick walkthrough of building your own agent with langchain/langgraph, covering a custom LLM wrapper, custom tools, and a simple ReAct loop.
The full project code is available here.
1. Introduction to agents and LangGraph
1.1 What is a "small agent", essentially?
A minimal agent implementation has only 4 parts:
1️⃣ LLM
2️⃣ Tools (functions)
3️⃣ Prompt (tells the model how to think + when to call tools)
4️⃣ AgentExecutor (the scheduling loop)
And the simplest ReAct loop is essentially:
User question
↓
LLM thinks
↓
Decides whether to call a tool
↓
Tool returns a result
↓
LLM thinks again
↓
Final answer
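In plain Python, that loop looks roughly like this (a minimal sketch, not the LangGraph version we build below; `llm`, `tools`, and the JSON action format are hypothetical stand-ins):

```python
def react_loop(llm, tools, question, max_steps=5):
    """Bare-bones ReAct: think -> maybe call a tool -> feed result back."""
    messages = [{"role": "user", "content": question}]
    for _ in range(max_steps):
        decision = llm(messages)             # LLM thinks, returns a JSON-like dict
        if decision["action"] == "finish":   # enough information -> final answer
            return decision["answer"]
        tool = tools[decision["action"]]     # decide which tool to call
        result = tool(**decision["action_args"])
        messages.append(                     # feed the observation back to the LLM
            {"role": "user", "content": f"Tool result: {result}"}
        )
    return "Gave up after max_steps"
```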
1.2 Introduction to LangGraph
In the original langchain agent framework, the agent's internal execution is a black box: you don't know when it will stop, when it will call a tool again, whether it has enough information, whether it should retry, or whether it should take a different path. LangGraph turns this into an explicit transition graph, i.e. an orchestratable agent.
In one sentence: LangGraph is a "controllable agent-execution state machine framework".
The three core components of LangGraph are:

| Component | Description |
| --- | --- |
| State | Essentially: a globally shared data structure |
| Node (state transform) | Essentially: a function that updates the state |
| Edge (execution path) | Plain edge: a fixed path, e.g. A → B. Conditional edge: picks the next path based on the state |
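A tiny hello-world puts all three together (a sketch independent of the weather agent below; names like `CounterState` are made up for illustration):

```python
from typing import TypedDict
from langgraph.graph import StateGraph, END


class CounterState(TypedDict):  # State: globally shared data
    count: int


def increment(state: CounterState) -> CounterState:  # Node: updates the state
    return {"count": state["count"] + 1}


g = StateGraph(CounterState)
g.add_node("increment", increment)
g.set_entry_point("increment")
g.add_conditional_edges(  # Conditional edge: route based on the state
    "increment",
    lambda s: "again" if s["count"] < 3 else "done",
    {"again": "increment", "done": END},
)
print(g.compile().invoke({"count": 0}))  # {'count': 3}
```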
2. Hands-on code
1. Install the environment/dependencies
If you use pyenv for environment management:

```bash
# Install Python 3.8.10
pyenv install 3.8.10
# Make the current directory use Python 3.8.10
pyenv local 3.8.10
# Create a virtual environment
python -m venv venv
# Activate the virtual environment
source venv/bin/activate
```
Install langchain and langgraph:

```bash
pip install langchain
pip install langgraph
```

If anything else turns out to be missing at runtime, just pip install it as you go.
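The file tree below includes a requirements.txt; a minimal one for this project could be the following (contents are my assumption; pin versions to whatever you actually install — `requests` is needed by the custom model wrapper in section 2.5):

```text
langchain
langgraph
requests
```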
2. Build the core modules
The file tree is as follows:
```text
project_root/
│
├── main.py               # Entry point (runs the graph)
├── requirements.txt
│
├── graph/                # Agent execution flow
│   ├── __init__.py
│   ├── state.py          # Defines AgentState
│   ├── nodes.py          # planner_node / tool_node, etc.
│   └── graph.py          # Builds the StateGraph
│
├── tools/                # All tools
│   ├── __init__.py
│   └── tools.py          # Concrete tool implementations (weather, lookups, ...)
│
├── llm/                  # Custom model wrapper
│   ├── __init__.py
│   └── custom_model.py   # The custom model
│
└── utils/
    └── parser.py         # Helper functions such as extract_json
```
2.1 state
The state records the agent's global state:
```python
# graph/state.py
from typing import TypedDict, List, Annotated, Optional
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages


class AgentState(TypedDict):
    messages: Annotated[List[BaseMessage], add_messages]
    steps: list
    next_action: Optional[str]   # `str | None` needs Python 3.10+; Optional works on 3.8
    action_args: Optional[dict]
    step_count: int              # optional counter; unused in this demo
```
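Note that add_messages is a reducer: whatever a node returns under the messages key gets merged (by message id) into the existing list instead of overwriting it. You can see this by calling it directly (a quick illustration):

```python
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph.message import add_messages

merged = add_messages(
    [HumanMessage(content="北京今天天气怎么样?")],  # existing state
    [AIMessage(content="让我查一下。")],             # a node's return value
)
print([m.type for m in merged])  # ['human', 'ai'] -- appended, not replaced
```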
2.2 node
The planner ("thinking") node: this is where the model decides which tool to call and records its arguments.
```python
# graph/nodes.py
from langchain_core.messages import SystemMessage, HumanMessage

from graph.state import AgentState
from llm.custom_model import PostChatModel
from tools.tools import TOOL_REGISTRY
from utils.parser import extract_json

llm = PostChatModel()

PROMPT = """
你是一个天气查询助手。
如果用户询问某个城市天气,请调用 get_weather 工具。
如果信息已经足够,请输出:
{
"thought": "分析过程",
"action": "finish",
"action_args": {}
}
如果需要调用工具,请输出:
{
"thought": "分析过程",
"action": "get_weather",
"action_args": {"city": "城市名"}
}
只输出 JSON,不要额外文字。
"""


def planner_node(state: AgentState) -> AgentState:
    messages = [
        SystemMessage(content=PROMPT),
        *state["messages"],
    ]
    resp = llm.invoke(messages)
    try:
        data = extract_json(resp.content)
    except Exception:
        # If the model's output isn't valid JSON, finish gracefully
        return {
            **state,
            "next_action": "finish",
            "action_args": {},
        }
    action = data.get("action")
    action_args = data.get("action_args", {})
    if not isinstance(action_args, dict):
        action_args = {}
    step = {
        "type": "planner",
        "thought": data.get("thought"),
        "action": action,
        "action_args": action_args,
    }
    return {
        **state,
        "steps": state["steps"] + [step],
        "next_action": action,
        "action_args": action_args,
    }
```
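extract_json lives in utils/parser.py and isn't shown in the original; a minimal version that tolerates markdown code fences around the model's JSON might look like this (my sketch, not necessarily the author's implementation):

```python
# utils/parser.py
import json
import re


def extract_json(text: str) -> dict:
    """Pull the first JSON object out of an LLM response."""
    # Strip ```json ... ``` fences if the model added them
    text = re.sub(r"```(?:json)?", "", text).strip()
    # Grab the outermost {...} span and parse it
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if not match:
        raise ValueError(f"No JSON object found in: {text!r}")
    return json.loads(match.group(0))
```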
The tool node: this is where the chosen tool actually executes.
```python
def tool_node(state: AgentState) -> AgentState:
    action = state["next_action"]
    args = state.get("action_args", {})
    if action not in TOOL_REGISTRY:
        return {
            **state,
            "next_action": "finish",
        }
    tool = TOOL_REGISTRY[action]
    result = tool.invoke(args)
    print(f"[tool] {action}({args}) -> {result}", flush=True)
    step = {
        "type": "tool",
        "tool": action,
        "args": args,
        "result": result,
    }
    return {
        **state,
        "steps": state["steps"] + [step],
        # add_messages merges by id, so re-sending the old messages is harmless;
        # what matters is appending the tool result as a new message
        "messages": state["messages"] + [
            HumanMessage(content=f"工具返回结果:{result}")
        ],
        "next_action": None,
        "action_args": None,
    }
```
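You can sanity-check tool_node in isolation by handing it a hand-built state (a quick smoke test, not part of the project files):

```python
fake_state = {
    "messages": [],
    "steps": [],
    "next_action": "get_weather",
    "action_args": {"city": "北京"},
}
out = tool_node(fake_state)
print(out["steps"][-1]["result"])  # the simulated weather JSON
```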
2.3 graph
```python
# graph/graph.py
from langgraph.graph import StateGraph, END

from graph.state import AgentState
from graph.nodes import planner_node, tool_node

graph = StateGraph(AgentState)
graph.add_node("planner", planner_node)
graph.add_node("tool", tool_node)
graph.set_entry_point("planner")
graph.add_conditional_edges(
    "planner",
    lambda s: s["next_action"],
    {
        "get_weather": "tool",
        "finish": END,  # END is the same as the string "__end__"
    },
)
graph.add_edge("tool", "planner")

app = graph.compile()
```
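If you want to see the wiring, the compiled graph can print itself as ASCII (this uses langgraph's built-in graph drawing, which relies on the optional grandalf package, so treat it as an optional extra):

```python
# pip install grandalf
app.get_graph().print_ascii()
```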
2.4 tool
Tool definition and registration:
```python
# tools/tools.py
import time
import json

from langchain_core.tools import tool


@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city"""
    # Simulated weather data
    weather_data = {
        "city": city,
        "temperature": 25,
        "condition": "clear sky",
        "timestamp": int(time.time()),
    }
    return json.dumps(weather_data, ensure_ascii=False)


# Registry used by tool_node to look tools up by name
TOOL_REGISTRY = {"get_weather": get_weather}
```
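A @tool-decorated function is a LangChain BaseTool, so you can call it directly with .invoke to test it:

```python
print(get_weather.invoke({"city": "北京"}))
# {"city": "北京", "temperature": 25, "condition": "clear sky", "timestamp": ...}
```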
2.5 llm (custom model)
A custom LLM wrapper node; adapt the request/response handling to match how your actual endpoint is called.
```python
# llm/custom_model.py
import requests
from typing import List, Optional, ClassVar, Dict, Any

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.chat_generation import ChatGeneration


class PostChatModel(BaseChatModel):
    model_name: str = "post-chat-model"

    # ===== Default configuration =====
    DEFAULT_URL: ClassVar[str] = "xxxxx"
    DEFAULT_HEADERS: ClassVar[dict] = {
        "Content-Type": "application/json",
        "Authorization": "xxxxxx",
    }
    DEFAULT_TIMEOUT: ClassVar[int] = 600

    # ===== Field defaults =====
    url: str = DEFAULT_URL
    headers: Dict[str, str] = DEFAULT_HEADERS
    timeout: int = DEFAULT_TIMEOUT
    default_model_meta: Dict[str, Any] = {
        "frequency_penalty": 0,
        "max_tokens": 16384,
        "response_format": {"type": "json_object"},
        "stream": False,
        "temperature": 0.95,
        "top_k": 0,
        "top_p": 0.9,
    }

    def __init__(
        self,
        url: Optional[str] = None,
        headers: Optional[dict] = None,
        timeout: Optional[int] = None,
        default_model_meta: Optional[dict] = None,
    ):
        super().__init__()
        self.url = url or self.DEFAULT_URL
        self.headers = headers or self.DEFAULT_HEADERS
        self.timeout = timeout or self.DEFAULT_TIMEOUT
        # Default model_meta; can be overridden per invoke()
        self.default_model_meta = default_model_meta or {
            "frequency_penalty": 0,
            "max_tokens": 16384,
            "response_format": {"type": "json_object"},
            "stream": False,
            "temperature": 0.95,
            "top_k": 0,
            "top_p": 0.9,
        }

    @property
    def _llm_type(self) -> str:
        return "post_chat_model"

    def _messages_to_openai(self, messages: List[BaseMessage]):
        """LangChain Message -> OpenAI-style dict"""
        role_mapping = {
            "human": "user",
            "ai": "assistant",
            "system": "system",
        }
        return [
            {
                "role": role_mapping.get(m.type, m.type),
                "content": m.content,
            }
            for m in messages
        ]

    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        model_meta = self.default_model_meta.copy()
        # Allow per-invoke() overrides of the sampling parameters
        for k in ["temperature", "top_p", "top_k", "max_tokens", "frequency_penalty"]:
            if k in kwargs:
                model_meta[k] = kwargs[k]
        data = {
            "max_completion_tokens": kwargs.get("max_completion_tokens", 65535),
            "messages": self._messages_to_openai(messages),
            "model_meta": model_meta,
            "stream": self.default_model_meta["stream"],
        }
        response = requests.post(
            self.url,
            headers=self.headers,
            json=data,
            timeout=self.timeout,
        )
        response.raise_for_status()
        result = response.json()

        # === Key step: unpack the response ===
        choices = result.get("choices", [])
        if not choices:
            raise ValueError(f"Empty choices from LLM: {result}")
        message = choices[0].get("message", {})
        content = message.get("content", "")

        # Wrap the text in a ChatGeneration / ChatResult for LangChain
        chat_generation = ChatGeneration(message=AIMessage(content=content))
        return ChatResult(generations=[chat_generation])
```
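A quick smoke test of the wrapper outside the graph (fill in your real URL and auth first):

```python
from langchain_core.messages import HumanMessage

llm = PostChatModel()
print(llm.invoke([HumanMessage(content="北京今天天气怎么样?")]).content)
```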
2.6 Run
```python
# main.py
from graph.graph import app
from langchain_core.messages import HumanMessage

result = app.invoke({
    "messages": [HumanMessage(content="北京今天天气怎么样?")],
    "steps": [],
    "next_action": None,
    "action_args": None,
})
print(result)
```
Output:
```text
[tool] get_weather({'city': '北京'}) -> {"city": "北京", "temperature": 25, "condition": "clear sky", "timestamp": 1770894620}
{'messages': [HumanMessage(content='北京今天天气怎么样?', additional_kwargs={}, response_metadata={}, id='xxxxxxx'), HumanMessage(content='工具返回结果:{"city": "北京", "temperature": 25, "condition": "clear sky", "timestamp": 1770894620}', additional_kwargs={}, response_metadata={}, id='xxxxxxx')], 'steps': [{'type': 'planner', 'thought': '用户询问北京今天的天气,需要调用get_weather工具获取北京的天气信息', 'action': 'get_weather', 'action_args': {'city': '北京'}}, {'type': 'tool', 'tool': 'get_weather', 'args': {'city': '北京'}, 'result': '{"city": "北京", "temperature": 25, "condition": "clear sky", "timestamp": 1770894620}'}, {'type': 'planner', 'thought': '已通过get_weather工具获取北京的天气信息,包含温度25度、天气状况为clear sky,信息足够回答用户问题,无需继续操作', 'action': 'finish', 'action_args': {}}], 'next_action': 'finish', 'action_args': {}}
```
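Instead of invoke, you can also stream per-node updates to watch the planner/tool loop tick (a sketch; the exact shape of each update dict can vary across langgraph versions):

```python
for update in app.stream({
    "messages": [HumanMessage(content="北京今天天气怎么样?")],
    "steps": [],
    "next_action": None,
    "action_args": None,
}):
    # Each update maps a node name to the state delta it produced
    print(update.keys())
```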
3. Full code
The complete project code is available at the link at the top of this article.