```python
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from langchain_tavily import TavilySearch
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command, interrupt
from langchain_core.tools import InjectedToolCallId, tool
```
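The next fragment picks up partway through the graph construction. For reference, here is a minimal sketch of the setup it assumes — the model choice, `max_results`, and the `llm_with_tools` / `chatbot` names are assumptions rather than the tutorial's exact code, and `State` is the TypedDict defined in the full listing at the end of this section. This wiring would sit just before the `add_edge` / `compile` lines shown below:

```python
# Minimal sketch of the assumed setup (names and model choice are assumptions).
load_dotenv()
memory = InMemorySaver()

tool = TavilySearch(max_results=2)
tools = [tool]

llm = ChatOpenAI(model="gpt-4o-mini")  # any tool-calling chat model works here
llm_with_tools = llm.bind_tools(tools)

def chatbot(state: State):
    # Ask the tool-enabled model for the next message given the history so far
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=tools))
# Route to the tools node whenever the model requests a tool call, otherwise finish
graph_builder.add_conditional_edges("chatbot", tools_condition)
```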
```python
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile(checkpointer=memory)
```
2. Add steps
Add steps to the graph. Each step will be checkpointed in its state history:
```python
config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {
        "messages": [
            {
                "role": "user",
                "content": (
                    "I'm learning LangGraph. "
                    "Could you do some research on it for me?"
                ),
            },
        ],
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
```
================================ Human Message =================================
I'm learning LangGraph. Could you do some research on it for me?

================================== Ai Message ==================================

I'll research LangGraph for you. Let me gather some information.
Tool Calls:
  tavily_search (call_6e5538b4ae9f4d828143bb)
  Call ID: call_6e5538b4ae9f4d828143bb
  Args:
   query: LangGraph overview, features, use cases, and learning resources
   search_depth: advanced

================================= Tool Message =================================
Name: tavily_search
{"query":"LangGraph overview, features, use cases, and learning resources", "follow_up_questions":null, "answer":null, "images": [], "results": [{"url": "https://www.langchain.com/resources", "title": "Resources - LangChain", "content": "Resources Products Platforms LangSmithLangGraph Platform Resources Resources HubBlogCustomer StoriesLangChain AcademyCommunityExpertsChangelog Python Sign up # Resources Use cases & inspiration Built with LangGraph Use cases & inspiration ## Built with LangGraph Use cases & inspiration ## Built with LangGraph The Definitive Guide to Testing LLM Applications ## The Definitive Guide to Testing LLM Applications Use cases & inspiration Use cases & inspiration Get inspired LangChain State of AI 2024 Report ## LangChain State of AI 2024 Report ## LangChain State of AI 2024 Report See product data LangChain, LangSmith, and LangGraph are critical parts of the reference architecture toget you from prototype to production. Products Resources Python DocsJS/TS DocsGitHubIntegrationsChangelogCommunityLangSmith Trust Portal Sign up for our newsletter to stay up to date", "score": 0.98575, "raw_content":null}, {"url": "https://www.langchain.com/langgraph", "title": "LangGraph - LangChain", "content": "Design agent-driven user experiences with LangGraph Platform's APIs. Quickly deploy and scale your application with infrastructure built for agents. LangGraph sets the foundation for how we can build and scale AI workloads — from conversational agents, complex task automation, to custom LLM-backed experiences that 'just work'. The next chapter in building complex production-ready features with LLMs is agentic, and with LangGraph and LangSmith, LangChain delivers an out-of-the-box solution to iterate quickly, debug immediately, and scale effortlessly.” LangGraph sets the foundation for how we can build and scale AI workloads — from conversational agents, complex task automation, to custom LLM-backed experiences that 'just work'. LangGraph Platform is a service for deploying and scaling LangGraph applications, with an opinionated API for building agent UXs, plus an integrated developer studio.", "score": 0.98332, "raw_content": null}], "response_time": 1.64, "request_id": "c26d7a05-dc55-4209-965a-0479239767e9"} ================================== Ai Message ==================================
Based on my research on LangGraph, here's a comprehensive overview:
### Overview

LangGraph is a framework developed by **LangChain** for building stateful, multi-actor applications with LLMs. It's designed to orchestrate complex workflows involving multiple steps, agents, or tools while maintaining state across interactions.

### Key Features

1. **Agent-Driven Architecture**: Enables building conversational agents that can handle complex, multi-step tasks
2. **Stateful Workflows**: Maintains context and state across interactions for coherent task execution
3. **Task Automation**: Supports automation of complex workflows requiring sequential decision-making
4. **Scalable Infrastructure**: Built for production deployment with scaling capabilities
5. **Integrated Developer Studio**: Provides tools for debugging, iteration, and monitoring workflows

### Primary Use Cases

- **Conversational Agents**: Build chatbots that handle complex, multi-turn conversations
- **Task Automation**: Create workflows for document processing, data extraction, and decision-making
- **Custom LLM Experiences**: Develop specialized applications leveraging multiple LLMs/tools
- **AI Orchestration**: Coordinate multiple AI systems in complex pipelines

### Learning Resources

1. **Official Documentation**: [LangGraph Documentation](https://www.langchain.com/langgraph) (Best starting point)
2. **LangChain Resources**: [LangChain Resources Hub](https://www.langchain.com/resources) (Includes use cases and examples)
3. **Developer Studio**: Integrated tools for building and testing LangGraph applications
4. **LangChain Academy**: Offers courses on building with LangGraph (accessible through LangChain resources)

### Getting Started

LangGraph is part of the LangChain ecosystem. You can:

1. Install via pip: `pip install langgraph`
2. Explore tutorials in the documentation
3. Experiment with pre-built agent templates
Would you like me to explain any specific aspect in more detail or help with a learning roadmap?
Continue the conversation:
```python
events = graph.stream(
    {
        "messages": [
            {
                "role": "user",
                "content": (
                    "Ya that's helpful. Maybe I'll "
                    "build an autonomous agent with it!"
                ),
            },
        ],
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
```
================================ Human Message =================================
Ya that's helpful. Maybe I'll build an autonomous agent with it!

================================== Ai Message ==================================
That's an excellent idea! Building an autonomous agent is one of LangGraph's core strengths. Here's what you should know about creating agents with LangGraph:

### Key Capabilities for Autonomous Agents

1. **State Management**: LangGraph maintains agent state across steps using:
   - `StateGraph` for tracking conversation/memory
   - Custom state objects for persistent data

2. **Decision Nodes**: Create nodes that decide next actions based on:

   ```python
   def should_continue(state):
       if state["query_complete"]:
           return "end"
       else:
           return "research"
   ```

3. **Tool Integration**: Easily connect external tools:

   ```python
   from langgraph.prebuilt import ToolNode
   tool_node = ToolNode([search_tool, math_tool])
   ```

4. **Experiment with Templates**: Try pre-built agent architectures:

   ```python
   pip install langgraph
   from langgraph.prebuilt import AgentExecutor
   ```

**Key Concepts to Master:**

- `StateGraph` for persistent memory
- Conditional edges for decision-making
- Tool invocation nodes
- Error handling with try ... except nodes

**Sample Project Flow:**

```
graph LR
    A[User Input] --> B{Agent Node}
    B -->|Requires Research| C[Search Tool]
    B -->|Requires Calculation| D[Math Tool]
    C --> B
    D --> B
    B -->|Complete| E[Response]
```

**Pro Tips**

- Use LangSmith for tracing and debugging agent decisions
- Start with single-agent systems before multi-agent setups
- Implement fallback mechanisms for tool errors
- Add reflection nodes for self-correction:

  ```python
  def reflect_on_quality(state):
      if state["response_quality"] < 0.8:
          return "improve_response"
  ```

Would you like me to:
a) Provide a simple starter code template
b) Explain the agent architecture in more detail
c) Suggest specific agent project ideas?
With both conversation turns checkpointed, replay the full state history to see every step the graph recorded:

```python
to_replay = None
for state in graph.get_state_history(config):
    print("Num Messages: ", len(state.values["messages"]), "Next: ", state.next)
    print("-" * 80)
    if len(state.values["messages"]) == 6:
        # We are somewhat arbitrarily selecting a specific state based on
        # the number of chat messages in the state.
        to_replay = state
```
```
Num Messages: 6 Next: ()
--------------------------------------------------------------------------------
Num Messages: 5 Next: ('chatbot',)
--------------------------------------------------------------------------------
Num Messages: 4 Next: ('__start__',)
--------------------------------------------------------------------------------
Num Messages: 4 Next: ()
--------------------------------------------------------------------------------
Num Messages: 3 Next: ('chatbot',)
--------------------------------------------------------------------------------
Num Messages: 2 Next: ('tools',)
--------------------------------------------------------------------------------
Num Messages: 1 Next: ('chatbot',)
--------------------------------------------------------------------------------
Num Messages: 0 Next: ('__start__',)
--------------------------------------------------------------------------------
```
```python
# The `checkpoint_id` in the `to_replay.config` corresponds to a state we've persisted to our checkpointer.
for event in graph.stream(None, to_replay.config, stream_mode="values"):
    if "messages" in event:
        event["messages"][-1].pretty_print()
```
================================== Ai Message ==================================
That's an excellent idea! Building an autonomous agent is one of LangGraph's core strengths. Here's what you should know about creating agents with LangGraph:

### Key Capabilities for Autonomous Agents

1. **State Management**: LangGraph maintains agent state across steps using:
   - `StateGraph` for tracking conversation/memory
   - Custom state objects for persistent data

2. **Decision Nodes**: Create nodes that decide next actions based on:

   ```python
   def should_continue(state):
       if state["query_complete"]:
           return "end"
       else:
           return "research"
   ```

3. **Tool Integration**: Easily connect external tools:

   ```python
   from langgraph.prebuilt import ToolNode
   tool_node = ToolNode([search_tool, math_tool])
   ```

4. **Experiment with Templates**: Try pre-built agent architectures:

   ```python
   pip install langgraph
   from langgraph.prebuilt import AgentExecutor
   ```

**Key Concepts to Master:**

- `StateGraph` for persistent memory
- Conditional edges for decision-making
- Tool invocation nodes
- Error handling with try ... except nodes

**Sample Project Flow:**

```
graph LR
    A[User Input] --> B{Agent Node}
    B -->|Requires Research| C[Search Tool]
    B -->|Requires Calculation| D[Math Tool]
    C --> B
    D --> B
    B -->|Complete| E[Response]
```

**Pro Tips**

- Use LangSmith for tracing and debugging agent decisions
- Start with single-agent systems before multi-agent setups
- Implement fallback mechanisms for tool errors
- Add reflection nodes for self-correction:

  ```python
  def reflect_on_quality(state):
      if state["response_quality"] < 0.8:
          return "improve_response"
  ```

Would you like me to:
a) Provide a simple starter code template
b) Explain the agent architecture in more detail
c) Suggest specific agent project ideas?
The full code for this section:

```python
from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from langchain_tavily import TavilySearch
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command, interrupt
from langchain_core.tools import InjectedToolCallId, tool

import json

from langchain_core.messages import ToolMessage

load_dotenv()

memory = InMemorySaver()


class State(TypedDict):
    messages: Annotated[list, add_messages]
```
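The listing jumps from the `State` definition straight to writing `graph.png`, so the model/tool/graph wiring (sketched near the top of this section) and the `png_bytes` variable are not shown. A minimal sketch of the rendering step, assuming the compiled `graph` from that setup and LangGraph's Mermaid PNG exporter:

```python
# Assumes `graph` was built and compiled as in the setup sketch earlier.
# Render the compiled graph to PNG bytes so it can be written to graph.png below.
png_bytes = graph.get_graph().draw_mermaid_png()
```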
```python
with open("graph.png", "wb") as f:
    f.write(png_bytes)

import os

os.system("open graph.png")  # macOS image viewer; use xdg-open / start elsewhere

config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {
        "messages": [
            {
                "role": "user",
                "content": (
                    "I'm learning LangGraph. "
                    "Could you do some research on it for me?"
                ),
            },
        ],
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()

events = graph.stream(
    {
        "messages": [
            {
                "role": "user",
                "content": (
                    "Ya that's helpful. Maybe I'll "
                    "build an autonomous agent with it!"
                ),
            },
        ],
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()

to_replay = None
for state in graph.get_state_history(config):
    print("Num Messages: ", len(state.values["messages"]), "Next: ", state.next)
    print("-" * 80)
    if len(state.values["messages"]) == 6:
        # We are somewhat arbitrarily selecting a specific state based on
        # the number of chat messages in the state.
        to_replay = state

print(to_replay.next)
print(to_replay.config)
```
The `checkpoint_id` in `to_replay.config` corresponds to a state we've persisted to our checkpointer.
```python
for event in graph.stream(None, to_replay.config, stream_mode="values"):
    if "messages" in event:
        event["messages"][-1].pretty_print()
```
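In the run above, the selected checkpoint has an empty `next`, so resuming it only re-prints its final message. To actually re-execute part of the graph, you can resume from a checkpoint that still has a pending node — for example, the one whose `next` is `('tools',)` in the state history shown earlier. A minimal sketch under that assumption (same `graph` and `config` as before; `to_fork` is a name introduced here):

```python
# Find the checkpoint where the "tools" node has not run yet
# (it appears in the state-history output above as Next: ('tools',)).
to_fork = None
for state in graph.get_state_history(config):
    if state.next == ("tools",):
        to_fork = state
        break

# Resuming from that checkpoint's config re-runs the tool call and the chatbot's
# follow-up; the earlier checkpoints stay in the thread's history, so this
# effectively forks the conversation from that point.
if to_fork is not None:
    for event in graph.stream(None, to_fork.config, stream_mode="values"):
        if "messages" in event:
            event["messages"][-1].pretty_print()
```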