user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?" config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {"messages": [{"role": "user", "content": user_input}]},
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()


def chatbot(state: State):
    message = llm_with_tools.invoke(state["messages"])
    # Because we will be interrupting during tool execution,
    # we disable parallel tool calling to avoid repeating any
    # tool invocations when we resume.
    assert len(message.tool_calls) <= 1
    return {"messages": [message]}


graph_builder.add_node("chatbot", chatbot)
graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
Conversation transcript:
================================ Human Message =================================
I need some expert guidance for building an AI agent. Could you request assistance for me?
================================== Ai Message ==================================
Tool Calls:
  human_assistance (call_ud2cy1vc)
  Call ID: call_ud2cy1vc
  Args:
    query: I need expert guidance for building an AI agent.
human_response = ( "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent." " It's much more reliable and extensible than simple autonomous agents." )
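The resumed run shown next was produced by feeding this reply back into the paused graph. A minimal sketch of that step, assuming the human_assistance tool reads the resumed value from a "data" key (the key name is an assumption, not shown in this section):

from langgraph.types import Command

# Wrap the expert's reply in a Command so the paused tool call can resume.
# The {"data": ...} shape is an assumption about what human_assistance
# expects back from interrupt(); adjust it to match your tool.
human_command = Command(resume={"data": human_response})

events = graph.stream(human_command, config, stream_mode="values")
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()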
================================ Human Message =================================
I need some expert guidance for building an AI agent. Could you request assistance for me?
================================== Ai Message ==================================
Tool Calls:
  human_assistance (call_gslu6y58)
  Call ID: call_gslu6y58
  Args:
    query: I need expert guidance for building an AI agent.
================================== Ai Message ==================================
Tool Calls:
  human_assistance (call_gslu6y58)
  Call ID: call_gslu6y58
  Args:
    query: I need expert guidance for building an AI agent.
================================= Tool Message =================================
Name: human_assistance
We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.
================================== Ai Message ==================================
Sure, I can provide some guidance based on the expert advice we received. According to our experts, you should consider using a platform like LangGraph for building your AI agent. They found it to be more reliable and easier to extend compared to simpler autonomous agents.
Would you like more detailed information or specific steps on how to get started with LangGraph?
Congratulations! You've used interrupt to add human-in-the-loop execution to your chatbot, allowing for human oversight and intervention when needed. This opens up the potential UIs you can create with your AI systems.
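For reference, the human_assistance tool invoked in the transcript is not defined in this section. A minimal sketch of one possible implementation, following the standard LangGraph pattern (the "data" key mirrors the resume payload assumed above):

from langchain_core.tools import tool
from langgraph.types import interrupt

@tool
def human_assistance(query: str) -> str:
    """Request assistance from a human."""
    # interrupt() pauses the graph run here and surfaces the query;
    # whatever is later passed via Command(resume=...) is returned.
    human_reply = interrupt({"query": query})
    return human_reply["data"]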
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from langchain_tavily import TavilySearch
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command, interrupt
from langchain_core.tools import tool
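The listing references State, llm_with_tools, memory, graph_builder, and a "tools" node without showing their setup. A minimal sketch of those pieces, consistent with the imports above; the model name and the Tavily result limit are assumptions:

from typing import Annotated
from typing_extensions import TypedDict

class State(TypedDict):
    # Conversation history; add_messages appends new messages
    # instead of overwriting the list.
    messages: Annotated[list, add_messages]

load_dotenv()  # expects OPENAI_API_KEY and TAVILY_API_KEY in the environment

llm = ChatOpenAI(model="gpt-4o-mini")  # model name is an assumption
tools = [TavilySearch(max_results=2), human_assistance]
llm_with_tools = llm.bind_tools(tools)

memory = InMemorySaver()
graph_builder = StateGraph(State)
graph_builder.add_node("tools", ToolNode(tools=tools))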
def chatbot(state: State):
    message = llm_with_tools.invoke(state["messages"])
    # Because we will be interrupting during tool execution,
    # we disable parallel tool calling to avoid repeating any
    # tool invocations when we resume.
    assert len(message.tool_calls) <= 1
    return {"messages": [message]}
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")

graph = graph_builder.compile(checkpointer=memory)
withopen("graph.png", "wb") as f: f.write(png_bytes)
# import os
# os.system("open graph.png")
user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?" config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {"messages": [{"role": "user", "content": user_input}]},
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
human_response = (
    "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
    " It's much more reliable and extensible than simple autonomous agents."
)
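At this point the run is paused inside human_assistance, waiting for the reply defined above to be passed back via Command(resume=...). Before resuming, you can confirm where the graph stopped; a short check, assuming the compiled graph and config from the listing:

# Inspect the checkpoint saved for this thread_id; .next lists the
# node(s) that will execute once the run is resumed.
snapshot = graph.get_state(config)
print(snapshot.next)  # expected to show the pending "tools" node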