Use LangGraph’s message handling to manage conversation history.
from typing import Annotated, Sequence

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langgraph.graph import END, START, StateGraph, add_messages
from typing_extensions import TypedDict


class ChatbotState(TypedDict):
    """State for chatbot with message history.

    ``messages`` is the full conversation history. The ``add_messages``
    reducer appends each message a node returns, and replaces any stored
    message whose ID matches an incoming one — so nodes only need to
    return the new message(s), not the whole history.
    """

    messages: Annotated[Sequence[BaseMessage], add_messages]
The add_messages annotation:
Automatically appends new messages
Maintains conversation order
Deduplicates by message ID (an incoming message with an existing ID replaces the stored one)
Step 2: Create the chatbot node
Build a node that calls an LLM to generate responses.
from langchain_openai import ChatOpenAI
# OR use Anthropic: from langchain_anthropic import ChatAnthropic

# Initialize the model once at module level so every node call reuses it.
model = ChatOpenAI(model="gpt-4", temperature=0.7)
# OR: model = ChatAnthropic(model="claude-3-5-sonnet-20241022")


def chatbot_node(state: ChatbotState) -> dict:
    """Generate a response using the LLM.

    Args:
        state: Current graph state; ``state["messages"]`` holds the
            conversation history.

    Returns:
        A partial state update containing only the new AI message;
        the ``add_messages`` reducer appends it to the stored history.
    """
    messages = state["messages"]
    # Call the model with the full conversation history so the reply
    # stays in context.
    response = model.invoke(messages)
    # Return only the delta; the state reducer merges it in.
    return {"messages": [response]}
# First message
result = app.invoke({
    "messages": [HumanMessage(content="Hi! I'm learning about LangGraph.")]
})
print(result["messages"][-1].content)
# AI: "That's great! LangGraph is a powerful framework for building..."

# Continue the conversation. Without a checkpointer each invoke starts
# from empty state, so the prior turns must be passed back in manually.
result = app.invoke({
    "messages": [
        HumanMessage(content="Hi! I'm learning about LangGraph."),
        result["messages"][-1],
        HumanMessage(content="What are the key concepts?")
    ]
})
print(result["messages"][-1].content)
# AI: "The key concepts in LangGraph are: 1. State - holds your data..."
Each call:
Includes full conversation history
Maintains context
Generates contextual responses
Step 5: Add conversation loop
Create an interactive chat experience.
def chat():
    """Interactive chat loop.

    Reads user input until 'quit'/'exit'/'q', sends the accumulated
    history through the graph on each turn, and prints the bot reply.
    The local ``messages`` list is replaced with the graph's returned
    history after each invoke so the bot's replies are carried forward.
    """
    print("Chatbot ready! Type 'quit' to exit.\n")
    messages = []
    while True:
        # Get user input
        user_input = input("You: ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break
        # Add user message
        messages.append(HumanMessage(content=user_input))
        # Get bot response; the full history is sent because each
        # invoke starts from empty state without a checkpointer.
        result = app.invoke({"messages": messages})
        # Extract and display response
        bot_message = result["messages"][-1]
        messages = result["messages"]
        print(f"Bot: {bot_message.content}\n")


# Run the chat
chat()
Chatbot ready! Type 'quit' to exit.

You: What is LangGraph?
Bot: LangGraph is a framework for building stateful, multi-actor applications with LLMs...

You: Can you give me an example?
Bot: Sure! Here's a simple example of building an agent with LangGraph...

You: quit
Goodbye!
from langchain_core.messages import SystemMessage


def chatbot_node(state: ChatbotState) -> dict:
    """Generate a response, prepending a system prompt on every call.

    The system message is NOT stored in state — it is re-added before
    each model call, so it never accumulates in the saved history.
    """
    messages = state["messages"]
    # Add system prompt ahead of the stored conversation history.
    full_messages = [
        SystemMessage(content="You are a helpful AI assistant specializing in LangGraph."),
        *messages
    ]
    response = model.invoke(full_messages)
    # Return only the new AI message; the reducer appends it.
    return {"messages": [response]}
Add message persistence
from langgraph.checkpoint.memory import MemorySaver

# Add checkpointer for persistence: state is saved after each step, so
# history carries over between invokes instead of starting empty.
memory = MemorySaver()
app = graph.compile(checkpointer=memory)

# Use a thread_id per conversation — each thread_id keys an independent,
# persisted message history.
config = {"configurable": {"thread_id": "conversation-1"}}
result = app.invoke({"messages": [HumanMessage(content="Hello")]}, config)
Add streaming responses
# Stream graph output as each node finishes. NOTE(review): the default
# stream mode yields one chunk per *node update* (a complete message),
# not token by token — use stream_mode="messages" for true token streaming.
for chunk in app.stream({"messages": [HumanMessage(content="Tell me a story")]}):
    # Assumes the graph's node is registered under the name "chatbot" —
    # confirm against the graph construction code.
    if "chatbot" in chunk:
        print(chunk["chatbot"]["messages"][-1].content, end="", flush=True)