Understanding message types and structure for chat-based interactions in LangChain
Messages are the fundamental units of communication in chat-based language models. LangChain provides a rich type system for representing conversations with different message roles and complex content.
from langchain_core.messages import HumanMessage

# A HumanMessage represents input from the user.
message = HumanMessage(content="What is LangChain?")
print(message.content)  # "What is LangChain?"
print(message.type)     # "human"
from langchain_core.messages import AIMessage

# An AIMessage represents a model response; response_metadata carries
# provider details such as the model name and token usage.
message = AIMessage(
    content="LangChain is a framework for building LLM applications.",
    response_metadata={"model": "gpt-4", "usage": {...}},  # placeholder usage dict
)
print(message.content)  # "LangChain is a framework..."
print(message.type)     # "ai"
from langchain_core.messages import SystemMessage

# A SystemMessage sets model behavior for the whole conversation.
message = SystemMessage(
    content="You are a helpful assistant that answers questions concisely."
)
print(message.type)  # "system"
from langchain_core.messages import trim_messages

messages = [...]  # Long conversation

# Keep the last messages that fit within the token budget.
trimmed = trim_messages(
    messages,
    max_tokens=1000,
    strategy="last",
    token_counter=len,  # Or use a real token counter
)

# Keep the first messages that fit within the token budget.
# NOTE: token_counter is required by trim_messages; it was missing here.
trimmed = trim_messages(
    messages,
    max_tokens=1000,
    strategy="first",
    token_counter=len,  # Or use a real token counter
)
from langchain_core.messages import get_buffer_string, HumanMessage, AIMessage

messages = [
    HumanMessage(content="What is AI?"),
    AIMessage(content="AI is artificial intelligence."),
]

# Flatten a message list into a single role-prefixed transcript string.
buffer = get_buffer_string(messages)
print(buffer)
# Output:
# Human: What is AI?
# AI: AI is artificial intelligence.
from langchain_core.messages import HumanMessage  # was missing in the original snippet
from langchain_core.tools import tool


@tool
def get_weather(location: str) -> str:
    """Get weather for a location."""
    return f"Weather in {location}: Sunny, 72°F"


# `model` is assumed to be a chat model initialized earlier.
model_with_tools = model.bind_tools([get_weather])
response = model_with_tools.invoke([
    HumanMessage(content="What's the weather in NYC?")
])

# Check for tool calls on the AI response.
if response.tool_calls:
    for tool_call in response.tool_calls:
        print(f"Tool: {tool_call['name']}")
        print(f"Args: {tool_call['args']}")
from langchain_core.messages import (  # import was missing in the original snippet
    AIMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)

# Good: Clear message types
messages = [
    SystemMessage(content="You are helpful."),
    HumanMessage(content="Question"),
    AIMessage(content="Answer"),
]

# Avoid: Generic ChatMessage unless needed
messages = [
    ChatMessage(role="system", content="You are helpful."),
    ChatMessage(role="user", content="Question"),  # Use HumanMessage instead
]
Include message IDs for tracing
Provide IDs for better observability:
import uuid

from langchain_core.messages import HumanMessage

# Assigning an explicit ID lets you trace this message across runs.
message = HumanMessage(
    content="Question",
    id=str(uuid.uuid4()),  # Track this specific message
)
Preserve tool call IDs
When responding to tool calls, preserve the ID:
from langchain_core.messages import AIMessage, ToolMessage  # imports were missing in the original snippet

# AI message with tool call
ai_msg = AIMessage(
    content="",
    tool_calls=[{"name": "search", "args": {"q": "LangChain"}, "id": "call_123"}],
)

# Tool response MUST reference the same ID
tool_msg = ToolMessage(
    content="Search results...",
    tool_call_id="call_123",  # Match the original ID
)