Manage multi-turn conversations and conversation history
T3Router maintains conversation context automatically, allowing you to build natural multi-turn conversations. This example demonstrates various conversation management techniques.
use dotenv::dotenv;use t3router::t3::{ client::Client, config::Config, message::{Message, Type},};#[tokio::main]async fn main() -> Result<(), Box<dyn std::error::Error>> { dotenv().ok(); let cookies = std::env::var("COOKIES").expect("COOKIES not set"); let convex_session_id = std::env::var("CONVEX_SESSION_ID") .expect("CONVEX_SESSION_ID not set"); let mut client = Client::new(cookies, convex_session_id); if client.init().await? { println!("Client initialized successfully\n"); } let config = Config::new(); // Example 1: Multi-turn conversation println!("=== Multi-turn Conversation ==="); client.new_conversation(); client.append_message(Message::new( Type::User, "I'm planning a trip to Paris. What are the top 3 attractions?".to_string(), )); let response1 = client .send("gemini-2.5-flash-lite", None, Some(config.clone())) .await?; println!("User: I'm planning a trip to Paris. What are the top 3 attractions?"); println!("Assistant: {}", response1.content); // Follow-up question with context let response2 = client .send( "gemini-2.5-flash-lite", Some(Message::new( Type::User, "Tell me more about the first one.".to_string(), )), Some(config.clone()), ) .await?; println!("\nUser: Tell me more about the first one."); println!("Assistant: {}", response2.content); // Another follow-up let response3 = client .send( "gemini-2.5-flash-lite", Some(Message::new( Type::User, "What's the best time to visit?".to_string(), )), Some(config.clone()), ) .await?; println!("\nUser: What's the best time to visit?"); println!("Assistant: {}\n", response3.content); // Example 2: Pre-populated conversation println!("=== Pre-populated Conversation ==="); client.new_conversation(); client.append_message(Message::new( Type::User, "Let's play a word association game.".to_string(), )); client.append_message(Message::new( Type::Assistant, "Great! I'm ready. 
Say your first word!".to_string(), )); client.append_message(Message::new(Type::User, "Ocean".to_string())); client.append_message(Message::new(Type::Assistant, "Waves".to_string())); client.append_message(Message::new(Type::User, "Beach".to_string())); let response4 = client .send("gemini-2.5-flash-lite", None, Some(config)) .await?; println!("Conversation history:"); for msg in client.get_messages() { let role = match msg.role { Type::User => "User", Type::Assistant => "Assistant", }; println!("{}: {}", role, msg.content); } // Get conversation metadata println!("\n=== Thread Information ==="); println!("Thread ID: {:?}", client.get_thread_id()); println!("Total messages: {}", client.get_messages().len()); Ok(())}
Client initialized successfully=== Multi-turn Conversation ===User: I'm planning a trip to Paris. What are the top 3 attractions?Assistant: The top 3 attractions in Paris are:1. Eiffel Tower - Iconic landmark...2. Louvre Museum - World's largest art museum...3. Notre-Dame Cathedral - Gothic masterpiece...User: Tell me more about the first one.Assistant: The Eiffel Tower was built in 1889...User: What's the best time to visit?Assistant: The best time to visit Paris is during spring...=== Pre-populated Conversation ===Conversation history:User: Let's play a word association game.Assistant: Great! I'm ready. Say your first word!User: OceanAssistant: WavesUser: BeachAssistant: Sunshine=== Thread Information ===Thread ID: Some("abc123...")Total messages: 6
Perfect for chatbots, customer support, or any scenario where context matters. Within the same conversation, the model remembers previous exchanges:
// First turn: send the user's message; the exchange is recorded on the
// current thread.
client.send(model, Some(Message::new(Type::User, "What's the weather?".to_string())), config).await?;
// Later, with context:
// The follow-up goes out on the same thread, so the model can resolve
// "tomorrow" against the earlier weather question.
client.send(model, Some(Message::new(Type::User, "What about tomorrow?".to_string())), config).await?;
System Prompts / Role Setting
Pre-populate the conversation to set up the assistant’s behavior:
// Start a fresh thread, then seed it with an instruction turn and a
// canned assistant acknowledgement before any real request is sent —
// this acts like a system prompt for subsequent turns.
client.new_conversation();
client.append_message(Message::new(
    Type::User,
    "You are a helpful Python tutor. Answer all questions about Python.".to_string(),
));
client.append_message(Message::new(
    Type::Assistant,
    "I understand. I'll help you learn Python!".to_string(),
));
Conversation Replay
Reconstruct previous conversations for debugging or analysis:
// Rebuild a saved thread: append each (user, assistant) pair in order
// so the model sees the full prior exchange on the next send.
// NOTE(review): assumes `saved_history` yields owned (String, String)
// pairs — confirm against how the history was persisted.
for (user_msg, assistant_msg) in saved_history {
    client.append_message(Message::new(Type::User, user_msg));
    client.append_message(Message::new(Type::Assistant, assistant_msg));
}
// Continue from where you left off
Each call to new_conversation() creates a fresh thread. The model won’t remember anything from previous threads.
Long conversations can exceed model context limits. Consider starting a new conversation when context becomes too large.