Skip to main content
T3Router supports image generation through multiple AI models. This example shows how to generate images, download them to disk, and mix text and image conversations.

Complete Example

use dotenv::dotenv;
use std::path::Path;
use t3router::t3::{
    client::Client,
    config::Config,
    message::{ContentType, Message, Type},
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load COOKIES / CONVEX_SESSION_ID from a local .env file if one exists.
    dotenv().ok();

    let cookies = std::env::var("COOKIES").expect("COOKIES not set");
    // The session id must be wrapped in literal double quotes —
    // presumably the Convex backend expects a JSON-encoded string; verify against the API.
    let convex_session_id = format!(
        "\"{}\"",
        std::env::var("CONVEX_SESSION_ID").expect("CONVEX_SESSION_ID not set")
    );

    let mut client = Client::new(cookies, convex_session_id);

    if client.init().await? {
        println!("Client initialized successfully\n");
    }

    // Ensure the download target directory exists up front; without this the
    // send_with_image_download() calls below fail with a file-system error
    // when "output/" is missing.
    std::fs::create_dir_all("output")?;

    let config = Config::new();

    // Example 1: Generate without downloading — only the remote URL is returned.
    println!("=== Generate Image (No Save) ===");
    let response = client
        .send(
            "gpt-image-1",
            Some(Message::new(
                Type::User,
                "Create an image of a futuristic city at sunset".to_string(),
            )),
            Some(config.clone()),
        )
        .await?;

    // Image prompts can still come back as text (e.g. when the request
    // cannot be fulfilled), so always branch on content_type.
    match response.content_type {
        ContentType::Image => {
            if let Some(url) = response.image_url {
                println!("Generated image URL: {}", url);
            }
        }
        ContentType::Text => {
            println!("Response: {}", response.content);
        }
    }

    // Example 2: Generate and download the file to disk.
    println!("\n=== Generate and Download Image ===");
    client.new_conversation();

    let save_path = Path::new("output/pokemon.png");
    let response2 = client
        .send_with_image_download(
            "gpt-image-1",
            Some(Message::new(
                Type::User,
                "Make a image of a pokemon".to_string(),
            )),
            Some(config.clone()),
            Some(save_path),
        )
        .await?;

    match response2.content_type {
        ContentType::Image => {
            if let Some(url) = response2.image_url {
                println!("Generated image URL: {}", url);
            }
            println!("Image saved to: {:?}", save_path);
            // base64_data is also populated by the download variant.
            if let Some(b64) = response2.base64_data.as_ref() {
                println!("Base64 length: {} characters", b64.len());
            }
        }
        ContentType::Text => {
            println!("Response: {}", response2.content);
        }
    }

    // Example 3: Same flow with Google's Imagen model.
    println!("\n=== Gemini Imagen ===");
    client.new_conversation();

    let save_path_gemini = Path::new("output/landscape.png");
    let response3 = client
        .send_with_image_download(
            "gemini-imagen-4",
            Some(Message::new(
                Type::User,
                "Create a beautiful mountain landscape with a lake".to_string(),
            )),
            Some(config.clone()),
            Some(save_path_gemini),
        )
        .await?;

    match response3.content_type {
        ContentType::Image => {
            println!("Image saved to: {:?}", save_path_gemini);
        }
        ContentType::Text => {
            println!("Response: {}", response3.content);
        }
    }

    // Example 4: Mixed text and image conversation — the conversation context
    // is shared, so the image model can reference the text model's answer.
    println!("\n=== Mixed Conversation ===");
    client.new_conversation();

    // First, ask a text question
    let response4 = client
        .send(
            "gemini-2.5-flash-lite",
            Some(Message::new(
                Type::User,
                "What makes a good landscape photo?".to_string(),
            )),
            Some(config.clone()),
        )
        .await?;

    println!("User: What makes a good landscape photo?");
    println!("Assistant: {}", response4.content);

    // Then, generate an image based on the advice
    let save_path_example = Path::new("output/example_landscape.png");
    let response5 = client
        .send_with_image_download(
            "gemini-imagen-4",
            Some(Message::new(
                Type::User,
                "Now create an example based on what you described".to_string(),
            )),
            Some(config),
            Some(save_path_example),
        )
        .await?;

    println!("\nUser: Now create an example based on what you described");
    match response5.content_type {
        ContentType::Image => {
            println!("Image saved to: {:?}", save_path_example);
        }
        ContentType::Text => {
            println!("Response: {}", response5.content);
        }
    }

    Ok(())
}

Image Generation Methods

Basic Generation (URL Only)

// Ask the image model for a picture; only the hosted URL comes back,
// nothing is written to disk.
let response = client
    .send(
        "gpt-image-1",
        Some(Message::new(Type::User, "Create an image of a cat".to_string())),
        Some(config),
    )
    .await?;

// image_url is Some(..) only when the model actually produced an image.
if let Some(url) = response.image_url {
    println!("Image URL: {}", url);
}
Use send() when you only need the image URL and don’t need to download the file.

Generate and Download

// Destination file for the downloaded image; the parent directory must
// already exist.
let save_path = Path::new("output/my_image.png");
let response = client
    .send_with_image_download(
        "gpt-image-1",
        Some(Message::new(Type::User, "Create an image".to_string())),
        Some(config),
        Some(save_path),
    )
    .await?;

// ContentType::Image signals the image was generated and saved to save_path.
if response.content_type == ContentType::Image {
    println!("Downloaded to: {:?}", save_path);
}
Use send_with_image_download() to automatically download and save the generated image. The response includes:
  • image_url - Original image URL
  • base64_data - Base64-encoded image data
  • File saved to the specified path

Available Image Models

gpt-image-1

OpenAI’s image generation model. Good for creative and artistic images.

gemini-imagen-4

Google’s Imagen model. Excellent for realistic and detailed images.

Checking Response Type

// Branch on the response kind: image prompts can legitimately come back as
// plain text (e.g. when the model declines or explains a failure).
match response.content_type {
    ContentType::Image => {
        // Handle image response
        if let Some(url) = response.image_url {
            println!("Image: {}", url);
        }
        // base64_data carries the base64-encoded image bytes when available.
        if let Some(b64) = response.base64_data {
            println!("Base64 available: {} chars", b64.len());
        }
    }
    ContentType::Text => {
        // Handle text response (error or explanation)
        println!("Text: {}", response.content);
    }
}
Always check the content_type field. Some prompts might return text instead of images if the request cannot be fulfilled.

Expected Output

Client initialized successfully

=== Generate Image (No Save) ===
Generated image URL: https://...

=== Generate and Download Image ===
Generated image URL: https://...
Image saved to: "output/pokemon.png"
Base64 length: 245678 characters

=== Gemini Imagen ===
Image saved to: "output/landscape.png"

=== Mixed Conversation ===
User: What makes a good landscape photo?
Assistant: A good landscape photo typically includes...

User: Now create an example based on what you described
Image saved to: "output/example_landscape.png"

Advanced: Mixed Conversations

You can combine text and image models in the same conversation:
// Start a fresh conversation so earlier exchanges don't leak into this one.
client.new_conversation();

// Get advice with a text model
let advice = client
    .send(
        "gemini-2.5-flash-lite",
        Some(Message::new(Type::User, "Describe a cyberpunk scene".to_string())),
        Some(config.clone()),
    )
    .await?;

// Generate image based on that advice with an image model — the shared
// conversation lets the image model see the text model's previous reply.
let image = client
    .send_with_image_download(
        "gpt-image-1",
        Some(Message::new(
            Type::User,
            "Create an image of what you just described".to_string(),
        )),
        Some(config),
        Some(Path::new("output/scene.png")),
    )
    .await?;
The conversation context is preserved across different models, allowing the image model to reference the text model’s previous response.
Before running, create the output directory with `mkdir -p output`.
Image generation may take longer than text responses. The send_with_image_download() method waits for generation to complete and downloads the result.
Not all prompts will generate images. Some may return text responses explaining why the image couldn’t be generated. Always check content_type.

Next Steps

Build docs developers (and LLMs) love