Creating Providers
ZeroClaw’s provider system lets you add any LLM backend in ~30 lines of code. Providers handle communication with language models and expose a uniform interface to the agent.

Overview

Providers implement the Provider trait, which defines a simple contract:
#[async_trait]
pub trait Provider: Send + Sync {
    /// Send `message` to the given `model` with the given sampling
    /// `temperature`, returning the model's complete reply text.
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String>;
}

Step-by-Step Guide

1
Create Your Provider Module
2
Create a new file in src/providers/ for your provider:
3
use anyhow::Result;
use async_trait::async_trait;
use crate::providers::traits::Provider;

/// Custom provider for Ollama (local LLM server)
pub struct OllamaProvider {
    base_url: String,
    client: reqwest::Client,
}

impl OllamaProvider {
    /// Build a provider, falling back to Ollama's standard local
    /// endpoint when no base URL is supplied.
    pub fn new(base_url: Option<&str>) -> Self {
        let endpoint = base_url.unwrap_or("http://localhost:11434");
        OllamaProvider {
            base_url: endpoint.to_owned(),
            client: reqwest::Client::new(),
        }
    }
}
4
Implement the Provider Trait
5
Implement the chat method to handle API communication:
6
#[async_trait]
impl Provider for OllamaProvider {
    /// Send `message` to the Ollama `/api/generate` endpoint and return
    /// the reply text. Errors on HTTP failures, network failures, or a
    /// reply missing the `response` field.
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String> {
        let url = format!("{}/api/generate", self.base_url);

        // Ollama reads sampling parameters from the "options" object;
        // a top-level "temperature" field is silently ignored.
        let body = serde_json::json!({
            "model": model,
            "prompt": message,
            "options": { "temperature": temperature },
            "stream": false,
        });

        let resp = self
            .client
            .post(&url)
            .json(&body)
            .send()
            .await?
            // Surface 4xx/5xx as errors instead of parsing an error body.
            .error_for_status()?
            .json::<serde_json::Value>()
            .await?;

        resp["response"]
            .as_str()
            .map(|s| s.to_string())
            .ok_or_else(|| anyhow::anyhow!("No response field in Ollama reply"))
    }
}
7
Register Your Provider
8
Add your provider to the factory in src/providers/mod.rs:
9
/// Instantiate a provider by name; `api_key` is forwarded to providers
/// that require one. Fails on an unrecognized provider name.
pub fn create_provider(name: &str, api_key: Option<&str>) -> Result<Box<dyn Provider>> {
    let provider: Box<dyn Provider> = match name {
        "openai" => Box::new(openai::OpenAIProvider::new(api_key)?),
        "anthropic" => Box::new(anthropic::AnthropicProvider::new(api_key)?),
        "ollama" => Box::new(ollama::OllamaProvider::new(None)),
        other => anyhow::bail!("Unknown provider: {}", other),
    };
    Ok(provider)
}
10
Configure and Test
11
Add your provider to config.toml:
12
# Route chat through the local Ollama provider by default.
default_provider = "ollama"
default_model = "llama3.2"
default_temperature = 0.7

# Ollama doesn't need an API key for local use
api_key = ""
13
Test your provider:
14
zeroclaw chat "Hello, test my new provider!"

Complete Example

Here’s the full implementation from examples/custom_provider.rs:
use anyhow::Result;
use async_trait::async_trait;

#[async_trait]
pub trait Provider: Send + Sync {
    /// Send `message` to `model` with sampling `temperature` and return
    /// the model's complete reply text.
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String>;
}

/// Provider for a local Ollama server.
pub struct OllamaProvider {
    base_url: String,
    client: reqwest::Client,
}

impl OllamaProvider {
    /// Build a provider, defaulting to Ollama's standard local endpoint.
    pub fn new(base_url: Option<&str>) -> Self {
        Self {
            base_url: base_url.unwrap_or("http://localhost:11434").to_string(),
            client: reqwest::Client::new(),
        }
    }
}

#[async_trait]
impl Provider for OllamaProvider {
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String> {
        let url = format!("{}/api/generate", self.base_url);

        // Ollama reads sampling parameters from the "options" object;
        // a top-level "temperature" field is silently ignored.
        let body = serde_json::json!({
            "model": model,
            "prompt": message,
            "options": { "temperature": temperature },
            "stream": false,
        });

        let resp = self
            .client
            .post(&url)
            .json(&body)
            .send()
            .await?
            .json::<serde_json::Value>()
            .await?;

        resp["response"]
            .as_str()
            .map(|s| s.to_string())
            .ok_or_else(|| anyhow::anyhow!("No response field in Ollama reply"))
    }
}

Advanced Features

Error Handling

Providers should return descriptive errors:
// Fail fast on 4xx responses (bad request, auth failure, …).
if resp.status().is_client_error() {
    anyhow::bail!("API client error: {}", resp.status());
}

// 5xx responses indicate a provider-side failure; a retry may succeed.
if resp.status().is_server_error() {
    anyhow::bail!("API server error: {}", resp.status());
}

Streaming Support

For streaming responses, implement chat_stream:
#[async_trait]
pub trait Provider: Send + Sync {
    /// Request a single, complete completion for `message`.
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String>;
    
    /// Stream the completion as incremental text chunks.
    ///
    /// The default implementation panics via `unimplemented!`; only
    /// override it in providers that support streaming.
    /// NOTE(review): this snippet assumes `Pin` (std::pin) and a
    /// `Stream` trait (e.g. futures::Stream) are in scope — confirm
    /// against the crate's actual imports.
    async fn chat_stream(
        &self,
        message: &str,
        model: &str,
        temperature: f64,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<String>> + Send>>> {
        // Optional streaming implementation
        unimplemented!("Streaming not supported")
    }
}

Authentication

Handle API keys securely:
/// Provider for a key-authenticated remote API.
pub struct CustomProvider {
    api_key: String,
    client: reqwest::Client,
}

impl CustomProvider {
    /// Build a provider; fails when no API key was supplied.
    pub fn new(api_key: Option<&str>) -> Result<Self> {
        match api_key {
            Some(key) => Ok(Self {
                api_key: key.to_string(),
                client: reqwest::Client::new(),
            }),
            None => Err(anyhow::anyhow!("API key required for CustomProvider")),
        }
    }
}

#[async_trait]
impl Provider for CustomProvider {
    /// POST the prompt to the example endpoint with bearer-token auth.
    async fn chat(&self, message: &str, model: &str, temperature: f64) -> Result<String> {
        let payload = serde_json::json!({
            "model": model,
            "prompt": message,
            "temperature": temperature,
        });

        let resp = self
            .client
            .post("https://api.example.com/v1/chat")
            .header("Authorization", format!("Bearer {}", self.api_key))
            .json(&payload)
            .send()
            .await?;
        
        // Handle response...
        Ok("response".to_string())
    }
}

Best Practices

Focus on API communication. Don’t add business logic — that belongs in the agent orchestration layer.
Implement exponential backoff for rate-limited APIs:
// Exponential backoff (2s, 4s, 8s) while the API rate-limits us.
let mut retries = 0;
loop {
    match self.client.post(&url).send().await {
        Ok(resp) if resp.status() == 429 => {
            // 429 Too Many Requests: sleep, then retry up to 3 times.
            if retries >= 3 {
                anyhow::bail!("Rate limited after retries");
            }
            tokio::time::sleep(Duration::from_secs(2u64.pow(retries))).await;
            retries += 1;
        }
        // Any other status (including 4xx/5xx) is returned to the caller.
        Ok(resp) => return Ok(resp),
        Err(e) => return Err(e.into()),
    }
}
Use masked logging for credentials:
// Never log the credential itself; emit a fixed mask instead.
// (Fixed the unbalanced asterisks in the original message.)
tracing::info!("Calling provider (key: ***masked***)");
Use integration tests with real (or mocked) API calls:
// Integration test: requires a running Ollama server on the default
// local endpoint (http://localhost:11434) with the model already pulled.
#[tokio::test]
async fn test_ollama_provider() {
    let provider = OllamaProvider::new(None);
    let response = provider
        .chat("Hello", "llama3.2", 0.7)
        .await
        .unwrap();
    // A successful generation should never be empty.
    assert!(!response.is_empty());
}

Next Steps

Creating Channels

Learn how to add messaging platform support

Creating Tools

Give your agent new capabilities

Build docs developers (and LLMs) love