OTLP Exporter
The OTLP (OpenTelemetry Protocol) exporter is the recommended way to export telemetry data from your Rust applications. It supports all three telemetry signals (traces, metrics, and logs) and can communicate via gRPC or HTTP.

Installation

Add the dependency to your Cargo.toml:
[dependencies]
opentelemetry = "0.31"
opentelemetry_sdk = "0.31"
opentelemetry-otlp = "0.31"

Feature Flags

The OTLP exporter supports multiple protocols and HTTP clients.

Default features:
opentelemetry-otlp = { version = "0.31", default-features = true }
# Includes: http-proto, reqwest-blocking-client, trace, metrics, logs
For gRPC (Tonic):
opentelemetry-otlp = { version = "0.31", features = ["grpc-tonic"] }
For async HTTP with Reqwest:
opentelemetry-otlp = { version = "0.31", features = ["http-proto", "reqwest-client"] }
Additional features:
  • gzip-tonic - gRPC compression with gzip
  • zstd-tonic - gRPC compression with zstd
  • gzip-http - HTTP compression with gzip
  • zstd-http - HTTP compression with zstd
  • tls-ring or tls-aws-lc - TLS support for gRPC
  • http-json - HTTP with JSON encoding

Quick Start

HTTP Binary Protocol

The HTTP binary protocol is the simplest to get started with:
use opentelemetry::global;
use opentelemetry::trace::Tracer;
use opentelemetry_otlp::{Protocol, SpanExporter, WithExportConfig};
use opentelemetry_sdk::trace::SdkTracerProvider;
use opentelemetry_sdk::Resource;

fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
    // Create the exporter
    let exporter = SpanExporter::builder()
        .with_http()
        .with_protocol(Protocol::HttpBinary)
        .build()?;

    // Create tracer provider
    let provider = SdkTracerProvider::builder()
        .with_batch_exporter(exporter)
        .with_resource(
            Resource::builder()
                .with_service_name("my-service")
                .build()
        )
        .build();

    global::set_tracer_provider(provider.clone());

    // Use the tracer
    let tracer = global::tracer("my-tracer");
    tracer.in_span("example-operation", |_cx| {
        // Your application logic
    });

    provider.shutdown()?;
    Ok(())
}

gRPC Protocol

For gRPC, you need a Tokio runtime:
use opentelemetry::global;
use opentelemetry::trace::Tracer;
use opentelemetry_otlp::SpanExporter;
use opentelemetry_sdk::trace::SdkTracerProvider;
use opentelemetry_sdk::Resource;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
    // Create the exporter
    let exporter = SpanExporter::builder()
        .with_tonic()
        .build()?;

    // Create tracer provider
    let provider = SdkTracerProvider::builder()
        .with_batch_exporter(exporter)
        .with_resource(
            Resource::builder()
                .with_service_name("my-service")
                .build()
        )
        .build();

    global::set_tracer_provider(provider.clone());

    // Use the tracer
    let tracer = global::tracer("my-tracer");
    tracer.in_span("example-operation", |_cx| {
        // Your application logic
    });

    provider.shutdown()?;
    Ok(())
}

Complete Example with All Signals

Here’s a complete example that exports traces, metrics, and logs:
use opentelemetry::{global, trace::{TraceContextExt, Tracer}, InstrumentationScope, KeyValue};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_otlp::{LogExporter, MetricExporter, Protocol, SpanExporter, WithExportConfig};
use opentelemetry_sdk::{
    logs::SdkLoggerProvider,
    metrics::SdkMeterProvider,
    trace::SdkTracerProvider,
    Resource,
};
use std::sync::OnceLock;
use tracing::info;
use tracing_subscriber::prelude::*;

fn get_resource() -> Resource {
    static RESOURCE: OnceLock<Resource> = OnceLock::new();
    RESOURCE
        .get_or_init(|| {
            Resource::builder()
                .with_service_name("my-service")
                .build()
        })
        .clone()
}

fn init_traces() -> SdkTracerProvider {
    let exporter = SpanExporter::builder()
        .with_http()
        .with_protocol(Protocol::HttpBinary)
        .build()
        .expect("Failed to create span exporter");
    
    SdkTracerProvider::builder()
        .with_batch_exporter(exporter)
        .with_resource(get_resource())
        .build()
}

fn init_metrics() -> SdkMeterProvider {
    let exporter = MetricExporter::builder()
        .with_http()
        .with_protocol(Protocol::HttpBinary)
        .build()
        .expect("Failed to create metric exporter");
    
    SdkMeterProvider::builder()
        .with_periodic_exporter(exporter)
        .with_resource(get_resource())
        .build()
}

fn init_logs() -> SdkLoggerProvider {
    let exporter = LogExporter::builder()
        .with_http()
        .with_protocol(Protocol::HttpBinary)
        .build()
        .expect("Failed to create log exporter");
    
    SdkLoggerProvider::builder()
        .with_batch_exporter(exporter)
        .with_resource(get_resource())
        .build()
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
    let logger_provider = init_logs();
    let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
    tracing_subscriber::registry().with(otel_layer).init();

    let tracer_provider = init_traces();
    global::set_tracer_provider(tracer_provider.clone());

    let meter_provider = init_metrics();
    global::set_meter_provider(meter_provider.clone());

    // Create instrumentation scope
    let scope = InstrumentationScope::builder("my-app")
        .with_version("1.0")
        .build();

    let tracer = global::tracer_with_scope(scope.clone());
    let meter = global::meter_with_scope(scope);

    // Create a metric
    let counter = meter
        .u64_counter("requests_total")
        .with_description("Total number of requests")
        .build();
    
    counter.add(10, &[KeyValue::new("endpoint", "/api/users")]);

    // Create a trace
    tracer.in_span("main-operation", |cx| {
        let span = cx.span();
        span.set_attribute(KeyValue::new("user.id", "12345"));
        span.add_event(
            "Processing request",
            vec![KeyValue::new("request.size", 1024)],
        );
        
        info!("Processing user request");
    });

    // Shutdown all providers
    tracer_provider.shutdown()?;
    meter_provider.shutdown()?;
    logger_provider.shutdown()?;

    Ok(())
}

Configuration

Custom Endpoint

By default, the exporter connects to http://localhost:4318 for HTTP (with signal-specific paths appended, e.g. /v1/traces, /v1/metrics, /v1/logs) and http://localhost:4317 for gRPC. You can customize this:
use opentelemetry_otlp::WithExportConfig;

let exporter = SpanExporter::builder()
    .with_http()
    .with_endpoint("https://otel-collector.example.com:4318")
    .build()?;

Environment Variables

The exporter respects standard OpenTelemetry environment variables:
  • OTEL_EXPORTER_OTLP_ENDPOINT - Base endpoint for all signals
  • OTEL_EXPORTER_OTLP_TRACES_ENDPOINT - Endpoint for traces
  • OTEL_EXPORTER_OTLP_METRICS_ENDPOINT - Endpoint for metrics
  • OTEL_EXPORTER_OTLP_LOGS_ENDPOINT - Endpoint for logs
  • OTEL_EXPORTER_OTLP_HEADERS - Custom headers
  • OTEL_EXPORTER_OTLP_TIMEOUT - Export timeout

Headers and Authentication

Add custom headers for authentication:
use std::collections::HashMap;
use opentelemetry_otlp::WithExportConfig;

let mut headers = HashMap::new();
headers.insert("authorization".to_string(), "Bearer token123".to_string());
headers.insert("x-api-key".to_string(), "my-api-key".to_string());

let exporter = SpanExporter::builder()
    .with_http()
    .with_headers(headers)
    .build()?;

Timeout Configuration

use std::time::Duration;
use opentelemetry_otlp::WithExportConfig;

let exporter = SpanExporter::builder()
    .with_http()
    .with_timeout(Duration::from_secs(10))
    .build()?;

Compression

Enable compression to reduce network bandwidth.

For gRPC:
[dependencies]
opentelemetry-otlp = { version = "0.31", features = ["grpc-tonic", "gzip-tonic"] }
For HTTP:
[dependencies]
opentelemetry-otlp = { version = "0.31", features = ["http-proto", "gzip-http"] }

Integration with Backends

OpenTelemetry Collector

Run the collector with Docker:
# HTTP endpoint (port 4318)
docker run -p 4318:4318 otel/opentelemetry-collector:latest

# gRPC endpoint (port 4317)
docker run -p 4317:4317 otel/opentelemetry-collector:latest

Jaeger

Jaeger natively supports OTLP:
docker run -p 16686:16686 -p 4317:4317 \
  -e COLLECTOR_OTLP_ENABLED=true \
  jaegertracing/all-in-one:latest
View traces at: http://localhost:16686

Prometheus

Prometheus can accept OTLP metrics:
docker run -p 9090:9090 \
  -v ./prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus \
  --config.file=/etc/prometheus/prometheus.yml \
  --web.enable-otlp-receiver
Configure the exporter:
let exporter = MetricExporter::builder()
    .with_http()
    .with_protocol(Protocol::HttpBinary)
    .with_endpoint("http://localhost:9090/api/v1/otlp/v1/metrics")
    .build()?;

HTTP vs gRPC

When to Use HTTP

  • Simpler setup without async runtime requirements (with blocking client)
  • Firewall-friendly (standard HTTP/HTTPS ports)
  • Works well with HTTP proxies and load balancers
  • JSON format available for debugging

When to Use gRPC

  • Better performance for high-throughput scenarios
  • Built-in streaming support
  • More efficient binary protocol
  • Better support for bidirectional communication

Performance Considerations

Use Batch Exporters

Always use batch exporters in production:
let provider = SdkTracerProvider::builder()
    .with_batch_exporter(exporter)  // ✓ Good for production
    .build();
Avoid simple exporters in production:
let provider = SdkTracerProvider::builder()
    .with_simple_exporter(exporter)  // ✗ Only for development
    .build();

Enable Compression

Compression can substantially reduce network bandwidth — often 60–80% for typical telemetry payloads:
opentelemetry-otlp = { version = "0.31", features = ["grpc-tonic", "gzip-tonic"] }

Tune Batch Configuration

Customize batching for your workload:
use opentelemetry_sdk::trace::{BatchSpanProcessor, BatchConfigBuilder};

let batch_config = BatchConfigBuilder::default()
    .with_max_queue_size(4096)
    .with_max_export_batch_size(512)
    .with_scheduled_delay(std::time::Duration::from_secs(5))
    .build();

let batch_processor = BatchSpanProcessor::builder(exporter)
    .with_batch_config(batch_config)
    .build();

let provider = SdkTracerProvider::builder()
    .with_span_processor(batch_processor)
    .build();

Troubleshooting

Connection Refused

If you see connection errors, verify:
  1. The collector is running: docker ps
  2. The endpoint is correct (default: http://localhost:4318 for HTTP)
  3. Firewall rules allow the connection

Spans Not Appearing

Ensure you’re calling shutdown:
tracer_provider.shutdown()?;  // Flushes all pending spans

High Memory Usage

Reduce batch queue size:
let batch_config = BatchConfigBuilder::default()
    .with_max_queue_size(2048)  // Lower from default
    .build();

Protocol Reference

Available protocol options:
use opentelemetry_otlp::Protocol;

// Binary protobuf over HTTP (recommended)
Protocol::HttpBinary

// JSON over HTTP (for debugging)
Protocol::HttpJson

Next Steps

Stdout Exporter

Debug telemetry locally without a backend

Zipkin Exporter

Export traces to Zipkin

Build docs developers (and LLMs) love