Span Processors
Span processors are hooks invoked when a span starts and ends. They are responsible for batching, filtering, and exporting spans to backends.

SpanProcessor Interface

From opentelemetry-sdk/src/trace/span_processor.rs:74-136:
/// Hook interface the SDK invokes at span start and end. Implementations are
/// responsible for batching, filtering, and exporting spans.
/// (Quoted from the SDK source — do not edit logic here.)
pub trait SpanProcessor: Send + Sync + std::fmt::Debug {
    /// Called synchronously when a span is started; `cx` is the context
    /// active at span creation, and the span is still mutable.
    fn on_start(&self, span: &mut Span, cx: &Context);

    /// Called when a span ends; receives the finished, immutable span data
    fn on_end(&self, span: SpanData);

    /// Force flush cached spans to the exporter
    fn force_flush(&self) -> OTelSdkResult;

    /// Shutdown the processor, flushing pending spans within `timeout`
    fn shutdown_with_timeout(&self, timeout: Duration) -> OTelSdkResult;

    /// Set the resource for the processor
    fn set_resource(&mut self, resource: &Resource);
}

Built-in Processors

OpenTelemetry SDK provides two main span processors:

SimpleSpanProcessor

Exports spans synchronously as soon as they end (defined in opentelemetry-sdk/src/trace/span_processor.rs:138-212). Use cases:
  • Debugging and testing
  • Low-throughput applications
  • When you need immediate export
Characteristics:
  • Exports each span individually
  • Blocks the thread that ends the span
  • No batching overhead
  • Higher export overhead per span
// SimpleSpanProcessor example: every span is exported synchronously on end.
use opentelemetry_sdk::trace::{SdkTracerProvider, SimpleSpanProcessor};
use opentelemetry_stdout::SpanExporter;

// Stdout exporter with default settings — handy for local debugging.
let exporter = SpanExporter::default();
let processor = SimpleSpanProcessor::new(exporter);

// Register the processor; it will receive every span the provider creates.
let provider = SdkTracerProvider::builder()
    .with_span_processor(processor)
    .build();

Exporter Compatibility

When using OTLP exporters with SimpleSpanProcessor:
  • grpc-tonic - Requires TracerProvider created within a tokio runtime. Spans can be emitted from any thread.
  • reqwest-blocking-client - TracerProvider may be created anywhere, but spans must be emitted from non-tokio threads.
  • reqwest-client - TracerProvider may be created anywhere, but spans must be emitted from tokio runtime threads.

BatchSpanProcessor

Collects finished spans and exports them in batches (defined in opentelemetry-sdk/src/trace/span_processor.rs:214-300). Use cases:
  • Production applications
  • High-throughput scenarios
  • When minimizing export overhead is important
Characteristics:
  • Uses a dedicated background thread
  • Batches spans before export
  • Configurable queue and batch sizes
  • Scheduled periodic exports
  • Lower per-span overhead
// BatchSpanProcessor example: spans are queued and exported in batches
// from a dedicated background thread.
use opentelemetry_sdk::trace::{SdkTracerProvider, BatchSpanProcessor};
use opentelemetry_otlp::SpanExporter;

// OTLP exporter over gRPC (tonic); `?` propagates builder errors.
let exporter = SpanExporter::builder()
    .with_tonic()
    .build()?;

// Default batch configuration (queue 2048, batch 512, 5 s delay).
let processor = BatchSpanProcessor::builder(exporter).build();

let provider = SdkTracerProvider::builder()
    .with_span_processor(processor)
    .build();

Exporter Compatibility

When using OTLP exporters with BatchSpanProcessor:
  • grpc-tonic - Requires TracerProvider created within a tokio runtime
  • reqwest-blocking-client - Works with regular main or tokio::main
  • reqwest-client and hyper - Not supported with BatchSpanProcessor

Batch Configuration

Customize BatchSpanProcessor behavior:

BatchConfig Options

From opentelemetry-sdk/src/trace/span_processor.rs:52-72:
/// Batching parameters for BatchSpanProcessor (quoted from the SDK source).
/// Fields are private; configure them via `BatchConfigBuilder` or the
/// `OTEL_BSP_*` environment variables.
pub struct BatchConfig {
    /// Maximum queue size (default: 2048); spans arriving when the queue
    /// is full are dropped
    max_queue_size: usize,

    /// Maximum batch size per export call (default: 512)
    max_export_batch_size: usize,

    /// Export delay interval (default: 5 seconds)
    scheduled_delay: Duration,

    /// Export timeout per batch (default: 30 seconds)
    export_timeout: Duration,

    /// Max concurrent exports (default: 1)
    max_concurrent_exports: usize,
}

Configuring Batch Processor

// Custom batch configuration via BatchConfigBuilder.
use opentelemetry_sdk::trace::{BatchSpanProcessor, BatchConfigBuilder};
use opentelemetry_otlp::SpanExporter;
use std::time::Duration;

let exporter = SpanExporter::builder()
    .with_tonic()
    .build()?;

// Explicit values override both the defaults and OTEL_BSP_* env vars.
let processor = BatchSpanProcessor::builder(exporter)
    .with_batch_config(
        BatchConfigBuilder::default()
            .with_max_queue_size(4096)        // Buffer up to 4096 spans
            .with_max_export_batch_size(512)  // Export in batches of 512
            .with_scheduled_delay(Duration::from_secs(5))  // Export every 5s
            .with_max_export_timeout(Duration::from_secs(30))
            .build(),
    )
    .build();

Environment Variables

Configure batch processor via environment variables:
# Maximum queue size
export OTEL_BSP_MAX_QUEUE_SIZE=4096

# Maximum batch size
export OTEL_BSP_MAX_EXPORT_BATCH_SIZE=512

# Schedule delay (milliseconds)
export OTEL_BSP_SCHEDULE_DELAY=5000

# Export timeout (milliseconds)
export OTEL_BSP_EXPORT_TIMEOUT=30000

# Max concurrent exports
export OTEL_BSP_MAX_CONCURRENT_EXPORTS=1

Complete Example

From the documentation in opentelemetry-sdk/src/trace/span_processor.rs:227-272:
use opentelemetry::global;
use opentelemetry_sdk::trace::{
    BatchSpanProcessor, BatchConfigBuilder, SdkTracerProvider, InMemorySpanExporter,
};
use opentelemetry::trace::Tracer;
use std::time::Duration;

// Step 1: Create an exporter (in-memory: useful for tests/examples)
let exporter = InMemorySpanExporter::default();

// Step 2: Configure the BatchSpanProcessor
let batch_processor = BatchSpanProcessor::builder(exporter)
    .with_batch_config(
        BatchConfigBuilder::default()
            .with_max_queue_size(1024)
            .with_max_export_batch_size(256)
            .with_scheduled_delay(Duration::from_secs(5))
            .build(),
    )
    .build();

// Step 3: Set up a TracerProvider with the processor
let provider = SdkTracerProvider::builder()
    .with_span_processor(batch_processor)
    .build();

global::set_tracer_provider(provider.clone());

// Step 4: Create spans
let tracer = global::tracer("example-tracer");
let mut span = tracer.start("example-span");
span.end();

// Step 5: Ensure all spans are flushed before exiting.
// shutdown() returns a Result — don't silently discard it, or export
// failures at exit go unnoticed.
provider.shutdown().expect("TracerProvider shutdown failed");

Custom Span Processors

Implement custom processing logic:
use opentelemetry_sdk::trace::{SpanProcessor, SpanData, Span};
use opentelemetry::{Context, trace::Span as _};
use opentelemetry::KeyValue;
use std::time::Duration;

/// Counts finished spans; backs a simple metrics-reporting span processor.
#[derive(Debug)]
struct MetricsProcessor {
    // Total spans seen in on_end; Relaxed ordering is sufficient for a counter.
    span_count: std::sync::atomic::AtomicU64,
}

impl MetricsProcessor {
    /// Builds a processor whose span counter starts at zero.
    fn new() -> Self {
        let span_count = std::sync::atomic::AtomicU64::new(0);
        MetricsProcessor { span_count }
    }
}

impl SpanProcessor for MetricsProcessor {
    /// Copy interesting context values onto the span while it is still
    /// mutable — `on_end` only sees the immutable `SpanData`.
    fn on_start(&self, span: &mut Span, cx: &Context) {
        // `Context::get` looks values up by *type*, not by a string key, so it
        // takes no argument. This reads a `String` previously stored via
        // `cx.with_value(...)`. (For key/value lookup, use baggage instead.)
        if let Some(user_id) = cx.get::<String>() {
            span.set_attribute(KeyValue::new("user.id", user_id.clone()));
        }
    }

    fn on_end(&self, span: SpanData) {
        // Count every finished span.
        self.span_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        // Log slow spans; duration is derived from the recorded timestamps
        // (duration_since fails if the clock went backwards — treat as zero).
        let duration = span.end_time.duration_since(span.start_time)
            .unwrap_or_default();

        if duration > Duration::from_secs(1) {
            println!("Slow span detected: {:?} took {:?}", span.name, duration);
        }
    }

    /// Nothing is buffered here, so flushing is a no-op.
    fn force_flush(&self) -> opentelemetry_sdk::error::OTelSdkResult {
        Ok(())
    }

    /// Report the final count on shutdown; no pending work to drain.
    fn shutdown_with_timeout(&self, _timeout: Duration) -> opentelemetry_sdk::error::OTelSdkResult {
        println!("Total spans processed: {}",
            self.span_count.load(std::sync::atomic::Ordering::Relaxed));
        Ok(())
    }
}

Filtering Processor

Filter spans before export:
/// Decorator processor that drops short-lived spans before delegating
/// everything else to the wrapped `inner` processor.
#[derive(Debug)]
struct FilteringProcessor {
    inner: Box<dyn SpanProcessor>,
    min_duration: Duration,
}

impl SpanProcessor for FilteringProcessor {
    fn on_start(&self, span: &mut Span, cx: &Context) {
        self.inner.on_start(span, cx);
    }

    fn on_end(&self, span: SpanData) {
        // duration_since fails if the clock went backwards — treat as zero,
        // which filters such spans out (they are below any positive threshold).
        let duration = span.end_time
            .duration_since(span.start_time)
            .unwrap_or_default();

        // Only export spans at or above the threshold
        if duration >= self.min_duration {
            self.inner.on_end(span);
        }
    }

    fn force_flush(&self) -> opentelemetry_sdk::error::OTelSdkResult {
        self.inner.force_flush()
    }

    fn shutdown_with_timeout(&self, timeout: Duration) -> opentelemetry_sdk::error::OTelSdkResult {
        self.inner.shutdown_with_timeout(timeout)
    }

    // Forward the resource as well — without this override the wrapped
    // processor keeps the trait's default behavior and never receives the
    // provider's Resource.
    fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
        self.inner.set_resource(resource);
    }
}

Multiple Processors

Register multiple processors for different purposes:
// Fan-out example: each registered processor independently receives every span.
use opentelemetry_sdk::trace::{
    SdkTracerProvider, SimpleSpanProcessor, BatchSpanProcessor
};
use opentelemetry_stdout::SpanExporter as StdoutExporter;
use opentelemetry_otlp::SpanExporter as OtlpExporter;

// Export to stdout for debugging (synchronous, per-span)
let stdout_processor = SimpleSpanProcessor::new(
    StdoutExporter::default()
);

// Batch export to OTLP backend (background thread, batched)
let otlp_exporter = OtlpExporter::builder()
    .with_tonic()
    .build()?;
let batch_processor = BatchSpanProcessor::builder(otlp_exporter).build();

// Register both processors — invoked in registration order.
let provider = SdkTracerProvider::builder()
    .with_span_processor(stdout_processor)
    .with_span_processor(batch_processor)
    .build();
Processors are invoked in the order they were registered.

Context in Processors

Critical: Do not use Context::current() in on_end. The context it returns belongs to whatever thread runs on_end and is not the context that was active when the span was created or ended.
From opentelemetry-sdk/src/trace/span_processor.rs:87-118:
// ❌ INCORRECT
impl SpanProcessor for MyProcessor {
    fn on_end(&self, span: SpanData) {
        // Context::current() is NOT related to this span! on_end may run on
        // a thread whose current context has nothing to do with the span.
        let cx = Context::current();
    }
}

// ✅ CORRECT - Extract info in on_start, where the span's context is given
impl SpanProcessor for MyProcessor {
    fn on_start(&self, span: &mut Span, cx: &Context) {
        // Extract baggage and store as span attribute while the span is mutable
        if let Some(value) = cx.baggage().get("my-key") {
            span.set_attribute(KeyValue::new("my-key", value.to_string()));
        }
    }

    fn on_end(&self, span: SpanData) {
        // Access the attribute stored in on_start via the finished SpanData
        let my_value = span.attributes.iter()
            .find(|kv| kv.key.as_str() == "my-key");
    }
}

Shutdown and Force Flush

Graceful Shutdown

Always shutdown the provider before application exit:
use opentelemetry_sdk::trace::SdkTracerProvider;

let provider: SdkTracerProvider = /* ... */;

// Application code...

// Shutdown with default 5 second timeout (flushes pending spans).
// Shutdown is one-shot: pick ONE of the two calls below.
provider.shutdown()?;

// Or specify custom timeout
use std::time::Duration;
provider.shutdown_with_timeout(Duration::from_secs(30))?;

Force Flush

Flush pending spans without shutting down:
// Force export of all buffered spans
provider.force_flush()?;

// Continue creating spans...

Best Practices

BatchSpanProcessor minimizes export overhead and prevents blocking application threads. Use SimpleSpanProcessor only for debugging.
Balance between memory usage (queue size) and export frequency. Larger batches reduce overhead but increase memory usage and latency.
Call shutdown() before application exit to ensure all spans are exported. Use sufficient timeout for batch processors to flush.
Never rely on Context::current() in on_end. Extract needed information in on_start and store as span attributes.
In high-throughput scenarios, monitor dropped span counts. Increase max_queue_size if spans are being dropped.

Tuning for High Throughput

For applications generating many spans:
use opentelemetry_sdk::trace::{BatchSpanProcessor, BatchConfigBuilder};
use std::time::Duration;

// `exporter` must already be in scope (e.g. an OTLP exporter built earlier).
let processor = BatchSpanProcessor::builder(exporter)
    .with_batch_config(
        BatchConfigBuilder::default()
            .with_max_queue_size(8192)         // Larger queue
            .with_max_export_batch_size(1024)  // Larger batches
            .with_scheduled_delay(Duration::from_secs(2))  // More frequent exports
            .with_max_concurrent_exports(2)     // Parallel exports
            .build(),
    )
    .build();

Complete Production Example

use opentelemetry::{global, trace::Tracer};
use opentelemetry_sdk::{
    trace::{SdkTracerProvider, BatchSpanProcessor, BatchConfigBuilder, Sampler},
    Resource,
};
use opentelemetry_otlp::SpanExporter;
use std::time::Duration;

// grpc-tonic exporter requires the TracerProvider to be created inside a
// tokio runtime, hence #[tokio::main].
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create OTLP exporter targeting a local collector (gRPC default port)
    let exporter = SpanExporter::builder()
        .with_tonic()
        .with_endpoint("http://localhost:4317")
        .build()?;

    // Configure batch processor: 4096-span queue, 512-span batches, 5 s delay
    let processor = BatchSpanProcessor::builder(exporter)
        .with_batch_config(
            BatchConfigBuilder::default()
                .with_max_queue_size(4096)
                .with_max_export_batch_size(512)
                .with_scheduled_delay(Duration::from_secs(5))
                .build(),
        )
        .build();

    // Create tracer provider with a service name and a parent-based sampler
    // that records 10% of root traces (children follow the parent's decision)
    let provider = SdkTracerProvider::builder()
        .with_resource(Resource::builder()
            .with_service_name("my-service")
            .build())
        .with_sampler(Sampler::ParentBased(
            Box::new(Sampler::TraceIdRatioBased(0.1))
        ))
        .with_span_processor(processor)
        .build();

    // Clone keeps a handle for the explicit shutdown call below
    global::set_tracer_provider(provider.clone());

    // Application code
    let tracer = global::tracer("my-component");
    tracer.in_span("operation", |_cx| {
        // Work here
    });

    // Graceful shutdown: flushes all buffered spans before exit
    provider.shutdown()?;
    Ok(())
}

Next Steps

Sampling

Control which spans are recorded

Overview

Return to tracing overview

Build docs developers (and LLMs) love