Skip to main content
Basic sends and receives on channels are blocking. However, you can use select with a default clause to implement non-blocking sends, receives, and even non-blocking multi-way selects.

Non-Blocking Receive

package main

import "fmt"

func main() {
	// Unbuffered channel with no concurrent sender: a plain receive
	// would block forever, so we probe it with select + default.
	//
	// NOTE: the original example also declared
	//   signals := make(chan bool)
	// without using it — an unused local variable is a compile error
	// in Go, so it has been removed from this snippet.
	messages := make(chan string)

	// Non-blocking receive: nothing has been sent on messages, so the
	// default case runs immediately instead of blocking.
	select {
	case msg := <-messages:
		fmt.Println("received message", msg)
	default:
		fmt.Println("no message received")
	}
}
The default case executes immediately if no other case is ready, making the operation non-blocking.

Non-Blocking Send

msg := "hi"
select {
case messages <- msg:
	fmt.Println("sent message", msg)
default:
	fmt.Println("no message sent")
}
If the channel has no receiver ready, the default case executes instead of blocking.

Multi-Way Non-Blocking Select

select {
case msg := <-messages:
	fmt.Println("received message", msg)
case sig := <-signals:
	fmt.Println("received signal", sig)
default:
	fmt.Println("no activity")
}
Attempts to receive from multiple channels. If none are ready, executes the default case.
This pattern is useful for checking multiple channels without blocking, like polling for updates.

When to Use Non-Blocking Operations

1. Polling Pattern

for {
	select {
	case update := <-updates:
		applyUpdate(update)
	default:
		// Do other work while no updates available
		doOtherWork()
		time.Sleep(100 * time.Millisecond)
	}
}

2. Try-Send Pattern

// trySend attempts a non-blocking send of value on ch.
// It reports whether the value was accepted; when no receiver is
// ready and the buffer (if any) is full, the value is dropped.
func trySend(ch chan<- int, value int) bool {
	sent := false
	select {
	case ch <- value:
		sent = true
	default:
		// No receiver ready and buffer full: give up immediately.
	}
	return sent
}

if trySend(queue, item) {
	fmt.Println("Item queued")
} else {
	fmt.Println("Queue full, item dropped")
}

3. Try-Receive Pattern

// tryReceive performs a non-blocking receive from ch.
// It returns the received value and true, or the zero value and
// false when no value is immediately available.
func tryReceive(ch <-chan int) (int, bool) {
	var v int
	ok := false
	select {
	case v = <-ch:
		ok = true
	default:
		// Nothing buffered and no sender waiting.
	}
	return v, ok
}

if value, ok := tryReceive(results); ok {
	fmt.Println("Got result:", value)
} else {
	fmt.Println("No result available")
}

4. Draining a Channel

// drainChannel removes every immediately available value from ch and
// returns them in receive order. It never blocks: draining stops as
// soon as a receive would have to wait. An empty channel yields a
// nil slice.
func drainChannel(ch <-chan int) []int {
	var drained []int
loop:
	for {
		select {
		case v := <-ch:
			drained = append(drained, v)
		default:
			break loop
		}
	}
	return drained
}

Complete Example

package main

import (
	"fmt"
	"time"
)

func main() {
	// Both channels are unbuffered and no other goroutine exists, so
	// every send/receive case below would block; each select therefore
	// falls through to its default immediately.
	messages := make(chan string)
	signals := make(chan bool)

	// 1. Non-blocking receive: nothing has been sent yet.
	select {
	case m := <-messages:
		fmt.Println("received message", m)
	default:
		fmt.Println("no message received")
	}

	// 2. Non-blocking send: no receiver is waiting on messages.
	msg := "hi"
	select {
	case messages <- msg:
		fmt.Println("sent message", msg)
	default:
		fmt.Println("no message sent")
	}

	// 3. Multi-way non-blocking select: neither channel is ready.
	select {
	case m := <-messages:
		fmt.Println("received message", m)
	case s := <-signals:
		fmt.Println("received signal", s)
	default:
		fmt.Println("no activity")
	}
}

Blocking vs Non-Blocking

| Operation | Blocking | Non-Blocking |
| --- | --- | --- |
| Receive | `msg := <-ch` | `select { case msg := <-ch: ...; default: ... }` |
| Send | `ch <- value` | `select { case ch <- value: ...; default: ... }` |
| Behavior | Waits until the other side is ready | Returns immediately |

Practical Patterns

Rate Limiter with Drop

// RateLimiter is a token-bucket limiter: each Allow consumes one
// token and the bucket is topped up to capacity once per second.
type RateLimiter struct {
	tokens chan struct{} // buffered to the bucket capacity
	done   chan struct{} // closed by Stop to end the refill goroutine
}

// NewRateLimiter returns a limiter that admits at most rate calls to
// Allow per second (in bursts of up to rate). Call Stop when the
// limiter is no longer needed so its refill goroutine can exit.
func NewRateLimiter(rate int) *RateLimiter {
	rl := &RateLimiter{
		tokens: make(chan struct{}, rate),
		done:   make(chan struct{}),
	}

	// Start with a full bucket so callers are not throttled at startup.
	for i := 0; i < rate; i++ {
		rl.tokens <- struct{}{}
	}

	// Refill the bucket once per second until Stop is called.
	// The original used time.Tick, whose ticker can never be stopped,
	// leaking both the ticker and this goroutine for the program's
	// lifetime; time.NewTicker plus a done channel fixes that.
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-rl.done:
				return
			case <-ticker.C:
				for i := 0; i < rate; i++ {
					select {
					case rl.tokens <- struct{}{}:
					default:
						// Bucket already full; drop the extra token.
					}
				}
			}
		}
	}()

	return rl
}

// Allow reports whether a request may proceed, consuming one token.
// It never blocks: when the bucket is empty it returns false.
func (rl *RateLimiter) Allow() bool {
	select {
	case <-rl.tokens:
		return true
	default:
		return false
	}
}

// Stop terminates the background refill goroutine. The limiter must
// not be used after Stop returns.
func (rl *RateLimiter) Stop() {
	close(rl.done)
}

// Usage:
limiter := NewRateLimiter(100)
if limiter.Allow() {
	handleRequest()
} else {
	http.Error(w, "rate limit exceeded", 429)
}

Cache with Non-Blocking Update

// Cache funnels writes through a channel so updates can be attempted
// without blocking the caller.
type Cache struct {
	updates chan CacheUpdate       // pending writes, applied by processUpdates
	data    map[string]interface{} // backing store; mutated only by processUpdates
}

// TryUpdate queues a write of value under key without blocking.
// It reports whether the update was accepted; when the update buffer
// is full the write is silently dropped and false is returned.
func (c *Cache) TryUpdate(key string, value interface{}) bool {
	queued := false
	select {
	case c.updates <- CacheUpdate{key, value}:
		queued = true
	default:
		// Buffer full: skip this update rather than block the caller.
	}
	return queued
}

// processUpdates applies queued writes until the updates channel is
// closed. It is the sole writer of c.data.
func (c *Cache) processUpdates() {
	for u := range c.updates {
		c.data[u.Key] = u.Value
	}
}

Event Logger with Overflow Protection

// Logger queues events into a bounded buffer; when the buffer is full
// events are dropped and counted rather than blocking the caller.
type Logger struct {
	events  chan Event   // buffered event queue
	dropped atomic.Int64 // events discarded because the queue was full
}

// Log enqueues event without blocking. If the buffer is full the
// event is discarded and the dropped counter is incremented.
func (l *Logger) Log(event Event) {
	select {
	case l.events <- event:
		// Queued successfully.
	default:
		l.dropped.Add(1)
	}
}

// Stats reports the current queue depth and the running total of
// dropped events.
func (l *Logger) Stats() map[string]int64 {
	stats := make(map[string]int64, 2)
	stats["buffered"] = int64(len(l.events))
	stats["dropped"] = l.dropped.Load()
	return stats
}

Checking Channel Status

// hasData reports whether a value could be received from ch right now.
// NOTE: the probe is destructive — a value that is present is consumed
// and discarded — and the answer may be stale immediately in
// concurrent programs.
func hasData(ch <-chan int) bool {
	got := false
	select {
	case <-ch:
		got = true
	default:
	}
	return got
}

// isEmpty reports whether no value is immediately available on ch.
// NOTE: a value that is present is consumed and discarded by the
// probe, and the result may be stale as soon as it is returned.
func isEmpty(ch <-chan int) bool {
	select {
	case <-ch:
		return false
	default:
	}
	return true
}
These checks are inherently racy in concurrent programs. The channel state may change immediately after checking.

Common Pitfalls

Busy Waiting

// BAD: Wastes CPU
for {
	select {
	case msg := <-messages:
		process(msg)
	default:
		// Spins in tight loop!
	}
}

// GOOD: Block when no work
for msg := range messages {
	process(msg)
}

// GOOD: Non-blocking with sleep
for {
	select {
	case msg := <-messages:
		process(msg)
	default:
		time.Sleep(100 * time.Millisecond)
	}
}

Silent Data Loss

// BAD: Silently drops data
select {
case results <- result:
default:
	// Data lost without notice!
}

// GOOD: Track or log drops
select {
case results <- result:
default:
	droppedCount.Add(1)
	log.Warn("Result dropped due to full buffer")
}

Race Conditions

// BAD: Race condition
if len(ch) > 0 {
	// Channel might be empty now!
	value := <-ch
}

// GOOD: Atomic check-and-receive
select {
case value := <-ch:
	process(value)
default:
	// Handle empty channel
}

Performance Characteristics

Non-blocking operations are extremely fast (~10-20ns) when using the default clause, making them suitable for high-throughput scenarios.

Benchmarks (approximate)

  • Blocking receive: 50-100 ns (when ready)
  • Non-blocking receive (ready): 30-50 ns
  • Non-blocking receive (default): 10-20 ns
  • Busy loop overhead: Significant CPU waste

Best Practices

  1. Avoid busy waiting - Always add sleep or blocking in loops
  2. Track dropped data - Monitor and alert on drops
  3. Use buffered channels - Reduce blocking frequency
  4. Document non-blocking behavior - Callers should know data might be dropped
  5. Consider alternatives - Sometimes blocking is simpler and better
  6. Monitor metrics - Track drop rates and buffer utilization

When NOT to Use Non-Blocking

  1. Critical data - When you cannot afford to lose messages
  2. Simple patterns - Blocking is simpler for straightforward cases
  3. Synchronization - When you need strong ordering guarantees
  4. Error handling - Harder to handle failures gracefully

Alternative Patterns

Instead of non-blocking operations, consider:

Larger Buffers

// Instead of dropping, buffer more
ch := make(chan Event, 10000)

Timeouts

// Instead of default, use timeout
select {
case ch <- value:
case <-time.After(100 * time.Millisecond):
	return ErrTimeout
}

Backpressure

// Block and apply backpressure
ch <- value  // Slow down sender

Build docs developers (and LLMs) love