Mutexes in Go
For more complex state than simple counters, you can use a mutex (mutual exclusion lock) to safely access data across multiple goroutines.

Basic Mutex Usage

package main

import (
	"fmt"
	"sync"
)

// Container holds a map of named counters with synchronized access.
// The zero value is NOT ready to use: counters must be initialized
// (e.g. via a composite literal) before the first call to inc.
type Container struct {
	mu       sync.Mutex // guards counters
	counters map[string]int
}

// inc atomically increments the counter stored under name.
// Safe for concurrent use by multiple goroutines.
func (c *Container) inc(name string) {
	// Lock before accessing the shared map
	c.mu.Lock()
	defer c.mu.Unlock() // Unlock when function returns, even on panic
	
	c.counters[name]++
}

func main() {
	c := Container{
		counters: map[string]int{"a": 0, "b": 0},
	}

	var wg sync.WaitGroup

	// doIncrement bumps the named counter n times.
	doIncrement := func(name string, n int) {
		for range n { // integer range loop requires Go 1.22+
			c.inc(name)
		}
	}

	// Multiple goroutines accessing same counter.
	// NOTE: WaitGroup.Go requires Go 1.25+; on older versions use
	// wg.Add(1) plus `go func() { defer wg.Done(); ... }()`.
	wg.Go(func() { doIncrement("a", 10000) })
	wg.Go(func() { doIncrement("a", 10000) })
	wg.Go(func() { doIncrement("b", 10000) })

	wg.Wait()
	fmt.Println(c.counters) // map[a:20000 b:10000] — deterministic thanks to the mutex
}
The mutex ensures that only one goroutine can access the protected data at a time, preventing race conditions.

Mutex Methods

Lock()

Acquire the mutex:
mu.Lock()
// Critical section - only one goroutine can be here
mu.Unlock()

Unlock()

Release the mutex:
mu.Lock()
defer mu.Unlock()  // Recommended: use defer
// Critical section
Always use defer mu.Unlock() immediately after Lock() to ensure the mutex is released even if the function panics.

RWMutex (Read-Write Mutex)

For read-heavy workloads, use sync.RWMutex to allow multiple concurrent readers:
// Cache is a string-to-string map that many goroutines may read
// concurrently while writes are serialized by an RWMutex.
type Cache struct {
	mu    sync.RWMutex
	items map[string]string
}

// Get returns the value stored under key and whether it was present.
// Any number of goroutines may execute Get at the same time.
func (c *Cache) Get(key string) (string, bool) {
	c.mu.RLock() // shared (read) lock
	v, present := c.items[key]
	c.mu.RUnlock()
	return v, present
}

// Set stores value under key, replacing any previous entry.
// The write lock excludes all readers and other writers.
func (c *Cache) Set(key, value string) {
	c.mu.Lock() // exclusive (write) lock
	c.items[key] = value
	c.mu.Unlock()
}
RWMutex allows either:
  • Multiple simultaneous readers (RLock), OR
  • One exclusive writer (Lock)

RWMutex Methods

RLock() / RUnlock()

Acquire/release a read lock:
mu.RLock()
defer mu.RUnlock()
// Can read safely, multiple goroutines can hold RLock

Lock() / Unlock()

Acquire/release a write lock:
mu.Lock()
defer mu.Unlock()
// Exclusive access, no other readers or writers

Practical Examples

Thread-Safe Counter

// SafeCounter is a mutex-protected map of named counters, safe for
// concurrent use. The v map must be initialized before the first Inc.
type SafeCounter struct {
	mu sync.Mutex // guards v
	v  map[string]int
}

// Inc atomically increments the counter stored under key.
func (c *SafeCounter) Inc(key string) {
	c.mu.Lock()
	// Deferred so the lock is released even if the increment panics
	// (e.g. on a nil map) — a bare Unlock after the write would leave
	// the mutex held forever in that case.
	defer c.mu.Unlock()
	c.v[key]++
}

// Value returns the current count for key (zero if never incremented).
func (c *SafeCounter) Value(key string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.v[key]
}

Thread-Safe Cache

type Cache struct {
	mu    sync.RWMutex
	items map[string]CacheItem
}

type CacheItem struct {
	Value   interface{}
	Expires time.Time
}

func (c *Cache) Get(key string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	
	item, ok := c.items[key]
	if !ok || time.Now().After(item.Expires) {
		return nil, false
	}
	return item.Value, true
}

func (c *Cache) Set(key string, value interface{}, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	
	c.items[key] = CacheItem{
		Value:   value,
		Expires: time.Now().Add(ttl),
	}
}

func (c *Cache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	
	delete(c.items, key)
}

Configuration Manager

// Config is a concurrency-safe string key/value store for configuration.
// data must be initialized before use.
type Config struct {
	mu   sync.RWMutex // guards data
	data map[string]string
}

// Get returns the value for key ("" when absent). Multiple goroutines
// may call Get concurrently.
func (c *Config) Get(key string) string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.data[key]
}

// Set stores value under key.
func (c *Config) Set(key, value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = value
}

// GetAll returns a snapshot of every setting. A copy is returned so the
// caller cannot mutate the map guarded by the mutex.
func (c *Config) GetAll() map[string]string {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// Named "snapshot" rather than "copy" to avoid shadowing the
	// builtin copy function.
	snapshot := make(map[string]string, len(c.data))
	for k, v := range c.data {
		snapshot[k] = v
	}
	return snapshot
}

Connection Pool

// ConnectionPool is a mutex-guarded LIFO free list of connections,
// bounded at maxSize entries.
type ConnectionPool struct {
	mu          sync.Mutex // guards connections
	connections []Connection
	maxSize     int
}

// Acquire pops the most recently released connection from the pool, or
// returns an error when the pool is empty.
func (cp *ConnectionPool) Acquire() (Connection, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()

	n := len(cp.connections)
	if n == 0 {
		return nil, errors.New("no connections available")
	}

	last := cp.connections[n-1]
	cp.connections = cp.connections[:n-1]
	return last, nil
}

// Release returns conn to the pool, closing it instead when the pool is
// already at capacity.
func (cp *ConnectionPool) Release(conn Connection) {
	cp.mu.Lock()
	defer cp.mu.Unlock()

	if len(cp.connections) >= cp.maxSize {
		conn.Close()
		return
	}
	cp.connections = append(cp.connections, conn)
}

Metrics Collector

// Metrics aggregates named counters and gauges behind an RWMutex.
// Both maps must be initialized before use.
type Metrics struct {
	mu       sync.RWMutex // guards counters and gauges
	counters map[string]int64
	gauges   map[string]float64
}

// IncrementCounter adds delta to the named counter.
func (m *Metrics) IncrementCounter(name string, delta int64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.counters[name] += delta
}

// SetGauge records the latest value for the named gauge.
func (m *Metrics) SetGauge(name string, value float64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.gauges[name] = value
}

// GetSnapshot returns a combined copy of all counters and gauges.
// When a counter and a gauge share a name, the gauge value wins.
func (m *Metrics) GetSnapshot() map[string]interface{} {
	m.mu.RLock()
	defer m.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(m.counters)+len(m.gauges))
	for name, count := range m.counters {
		snapshot[name] = count
	}
	for name, gauge := range m.gauges {
		snapshot[name] = gauge
	}
	return snapshot
}

Mutex Best Practices

1. Always Use Defer

// GOOD: the deferred Unlock runs on every exit path, including panics.
func (c *Container) update() {
	c.mu.Lock()
	defer c.mu.Unlock()  // Always unlocks, even on panic
	// Update data
}

// BAD - risky if panic occurs: if the code between Lock and Unlock
// panics or returns early, the mutex stays locked forever and every
// later Lock call deadlocks.
func (c *Container) update() {
	c.mu.Lock()
	// Update data
	c.mu.Unlock()  // May not be reached if panic
}

2. Keep Critical Sections Small

// BAD - holds lock too long: every other reader and writer is blocked
// for the full duration of expensiveComputation.
func (c *Cache) Process(key string) Result {
	c.mu.Lock()
	defer c.mu.Unlock()
	
	data := c.data[key]
	result := expensiveComputation(data)  // Long operation!
	c.data[key] = result
	return result
}

// GOOD - minimize lock time
// NOTE(review): releasing the lock between the read and the write means
// two concurrent Process calls can interleave (last write wins). That
// trade-off is usually acceptable for a cache — confirm it is for yours.
func (c *Cache) Process(key string) Result {
	c.mu.RLock()
	data := c.data[key]
	c.mu.RUnlock()
	
	result := expensiveComputation(data)  // Outside lock
	
	c.mu.Lock()
	c.data[key] = result
	c.mu.Unlock()
	
	return result
}

3. Don’t Copy Mutexes

Mutexes must not be copied. Always pass by pointer or embed in structs.
// BAD - copies mutex
// Passing Container by value copies its sync.Mutex; locking the copy
// protects nothing shared (go vet's copylocks check flags this).
func processContainer(c Container) {  // Passed by value!
	c.mu.Lock()  // Locks a COPY
	defer c.mu.Unlock()
}

// GOOD - pass by pointer
// The pointer receiver shares the one real mutex with every caller.
func processContainer(c *Container) {
	c.mu.Lock()
	defer c.mu.Unlock()
}

4. Avoid Nested Locks (Deadlock)

// BAD - potential deadlock
// If goroutine 1 locks a and goroutine 2 locks b, each then blocks
// forever waiting for the lock the other already holds.
func (a *AccountA) Transfer(b *AccountB, amount int) {
	a.mu.Lock()
	b.mu.Lock()  // If another goroutine locks b then a...
	defer a.mu.Unlock()
	defer b.mu.Unlock()
	// Transfer logic
}

// GOOD - consistent lock ordering
// Acquiring the two locks in a globally consistent order (here: by ID)
// makes the circular wait — and thus the deadlock — impossible.
func Transfer(a, b *Account, amount int) {
	// Always lock in same order (e.g., by ID)
	if a.ID < b.ID {
		a.mu.Lock()
		b.mu.Lock()
	} else {
		b.mu.Lock()
		a.mu.Lock()
	}
	defer a.mu.Unlock()
	defer b.mu.Unlock()
	// Transfer logic
}

Common Pitfalls

Forgetting to Unlock

// BAD
mu.Lock()
if condition {
	return  // Forgot to unlock!
}
mu.Unlock()

// GOOD
mu.Lock()
defer mu.Unlock()
if condition {
	return  // Automatically unlocks
}

Locking for Too Long

// BAD - I/O inside lock
mu.Lock()
data := readFromDatabase()  // Slow!
mu.Unlock()

// GOOD - I/O outside lock
data := readFromDatabase()
mu.Lock()
processData(data)
mu.Unlock()

Using Wrong Lock Type

// BAD - write lock for read
// An exclusive Lock serializes all readers even though none of them
// mutates the map.
func (c *Cache) Get(key string) string {
	c.mu.Lock()  // Overkill for read!
	defer c.mu.Unlock()
	return c.data[key]
}

// GOOD - read lock for read
// RLock lets any number of readers proceed in parallel; only writers
// (Lock) are exclusive.
func (c *Cache) Get(key string) string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.data[key]
}

Mutex vs Atomic vs Channel

| Use Case       | Best Choice | Why                      |
| -------------- | ----------- | ------------------------ |
| Simple counter | Atomic      | Fastest, lock-free       |
| Complex struct | Mutex       | Protects multiple fields |
| Communication  | Channel     | Built for messaging      |
| Read-heavy map | RWMutex     | Multiple readers OK      |
| Coordination   | Channel     | Idiomatic Go             |

Performance Considerations

RWMutex read locks are only beneficial when reads significantly outnumber writes. Profile to verify.

Benchmarks (approximate)

  • Mutex Lock/Unlock: ~20-40 ns
  • RWMutex RLock/RUnlock: ~15-25 ns
  • RWMutex Lock/Unlock: ~25-50 ns
  • Atomic operation: ~1-5 ns

Detecting Race Conditions

Use Go’s race detector:
go run -race main.go
go test -race ./...
go build -race

Build docs developers (and LLMs) love