Skip to main content
Go’s approach to managing state emphasizes sharing memory by communicating rather than communicating by sharing memory. This channel-based approach aligns with Go’s philosophy and can be an alternative to mutex-based state management.

The Concept

Instead of multiple goroutines accessing shared state protected by mutexes, use a single goroutine that owns the state. Other goroutines send messages to request reads or writes.

Basic Stateful Goroutine

package main

import (
	"fmt"
	"math/rand"
	"sync/atomic"
	"time"
)

// readOp requests the value stored under key; the state goroutine
// replies on the per-request resp channel.
type readOp struct {
	key  int
	resp chan int
}

// writeOp requests that key be set to val; resp signals completion.
type writeOp struct {
	key  int
	val  int
	resp chan bool
}

func main() {
	// Counters incremented atomically by the reader/writer goroutines.
	var readOps uint64
	var writeOps uint64

	// Channels for read and write requests
	reads := make(chan readOp)
	writes := make(chan writeOp)

	// State-owning goroutine
	go func() {
		// Only this goroutine ever touches the map, so no lock is needed.
		var state = make(map[int]int)
		for {
			select {
			case read := <-reads:
				read.resp <- state[read.key]
			case write := <-writes:
				state[write.key] = write.val
				write.resp <- true
			}
		}
	}()

	// Start 100 readers
	for range 100 { // integer range loop requires Go 1.22+
		go func() {
			for {
				read := readOp{
					key:  rand.Intn(5),
					resp: make(chan int),
				}
				reads <- read
				<-read.resp // wait for the reply before counting the op
				atomic.AddUint64(&readOps, 1)
				time.Sleep(time.Millisecond)
			}
		}()
	}

	// Start 10 writers
	for range 10 {
		go func() {
			for {
				write := writeOp{
					key:  rand.Intn(5),
					val:  rand.Intn(100),
					resp: make(chan bool),
				}
				writes <- write
				<-write.resp
				atomic.AddUint64(&writeOps, 1)
				time.Sleep(time.Millisecond)
			}
		}()
	}

	// Let operations run for 1 second
	time.Sleep(time.Second)

	// Report results; the worker goroutines simply die when main returns.
	readOpsFinal := atomic.LoadUint64(&readOps)
	fmt.Println("readOps:", readOpsFinal)
	writeOpsFinal := atomic.LoadUint64(&writeOps)
	fmt.Println("writeOps:", writeOpsFinal)
}
The state (the map) is owned by a single goroutine. All access goes through message passing, eliminating the need for locks.

Key Components

Request/Response Structs

type readOp struct {
	key  int
	resp chan int  // Channel to send response back
}

type writeOp struct {
	key  int
	val  int
	resp chan bool  // Signals that the write has been applied
}
Each operation includes a response channel for the state goroutine to send results back.

State Owner

go func() {
	var state = make(map[int]int)  // State owned by this goroutine
	// Loop forever; only this goroutine ever touches state.
	for {
		select {
		case read := <-reads:
			read.resp <- state[read.key]  // Read and respond
		case write := <-writes:
			state[write.key] = write.val  // Write and respond
			write.resp <- true
		}
	}
}()
The goroutine has exclusive access to the state.

Clients

// Client side of the protocol: build a request, send it, await the reply.
read := readOp{
	key:  5,
	resp: make(chan int),  // Create response channel
}
reads <- read      // Send request
value := <-read.resp  // Wait for response

Advantages

  1. No explicit locks - A single owner goroutine serializes all access, eliminating data races on the state
  2. Clear ownership - One goroutine owns the state
  3. Composable - Easy to add new operations
  4. Idiomatic Go - Follows “share memory by communicating”
  5. Easy to reason about - Sequential access to state

Practical Examples

Key-Value Store

// KVStore is an in-memory string key/value store whose backing map is
// owned by a single background goroutine; all access is via messages.
type KVStore struct {
	reads  chan readRequest
	writes chan writeRequest
}

// readRequest asks for the value of key; the answer arrives on resp.
type readRequest struct {
	key  string
	resp chan string
}

// writeRequest asks to store value under key; resp signals completion.
type writeRequest struct {
	key   string
	value string
	resp  chan bool
}

// NewKVStore creates the store and starts its owner goroutine.
func NewKVStore() *KVStore {
	kv := &KVStore{
		reads:  make(chan readRequest),
		writes: make(chan writeRequest),
	}
	go kv.run()
	return kv
}

// run owns the backing map and serves requests one at a time,
// so reads and writes can never race.
func (kv *KVStore) run() {
	data := make(map[string]string)
	for {
		select {
		case req := <-kv.reads:
			req.resp <- data[req.key]
		case req := <-kv.writes:
			data[req.key] = req.value
			req.resp <- true
		}
	}
}

// Get returns the value stored under key ("" when absent).
func (kv *KVStore) Get(key string) string {
	resp := make(chan string)
	kv.reads <- readRequest{key: key, resp: resp}
	return <-resp
}

// Set stores value under key, blocking until the write is applied.
func (kv *KVStore) Set(key, value string) {
	resp := make(chan bool)
	kv.writes <- writeRequest{key: key, value: value, resp: resp}
	<-resp
}

Counter Service

// CounterService serializes access to a single counter through a
// dedicated goroutine. Each request channel carries a reply channel;
// sending the reply channel IS the request.
type CounterService struct {
	increment chan chan int
	get       chan chan int
	reset     chan chan bool
}

// NewCounterService starts the owner goroutine and returns the service.
func NewCounterService() *CounterService {
	cs := &CounterService{
		increment: make(chan chan int),
		get:       make(chan chan int),
		reset:     make(chan chan bool),
	}
	go cs.run()
	return cs
}

// run owns the counter value; requests are handled strictly one at a time.
func (cs *CounterService) run() {
	n := 0
	for {
		select {
		case reply := <-cs.get:
			reply <- n
		case reply := <-cs.increment:
			n++
			reply <- n
		case reply := <-cs.reset:
			n = 0
			reply <- true
		}
	}
}

// Increment bumps the counter and returns the new value.
func (cs *CounterService) Increment() int {
	reply := make(chan int)
	cs.increment <- reply
	return <-reply
}

// Get returns the current counter value.
func (cs *CounterService) Get() int {
	reply := make(chan int)
	cs.get <- reply
	return <-reply
}

// Reset sets the counter back to zero, blocking until it is done.
func (cs *CounterService) Reset() {
	reply := make(chan bool)
	cs.reset <- reply
	<-reply
}

Session Manager

type Session struct {
	ID      string
	Data    map[string]interface{}
	Expires time.Time
}

type SessionManager struct {
	create chan createRequest
	get    chan getRequest
	delete chan deleteRequest
}

type createRequest struct {
	sessionID string
	resp      chan *Session
}

type getRequest struct {
	sessionID string
	resp      chan *Session
}

type deleteRequest struct {
	sessionID string
	resp      chan bool
}

func NewSessionManager() *SessionManager {
	sm := &SessionManager{
		create: make(chan createRequest),
		get:    make(chan getRequest),
		delete: make(chan deleteRequest),
	}
	go sm.run()
	return sm
}

func (sm *SessionManager) run() {
	sessions := make(map[string]*Session)
	
	for {
		select {
		case req := <-sm.create:
			session := &Session{
				ID:      req.sessionID,
				Data:    make(map[string]interface{}),
				Expires: time.Now().Add(30 * time.Minute),
			}
			sessions[req.sessionID] = session
			req.resp <- session
			
		case req := <-sm.get:
			req.resp <- sessions[req.sessionID]
			
		case req := <-sm.delete:
			delete(sessions, req.sessionID)
			req.resp <- true
		}
	}
}

func (sm *SessionManager) Create(sessionID string) *Session {
	req := createRequest{
		sessionID: sessionID,
		resp:      make(chan *Session),
	}
	sm.create <- req
	return <-req.resp
}

Cache with Expiration

type CacheItem struct {
	Value   interface{}
	Expires time.Time
}

type Cache struct {
	set    chan setRequest
	get    chan getRequest
	delete chan deleteRequest
}

type setRequest struct {
	key   string
	value interface{}
	ttl   time.Duration
	resp  chan bool
}

type getRequest struct {
	key  string
	resp chan interface{}
}

type deleteRequest struct {
	key  string
	resp chan bool
}

func NewCache() *Cache {
	c := &Cache{
		set:    make(chan setRequest),
		get:    make(chan getRequest),
		delete: make(chan deleteRequest),
	}
	go c.run()
	return c
}

func (c *Cache) run() {
	items := make(map[string]CacheItem)
	cleanup := time.NewTicker(1 * time.Minute)
	defer cleanup.Stop()
	
	for {
		select {
		case req := <-c.set:
			items[req.key] = CacheItem{
				Value:   req.value,
				Expires: time.Now().Add(req.ttl),
			}
			req.resp <- true
			
		case req := <-c.get:
			item, ok := items[req.key]
			if !ok || time.Now().After(item.Expires) {
				req.resp <- nil
			} else {
				req.resp <- item.Value
			}
			
		case req := <-c.delete:
			delete(items, req.key)
			req.resp <- true
			
		case <-cleanup.C:
			now := time.Now()
			for key, item := range items {
				if now.After(item.Expires) {
					delete(items, key)
				}
			}
		}
	}
}

func (c *Cache) Set(key string, value interface{}, ttl time.Duration) {
	req := setRequest{key: key, value: value, ttl: ttl, resp: make(chan bool)}
	c.set <- req
	<-req.resp
}

func (c *Cache) Get(key string) interface{} {
	req := getRequest{key: key, resp: make(chan interface{})}
	c.get <- req
	return <-req.resp
}

Stateful Goroutine vs Mutex

| Aspect            | Stateful Goroutine | Mutex                 |
|-------------------|--------------------|-----------------------|
| Concurrency model | Message passing    | Shared memory         |
| Explicit locks    | None               | Required              |
| Complexity        | Higher             | Lower                 |
| Performance       | Good               | Better for simple ops |
| Go idiom          | Very idiomatic     | Less idiomatic        |
| Composability     | Excellent          | Good                  |
Use stateful goroutines when:
  • You want to follow Go’s philosophy
  • State access patterns are complex
  • You need to serialize operations naturally
  • Composability is important
Use mutexes when:
  • State is simple (e.g., a counter)
  • Performance is critical
  • The team is more familiar with locks

Best Practices

  1. One owner per state - Only one goroutine modifies the state
  2. Buffered response channels - Give each resp channel capacity 1 so the state goroutine can reply without blocking, even if the client has given up waiting
  3. Handle shutdown - Provide a way to stop the state goroutine
  4. Keep operations simple - Complex logic in handlers, not clients
  5. Document the protocol - Make message types clear
  6. Consider timeouts - Don’t block forever waiting for responses

Graceful Shutdown

// StatefulService wraps a goroutine-owned map with a clean shutdown
// protocol: closing quit asks the owner to stop; done reports that it
// has fully exited. readOp/writeOp are the request types defined with
// the basic example above.
type StatefulService struct {
	reads  chan readOp
	writes chan writeOp
	quit   chan struct{}
	done   chan struct{}
}

// run serves read/write requests until quit is closed, then signals done.
func (s *StatefulService) run() {
	state := make(map[int]int)
	defer close(s.done)

	for {
		select {
		case <-s.quit:
			return
		case rd := <-s.reads:
			rd.resp <- state[rd.key]
		case wr := <-s.writes:
			state[wr.key] = wr.val
			wr.resp <- true
		}
	}
}

// Shutdown stops the owner goroutine and waits for it to exit.
// Requests sent after Shutdown returns will block forever, since
// nothing receives on reads/writes once run has returned.
func (s *StatefulService) Shutdown() {
	close(s.quit)
	<-s.done
}

Build docs developers (and LLMs) love