Skip to main content

Overview

The Auth0 Go SDK v2 supports two pagination styles:
  1. Page-based pagination - Used by most list endpoints (clients, users, connections, etc.)
  2. Checkpoint pagination - Used by logs and organizations endpoints
All paginated responses use the Page type with built-in iterator support for seamless traversal.

Page-Based Pagination

Most Auth0 Management API endpoints use traditional page-based pagination with page and per_page parameters. The iterator pattern provides the simplest way to process all results:
import (
    "context"
    "fmt"

    "github.com/auth0/go-auth0/v2/management"
)

ctx := context.Background()

// Set up initial request
listRequest := &management.ListClientsRequestParameters{
    PerPage: management.Int(50), // Fetch 50 items per page
}

// Fetch the first page explicitly; the iterator below fetches the rest
// on demand.
clientsPage, err := mgmt.Clients.List(ctx, listRequest)
if err != nil {
    return err
}

// Use iterator to automatically handle pagination
iterator := clientsPage.Iterator()
for iterator.Next(ctx) {
    client := iterator.Current()
    fmt.Printf("Client: %s (%s)\n",
        *client.GetName(),
        *client.GetClientID(),
    )
}

// Next stops on both exhaustion and error; Err distinguishes the two,
// so always check it after the loop.
if iterator.Err() != nil {
    return iterator.Err()
}

Manual Pagination

For more control, manually iterate through pages:
ctx := context.Background()

// Fetch the first page explicitly. Subsequent pages come from
// GetNextPage, so every page is requested exactly once.
//
// (A previous version of this example called List with an incrementing
// Page number at the top of the loop AND GetNextPage at the bottom,
// which fetched each page twice and discarded the GetNextPage result.)
listRequest := &management.ListClientsRequestParameters{
    Page:    management.Int(0),
    PerPage: management.Int(25),
}

clientsPage, err := mgmt.Clients.List(ctx, listRequest)
if err != nil {
    return err
}

for {
    // Process current page results
    for _, client := range clientsPage.Results {
        fmt.Printf("Client: %s (%s)\n",
            *client.GetName(),
            *client.GetClientID(),
        )
    }

    // Try to get next page
    nextPage, err := clientsPage.GetNextPage(ctx)
    if err != nil {
        if errors.Is(err, core.ErrNoPages) {
            // No more pages, we're done
            break
        }
        return err
    }

    // Continue with next page
    clientsPage = nextPage
}

Checkpoint Pagination

Checkpoint pagination is used for endpoints that can return very large result sets, such as logs (time-ordered events) and organizations. Instead of page numbers, you use the ID of the last item as a checkpoint for the next request.

Logs Example with Iterator

// Request up to 100 log entries per underlying API call.
logListRequest := &management.ListLogsRequestParameters{
    Take: management.Int(100), // Fetch 100 logs per request
}

logsPage, err := mgmt.Logs.List(ctx, logListRequest)
if err != nil {
    return err
}

// The iterator advances the checkpoint transparently between requests.
iterator := logsPage.Iterator()
for iterator.Next(ctx) {
    log := iterator.Current()
    fmt.Printf("Log: %s - %s at %s\n",
        *log.GetID(),
        *log.GetType(),
        log.GetDate(),
    )
}

// Always check Err after the loop to surface fetch failures.
if iterator.Err() != nil {
    return iterator.Err()
}

Manual Checkpoint Pagination

For advanced use cases like batching or rate limiting:
// fromLogID holds the checkpoint: the ID of the last log already seen.
// nil means "start from the beginning".
var fromLogID *string

for {
    logListRequest := &management.ListLogsRequestParameters{
        Take: management.Int(100),
    }

    // Resume after the checkpoint on every request but the first.
    if fromLogID != nil {
        logListRequest.From = fromLogID
    }

    logsPage, err := mgmt.Logs.List(ctx, logListRequest)
    if err != nil {
        return err
    }

    // Process logs
    for _, log := range logsPage.Results {
        fmt.Printf("Log: %s - %s\n",
            *log.GetID(),
            *log.GetType(),
        )
    }

    // An empty page means the checkpoint has reached the end of the stream.
    if len(logsPage.Results) == 0 {
        break // No more logs
    }

    // Use the ID of the last log as the checkpoint for the next request
    // NOTE(review): ID is read as a *string field here while other examples
    // use GetID() — confirm against the SDK's Log type.
    lastLog := logsPage.Results[len(logsPage.Results)-1]
    fromLogID = lastLog.ID
}

Advanced Patterns

Parallel Processing with Workers

Process items in parallel using a worker pool:
package main

import (
    "context"
    "fmt"
    "sync"

    "github.com/auth0/go-auth0/v2/management"
)

// processClientsInParallel fans client records out to a fixed pool of
// `workers` goroutines. The fetch goroutine feeds clientChan; the deferred
// close of that channel is what lets the workers' range loops terminate.
func processClientsInParallel(mgmt *management.Management, workers int) error {
    ctx := context.Background()

    // Channel for distributing work (buffered so the fetcher can run
    // ahead of slow workers)
    clientChan := make(chan *management.Client, 100)
    
    // Channel for collecting errors; capacity 1 suffices because only the
    // fetch goroutine sends, and each of its code paths sends at most once
    errChan := make(chan error, 1)

    // Start worker pool
    var wg sync.WaitGroup
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func(workerID int) {
            defer wg.Done()
            for client := range clientChan {
                // Process each client
                fmt.Printf("[Worker %d] Processing: %s\n", 
                    workerID, 
                    *client.GetName(),
                )
                
                // Do work here (e.g., update, validate, export)
                // ...
            }
        }(i)
    }

    // Fetch and distribute clients
    go func() {
        defer close(clientChan)
        
        listRequest := &management.ListClientsRequestParameters{
            PerPage: management.Int(50),
        }

        clientsPage, err := mgmt.Clients.List(ctx, listRequest)
        if err != nil {
            errChan <- err
            return
        }

        iterator := clientsPage.Iterator()
        for iterator.Next(ctx) {
            clientChan <- iterator.Current()
        }

        if iterator.Err() != nil {
            errChan <- iterator.Err()
        }
    }()

    // Wait for workers to finish. Workers exit only after clientChan is
    // closed, which happens only after the fetch goroutine has fully
    // returned — so closing errChan here cannot race with its sender.
    wg.Wait()
    close(errChan)

    // Check for errors. Receiving from the closed, empty channel yields a
    // nil error, so this does not block in the success case.
    if err := <-errChan; err != nil {
        return err
    }

    return nil
}

Rate-Limited Pagination

Control request rate when processing large datasets:
import (
    "context"
    "time"

    "golang.org/x/time/rate"
)

// paginateWithRateLimit walks every user while throttling the loop with a
// token-bucket limiter: one token every 500ms (2 req/s) with a burst of 5.
// The wait happens once per item, which also bounds how fast the iterator
// can issue its underlying page requests.
func paginateWithRateLimit(mgmt *management.Management) error {
    ctx := context.Background()

    // Allow 2 requests per second with burst of 5
    limiter := rate.NewLimiter(rate.Every(500*time.Millisecond), 5)

    params := &management.ListUsersRequestParameters{
        PerPage: management.Int(100),
    }

    firstPage, err := mgmt.Users.List(ctx, params)
    if err != nil {
        return err
    }

    it := firstPage.Iterator()
    for it.Next(ctx) {
        // Block until the limiter grants a token (or ctx is canceled).
        if err := limiter.Wait(ctx); err != nil {
            return err
        }

        u := it.Current()
        fmt.Printf("Processing user: %s\n", *u.GetEmail())

        // Process user
        // ...
    }

    return it.Err()
}

Filtered Pagination

Combine pagination with filtering and searching:
// List users with filtering
userListRequest := &management.ListUsersRequestParameters{
    Search:        management.String(`email:"*@example.com" AND email_verified:true`),
    Sort:          management.String("created_at:1"),
    Fields:        management.String("user_id,email,email_verified,created_at"),
    IncludeFields: management.Bool(true),
    PerPage:       management.Int(50),
}

usersPage, err := mgmt.Users.List(ctx, userListRequest)
if err != nil {
    return err
}

iterator := usersPage.Iterator()
for iterator.Next(ctx) {
    user := iterator.Current()
    fmt.Printf("Verified user: %s (created: %s)\n",
        *user.GetEmail(),
        *user.GetCreatedAt(),
    )
}

Batched Processing

Process items in batches for bulk operations:
// processBatches streams all clients and hands them to processBatch in
// groups of batchSize, flushing any final partial group at the end.
func processBatches(mgmt *management.Management, batchSize int) error {
    ctx := context.Background()

    params := &management.ListClientsRequestParameters{
        PerPage: management.Int(100),
    }

    firstPage, err := mgmt.Clients.List(ctx, params)
    if err != nil {
        return err
    }

    it := firstPage.Iterator()
    pending := make([]*management.Client, 0, batchSize)

    for it.Next(ctx) {
        pending = append(pending, it.Current())
        if len(pending) < batchSize {
            continue
        }

        // Batch is full — flush it and reuse the backing array.
        if err := processBatch(ctx, mgmt, pending); err != nil {
            return err
        }
        pending = pending[:0]
    }

    // Flush the final partial batch, if any.
    if len(pending) > 0 {
        if err := processBatch(ctx, mgmt, pending); err != nil {
            return err
        }
    }

    return it.Err()
}

// processBatch performs one bulk operation over a slice of clients.
// Stub: replace the body with the real bulk work.
func processBatch(ctx context.Context, mgmt *management.Management, clients []*management.Client) error {
    count := len(clients)
    fmt.Printf("Processing batch of %d clients\n", count)
    // Perform bulk operation
    // ...
    return nil
}

Progress Tracking

Track pagination progress for long-running operations:
import (
    "fmt"
    "sync/atomic"
)

// paginateWithProgress pages through all users while a background goroutine
// prints running totals every 5 seconds. The counters are atomic because the
// reporter goroutine reads them while the main loop writes them.
func paginateWithProgress(mgmt *management.Management) error {
    ctx := context.Background()
    
    var (
        processedCount atomic.Int64
        errorCount     atomic.Int64
    )

    // Start progress reporter; closing `done` stops it on any return path
    done := make(chan struct{})
    go func() {
        ticker := time.NewTicker(5 * time.Second)
        defer ticker.Stop()
        
        for {
            select {
            case <-ticker.C:
                fmt.Printf("Progress: %d processed, %d errors\n",
                    processedCount.Load(),
                    errorCount.Load(),
                )
            case <-done:
                return
            }
        }
    }()
    defer close(done) // signal the reporter goroutine to exit

    listRequest := &management.ListUsersRequestParameters{
        PerPage: management.Int(100),
    }

    usersPage, err := mgmt.Users.List(ctx, listRequest)
    if err != nil {
        return err
    }

    iterator := usersPage.Iterator()
    for iterator.Next(ctx) {
        user := iterator.Current()
        
        // Process user; failures are counted but do not abort the run
        if err := processUser(ctx, mgmt, user); err != nil {
            errorCount.Add(1)
            fmt.Printf("Error processing %s: %v\n", *user.GetEmail(), err)
        } else {
            processedCount.Add(1)
        }
    }

    // Final stats
    fmt.Printf("\nCompleted: %d processed, %d errors\n",
        processedCount.Load(),
        errorCount.Load(),
    )

    return iterator.Err()
}

Resume from Checkpoint

Implement resumable pagination for long operations:
// PaginationCheckpoint records how far a pagination run has progressed so a
// later run can resume instead of starting over.
type PaginationCheckpoint struct {
    LastProcessedID string    // ID of the last item successfully processed
    PageNumber      int       // page the last processed item was found on
    Timestamp       time.Time // when this checkpoint was written
}

// saveCheckpoint persists the checkpoint so a later run can resume from it.
// Stub: plug in your own storage backend.
func saveCheckpoint(checkpoint PaginationCheckpoint) error {
    // Save to file, database, etc.
    return nil
}

// loadCheckpoint restores a previously saved checkpoint.
// Stub: as written it returns (nil, nil) when nothing is stored, so callers
// must handle a nil checkpoint in addition to a non-nil error.
func loadCheckpoint() (*PaginationCheckpoint, error) {
    // Load from file, database, etc.
    return nil, nil
}

// resumablePagination processes all clients, persisting a checkpoint after
// each item so an interrupted run can pick up where it left off.
func resumablePagination(mgmt *management.Management) error {
    ctx := context.Background()

    // Try to load checkpoint. Fall back to a fresh one both on error AND
    // when no checkpoint exists yet — loadCheckpoint returns (nil, nil) in
    // that case, and guarding only err != nil would dereference a nil
    // pointer below.
    checkpoint, err := loadCheckpoint()
    if err != nil || checkpoint == nil {
        checkpoint = &PaginationCheckpoint{PageNumber: 0}
    }

    page := checkpoint.PageNumber

    // Fetch the resume page once; later pages come from GetNextPage, so
    // each page is requested a single time (previously List was re-called
    // every iteration in addition to GetNextPage, fetching pages twice).
    listRequest := &management.ListClientsRequestParameters{
        Page:    management.Int(page),
        PerPage: management.Int(50),
    }

    clientsPage, err := mgmt.Clients.List(ctx, listRequest)
    if err != nil {
        return err
    }

    for {
        for _, client := range clientsPage.Results {
            // Skip already processed items.
            // NOTE(review): the <= comparison assumes client IDs are
            // returned in ascending lexicographic order — confirm against
            // the API's default sort.
            if checkpoint.LastProcessedID != "" &&
                *client.GetClientID() <= checkpoint.LastProcessedID {
                continue
            }

            // Process client
            if err := processClient(ctx, mgmt, client); err != nil {
                return err
            }

            // Update and persist the checkpoint after each successful item
            checkpoint.LastProcessedID = *client.GetClientID()
            checkpoint.PageNumber = page
            checkpoint.Timestamp = time.Now()

            if err := saveCheckpoint(*checkpoint); err != nil {
                return err
            }
        }

        // Try next page
        nextPage, err := clientsPage.GetNextPage(ctx)
        if err != nil {
            if errors.Is(err, core.ErrNoPages) {
                break
            }
            return err
        }

        clientsPage = nextPage
        page++
    }

    return nil
}

Best Practices

Use Iterators

Prefer the iterator pattern for simplicity and automatic error handling. It reduces boilerplate and makes code more readable.

Optimize Page Size

Balance between fewer requests (larger pages) and memory usage. Typical range: 25-100 items per page.

Handle Errors Properly

Always check iterator.Err() after iteration completes. For manual pagination, distinguish between ErrNoPages and actual errors.

Implement Rate Limiting

Use rate limiting when processing large datasets to avoid hitting API rate limits.

Performance Tips

1. Request Only Needed Fields

Use Fields parameter to reduce response size:
Fields: management.String("client_id,name,app_type")
2. Use Appropriate Page Sizes

Larger pages = fewer requests but more memory:
  • Small datasets: 25-50 items
  • Large datasets: 50-100 items
  • Never exceed API limits (typically 100)
3. Implement Parallel Processing

Process items concurrently using worker pools for CPU-intensive operations.
4. Cache When Possible

For frequently accessed data, implement caching to reduce API calls.

Error Handling

Always handle pagination errors appropriately:
import (
    "errors"
    "fmt"

    "github.com/auth0/go-auth0/v2/core"
)

// Iterate normally, then inspect the terminal error exactly once.
iterator := clientsPage.Iterator()
for iterator.Next(ctx) {
    client := iterator.Current()
    // Process client
}

// errors.Is walks wrapped error chains, so sentinel and context errors are
// detected even when the SDK wraps them. (Requires the "context" import in
// addition to the ones shown above.)
if err := iterator.Err(); err != nil {
    if errors.Is(err, core.ErrNoPages) {
        // This should not happen with iterator
        fmt.Println("Unexpectedly reached end of pages")
    } else if errors.Is(err, context.Canceled) {
        fmt.Println("Operation canceled")
    } else if errors.Is(err, context.DeadlineExceeded) {
        fmt.Println("Operation timed out")
    } else {
        // Handle other errors
        return fmt.Errorf("pagination error: %w", err)
    }
}

Build docs developers (and LLMs) love