Skip to main content
POST
/
api
/
v1
/
agent_heartbeat
Agent Heartbeat
curl --request POST \
  --url https://api.example.com/api/v1/agent_heartbeat
{
  "error": {}
}

Method Signature

func (c *Client) AgentHeartbeat(ctx context.Context) error

Description

Sends a heartbeat signal to update the agent’s last_seen timestamp. This endpoint is used by agents to indicate they are still active and running. The agent ID is automatically extracted from the agent authentication token, so no explicit agent ID parameter is required. Regular heartbeats are essential for maintaining the agent’s active status in the system. Agents that fail to send heartbeats within the expected interval will be marked as inactive.

Authentication

This endpoint requires agent token authentication. The agent token should be set in the client configuration. The agent ID is extracted from this token automatically.
// Create client with agent token
c := client.New("agent-token-here")

Parameters

ctx
context.Context
required
Context for request cancellation and deadlines

Response

error
error
Returns nil on successful heartbeat. Error is returned if:
  • Missing or invalid agent token (401)
  • Agent not found (404)
  • Internal server error (500)

Example Usage

Basic Heartbeat

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/garnet-org/api/client"
)

func main() {
	// Initialize an API client authenticated with the agent token.
	c := client.New("agent-token-abc123")

	// Fire a single heartbeat; exit with a diagnostic on failure.
	if err := c.AgentHeartbeat(context.Background()); err != nil {
		log.Fatalf("Heartbeat failed: %v", err)
	}

	fmt.Println("Heartbeat sent successfully")
}

Periodic Heartbeat Loop

import "time"

// RunHeartbeatLoop sends a heartbeat on every tick of interval, forever.
// Each attempt is bounded by a 10-second timeout so a slow server cannot
// wedge the loop. The loop has no stop condition; prefer a signal- or
// context-aware variant (see RunHeartbeatWithShutdown) when the caller
// needs graceful shutdown.
func RunHeartbeatLoop(c *client.Client, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	// A select with a single case is redundant (staticcheck S1000);
	// range directly over the ticker channel instead.
	for range ticker.C {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		err := c.AgentHeartbeat(ctx)
		cancel()

		if err != nil {
			log.Printf("Heartbeat failed: %v", err)
		} else {
			log.Println("Heartbeat sent successfully")
		}
	}
}

// Usage
func main() {
	c := client.New("agent-token-abc123")

	// Run the heartbeat loop in the background, one beat every 30 seconds.
	go RunHeartbeatLoop(c, 30*time.Second)

	// Block the main goroutine forever so the process stays alive.
	select {}
}

Heartbeat with Graceful Shutdown

import (
	"os"
	"os/signal"
	"syscall"
	"time"
)

// RunHeartbeatWithShutdown periodically sends heartbeats and returns
// cleanly when the process receives SIGINT or SIGTERM.
func RunHeartbeatWithShutdown(c *client.Client, interval time.Duration) {
	// Listen for termination signals so the loop can exit gracefully.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case sig := <-stop:
			log.Printf("Received signal %v, stopping heartbeat", sig)
			return

		case <-ticker.C:
			// Bound each request so a slow server cannot wedge the loop.
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			if err := c.AgentHeartbeat(ctx); err != nil {
				log.Printf("Heartbeat failed: %v", err)
			} else {
				log.Println("Heartbeat sent")
			}
			cancel()
		}
	}
}

Heartbeat with Retry Logic

import "time"

// SendHeartbeatWithRetry attempts a heartbeat up to maxRetries times,
// sleeping with exponential backoff (1s, 2s, 4s, ...) between attempts.
// It returns nil on the first success, or the last error wrapped with
// context after all attempts are exhausted.
func SendHeartbeatWithRetry(c *client.Client, maxRetries int) error {
	var lastErr error

	for attempt := 1; attempt <= maxRetries; attempt++ {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		lastErr = c.AgentHeartbeat(ctx)
		cancel()

		if lastErr == nil {
			return nil
		}
		log.Printf("Heartbeat attempt %d/%d failed: %v", attempt, maxRetries, lastErr)

		// Double the wait before each subsequent attempt; no sleep after
		// the final failure.
		if attempt < maxRetries {
			time.Sleep(time.Duration(1<<uint(attempt-1)) * time.Second)
		}
	}

	return fmt.Errorf("heartbeat failed after %d attempts: %w", maxRetries, lastErr)
}

Heartbeat with Health Monitoring

import (
	"sync"
	"time"
)

// HeartbeatMonitor periodically sends agent heartbeats and tracks
// success/failure state so callers can query agent health.
// Contains a mutex; must not be copied — use via *HeartbeatMonitor.
type HeartbeatMonitor struct {
	client        *client.Client // authenticated API client used to send heartbeats
	interval      time.Duration // delay between heartbeat attempts
	lastSuccess   time.Time // time of the most recent successful heartbeat
	consecutiveFails int // failures since the last success; reset to 0 on success
	mu            sync.RWMutex // guards lastSuccess and consecutiveFails
}

// NewHeartbeatMonitor builds a monitor for the given client and interval.
// lastSuccess is seeded with the current time so a freshly created monitor
// reports healthy until the first heartbeat result arrives.
func NewHeartbeatMonitor(c *client.Client, interval time.Duration) *HeartbeatMonitor {
	m := &HeartbeatMonitor{client: c, interval: interval}
	m.lastSuccess = time.Now()
	return m
}

// Run sends one heartbeat per interval tick until ctx is cancelled.
// It blocks; call it in its own goroutine for background operation.
func (h *HeartbeatMonitor) Run(ctx context.Context) {
	ticker := time.NewTicker(h.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			h.sendHeartbeat()
		case <-ctx.Done():
			return
		}
	}
}

// sendHeartbeat performs one heartbeat attempt and updates the
// success/failure counters under the write lock.
func (h *HeartbeatMonitor) sendHeartbeat() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	heartbeatErr := h.client.AgentHeartbeat(ctx)

	h.mu.Lock()
	defer h.mu.Unlock()

	if heartbeatErr == nil {
		// Announce recovery when a failing streak ends.
		if h.consecutiveFails > 0 {
			log.Printf("Heartbeat recovered after %d failures", h.consecutiveFails)
		}
		h.lastSuccess = time.Now()
		h.consecutiveFails = 0
		return
	}

	h.consecutiveFails++
	log.Printf("Heartbeat failed (consecutive failures: %d): %v",
		h.consecutiveFails, heartbeatErr)
}

// IsHealthy reports whether the heartbeat is currently considered healthy:
// a successful heartbeat within the last 5 minutes AND fewer than 3
// consecutive failures.
func (h *HeartbeatMonitor) IsHealthy() bool {
	h.mu.RLock()
	defer h.mu.RUnlock()

	recentSuccess := time.Since(h.lastSuccess) < 5*time.Minute
	fewFailures := h.consecutiveFails < 3
	return recentSuccess && fewFailures
}

// GetStatus returns the time of the last successful heartbeat and the
// current consecutive-failure count, read together under the read lock.
func (h *HeartbeatMonitor) GetStatus() (lastSuccess time.Time, consecutiveFails int) {
	h.mu.RLock()
	lastSuccess, consecutiveFails = h.lastSuccess, h.consecutiveFails
	h.mu.RUnlock()
	return
}

Usage in Agent Lifecycle

// StartAgent initializes the API client, verifies connectivity with an
// initial heartbeat, and launches the periodic heartbeat monitor.
// It returns a non-nil error if the initial heartbeat fails.
func StartAgent(agentToken string) error {
	c := client.New(agentToken)

	// Verify the token and connectivity up front with a single heartbeat.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := c.AgentHeartbeat(ctx); err != nil {
		return fmt.Errorf("initial heartbeat failed: %w", err)
	}

	log.Println("Agent started, initial heartbeat sent")

	// Background heartbeats every 30 seconds.
	// NOTE(review): context.Background() means this goroutine can never be
	// stopped; pass a cancellable context here if shutdown control is needed.
	monitor := NewHeartbeatMonitor(c, 30*time.Second)
	go monitor.Run(context.Background())

	// Rest of agent initialization...
	return nil
}

Error Handling

import "strings"

err := c.AgentHeartbeat(context.Background())
if err != nil {
	switch {
	case strings.Contains(err.Error(), "unauthorized"):
		log.Fatal("Invalid agent token")
	case strings.Contains(err.Error(), "not found"):
		log.Fatal("Agent not found - may have been deleted")
	case ctx.Err() == context.DeadlineExceeded:
		log.Println("Heartbeat timeout - will retry")
	default:
		log.Printf("Heartbeat error: %v", err)
	}
}

Heartbeat Interval Recommendations

Production Environment

  • Recommended interval: 30-60 seconds
  • Timeout per request: 10 seconds
  • Retry attempts: 3 with exponential backoff

Development Environment

  • Recommended interval: 60-120 seconds
  • Timeout per request: 10 seconds

High-Availability Systems

  • Recommended interval: 15-30 seconds
  • Timeout per request: 5 seconds
  • Retry attempts: 5 with exponential backoff

Best Practices

  1. Use consistent intervals: Keep heartbeat intervals consistent to avoid unnecessary server load.
  2. Implement retry logic: Network issues are common; implement exponential backoff for retries.
  3. Monitor health: Track successful and failed heartbeats to detect issues early.
  4. Use timeouts: Always use context with timeout to prevent hanging goroutines.
  5. Graceful shutdown: Stop heartbeats cleanly during agent shutdown.
  6. Log failures: Log heartbeat failures for debugging and monitoring.
  7. Initial heartbeat: Send a heartbeat immediately after agent starts before entering the periodic loop.
  8. Avoid flooding: Don’t set intervals too short; respect server capacity.

HTTP Response

Success (200 OK)

{
  "status": "ok"
}

Error Responses

  • 401 Unauthorized: Missing or invalid agent token
  • 404 Not Found: Agent not found
  • 500 Internal Server Error: Server-side error

Build docs developers (and LLMs) love