Skip to main content
AgentLIB emits events throughout the agent lifecycle, providing visibility into execution, tool calls, memory operations, and errors. Use events for logging, monitoring, debugging, and building reactive systems.

Core Events

Agents emit events at key execution points:
// Union of every lifecycle event name an agent can emit.
// Names follow a "category:phase" convention (run, step, model, tool, memory).
type CoreEvent =
  | 'run:start'         // Agent run begins
  | 'run:end'           // Agent run completes
  | 'step:start'        // Reasoning step starts
  | 'step:end'          // Reasoning step ends
  | 'step:reasoning'    // Reasoning step emitted
  | 'model:request'     // Model API request
  | 'model:response'    // Model API response
  | 'tool:before'       // Before tool execution
  | 'tool:after'        // After tool execution
  | 'memory:read'       // Memory loaded
  | 'memory:write'      // Memory saved
  | 'cancel'            // Execution cancelled
  | 'error'             // Error occurred

Listening to Events

Use .on() to register event listeners:
import { createAgent } from '@agentlib/core'
import { openai } from '@agentlib/openai'

// Agent wired to the OpenAI provider; the API key comes from the environment.
const agent = createAgent({ name: 'assistant' }).provider(
  openai({ apiKey: process.env.OPENAI_API_KEY }),
)

// Fires once when a run begins.
agent.on('run:start', (payload) => {
  console.log(`[run:start] input="${payload.input}" session="${payload.sessionId}"`)
})

// Fires once when a run completes.
agent.on('run:end', (payload) => {
  console.log(`[run:end] output="${payload.output}"`)
  console.log(`Steps: ${payload.state.steps.length}, Tokens: ${payload.state.usage?.totalTokens}`)
})

const result = await agent.run('Hello!')

Event Payloads

Each event carries specific payload data:

Run Events

// Run payloads carry the raw user input plus the owning session id.
agent.on('run:start', ({ input: userInput, sessionId: session }) => {
  console.log(`Starting run for session: ${session}`)
  console.log(`Input: ${userInput}`)
})

Tool Events

// tool:before fires with the tool's name and the parsed arguments object.
agent.on('tool:before', (call) => {
  console.log(`[TOOL] Calling ${call.name}`)
  console.log(`Arguments:`, call.args)
})

Reasoning Events

// Each reasoning step is tagged with a `type` discriminant; unknown types
// are silently ignored, mirroring the default-less switch this replaces.
agent.on('step:reasoning', (step) => {
  if (step.type === 'thought') {
    console.log(`💭 Thought: ${step.content}`)
  } else if (step.type === 'tool_call') {
    console.log(`🔧 Calling ${step.toolName}(${JSON.stringify(step.args)})`)
  } else if (step.type === 'plan') {
    console.log(`📋 Plan:`, step.tasks)
  } else if (step.type === 'reflection') {
    console.log(`🔍 Reflection: ${step.assessment}`)
  }
})

Model Events

// Inspect what is about to be sent to the model provider.
agent.on('model:request', ({ messages, tools }) => {
  console.log(`[MODEL] Sending ${messages.length} messages`)
  console.log(`Tools available: ${tools?.length || 0}`)
})

Memory Events

// Emitted when conversation history is loaded for a session.
agent.on('memory:read', (event) => {
  console.log(`[MEMORY] Loading history for session: ${event.sessionId}`)
})

Error Events

// Centralized error sink: log locally, then forward to external monitoring.
agent.on('error', (error) => {
  console.error('[ERROR]', error)
  logToSentry(error) // report to the external error-tracking service
})

Practical Examples

Request Logging

import fs from 'fs/promises'

const logFile = '/tmp/agent-requests.log'

// Append one JSON-lines entry to the request log.
async function appendLog(entry: object) {
  await fs.appendFile(logFile, JSON.stringify(entry) + '\n')
}

agent.on('run:start', async ({ input, sessionId }) => {
  await appendLog({
    timestamp: new Date().toISOString(),
    event: 'run:start',
    sessionId,
    input,
  })
})

agent.on('run:end', async ({ output, state }) => {
  await appendLog({
    timestamp: new Date().toISOString(),
    event: 'run:end',
    output,
    steps: state.steps.length,
    tokens: state.usage?.totalTokens,
  })
})

Token Usage Tracking

// Running token total accumulated across every model response.
let totalTokens = 0

agent.on('model:response', ({ usage }) => {
  const tokens = usage?.totalTokens || 0
  totalTokens += tokens
  console.log(`Request tokens: ${tokens}`)
  console.log(`Total session tokens: ${totalTokens}`)

  // Warn once the cumulative count crosses the budget.
  if (totalTokens > 50_000) {
    console.warn('⚠️  Token budget exceeded!')
  }
})

Tool Call Analytics

// Per-tool call counts and cumulative execution time.
// `startTime` is transient: set in tool:before, cleared in tool:after.
// (It must be declared on the value type — without it, `stats.startTime = ...`
// and `delete stats.startTime` below fail to compile under strict TypeScript.)
const toolStats = new Map<string, { count: number; totalMs: number; startTime?: number }>()

agent.on('tool:before', ({ name }) => {
  const stats = toolStats.get(name) || { count: 0, totalMs: 0 }
  stats.count += 1
  stats.startTime = Date.now()
  toolStats.set(name, stats)
})

agent.on('tool:after', ({ name }) => {
  const stats = toolStats.get(name)
  if (stats?.startTime) {
    stats.totalMs += Date.now() - stats.startTime
    delete stats.startTime // clear so a missing tool:before is detectable next time
  }
})

agent.on('run:end', () => {
  console.log('\n--- Tool Statistics ---')
  // Entries only exist after at least one tool:before, so count >= 1 here.
  for (const [name, stats] of toolStats) {
    console.log(`${name}: ${stats.count} calls, avg ${(stats.totalMs / stats.count).toFixed(2)}ms`)
  }
})

Real-time Progress Streaming

import { EventEmitter } from 'events'

// In-process bus that decouples agent events from the transport layer.
const progressEmitter = new EventEmitter()

agent.on('step:reasoning', (step) => {
  progressEmitter.emit('progress', { type: 'reasoning', step })
})

agent.on('tool:before', ({ name, args }) => {
  progressEmitter.emit('progress', { type: 'tool', name, args })
})

// Consumer side: relay each update over the wire (WebSocket, SSE, etc.).
progressEmitter.on('progress', (update) => {
  ws.send(JSON.stringify(update))
})

Error Recovery

// Simple retry policy driven by the error event: transient rate-limit
// errors are retried up to MAX_RETRIES times with a fixed 5s backoff.
const MAX_RETRIES = 3
let retryCount = 0

agent.on('error', async (error) => {
  console.error(`Error occurred: ${error.message}`)
  
  // Only rate-limit errors are treated as transient and worth retrying.
  if (retryCount < MAX_RETRIES && error.message.includes('rate limit')) {
    retryCount++
    console.log(`Retrying (${retryCount}/${MAX_RETRIES}) in 5s...`)
    await new Promise(resolve => setTimeout(resolve, 5000))
    // Re-run agent (implementation depends on your setup)
  } else {
    console.error('Max retries exceeded or unrecoverable error')
    // NOTE(review): throwing from an async event listener surfaces as an
    // unhandled promise rejection rather than propagating to the caller —
    // confirm this is the intended failure mode.
    throw error
  }
})

agent.on('run:end', () => {
  retryCount = 0  // Reset on success
})

Debugging Tool Chains

// Ordered record of every tool invoked during the current run.
const toolChain: string[] = []

agent.on('tool:before', (call) => {
  toolChain.push(call.name)
})

agent.on('run:end', () => {
  console.log('\n--- Tool Call Chain ---')
  console.log(toolChain.join(' → '))
  toolChain.length = 0  // Clear for next run
})

Performance Monitoring

// Aggregate timings for a single agent run.
interface PerformanceMetrics {
  totalDuration: number
  modelCalls: number
  modelDuration: number
  toolCalls: number
  toolDuration: number
}

const metrics: PerformanceMetrics = {
  totalDuration: 0,
  modelCalls: 0,
  modelDuration: 0,
  toolCalls: 0,
  toolDuration: 0,
}

let runStartTime: number
let modelStartTime: number
let toolStartTime: number

// Format an average without producing "NaN" when no calls were made.
const avgMs = (total: number, count: number) =>
  count > 0 ? (total / count).toFixed(2) : '0.00'

agent.on('run:start', () => {
  // Reset counters so repeated runs report per-run (not cumulative) metrics —
  // previously only totalDuration was per-run, skewing the averages.
  metrics.totalDuration = 0
  metrics.modelCalls = 0
  metrics.modelDuration = 0
  metrics.toolCalls = 0
  metrics.toolDuration = 0
  runStartTime = Date.now()
})

agent.on('model:request', () => {
  modelStartTime = Date.now()
  metrics.modelCalls++
})

agent.on('model:response', () => {
  metrics.modelDuration += Date.now() - modelStartTime
})

agent.on('tool:before', () => {
  toolStartTime = Date.now()
  metrics.toolCalls++
})

agent.on('tool:after', () => {
  metrics.toolDuration += Date.now() - toolStartTime
})

agent.on('run:end', () => {
  metrics.totalDuration = Date.now() - runStartTime

  console.log('\n--- Performance Metrics ---')
  console.log(`Total duration: ${metrics.totalDuration}ms`)
  console.log(`Model calls: ${metrics.modelCalls} (${metrics.modelDuration}ms total, ${avgMs(metrics.modelDuration, metrics.modelCalls)}ms avg)`)
  console.log(`Tool calls: ${metrics.toolCalls} (${metrics.toolDuration}ms total, ${avgMs(metrics.toolDuration, metrics.toolCalls)}ms avg)`)
})

Event-Driven Workflows

Triggering Side Effects

// After a successful create_order tool call, send the confirmation email.
agent.on('tool:after', async ({ name, result }) => {
  if (name !== 'create_order') return

  await sendEmail({
    to: result.customerEmail,
    subject: 'Order Confirmation',
    body: `Order ${result.orderId} created successfully`,
  })
})

Conditional Execution

// Surface a warning when the agent produces an unusually large plan.
agent.on('step:reasoning', (step) => {
  if (step.type === 'plan' && step.tasks.length > 10) {
    console.warn('⚠️  Complex plan detected, may take longer')
  }
})

External System Integration

import { kafka } from './kafka'

// Publish a summary of every completed run to the agent-completions topic,
// keyed by session so partitioning keeps per-session ordering.
agent.on('run:end', async ({ output, state }) => {
  const summary = {
    output,
    steps: state.steps.length,
    tokens: state.usage?.totalTokens,
    timestamp: new Date().toISOString(),
  }

  await kafka.send({
    topic: 'agent-completions',
    messages: [{ key: state.sessionId, value: JSON.stringify(summary) }],
  })
})

Complete Example

import 'dotenv/config'
import { createAgent, defineTool } from '@agentlib/core'
import { openai } from '@agentlib/openai'
import { ReactEngine } from '@agentlib/reasoning'

// Stub web-search tool used to demonstrate tool events.
const searchTool = defineTool({
  schema: {
    name: 'search',
    description: 'Search the web',
    parameters: {
      type: 'object',
      properties: { query: { type: 'string' } },
      required: ['query'],
    },
  },
  async execute({ query }) {
    return { results: [`Result for: ${query}`] }
  },
})

// Agent with a ReAct reasoning loop capped at 8 steps.
const agent = createAgent({ name: 'monitored-agent' })
  .provider(openai({ apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o' }))
  .tool(searchTool)
  .reasoning(new ReactEngine({ maxSteps: 8 }))

// --- Event listeners ---

agent.on('run:start', ({ input }) => {
  console.log(`\n🚀 Starting: "${input}"`)
})

agent.on('step:reasoning', (step) => {
  if (step.type === 'thought') {
    console.log(`💭 ${step.content}`)
  } else if (step.type === 'tool_call') {
    console.log(`🔧 Calling ${step.toolName}`)
  }
})

agent.on('tool:after', ({ name, result }) => {
  console.log(`✓ ${name} completed:`, result)
})

agent.on('model:response', (response) => {
  console.log(`📊 Tokens: ${response.usage?.totalTokens}`)
})

agent.on('run:end', ({ output, state }) => {
  console.log(`\n✅ Complete: "${output}"`)
  console.log(`📈 Steps: ${state.steps.length}, Tokens: ${state.usage?.totalTokens}`)
})

agent.on('error', (error) => {
  console.error('❌ Error:', error)
})

// Kick off a monitored run.
const result = await agent.run('What is the population of Tokyo?')

Next Steps

Build docs developers (and LLMs) love