Overview
Backends transform model-agnostic AXON IR into provider-specific prompt structures. Each backend implements the compilation strategy for a particular LLM provider (Anthropic, OpenAI, Gemini, etc.).
from axon import get_backend, BACKEND_REGISTRY

# Get a backend instance
backend = get_backend("anthropic")

# Compile IR to prompts
compiled = backend.compile_program(ir_program)

# Access compiled output
for unit in compiled.execution_units:
    print(f"Flow: {unit.flow_name}")
    print(f"System prompt: {unit.system_prompt}")
    for step in unit.steps:
        print(f"  Step: {step.step_name}")
        print(f"  User prompt: {step.user_prompt}")
Backend Registry
BACKEND_REGISTRY
Global registry mapping backend names to classes.
from axon import BACKEND_REGISTRY

print("Available backends:")
for name in BACKEND_REGISTRY:
    print(f"  - {name}")

# Output:
# Available backends:
#   - anthropic
#   - gemini
#   - openai
#   - ollama
get_backend(name: str) -> BaseBackend
Get a backend instance by canonical name.
The backend name (e.g., “anthropic”, “openai”)
Returns: Backend instance
Raises: ValueError if backend name is not recognized
from axon import get_backend

try:
    backend = get_backend("invalid")
    print(f"Using backend: {backend.name}")
except ValueError as e:
    print(f"Error: {e}")

# Error: Unknown backend 'invalid'. Available: anthropic, gemini, openai, ollama
Built-in Backends
AnthropicBackend
Compiles for Claude models (Anthropic API).
backend = get_backend("anthropic")
print(backend.name)  # "anthropic"
Features:
System prompt with thinking tags
Tool use via function calling
Structured output with prompt caching
Anchor enforcement in system instructions
GeminiBackend
Compiles for Gemini models (Google AI API).
backend = get_backend("gemini")
print(backend.name)  # "gemini"
Features:
Multi-turn conversation format
Function declarations for tools
JSON mode for structured output
OpenAIBackend
Compiles for GPT models (OpenAI API).
backend = get_backend("openai")
print(backend.name)  # "openai"
Features:
Chat completions format
Function calling for tools
JSON schema for structured output
OllamaBackend
Compiles for locally-hosted models via Ollama.
backend = get_backend("ollama")
print(backend.name)  # "ollama"
Features:
Compatible with any Ollama-hosted model
Local execution
Simple prompt format
Class: BaseBackend
Abstract base class that all backends must implement.
Properties
name: str
The canonical backend name.
backend = get_backend("anthropic")
print(backend.name)  # "anthropic"
Methods
compile_program(ir: IRProgram) -> CompiledProgram
Compile a complete IR program into backend-specific output.
The intermediate representation to compile
Returns: CompiledProgram with all execution units compiled
from axon import IRGenerator, get_backend

ir_program = IRGenerator().generate(ast)
backend = get_backend("anthropic")
compiled = backend.compile_program(ir_program)

print(f"Backend: {compiled.backend_name}")
print(f"Execution units: {len(compiled.execution_units)}")
compile_step(step: IRNode, context: CompilationContext) -> CompiledStep
Compile a single IR step into a backend-specific prompt.
The IR node to compile (IRStep, IRProbe, etc.)
context
CompilationContext
required
The current compilation context
Returns: CompiledStep with model-specific prompts
compile_system_prompt(persona, context, anchors) -> str
Build the system prompt from persona, context, and anchors.
Active anchor constraints
Returns: System prompt string
from axon.backends.base_backend import CompilationContext

backend = get_backend("anthropic")
system_prompt = backend.compile_system_prompt(
    persona=ir_program.personas[0],
    context=ir_program.contexts[0],
    anchors=list(ir_program.anchors),
)
print(system_prompt)
compile_tool_spec(tool: IRToolSpec) -> dict[str, Any]
Compile a tool specification into the backend’s native format.
The tool specification to compile
Returns: Provider-native tool declaration dict
tool_spec = ir_program.tools[0]
tool_declaration = backend.compile_tool_spec(tool_spec)
# For Anthropic:
# {
# "name": "WebSearch",
# "description": "Search the web",
# "input_schema": {...}
# }
# For OpenAI:
# {
# "type": "function",
# "function": {
# "name": "WebSearch",
# "parameters": {...}
# }
# }
Compilation Output Types
CompiledProgram
The complete compilation output for an AXON program.
class CompiledProgram:
    backend_name: str
    execution_units: list[CompiledExecutionUnit]
    metadata: dict[str, Any]

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-compatible dict."""
Example:
compiled = backend.compile_program(ir_program)

print(f"Backend: {compiled.backend_name}")
for unit in compiled.execution_units:
    print(f"  Flow: {unit.flow_name}")
    print(f"  Steps: {len(unit.steps)}")

# Serialize to JSON
import json

with open("compiled.json", "w") as f:
    json.dump(compiled.to_dict(), f, indent=2)
CompiledExecutionUnit
A single execution unit (one run statement fully compiled).
class CompiledExecutionUnit:
    flow_name: str
    persona_name: str
    context_name: str
    system_prompt: str
    steps: list[CompiledStep]
    tool_declarations: list[dict[str, Any]]
    anchor_instructions: list[str]
    active_anchors: list[dict[str, Any]]
    effort: str
    metadata: dict[str, Any]

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-compatible dict."""
Example:
unit = compiled.execution_units[0]
print(f"Flow: {unit.flow_name}")
print(f"Persona: {unit.persona_name}")
print(f"\nSystem Prompt:\n{unit.system_prompt}")

print("\nActive Anchors:")
for anchor in unit.active_anchors:
    print(f"  - {anchor['name']}")

print("\nSteps:")
for step in unit.steps:
    print(f"  {step.step_name}: {step.user_prompt[:80]}...")
CompiledStep
The compilation result for a single cognitive step.
class CompiledStep:
    step_name: str
    system_prompt: str
    user_prompt: str
    tool_declarations: list[dict[str, Any]]
    output_schema: dict[str, Any] | None
    metadata: dict[str, Any]

    def to_dict(self) -> dict[str, Any]:
        """Serialize to JSON-compatible dict."""
Example:
step = unit.steps[0]
print(f"Step: {step.step_name}")
print(f"\nUser Prompt:\n{step.user_prompt}")

if step.output_schema:
    print(f"\nOutput Schema:\n{json.dumps(step.output_schema, indent=2)}")
if step.tool_declarations:
    print(f"\nTools Available: {len(step.tool_declarations)}")
CompilationContext
Carries state through the step compilation process.
class CompilationContext:
    persona: IRPersona | None
    context: IRContext | None
    anchors: list[IRAnchor]
    tools: dict[str, IRToolSpec]
    flow: IRFlow | None
    prior_step_names: list[str]
    effort: str
Example: Custom Backend
You can create custom backends by subclassing BaseBackend:
from axon.backends.base_backend import (
    BaseBackend,
    CompiledStep,
    CompilationContext,
)
from axon.compiler.ir_nodes import IRNode, IRPersona, IRContext, IRAnchor, IRToolSpec
from typing import Any


class CustomBackend(BaseBackend):
    @property
    def name(self) -> str:
        return "custom"

    def compile_step(
        self, step: IRNode, context: CompilationContext
    ) -> CompiledStep:
        # Custom step compilation logic
        from axon.compiler.ir_nodes import IRStep

        if isinstance(step, IRStep):
            return CompiledStep(
                step_name=step.name,
                user_prompt=f"Custom prompt: {step.ask}",
                metadata={"custom": True},
            )
        return CompiledStep()

    def compile_system_prompt(
        self,
        persona: IRPersona | None,
        context: IRContext | None,
        anchors: list[IRAnchor],
    ) -> str:
        parts = ["Custom System Prompt"]
        if persona:
            parts.append(f"Persona: {persona.name}")
            parts.append(f"Tone: {persona.tone}")
        for anchor in anchors:
            parts.append(f"Constraint: {anchor.name}")
        return "\n".join(parts)

    def compile_tool_spec(self, tool: IRToolSpec) -> dict[str, Any]:
        return {
            "name": tool.name,
            "provider": tool.provider,
            "custom_format": True,
        }


# Register your custom backend
from axon.backends import BACKEND_REGISTRY

BACKEND_REGISTRY["custom"] = CustomBackend

# Use it
backend = get_backend("custom")
compiled = backend.compile_program(ir_program)
Next Steps
Executor API Execute compiled programs with the runtime
Context API Manage execution state between steps