Overview
The IRGenerator transforms a type-checked AST into AXON’s Intermediate Representation (IR). The IR is a flattened, cross-referenced structure ready for backend compilation.
from axon import Lexer, Parser, TypeChecker, IRGenerator

source = open("my_program.axon").read()
lexer = Lexer(source)
parser = Parser(lexer.tokenize())
ast = parser.parse()

checker = TypeChecker(ast)
errors = checker.check()
if errors:
    for error in errors:
        print(f"Error: {error.message}")
    exit(1)

ir_gen = IRGenerator()
ir_program = ir_gen.generate(ast)

print(f"Personas: {len(ir_program.personas)}")
print(f"Flows: {len(ir_program.flows)}")
print(f"Run statements: {len(ir_program.runs)}")
Class: IRGenerator
Constructor
No parameters required - creates a fresh generator instance.
Methods
generate(program: ProgramNode) -> IRProgram
Generate a complete IR program from a validated AST.
Parameters: `program` — the root AST node produced by the parser
Returns: IRProgram with all declarations resolved and cross-referenced
Raises: AxonIRError if cross-references cannot be resolved
ir_program = ir_gen.generate(ast)

# Access IR nodes
for persona in ir_program.personas:
    print(f"Persona: {persona.name}")
    print(f"  Domain: {persona.domain}")
    print(f"  Tone: {persona.tone}")

for flow in ir_program.flows:
    print(f"Flow: {flow.name}")
    print(f"  Parameters: {len(flow.parameters)}")
    print(f"  Steps: {len(flow.steps)}")

for run in ir_program.runs:
    print(f"Run: {run.flow_name}")
    print(f"  Persona: {run.resolved_persona.name if run.resolved_persona else 'None'}")
IR Node Types
IRProgram
The complete compiled program.
class IRProgram:
    source_line: int
    source_column: int
    personas: tuple[IRPersona, ...]
    contexts: tuple[IRContext, ...]
    anchors: tuple[IRAnchor, ...]
    tools: tuple[IRToolSpec, ...]
    memories: tuple[IRMemory, ...]
    types: tuple[IRType, ...]
    flows: tuple[IRFlow, ...]
    runs: tuple[IRRun, ...]
    imports: tuple[IRImport, ...]
Declaration Nodes
IRPersona
class IRPersona:
    source_line: int
    source_column: int
    name: str
    domain: tuple[str, ...]
    tone: str
    confidence_threshold: float | None
    cite_sources: bool
    refuse_if: tuple[str, ...]
    language: str
    description: str
IRContext
class IRContext:
    source_line: int
    source_column: int
    name: str
    memory_scope: str
    language: str
    depth: str
    max_tokens: int | None
    temperature: float | None
    cite_sources: bool
IRAnchor
class IRAnchor:
    source_line: int
    source_column: int
    name: str
    require: str
    reject: tuple[str, ...]
    enforce: str
    confidence_floor: float | None
    unknown_response: str
    on_violation: str
    on_violation_target: str
IRFlow
class IRFlow:
    source_line: int
    source_column: int
    name: str
    parameters: tuple[IRParameter, ...]
    return_type_name: str
    return_type_generic: str
    return_type_optional: bool
    steps: tuple[IRNode, ...]
    edges: tuple[IRDataEdge, ...]  # Data flow edges
    execution_levels: tuple[tuple[str, ...], ...]  # Topologically sorted
Cognitive Step Nodes
IRStep
class IRStep:
    source_line: int
    source_column: int
    name: str
    given: str
    ask: str
    use_tool: IRUseTool | None
    probe: IRProbe | None
    reason: IRReason | None
    weave: IRWeave | None
    output_type: str
    confidence_floor: float | None
    body: tuple[IRNode, ...]
IRReason
class IRReason:
    source_line: int
    source_column: int
    name: str
    about: str
    given: tuple[str, ...]
    depth: int
    show_work: bool
    chain_of_thought: bool
    ask: str
    output_type: str
IRValidate
class IRValidate:
    source_line: int
    source_column: int
    target: str
    schema: str
    rules: tuple[IRValidateRule, ...]
IRWeave
class IRWeave:
    source_line: int
    source_column: int
    sources: tuple[str, ...]  # Step names to synthesize
    target: str
    format_type: str
    priority: tuple[str, ...]
    style: str
Execution Nodes
IRRun
A fully-resolved run statement with cross-references.
class IRRun:
    source_line: int
    source_column: int
    flow_name: str
    arguments: tuple[str, ...]
    persona_name: str
    context_name: str
    anchor_names: tuple[str, ...]
    on_failure: str
    on_failure_params: tuple[tuple[str, str], ...]
    output_to: str
    effort: str
    # Resolved references (after cross-reference resolution)
    resolved_flow: IRFlow | None
    resolved_persona: IRPersona | None
    resolved_context: IRContext | None
    resolved_anchors: tuple[IRAnchor, ...]
Example:
for run in ir_program.runs:
    print(f"Running flow: {run.flow_name}")
    if run.resolved_flow:
        print(f"  Flow has {len(run.resolved_flow.steps)} steps")
    if run.resolved_persona:
        print(f"  Using persona: {run.resolved_persona.name}")
        print(f"  Tone: {run.resolved_persona.tone}")
    for anchor in run.resolved_anchors:
        print(f"  Anchor: {anchor.name}")
Data Flow Analysis
IRDataEdge
Represents data dependencies between steps.
class IRDataEdge:
    source_line: int
    source_column: int
    source_step: str  # Step name providing data
    target_step: str  # Step name consuming data
    type_name: str    # Type of data flowing
Execution Levels
The IR generator automatically computes execution order using topological sort:
flow = ir_program.flows[0]

print("Execution levels (can run in parallel):")
for level_idx, level in enumerate(flow.execution_levels):
    print(f"  Level {level_idx}: {', '.join(level)}")

print("\nData flow edges:")
for edge in flow.edges:
    print(f"  {edge.source_step} -> {edge.target_step} ({edge.type_name})")
Example:
source = '''
flow Analyze(doc: Document) -> Report {
step Extract {
ask: "Extract facts"
}
step Classify {
ask: "Classify document"
}
step Synthesize {
given: [Extract.output, Classify.output]
ask: "Create report"
}
}
'''
ir_program = IRGenerator().generate(
    Parser(Lexer(source).tokenize()).parse()
)
flow = ir_program.flows[0]
print(flow.execution_levels)
# Output:
# (
#     ('Extract', 'Classify'),  # Level 0: can run in parallel
#     ('Synthesize',)           # Level 1: depends on Level 0
# )
Cross-Reference Resolution
The IR generator resolves all symbolic references:
Flow References
source = '''
flow Process(doc: Document) -> String { ... }
run Process(my_document)
'''
ir_program = ir_gen.generate(ast)
run = ir_program.runs[0]
assert run.flow_name == "Process"
assert run.resolved_flow is not None
assert run.resolved_flow.name == "Process"
Persona References
source = '''
persona Expert { domain: ["law"] }
flow Analyze(doc: Document) -> String { ... }
run Analyze(doc) as Expert
'''
ir_program = ir_gen.generate(ast)
run = ir_program.runs[0]
assert run.persona_name == "Expert"
assert run.resolved_persona is not None
assert run.resolved_persona.domain == ("law",)
source = '''
tool WebSearch { provider: serper }
flow Research(query: String) -> String {
step Search {
use WebSearch(query)
}
}
'''
ir_program = ir_gen.generate(ast)
# Tool references are validated during generation
Error Handling
AxonIRError
Raised when cross-references cannot be resolved:
from axon.compiler.ir_generator import AxonIRError

try:
    ir_program = ir_gen.generate(ast)
except AxonIRError as e:
    print(f"IR generation error at {e.line}:{e.column}")
    print(f"  {e.message}")
Common errors:
# Undefined flow
source = "run NonExistentFlow(arg)"
# AxonIRError: Run statement references undefined flow 'NonExistentFlow'
# Undefined persona
source = "run MyFlow(arg) as UnknownPersona"
# AxonIRError: Run statement references undefined persona 'UnknownPersona'
# Undefined tool
source = '''
flow Test() {
step S { use UnknownTool("query") }
}
'''
# AxonIRError: Step 'S' uses undefined tool 'UnknownTool'
# Circular dependency
source = '''
flow Test() {
step A { given: B.output }
step B { given: A.output }
}
'''
# AxonIRError: Cycle detected in flow step dependencies
Example: Full Compilation Pipeline
from axon import Lexer, Parser, TypeChecker, IRGenerator, get_backend
import json

def compile_program(source: str, backend_name: str = "anthropic"):
    """Compile an AXON program to backend-specific prompts."""
    # Phase 1: Lexical analysis
    lexer = Lexer(source)
    tokens = lexer.tokenize()
    print(f"✓ Tokenized: {len(tokens)} tokens")

    # Phase 2: Parsing
    parser = Parser(tokens)
    ast = parser.parse()
    print(f"✓ Parsed: {len(ast.declarations)} declarations")

    # Phase 3: Type checking
    checker = TypeChecker(ast)
    errors = checker.check()
    if errors:
        print("✗ Type checking failed:")
        for error in errors:
            print(f"  {error.line}:{error.column} - {error.message}")
        return None
    print("✓ Type checking passed")

    # Phase 4: IR generation
    ir_gen = IRGenerator()
    ir_program = ir_gen.generate(ast)
    print("✓ IR generated:")
    print(f"  Flows: {len(ir_program.flows)}")
    print(f"  Runs: {len(ir_program.runs)}")

    # Phase 5: Backend compilation
    backend = get_backend(backend_name)
    compiled = backend.compile_program(ir_program)
    print(f"✓ Compiled for {backend_name} backend")
    return compiled

# Usage
source = open("my_program.axon").read()
compiled = compile_program(source, backend_name="anthropic")
if compiled:
    # Save compiled output
    with open("compiled.json", "w") as f:
        json.dump(compiled.to_dict(), f, indent=2)
    print("Saved to compiled.json")
Next Steps
Backends API Compile IR to provider-specific prompts
Executor API Execute compiled programs with the runtime