Skip to main content

Anthropic (Claude) Backend

The AnthropicBackend compiles AXON IR into prompts optimized for the Claude model family.

Overview

Target API: Anthropic Messages API
Models: Claude 3 (Opus, Sonnet, Haiku), Claude 3.5, future models
Key Features:
  • System prompts with persona identity and anchor enforcement
  • Extended thinking via chain-of-thought instructions
  • Tool use declarations in Anthropic’s native format
  • Structured extraction via JSON output schemas

Implementation

from typing import Any

from axon.backends.base_backend import BaseBackend, CompiledStep, CompilationContext
from axon.compiler.ir_nodes import (
    IRAnchor, IRContext, IRIntent, IRNode, IRPersona, IRProbe, IRReason,
    IRStep, IRToolSpec, IRWeave,
)

class AnthropicBackend(BaseBackend):
    """Backend that lowers AXON IR into Claude-native prompt structures."""

    @property
    def name(self) -> str:
        """Stable identifier for this backend."""
        return "anthropic"

System Prompt Compilation

Structure

The system prompt has three sections:
  1. Persona Identity — Who the AI is
  2. Context Configuration — Session parameters
  3. Anchor Enforcement — Hard constraints

Implementation

def compile_system_prompt(
    self,
    persona: IRPersona | None,
    context: IRContext | None,
    anchors: list[IRAnchor],
) -> str:
    """Assemble the full system prompt from its three optional sections.

    Sections appear in a fixed order — persona identity, session
    configuration, anchor enforcement — and absent sections are simply
    omitted.  Sections are separated by a blank line.
    """
    blocks = [
        self._compile_persona_block(persona) if persona is not None else None,
        self._compile_context_block(context) if context is not None else None,
        self._compile_anchor_block(anchors) if anchors else None,
    ]
    return "\n\n".join(block for block in blocks if block is not None)

Persona Block

def _compile_persona_block(self, persona: IRPersona) -> str:
    lines: list[str] = [f"You are {persona.name}."]

    if persona.description:
        lines.append(persona.description)

    if persona.domain:
        domain_str = ", ".join(persona.domain)
        lines.append(f"Your areas of expertise: {domain_str}.")

    if persona.tone:
        lines.append(f"Communication tone: {persona.tone}.")

    if persona.confidence_threshold is not None:
        lines.append(
            f"Only provide claims you are at least "
            f"{persona.confidence_threshold:.0%} confident about."
        )

    if persona.cite_sources:
        lines.append("Always cite your sources.")

    return "\n".join(lines)
Example Output:
You are LegalExpert.
Your areas of expertise: contract law, IP.
Communication tone: precise.
Only provide claims you are at least 85% confident about.
Always cite your sources.

Context Block

def _compile_context_block(self, context: IRContext) -> str:
    lines: list[str] = ["[SESSION CONFIGURATION]"]

    if context.depth:
        depth_map = {
            "shallow": "Provide concise, high-level responses.",
            "standard": "Provide balanced, moderately detailed responses.",
            "deep": "Provide thorough, detailed analysis.",
            "exhaustive": "Provide exhaustive analysis covering all angles.",
        }
        instruction = depth_map.get(context.depth, f"Analysis depth: {context.depth}.")
        lines.append(f"  Depth: {instruction}")

    if context.max_tokens is not None:
        lines.append(f"  Target response length: ~{context.max_tokens} tokens")

    return "\n".join(lines)
Example Output:
[SESSION CONFIGURATION]
  Depth: Provide thorough, detailed analysis.
  Target response length: ~4096 tokens

Anchor Block (Constraint Enforcement)

def _compile_anchor_block(self, anchors: list[IRAnchor]) -> str:
    lines: list[str] = [
        "[HARD CONSTRAINTS — THESE RULES ARE ABSOLUTE AND NON-NEGOTIABLE]",
        "",
    ]

    for i, anchor in enumerate(anchors, 1):
        lines.append(f"CONSTRAINT {i}: {anchor.name}")

        if anchor.require:
            lines.append(f"  → You MUST: {anchor.require}")
        if anchor.reject:
            reject_str = ", ".join(anchor.reject)
            lines.append(f"  → You MUST NOT: {reject_str}")
        if anchor.confidence_floor is not None:
            lines.append(
                f"  → MINIMUM CONFIDENCE: {anchor.confidence_floor:.0%} — "
                f"below this threshold, do not make the claim."
            )
        if anchor.unknown_response:
            lines.append(
                f'  → WHEN UNCERTAIN, respond exactly with: '
                f'"{anchor.unknown_response}"'
            )
        lines.append("")

    return "\n".join(lines).rstrip()
Example Output:
[HARD CONSTRAINTS — THESE RULES ARE ABSOLUTE AND NON-NEGOTIABLE]

CONSTRAINT 1: NoHallucination
  → You MUST: cite all sources
  → You MUST NOT: hallucinate, speculate
  → MINIMUM CONFIDENCE: 90% — below this threshold, do not make the claim.
  → WHEN UNCERTAIN, respond exactly with: "I don't have sufficient information to answer this confidently."

Step Compilation

Dispatch Logic

def compile_step(
    self, step: IRNode, context: CompilationContext
) -> CompiledStep:
    """Dispatch an IR node to its type-specific compiler.

    Checks node types in a fixed order; nodes of an unrecognized type
    degrade to a generic "execute this operation" prompt.
    """
    dispatch = (
        (IRStep, self._compile_step_node),
        (IRIntent, self._compile_intent),
        (IRProbe, self._compile_probe),
        (IRReason, self._compile_reason),
        (IRWeave, self._compile_weave),
    )
    for node_cls, handler in dispatch:
        if isinstance(step, node_cls):
            return handler(step, context)

    # Fallback for unknown node kinds.
    return CompiledStep(
        step_name=getattr(step, "name", step.node_type),
        user_prompt=f"[{step.node_type}] Execute this operation.",
    )

IRStep Compilation

def _compile_step_node(
    self, step: IRStep, context: CompilationContext
) -> CompiledStep:
    """Compile an IRStep into a CompiledStep with a composed user prompt.

    Prompt sections, in order: input binding, one cognitive operation
    (probe takes precedence over reason, which takes precedence over a
    plain ask), output-type expectation, and confidence floor.
    """
    parts: list[str] = []

    # Input binding, when the step declares one.
    if step.given:
        parts.append(f"Given the input: {step.given}")

    # Exactly one embedded cognitive operation is rendered.
    if step.probe is not None:
        parts.append(self._format_probe(step.probe))
    elif step.reason is not None:
        parts.append(self._format_reason(step.reason))
    elif step.ask:
        parts.append(step.ask)

    # Output type expectation.
    if step.output_type:
        parts.append(
            f"\nYour output MUST conform to the type: {step.output_type}"
        )

    # Confidence floor.
    if step.confidence_floor is not None:
        parts.append(
            f"\nMinimum confidence required: {step.confidence_floor:.0%}. "
            f"If you cannot meet this threshold, indicate uncertainty."
        )

    return CompiledStep(
        step_name=step.name,
        user_prompt="\n".join(parts),
        metadata={"ir_node_type": "step"},
    )
Example Output:
Given the input: contract_document

Extract the following from contract_document: [parties, dates, obligations]
Return structured results for each field.

Your output MUST conform to the type: EntityMap

Minimum confidence required: 85%. If you cannot meet this threshold, indicate uncertainty.

Probe Compilation (Structured Extraction)

def _compile_probe(
    self, probe: IRProbe, context: CompilationContext
) -> CompiledStep:
    """Compile a probe node into an extraction prompt plus JSON schema.

    Every probe field becomes both a key in the requested JSON object
    and a string property in the attached output schema.
    """
    field_list = ", ".join(probe.fields)
    prompt = "\n\n".join([
        f"Analyze the following and extract these specific fields: [{field_list}]",
        f"Source: {probe.target}",
        (
            f"Return the results as a structured JSON object with exactly these keys: {field_list}. "
            f"If a field cannot be determined, set its value to null."
        ),
    ])

    # All fields are required; missing values are expressed as null
    # rather than omitted keys.
    schema = {
        "type": "object",
        "properties": {name: {"type": "string"} for name in probe.fields},
        "required": list(probe.fields),
    }

    return CompiledStep(
        step_name=f"probe_{probe.target}",
        user_prompt=prompt,
        output_schema=schema,
        metadata={"ir_node_type": "probe"},
    )
Example Output:
{
  "step_name": "probe_contract",
  "user_prompt": "Analyze the following and extract these specific fields: [parties, effective_date, termination_clause]\n\nSource: contract\n\nReturn the results as a structured JSON object with exactly these keys: parties, effective_date, termination_clause. If a field cannot be determined, set its value to null.",
  "output_schema": {
    "type": "object",
    "properties": {
      "parties": {"type": "string"},
      "effective_date": {"type": "string"},
      "termination_clause": {"type": "string"}
    },
    "required": ["parties", "effective_date", "termination_clause"]
  }
}

Reason Compilation (Chain-of-Thought)

def _compile_reason(
    self, reason: IRReason, context: CompilationContext
) -> CompiledStep:
    """Compile a reason node into a chain-of-thought style prompt.

    Sections in order: topic framing, inputs, the question itself,
    depth instructions (only when depth > 1), and an explicit
    show-your-work directive when requested.
    """
    segments: list[str] = []

    if reason.about:
        segments.append(f"Reason carefully about: {reason.about}")

    if reason.given:
        segments.append(f"Based on: {', '.join(reason.given)}")

    if reason.ask:
        segments.append(f"\n{reason.ask}")

    if reason.depth > 1:
        segments.append(
            f"\nPerform {reason.depth} levels of analysis, "
            f"each building on the previous."
        )

    # Either flag triggers the same explicit reasoning directive.
    if reason.show_work or reason.chain_of_thought:
        segments.append(
            "\nShow your complete reasoning process step by step. "
            "Make your chain of thought explicit and traceable."
        )

    return CompiledStep(
        step_name=reason.name or f"reason_{reason.about}",
        user_prompt="\n".join(segments),
        metadata={
            "ir_node_type": "reason",
            "depth": reason.depth,
            "show_work": reason.show_work,
        },
    )
Example Output:
Reason carefully about: contract_risks
Based on: extracted_clauses, industry_standards

What clauses in this contract present the highest legal risks?

Perform 3 levels of analysis, each building on the previous.

Show your complete reasoning process step by step. Make your chain of thought explicit and traceable.

Tool Specification Compilation

Anthropic Messages API Format

def compile_tool_spec(self, tool: IRToolSpec) -> dict[str, Any]:
    """Translate an IR tool spec into the Anthropic Messages API tool format.

    The returned dict has the ``name`` / ``description`` /
    ``input_schema`` shape the Messages API expects.  Only ``query`` is
    required; ``max_results`` is added when the tool declares a limit.
    """
    # Description is assembled from whatever metadata the tool carries.
    description_bits = [f"External tool: {tool.name}"]
    if tool.provider:
        description_bits.append(f"Provider: {tool.provider}")
    if tool.timeout:
        description_bits.append(f"Timeout: {tool.timeout}")

    schema_props: dict[str, Any] = {
        "query": {
            "type": "string",
            "description": f"The input query for {tool.name}",
        }
    }
    if tool.max_results is not None:
        schema_props["max_results"] = {
            "type": "integer",
            "description": "Maximum number of results to return",
            "default": tool.max_results,
        }

    return {
        "name": tool.name,
        "description": ". ".join(description_bits),
        "input_schema": {
            "type": "object",
            "properties": schema_props,
            "required": ["query"],
        },
    }
Example Output:
{
  "name": "WebSearch",
  "description": "External tool: WebSearch. Provider: brave. Timeout: 10s.",
  "input_schema": {
    "type": "object",
    "properties": {
      "query": {
        "type": "string",
        "description": "The input query for WebSearch"
      },
      "max_results": {
        "type": "integer",
        "description": "Maximum number of results to return",
        "default": 5
      }
    },
    "required": ["query"]
  }
}

Weave Compilation (Semantic Synthesis)

def _compile_weave(
    self, weave: IRWeave, context: CompilationContext
) -> CompiledStep:
    """Compile a weave node into a multi-source synthesis prompt.

    Optional target, format, and priority directives are appended only
    when the weave declares them.
    """
    source_list = ", ".join(weave.sources)
    lines = [
        f"Synthesize the following sources into a coherent result: [{source_list}]"
    ]

    if weave.target:
        lines.append(f"\nTarget output: {weave.target}")

    if weave.format_type:
        lines.append(f"Output format: {weave.format_type}")

    if weave.priority:
        lines.append(
            "Priority ordering (address first to last): "
            + " → ".join(weave.priority)
        )

    # Untargeted weaves fall back to a generic step name.
    name = f"weave_{weave.target}" if weave.target else "weave"

    return CompiledStep(
        step_name=name,
        user_prompt="\n".join(lines),
        metadata={"ir_node_type": "weave"},
    )
Example Output:
Synthesize the following sources into a coherent result: [entity_map, risk_analysis, legal_precedents]

Target output: FinalReport
Output format: StructuredReport
Priority ordering (address first to last): risks → recommendations → summary

Complete Example

Input IR

# Minimal IR program: one persona, one anchor, one flow of two steps,
# and a run binding the flow to its resolved persona and anchors.
ir_program = IRProgram(
    personas=(IRPersona(name="LegalExpert", domain=("contract law",)),),
    anchors=(IRAnchor(name="NoHallucination", require="cite all sources"),),
    flows=(
        IRFlow(
            name="AnalyzeContract",
            steps=(
                IRProbe(target="contract", fields=("parties", "dates")),
                IRReason(about="risks", depth=2, show_work=True),
            ),
        ),
    ),
    runs=(
        IRRun(
            flow_name="AnalyzeContract",
            resolved_persona=IRPersona(...),  # full persona elided for brevity
            resolved_anchors=(IRAnchor(...),),  # full anchor elided for brevity
        ),
    ),
)

Compiled Output

compiled = anthropic_backend.compile_program(ir_program)

# compiled.execution_units[0].system_prompt:
"""
You are LegalExpert.
Your areas of expertise: contract law.

[HARD CONSTRAINTS — THESE RULES ARE ABSOLUTE AND NON-NEGOTIABLE]
CONSTRAINT 1: NoHallucination
  → You MUST: cite all sources
"""

# compiled.execution_units[0].steps[0].user_prompt:
"""
Analyze the following and extract these specific fields: [parties, dates]

Source: contract

Return the results as a structured JSON object with exactly these keys: parties, dates.
"""

# compiled.execution_units[0].steps[1].user_prompt:
"""
Reason carefully about: risks

Perform 2 levels of analysis, each building on the previous.

Show your complete reasoning process step by step.
"""

Next Steps

Gemini Backend

Compare with Google Gemini’s compilation approach

Runtime Executor

See how compiled prompts are executed

Build docs developers (and LLMs) love