Skip to main content

Overview

Nodes are the building blocks of agent graphs. Each node performs a unit of work, makes decisions, and produces results.

Class: NodeSpec

Declarative specification for a node.
from framework.graph import NodeSpec

node = NodeSpec(
    id="calculator",
    name="Calculator Node",
    description="Performs mathematical calculations",
    node_type="event_loop",
    input_keys=["expression"],
    output_keys=["result"],
    tools=["calculate"],
    system_prompt="You are a calculator..."
)

Required Fields

id
str
required
Unique identifier for the node
name
str
required
Human-readable name
description
str
required
What this node does

Node Type

node_type
str
default:"'event_loop'"
Type of node:
  • event_loop (recommended): LLM with tool-calling loop
  • gcu: Browser automation (subagents only)

Data Flow

input_keys
list[str]
default:"[]"
Keys this node reads from shared memory or input
output_keys
list[str]
default:"[]"
Keys this node writes to shared memory or output
nullable_output_keys
list[str]
default:"[]"
Output keys that can be None without triggering validation errors
input_schema
dict[str, dict]
default:"{}"
Optional schema for input validation. Format:
{"key": {"type": "string", "required": True, "description": "..."}}
output_schema
dict[str, dict]
default:"{}"
Optional schema for output validation

LLM Configuration

system_prompt
str | None
default:"None"
System prompt for LLM nodes
tools
list[str]
default:"[]"
Tool names this node can use
model
str | None
default:"None"
Specific model to use (defaults to graph default)

Subagent Delegation

sub_agents
list[str]
default:"[]"
Node IDs that can be invoked as subagents from this node

Routing

routes
dict[str, str]
default:"{}"
Condition -> target_node_id mapping for router nodes

Retry Behavior

max_retries
int
default:"3"
Maximum retries on failure
retry_on
list[str]
default:"[]"
Error types to retry on
max_node_visits
int
default:"0"
Max times this node executes in one run. 0 = unlimited (default, for forever-alive agents). Set >1 for one-shot agents with feedback loops.

Output Validation

output_model
type[BaseModel] | None
default:"None"
Optional Pydantic model class for validating LLM output
max_validation_retries
int
default:"2"
Maximum retries when Pydantic validation fails

Client Interaction

client_facing
bool
default:"False"
If True, this node streams output to the end user and can request input
success_criteria
str | None
default:"None"
Natural-language criteria for phase completion (enables Level 2 conversation-aware judge)

Class: NodeContext

Everything a node needs to execute.
from framework.graph import NodeContext

# Passed to node.execute()
ctx = NodeContext(
    runtime=runtime,
    node_id="calculator",
    node_spec=node_spec,
    memory=shared_memory,
    input_data={"expression": "2 + 3"},
    llm=llm_provider,
    available_tools=tools,
)

Fields

runtime
Runtime
Runtime for decision logging
node_id
str
Node identity
node_spec
NodeSpec
Node specification
memory
SharedMemory
Shared state between nodes
input_data
dict[str, Any]
Input data for this execution
llm
LLMProvider | None
LLM provider (if applicable)
available_tools
list[Tool]
Tools this node can use
goal_context
str
Goal context for guidance
max_tokens
int
Maximum tokens for LLM responses (default: 4096)
attempt
int
Current attempt number (for retries)
max_attempts
int
Maximum attempts allowed

Class: NodeResult

The output of a node execution.
from framework.graph import NodeResult

result = NodeResult(
    success=True,
    output={"result": 5},
    tokens_used=150,
    latency_ms=420
)

Fields

success
bool
required
Whether the node succeeded
output
dict[str, Any]
default:"{}"
Output data
error
str | None
default:"None"
Error message if failed
next_node
str | None
default:"None"
For routing decisions: which node to execute next
route_reason
str | None
default:"None"
Reason for routing decision
tokens_used
int
default:"0"
LLM tokens consumed
latency_ms
int
default:"0"
Execution time in milliseconds
validation_errors
list[str]
default:"[]"
Pydantic validation errors (if any)

Protocol: NodeProtocol

Interface all nodes must implement.
from framework.graph import NodeProtocol, NodeContext, NodeResult

class CalculatorNode(NodeProtocol):
    """Example node: evaluates an arithmetic expression from input_data.

    Demonstrates the NodeProtocol contract: read input from ctx, record a
    decision and its outcome on ctx.runtime, and return a NodeResult.
    """

    async def execute(self, ctx: NodeContext) -> NodeResult:
        # Get input
        expression = ctx.input_data.get("expression")

        # Record decision
        decision_id = ctx.runtime.decide(
            intent="Calculate expression",
            options=[
                {"id": "eval", "description": "Evaluate expression"}
            ],
            chosen="eval",
            reasoning="Standard evaluation"
        )

        # Do the work
        try:
            # SECURITY: eval() on node input would execute arbitrary code.
            # Use a restricted AST evaluator that only permits arithmetic.
            result = self._safe_eval(expression)

            # Record outcome
            ctx.runtime.record_outcome(
                decision_id=decision_id,
                success=True,
                result=result
            )

            return NodeResult(
                success=True,
                output={"result": result}
            )
        except Exception as e:
            # Failures are reported via NodeResult rather than raised, so the
            # graph runtime can apply its retry/routing policy.
            ctx.runtime.record_outcome(
                decision_id=decision_id,
                success=False,
                error=str(e)
            )
            return NodeResult(
                success=False,
                error=str(e)
            )

    @staticmethod
    def _safe_eval(expression: str) -> int | float:
        """Evaluate a basic arithmetic expression without eval()'s risks.

        Supports int/float literals, the binary operators + - * / % **, and
        unary +/-. Raises ValueError for anything else (names, calls,
        attribute access, subscripts, ...), and ZeroDivisionError on x/0.
        """
        import ast
        import operator

        ops = {
            ast.Add: operator.add,
            ast.Sub: operator.sub,
            ast.Mult: operator.mul,
            ast.Div: operator.truediv,
            ast.Mod: operator.mod,
            ast.Pow: operator.pow,
            ast.UAdd: operator.pos,
            ast.USub: operator.neg,
        }

        def walk(node: ast.AST) -> int | float:
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp) and type(node.op) in ops:
                return ops[type(node.op)](walk(node.left), walk(node.right))
            if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
                return ops[type(node.op)](walk(node.operand))
            raise ValueError(f"unsupported expression element: {ast.dump(node)}")

        return walk(ast.parse(expression, mode="eval").body)

Required Method

execute
async (ctx: NodeContext) -> NodeResult
required
Execute this node’s logic

Optional Method

validate_input
(ctx: NodeContext) -> list[str]
Validate that required inputs are present. Returns list of validation error messages (empty if valid).

Class: SharedMemory

Shared state between nodes in a graph execution.
memory = SharedMemory()

# Write data
memory.write("result", 42)

# Read data
value = memory.read("result")

# Read all
all_data = memory.read_all()

Methods

read
(key: str) -> Any
Read a value from shared memory
write
(key: str, value: Any, validate: bool = True) -> None
Write a value to shared memory. If validate=True, checks for suspicious content.
write_async
async (key: str, value: Any, validate: bool = True) -> None
Thread-safe async write with per-key locking. Use for parallel execution.
read_all
() -> dict[str, Any]
Read all accessible data
with_permissions
(read_keys: list[str], write_keys: list[str]) -> SharedMemory
Create a view with restricted permissions for a specific node

Example: Event Loop Node

from framework.graph import NodeSpec

node = NodeSpec(
    id="email-sender",
    name="Email Sender",
    description="Send emails to leads",
    node_type="event_loop",
    input_keys=["lead_email", "message"],
    output_keys=["sent", "response"],
    tools=["send_email", "validate_email"],
    system_prompt="""
        You are an email sending agent.
        - Validate email addresses before sending
        - Use professional tone
        - Track delivery status
    """,
    max_retries=3,
    retry_on=["network_error", "rate_limit"],
)

Example: Router Node

node = NodeSpec(
    id="lead-router",
    name="Lead Router",
    description="Route leads based on priority",
    node_type="event_loop",
    input_keys=["lead_priority"],
    output_keys=["route_decision"],
    routes={
        "high_priority": "sales-team",
        "medium_priority": "queue",
        "low_priority": "nurture",
    },
)

Example: Client-Facing Node

node = NodeSpec(
    id="chat-handler",
    name="Chat Handler",
    description="Handle user chat interactions",
    node_type="event_loop",
    input_keys=["user_message"],
    output_keys=["response"],
    tools=["search_kb", "create_ticket"],
    system_prompt="You are a helpful support agent.",
    client_facing=True,  # Streams to user
    success_criteria="User's question is fully answered",
)

Build docs developers (and LLMs) love