Hypergraph nodes are pure functions. Test them directly — no framework setup, no mocking.
The Core Pattern
Every node has a .func attribute that gives you the raw function:
@node(output_name="result")
def process(text: str) -> str:
    """Return *text* uppercased."""
    return text.upper()
# Test the function directly
def test_process():
    """The raw function behaves like plain Python — no framework needed."""
    result = process.func("hello")
    assert result == "HELLO"
The .func attribute is the original function before the @node decorator added metadata. You can call it with normal Python testing tools.
Why This Works
The @node decorator adds metadata (inputs, outputs, name) but doesn’t change the function’s behavior:
@node(output_name="doubled")
def double(x: int) -> int:
    """Return *x* multiplied by two."""
    return x * 2
# These are equivalent:
double(5)       # Call through the node wrapper → 10
double.func(5)  # Call the raw function → 10
Both calls return the same result, but .func bypasses framework metadata and executes the pure function.
Testing Patterns
Unit Test a Single Node
import pytest
from myapp.nodes import embed, retrieve, generate
def test_embed():
    """embed returns a fixed-size list of floats."""
    result = embed.func("hello world")
    assert isinstance(result, list)
    assert len(result) == 768  # Embedding dimension
    assert all(isinstance(x, float) for x in result)
def test_retrieve():
    """retrieve returns at most k documents for a given embedding."""
    fake_embedding = [0.1] * 768
    docs = retrieve.func(fake_embedding, k=3)
    assert isinstance(docs, list)
    assert len(docs) <= 3
def test_generate():
    """generate produces a non-empty string answer from docs + query."""
    docs = ["Document 1", "Document 2"]
    query = "What is the answer?"
    response = generate.func(docs, query)
    assert isinstance(response, str)
    assert len(response) > 0
Test with Mocked Dependencies
from unittest.mock import patch, MagicMock
@node(output_name="response")
def call_llm(prompt: str) -> str:
    """Send *prompt* to the module-level LLM client and return its reply."""
    return llm_client.generate(prompt)
def test_call_llm_with_mock():
    """Patch the module-level client so no real LLM call is made."""
    with patch("myapp.nodes.llm_client") as mock_client:
        mock_client.generate.return_value = "Mocked response"
        result = call_llm.func("test prompt")
        assert result == "Mocked response"
        mock_client.generate.assert_called_once_with("test prompt")
Use standard mocking libraries like unittest.mock or pytest-mock. Hypergraph nodes are just functions.
Test Async Nodes
import pytest
@node(output_name="data")
async def fetch(url: str) -> dict:
    """Fetch *url* and return the parsed JSON body."""
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        return response.json()
@pytest.mark.asyncio
async def test_fetch():
    """Async node functions are awaited directly via .func."""
    result = await fetch.func("https://api.example.com/data")
    assert "id" in result
Test Multiple Outputs
@node(output_name=("mean", "std"))
def statistics(data: list) -> tuple[float, float]:
    """Return the mean and population standard deviation of *data*."""
    m = sum(data) / len(data)
    s = (sum((x - m) ** 2 for x in data) / len(data)) ** 0.5
    return m, s
def test_statistics():
    """Multiple outputs come back as a plain tuple from .func."""
    mean, std = statistics.func([1, 2, 3, 4, 5])
    assert mean == 3.0
    assert abs(std - 1.414) < 0.01
Testing Graphs
Test Graph Construction
from hypergraph import Graph
def test_graph_builds():
    """The graph wires up its nodes and exposes inputs/outputs."""
    graph = Graph([node_a, node_b, node_c])
    assert "node_a" in graph.nodes
    assert graph.inputs.required == ("input_param",)
    assert "output" in graph.outputs
def test_graph_with_strict_types():
    """Strict type checking is accepted at construction time."""
    # Should not raise
    graph = Graph([node_a, node_b], strict_types=True)
    assert graph.strict_types is True
Graph construction is fast and deterministic. Test that your graph builds correctly and validates inputs/outputs as expected.
Test Graph Execution
from hypergraph import SyncRunner, RunStatus
def test_pipeline_integration():
    """Run the full pipeline end to end with a SyncRunner."""
    graph = Graph([clean, transform, validate])
    runner = SyncRunner()
    result = runner.run(graph, {"raw_data": "test input"})
    assert result.status == RunStatus.COMPLETED
    assert "validated" in result
    assert result["validated"] is True
Test with Fixtures
import pytest
from hypergraph import Graph, SyncRunner
@pytest.fixture
def rag_pipeline():
    """A shared RAG graph for the tests below."""
    return Graph([embed, retrieve, generate])
@pytest.fixture
def runner():
    """A shared synchronous runner for the tests below."""
    return SyncRunner()
def test_rag_responds(rag_pipeline, runner):
    """The pipeline answers a simple query with a substantive response."""
    result = runner.run(rag_pipeline, {
        "query": "What is Python?",
        "top_k": 3,
    })
    assert "answer" in result
    assert len(result["answer"]) > 50
Use pytest fixtures to share graphs and runners across tests. This reduces boilerplate and keeps tests focused.
Testing Routing Logic
from hypergraph import END , route
@route(targets=["process", END])
def should_continue(score: float) -> str:
    """Route to END once *score* reaches 0.8; otherwise keep processing."""
    if score >= 0.8:
        return END
    return "process"
def test_routing_continues_on_low_score():
    """Scores below the 0.8 threshold route back to processing."""
    result = should_continue.func(0.5)
    assert result == "process"
def test_routing_ends_on_high_score():
    """Scores at or above the threshold route to the END sentinel."""
    result = should_continue.func(0.9)
    assert result is END
Property-Based Testing
Use hypothesis for thorough testing:
from hypothesis import given, strategies as st
@node(output_name="cleaned")
def clean(text: str) -> str:
    """Normalize *text*: strip surrounding whitespace and lowercase."""
    return text.strip().lower()
@given(st.text())
def test_clean_always_lowercase(text):
    """Property: the output is lowercase for any input string."""
    result = clean.func(text)
    assert result == result.lower()
@given(st.text())
def test_clean_no_leading_trailing_whitespace(text):
    """Property: the output carries no surrounding whitespace."""
    result = clean.func(text)
    assert result == result.strip()
Property-based testing finds edge cases you might not think of. Use it for data transformation and validation nodes.
Snapshot Testing
For complex outputs, use snapshot testing:
def test_generate_response(snapshot):
    """Compare the generated answer against a stored snapshot file."""
    result = generate.func(
        docs=["Doc 1", "Doc 2"],
        query="Test query",
    )
    # Compare against saved snapshot
    snapshot.assert_match(result, "generate_response.txt")
Benefits
- **Fast** — no graph construction or runner overhead
- **Isolated** — test one function at a time
- **Simple** — standard pytest patterns work
- **Debuggable** — step through your function directly
Real-World Example: RAG Node Tests
import pytest
from unittest.mock import MagicMock, patch
from myapp.rag import embed, retrieve, generate
class TestRAGPipeline:
    """Test suite for RAG pipeline nodes."""

    def test_embed_returns_correct_dimensions(self):
        """embed yields vectors of the provider's documented size."""
        result = embed.func("test query")
        assert len(result) == 1536  # OpenAI embedding size

    @patch("myapp.rag.vector_db")
    def test_retrieve_queries_vector_db(self, mock_db):
        """retrieve delegates the search to the vector database."""
        mock_db.search.return_value = [
            {"content": "doc1"},
            {"content": "doc2"},
        ]
        embedding = [0.1] * 1536
        docs = retrieve.func(embedding, k=2)
        assert docs == ["doc1", "doc2"]
        mock_db.search.assert_called_once_with(embedding, k=2)

    def test_generate_includes_context(self):
        """generate passes the retrieved docs to the LLM's system message."""
        with patch("myapp.rag.llm_client") as mock_llm:
            mock_response = MagicMock()
            mock_response.content = [MagicMock(text="Answer based on context")]
            mock_llm.messages.create.return_value = mock_response
            result = generate.func(
                docs=["Context doc"],
                query="What is this?",
            )
            assert "Answer based on context" == result
            # Verify context was included in system message
            call_args = mock_llm.messages.create.call_args
            assert "Context doc" in call_args.kwargs["system"]
See Also
- **Batch Processing** — test batch operations with `runner.map()`
- **Integrate with LLMs** — test LLM integrations with mock clients