Request-Response Pattern
The Request-Response pattern provides one-to-one synchronous communication between agents. It’s ideal for deterministic tasks that require a clear request and an explicit response.
Overview
This pattern implements a classic request-response communication model:
Sender agent sends a request message
Receiver agent processes the request
Receiver agent returns a response
Sender agent receives and processes the response
The Request-Response protocol supports optional LLM integration for intelligent message processing.
When to Use
Data Processing — Query and transform data with guaranteed results
Simple Q&A — Ask questions and receive direct answers
Status Retrieval — Check agent or system status synchronously
Deterministic Tasks — Execute tasks with predictable outcomes
Basic Usage
Without LLM (Fast Path)
use mofa_sdk::collaboration::{
    CollaborationMessage,
    CollaborationMode,
    RequestResponseProtocol,
};
use std::sync::Arc;

// Create protocol without LLM
let protocol = Arc::new(RequestResponseProtocol::new("agent_001"));

// Create request message addressed to agent_002
let msg = CollaborationMessage::new(
    "agent_001".to_string(),
    "Process data: [1, 2, 3, 4, 5]",
    CollaborationMode::RequestResponse,
)
.with_receiver("agent_002".to_string());

// Send message
protocol.send_message(msg).await?;

// Receive and process the response (None means no response is pending)
if let Some(response) = protocol.receive_message().await? {
    let result = protocol.process_message(response).await?;
    println!("Success: {}", result.success);
    println!("Duration: {}ms", result.duration_ms);
}
With LLM (Intelligent Path)
use mofa_sdk::collaboration::{
    CollaborationMessage,
    CollaborationMode,
    RequestResponseProtocol,
};
use mofa_sdk::llm::{openai_from_env, LLMClient};
use std::sync::Arc;

// Create LLM client from environment configuration
let provider = openai_from_env()?;
let llm_client = Arc::new(LLMClient::new(Arc::new(provider)));

// Create LLM-enabled protocol
let protocol = Arc::new(RequestResponseProtocol::with_llm(
    "agent_001",
    llm_client.clone(),
));

// Messages are now processed with LLM understanding
let msg = CollaborationMessage::new(
    "agent_001".to_string(),
    "Analyze customer sentiment from recent reviews",
    CollaborationMode::RequestResponse,
)
.with_receiver("analyst_agent".to_string());

protocol.send_message(msg).await?;
Using the Collaboration Manager
The LLMDrivenCollaborationManager provides a higher-level API:
use mofa_sdk::collaboration::{
    LLMDrivenCollaborationManager,
    RequestResponseProtocol,
};
use std::sync::Arc;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create manager
    let manager = LLMDrivenCollaborationManager::new("agent_001");

    // Register request-response protocol
    manager
        .register_protocol(Arc::new(RequestResponseProtocol::new("agent_001")))
        .await?;

    // Execute task using the protocol
    let result = manager
        .execute_task_with_protocol(
            "request_response",
            "Calculate the sum of [10, 20, 30, 40]",
        )
        .await?;

    if result.success {
        if let Some(data) = result.data {
            println!("Response: {}", data.to_text());
        }
    }

    // Check statistics
    let stats = manager.stats().await;
    println!("Total tasks: {}", stats.total_tasks);
    // Guard against a NaN success rate when no tasks have run yet
    if stats.total_tasks > 0 {
        println!(
            "Success rate: {:.2}%",
            (stats.successful_tasks as f64 / stats.total_tasks as f64) * 100.0
        );
    }

    Ok(())
}
Advanced Examples
Code Analysis Request
use mofa_sdk::collaboration::{
    CollaborationContent,
    LLMDrivenCollaborationManager,
    RequestResponseProtocol,
};
use mofa_sdk::llm::{openai_from_env, LLMClient};
use std::sync::Arc;

/// Send a code snippet to an LLM-enabled request-response protocol
/// and print the analysis result.
async fn analyze_code() -> Result<(), Box<dyn std::error::Error>> {
    let provider = openai_from_env()?;
    let llm_client = Arc::new(LLMClient::new(Arc::new(provider)));

    let manager = LLMDrivenCollaborationManager::new("analyzer");

    // Register LLM-enabled protocol
    manager
        .register_protocol(Arc::new(RequestResponseProtocol::with_llm(
            "analyzer",
            llm_client.clone(),
        )))
        .await?;

    let code_snippet = r#"
fn process_data(input: &str) -> String {
let mut result = String::new();
for ch in input.chars() {
if ch.is_ascii() {
result.push(ch.to_ascii_uppercase());
}
}
result
}
"#;

    // Mixed content: a natural-language instruction plus structured payload
    let result = manager
        .execute_task_with_protocol(
            "request_response",
            CollaborationContent::Mixed {
                text: "Analyze this Rust code for security vulnerabilities and performance issues".to_string(),
                data: serde_json::json!({
                    "code": code_snippet,
                    "language": "rust"
                }),
            },
        )
        .await?;

    if let Some(data) = result.data {
        println!("Analysis result: {}", data.to_text());
    }

    Ok(())
}
use mofa_sdk::collaboration::{
    CollaborationContent,
    CollaborationMessage,
    CollaborationMode,
    RequestResponseProtocol,
};
use std::sync::Arc;

/// Two-step extract/transform flow over the request-response protocol.
async fn transform_data() -> Result<(), Box<dyn std::error::Error>> {
    let protocol = Arc::new(RequestResponseProtocol::new("transformer"));

    // Step 1: Extract data via a structured request
    let extract_msg = CollaborationMessage::new(
        "transformer".to_string(),
        CollaborationContent::Data(serde_json::json!({
            "action": "extract",
            "source": "database",
            "query": "SELECT * FROM users WHERE active = true"
        })),
        CollaborationMode::RequestResponse,
    )
    .with_receiver("data_agent".to_string());

    protocol.send_message(extract_msg).await?;
    let extract_result = protocol.receive_message().await?;

    // Step 2: Transform data once the extraction response arrives
    if let Some(msg) = extract_result {
        let result = protocol.process_message(msg).await?;
        println!("Extraction completed in {}ms", result.duration_ms);
    }

    Ok(())
}
// Get protocol information
let protocol = RequestResponseProtocol::new("agent_001");
println!("Protocol: {}", protocol.name());
println!("Description: {}", protocol.description());
println!("Scenarios: {:?}", protocol.applicable_scenarios());
println!("Available: {}", protocol.is_available());

// Get statistics as key/value pairs
let stats = protocol.stats();
for (key, value) in stats {
    println!("{}: {:?}", key, value);
}
Message Types
The protocol supports multiple content types:
Text Messages
let msg = CollaborationMessage::new(
    "sender",
    "Simple text request",
    CollaborationMode::RequestResponse,
);
Structured Data
let msg = CollaborationMessage::new(
    "sender",
    CollaborationContent::Data(serde_json::json!({
        "action": "query",
        "params": {
            "table": "users",
            "limit": 100
        }
    })),
    CollaborationMode::RequestResponse,
);
Mixed Content
let msg = CollaborationMessage::new(
    "sender",
    CollaborationContent::Mixed {
        text: "Process this dataset".to_string(),
        data: serde_json::json!({
            "dataset_id": "ds_001",
            "filters": ["active", "verified"]
        }),
    },
    CollaborationMode::RequestResponse,
);
Error Handling
use mofa_sdk::collaboration::{
    CollaborationResult,
    LLMDrivenCollaborationManager,
};

/// Branch on the task outcome and surface either the result data
/// or the error message.
async fn handle_errors() -> Result<(), Box<dyn std::error::Error>> {
    let manager = LLMDrivenCollaborationManager::new("agent_001");

    let result = manager
        .execute_task_with_protocol("request_response", "Risky operation")
        .await?;

    // Plain if/else reads better than matching on a bool
    if result.success {
        println!("Task succeeded");
        if let Some(data) = result.data {
            println!("Result: {}", data.to_text());
        }
    } else if let Some(error) = result.error {
        eprintln!("Task failed: {}", error);
    }

    Ok(())
}
Protocol Configuration
The Request-Response protocol has minimal configuration:
use mofa_sdk::collaboration::{CollaborationMode, RequestResponseProtocol};

// Basic configuration
let protocol = RequestResponseProtocol::new("agent_id");

// With LLM support
let protocol = RequestResponseProtocol::with_llm(
    "agent_id",
    llm_client,
);

// Check protocol properties
assert_eq!(protocol.name(), "request_response");
assert_eq!(protocol.mode(), CollaborationMode::RequestResponse);
assert!(protocol.is_available());
Best Practices
Use for Synchronous Operations
Request-Response is best for operations that need immediate responses. For asynchronous workflows, consider Publish-Subscribe.
Enable LLM for Complex Requests
Use LLM integration when dealing with natural language queries or complex decision-making.
Handle Timeouts Gracefully
Implement timeout handling to prevent blocking indefinitely:
use tokio::time::{timeout, Duration};
// Abort the receive if no response arrives within 30 seconds.
// The outer `?` unwraps the timeout error, the inner `?` the protocol error.
let result = timeout(
    Duration::from_secs(30),
    protocol.receive_message(),
)
.await??;
Monitor Performance
Track execution times and success rates using the collaboration manager’s statistics.
See Also
Publish-Subscribe — For one-to-many broadcasting scenarios
Sequential Pattern — For chained request-response workflows
Collaboration Overview — Return to patterns overview
LLM Integration — Learn about LLM integration