Architecture Overview
The tool system has three main components:
Tool Lifecycle
Tool Building
Tools are built dynamically based on tenant configuration.
ToolBuilder Service
# From: src/apps/calls/tools/tool_builder.py:17-78
class ToolBuilder:
    """Builds the per-tenant tool set from feature configuration.

    Each entry in ``self._builders`` inspects the tenant config and returns
    either a ``FunctionTool`` or ``None`` (feature disabled / nothing to build).
    """

    def __init__(self) -> None:
        # Register per-feature builders here
        self._builders: list[Callable[[TenantConfig], FunctionTool | None]] = [
            self._build_refer_tool,
            # later: self._build_crm_tool, self._build_calendar_tool, ...
        ]

    def build_tools(self, cfg: TenantConfig) -> ToolBuildResult:
        """Run every registered builder and collect the tools it produces.

        A failing builder is logged and skipped so one broken feature never
        prevents the remaining tools from being built.
        """
        log_event(
            logging.INFO,
            "tool_build_started",
            tenant_id=cfg.tenant_id,
            builder_count=len(self._builders),
        )
        tools: list[FunctionTool] = []
        for build_fn in self._builders:
            try:
                log_event(
                    logging.INFO,
                    "tool_builder_phase_started",
                    tenant_id=cfg.tenant_id,
                    builder=build_fn.__name__,
                )
                tool = build_fn(cfg)
                if tool is not None:
                    tools.append(tool)
                    log_event(
                        logging.INFO,
                        "tool_builder_phase_built_tool",
                        tenant_id=cfg.tenant_id,
                        builder=build_fn.__name__,
                        tool_name=tool.name,
                    )
                else:
                    log_event(
                        logging.INFO,
                        "tool_builder_phase_no_tool",
                        tenant_id=cfg.tenant_id,
                        builder=build_fn.__name__,
                    )
            except ToolBuildError as e:
                # A build failure is an error condition: log it at ERROR level
                # (matching the error-logging convention used elsewhere in
                # this codebase), not INFO.
                log_event(
                    logging.ERROR,
                    "tool_build_error",
                    str(e),
                    tenant_id=cfg.tenant_id,
                    builder=build_fn.__name__,
                    error=e.to_log_dict(),
                )
        # Default policy: tools available => auto; otherwise none
        tool_choice = "auto" if tools else "none"
        log_event(
            logging.INFO,
            "tool_build_finished",
            tenant_id=cfg.tenant_id,
            tool_count=len(tools),
            tool_choice=tool_choice,
            tool_names=[tool.name for tool in tools],
        )
        return ToolBuildResult(tools=tools, tool_choice=tool_choice)
Tool Build Result
# From: src/apps/calls/tools/tool_builder.py:11-14
@dataclass(frozen=True)
class ToolBuildResult:
# Immutable outcome of ToolBuilder.build_tools.
tools: list[FunctionTool]  # tool definitions to expose to the model (possibly empty)
tool_choice: str # "auto" | "none" | "required" | "<function name>"
The tool choice defaults to “auto” when tools are available, allowing the AI to decide when to use them.
Example: Transfer Tool
The transfer tool allows the AI to transfer calls to different destinations.
Configuration
Tenant configuration defines transfer destinations:
{
"tenant_id": "acme-corp",
"features": {
"refer": {
"enabled": true,
"require_confirmation": true,
"handoff_phrase": "Je vous transfère vers {{label}}",
"destinations": [
{
"destination_id": "sales",
"label": "Service Commercial",
"description_for_model": "For sales inquiries and new customer questions",
"target_uri": "sip:[email protected]",
"enabled": true,
"priority": 10
},
{
"destination_id": "support",
"label": "Service Technique",
"description_for_model": "For technical support and troubleshooting",
"target_uri": "+14155552000",
"enabled": true,
"priority": 5
}
]
}
}
}
Building the Tool Definition
# From: src/apps/calls/tools/tool_builder.py:84-186
def _build_refer_tool(self, cfg: TenantConfig) -> FunctionTool | None:
"""Build the "request_transfer" tool from the tenant's refer feature config.

Returns None when the feature is disabled or when no destination is
enabled; otherwise returns a FunctionTool whose destination_id enum and
description are derived from the enabled destinations.
"""
refer = cfg.features.refer
if not refer.enabled:
log_event(
logging.INFO,
"tool_builder_refer_disabled",
tenant_id=cfg.tenant_id,
)
return None
# Only enabled destinations are exposed to the model.
destinations = [d for d in refer.destinations if d.enabled]
if not destinations:
log_event(
logging.INFO,
"tool_builder_refer_no_enabled_destinations",
tenant_id=cfg.tenant_id,
configured_destinations=len(refer.destinations),
)
return None
# Highest priority first.
destinations.sort(key=lambda d: d.priority, reverse=True)
log_event(
logging.INFO,
"tool_builder_refer_destinations_selected",
tenant_id=cfg.tenant_id,
destination_ids=[d.destination_id for d in destinations],
)
# Keep destination IDs explicit and guidance in description.
enum_destination_ids: list[str] = []
destination_hints: list[str] = []
for d in destinations:
label = (d.label or d.destination_id).strip()
opt_desc = (d.description_for_model or "").strip()
# .strip(" —") drops the dangling dash when opt_desc is empty.
description = f"{label} — {opt_desc}".strip(" —")
enum_destination_ids.append(d.destination_id)
destination_hints.append(f"{d.destination_id}: {description}")
# Tool description also comes from DB as much as possible
description_parts: list[str] = []
if refer.require_confirmation:
description_parts.append(
"Utiliser uniquement APRÈS confirmation explicite de l'appelant."
)
else:
description_parts.append(
"Utiliser lorsque l'appelant demande explicitement un transfert."
)
# Provide the tenant-defined handoff phrase as guidance to the model
if refer.handoff_phrase:
description_parts.append(
f"Avant d'appeler l'outil, annoncer: {refer.handoff_phrase} "
"(remplacer {{label}} par le libellé du service choisi)."
)
tool_description = " ".join(
p.strip() for p in description_parts if p.strip()
).strip()
# JSON Schema for the parameters; the enum restricts the model's choices
# to known destination ids.
params = {
"type": "object",
"properties": {
"destination_id": {
"type": "string",
"enum": enum_destination_ids,
"description": (
"Identifiant du service vers lequel transférer. "
f"Valeurs autorisées: {', '.join(enum_destination_ids)}. "
f"Détails: {' | '.join(destination_hints)}"
),
},
"reason": {
"type": "string",
"description": "Raison courte du transfert (optionnel).",
},
},
"required": ["destination_id"],
"additionalProperties": False,
}
log_event(
logging.INFO,
"tool_builder_refer_parameters_built",
tenant_id=cfg.tenant_id,
enum_destination_ids=enum_destination_ids,
required=params["required"],
)
log_event(
logging.INFO,
"tool_builder_refer_tool_definition_built",
tenant_id=cfg.tenant_id,
tool_name="request_transfer",
enabled_destination_ids=enum_destination_ids,
parameters=params,
)
return FunctionTool(
name="request_transfer",
description=tool_description,
parameters=params,
)
The tool description is dynamically built from tenant configuration, including confirmation requirements and handoff phrases that guide the AI’s behavior.
Tool Execution
When the AI decides to call a tool, the invocation flows through several components.
Tool Invocation Model
class ToolInvocation:
# A parsed tool-call request flowing from the WebSocket layer to the executor.
tenant_config: TenantConfig  # configuration of the tenant owning the call
call_id: str  # OpenAI SIP call id (logged downstream as openai_sip_call_id)
function_call_id: str  # id of this specific function-call event
name: str  # tool/function name requested by the model
arguments_json: str  # raw JSON string of arguments from the model
response_id: str | None  # id of the response that triggered the call, if present
item_id: str | None  # id of the conversation item, if present
WebSocket Event Handling
Tool invocations arrive via WebSocket events:
# From: src/apps/calls/ws/client_openai_calls.py:131-185
elif event_type == "response.function_call_arguments.done":
function_call_id = event["call_id"]
log_event(
logging.INFO,
"ws_tool_call_received",
call_id=self.call_id,
function_call_id=function_call_id,
tool_name=event.get("name"),
response_id=event.get("response_id"),
item_id=event.get("item_id"),
)
invocation = ToolInvocation(
tenant_config=self.cfg,
call_id=self.call_id,
function_call_id=function_call_id,
name=event["name"],
arguments_json=event["arguments"],
response_id=event.get("response_id"),
item_id=event.get("item_id"),
)
log_event(
logging.INFO,
"ws_tool_invocation_built",
call_id=self.call_id,
function_call_id=invocation.function_call_id,
openai_sip_call_id=invocation.call_id,
tool_name=invocation.name,
response_id=invocation.response_id,
item_id=invocation.item_id,
arguments_json=invocation.arguments_json,
invocation=invocation.model_dump(),
)
if self.tool_dispatcher is None:
log_event(
logging.INFO,
"ws_tool_dispatcher_missing_fallback_output",
call_id=self.call_id,
function_call_id=function_call_id,
)
await self.send_tool_output(
function_call_id=function_call_id,
result=None,
invocation=None,
)
continue
log_event(
logging.INFO,
"ws_tool_invocation_submitting_to_dispatcher",
call_id=self.call_id,
function_call_id=invocation.function_call_id,
tool_name=invocation.name,
)
await self.tool_dispatcher.submit_tool_invocation(invocation)
Tool Dispatcher
The CallSession acts as a dispatcher, managing tool execution:
# From: src/apps/calls/app/call_session.py:321-364
async def submit_tool_invocation(self, invocation: ToolInvocation) -> None:
"""Schedule a tool invocation as a background task.

Deduplicates by function_call_id: invocations already pending or already
executed are ignored, so retried/duplicate WS events cannot run twice.
"""
function_call_id = invocation.function_call_id
# Idempotency guard: already scheduled and still running.
if function_call_id in self._pending_tool_calls:
log_event(
logging.INFO,
"tool_invocation_ignored_already_pending",
call_id=self.call_id,
tenant_id=self.tenant_id,
function_call_id=function_call_id,
tool_name=invocation.name,
response_id=invocation.response_id,
)
return
# Idempotency guard: already ran to completion earlier in this call.
if function_call_id in self._executed_tool_calls:
log_event(
logging.INFO,
"tool_invocation_ignored_already_executed",
call_id=self.call_id,
tenant_id=self.tenant_id,
function_call_id=function_call_id,
tool_name=invocation.name,
response_id=invocation.response_id,
)
return
# Record before scheduling so the pending set is visible immediately.
self._pending_tool_calls[function_call_id] = invocation
self._tool_tasks[function_call_id] = asyncio.create_task(
self._run_tool_and_respond(invocation),
name=f"tool-{self.call_id}-{function_call_id}",
)
log_event(
logging.INFO,
"tool_invocation_scheduled",
call_id=self.call_id,
tenant_id=self.tenant_id,
function_call_id=function_call_id,
openai_sip_call_id=invocation.call_id,
tool_name=invocation.name,
response_id=invocation.response_id,
pending_tool_calls=len(self._pending_tool_calls),
active_tool_tasks=len(self._tool_tasks),
arguments_json=invocation.arguments_json,
invocation=invocation.model_dump(),
)
Tool executions run in background tasks, allowing multiple tools to execute concurrently without blocking the call.
ToolExecutor Service
The ToolExecutor dispatches to the correct tool implementation:
# From: src/apps/calls/tools/tool_executor.py:21-46
class ToolExecutor:
"""
Stateless dispatcher.
- Holds a registry of tool callables keyed by tool name.
- Parses invocation.arguments_json to dict.
- Dispatches to the matching callable.
- Returns a ToolResult with output string to send back to the model.
Note: per-call state (tenant config, confirmation flags, etc.) should NOT live here.
Pass context via args/invocation or via the callable closure.
"""
def __init__(self, tools: dict[str, ToolCallable] | None = None) -> None:
# Registry of tool callables, keyed by tool name.
self._tools: dict[str, ToolCallable] = tools or {}
# Helper objects shared across tool executions (module-level service).
self._openai_call_service = openai_calls_service
# Register (or replace) a tool callable under the given name.
def register(self, name: str, fn: ToolCallable) -> None:
self._tools[name] = fn
# Whether a tool with this name is registered.
def has_tool(self, name: str) -> bool:
return name in self._tools
Execution Flow
# From: src/apps/calls/tools/tool_executor.py:45-183
async def execute(self, invocation: ToolInvocation) -> ToolResult:
"""
Main entrypoint. Never raises: always returns a ToolResult that is readable for the model.
Uses ToolNotFoundError / ToolArgsParseError / ToolExecutionError for logging/telemetry context,
but swallows them and returns a structured ToolResult.
"""
tool_name = invocation.name
# tenant_config may lack tenant_id in tests/partial configs; default None.
tenant_id = getattr(invocation.tenant_config, "tenant_id", None)
log_event(
logging.INFO,
"tool_executor_execute_started",
tool_name=tool_name,
tenant_id=tenant_id,
openai_sip_call_id=invocation.call_id,
function_call_id=invocation.function_call_id,
response_id=invocation.response_id,
item_id=invocation.item_id,
)
# Dispatch: unknown tool names yield a structured error, not an exception.
fn = self._tools.get(tool_name)
if fn is None:
err = ToolNotFoundError(
tenant_id=tenant_id,
tool_name=tool_name,
context={
"call_id": invocation.function_call_id,
"response_id": invocation.response_id,
"item_id": invocation.item_id,
},
)
log_event(
logging.INFO,
"tool_not_found",
str(err),
tool_name=tool_name,
tenant_id=tenant_id,
call_id=invocation.function_call_id,
response_id=invocation.response_id,
item_id=invocation.item_id,
error=err.to_log_dict(),
)
return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="tool_not_found",
output=json.dumps(
{
"ok": False,
"error": "tool_not_found",
"tool": tool_name,
"message": "Requested tool is not available.",
}
),
)
# Parse args
try:
# Empty/missing arguments are treated as an empty JSON object.
raw = invocation.arguments_json or "{}"
args = json.loads(raw)
if not isinstance(args, dict):
raise ValueError("Tool arguments must be a JSON object.")
log_event(
logging.INFO,
"tool_executor_arguments_parsed",
tool_name=tool_name,
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
argument_keys=sorted(args.keys()),
)
except Exception as e:
err = ToolArgsParseError(
tenant_id=tenant_id,
tool_name=tool_name,
arguments_json=invocation.arguments_json,
context={
"call_id": invocation.function_call_id,
"response_id": invocation.response_id,
"item_id": invocation.item_id,
},
cause=e,
)
# ... return error result
# Execute tool
try:
deps: dict[str, Any] | None = None
if tool_name == "request_transfer":
# Inject shared dependencies via args for now
deps = {}
deps["call_service"] = self._openai_call_service
log_event(
logging.INFO,
"tool_executor_dependencies_prepared",
tool_name=tool_name,
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
dependency_keys=sorted(deps.keys()),
)
result = await fn(args, invocation, deps)
# Defensive contract check: tool callables must return ToolResult.
if not isinstance(result, ToolResult):
raise TypeError(
f"Tool '{tool_name}' must return ToolResult, got {type(result).__name__}"
)
log_event(
logging.INFO,
"tool_executor_execute_finished",
tool_name=tool_name,
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
ok=result.ok,
error=result.error,
output_length=len(result.output),
)
return result
# NOTE: the except handlers (ToolExecutionError path) are elided in this
# excerpt; see tool_executor.py lines 45-183 in the source file.
The execute method never raises exceptions. It always returns a ToolResult, with error information included when failures occur.
Tool Implementation
Tool implementations are async functions that receive parsed arguments and return a ToolResult.
Transfer Tool Implementation
# From: src/apps/calls/tools/definitions/request_transfer.py:15-233
async def request_transfer_tool(
args: dict[str, Any], invocation: ToolInvocation, deps: dict[str, Any] | None = None
) -> ToolResult:
"""
Tool to request a call transfer.
Expects args to contain:
- destination_id: str, id of the configured destination to transfer the call to
- reason: Optional[str], the reason for the transfer (for logging/telemetry)
Expects deps to contain:
- call_service: service exposing refer_call(call_id, target_uri, idempotency_key)
"""
tenant_id = getattr(invocation.tenant_config, "tenant_id", None)
log_event(
logging.INFO,
"request_transfer_started",
tenant_id=tenant_id,
openai_sip_call_id=invocation.call_id,
function_call_id=invocation.function_call_id,
response_id=invocation.response_id,
item_id=invocation.item_id,
argument_keys=sorted(args.keys()),
)
# Validate args
destination_id = args.get("destination_id")
reason = args.get("reason", "")
# NOTE(review): call_service stays None when deps are missing; the except
# below then converts the resulting AttributeError into call_transfer_failed.
call_service = (
deps.get("call_service") if deps else None
)
if not isinstance(destination_id, str):
# ... return error
# Resolve destination_id -> target_uri
target_uri = await resolve_refer_target_uri(
invocation.tenant_config, destination_id
)
log_event(
logging.INFO,
"request_transfer_destination_resolution_finished",
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
destination_id=destination_id,
resolution_type=type(target_uri).__name__,
)
if not target_uri:
# ... return error
if isinstance(target_uri, list):
# Invalid destination_id, return list of valid options
# ... return error with valid options
# Refer the call
# Key is stable per (call, target), so retries of the same transfer dedupe.
idempotency_key = f"{invocation.call_id}-transfer-{target_uri}"
log_event(
logging.INFO,
"request_transfer_refer_call_started",
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
openai_sip_call_id=invocation.call_id,
target_uri=target_uri,
destination_id=destination_id,
reason=reason,
idempotency_key=idempotency_key,
)
try:
await call_service.refer_call(
call_id=invocation.call_id,
target_uri=target_uri,
idempotency_key=idempotency_key,
)
except Exception as e:
# Never raise out of a tool: report failure so the AI can keep handling
# the call without a transfer.
err = ToolExecutionError(
tenant_id=tenant_id,
tool_name="request_transfer",
reason="call_transfer_failed",
)
log_event(
logging.INFO,
"tool_execution_error",
str(err),
tool_name="request_transfer",
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
openai_sip_call_id=invocation.call_id,
target_uri=target_uri,
error=str(e),
)
output = json.dumps(
{
"error": "call_transfer_failed",
"detail": "Failed to transfer the call. Please handle the call without transfer.",
}
)
return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="call_transfer_failed",
output=output,
)
log_event(
logging.INFO,
"request_transfer_refer_call_succeeded",
tenant_id=tenant_id,
function_call_id=invocation.function_call_id,
openai_sip_call_id=invocation.call_id,
target_uri=target_uri,
destination_id=destination_id,
)
output = json.dumps(
{
"message": "call_transfer_requested",
"target_uri": target_uri,
"reason": reason,
}
)
return ToolResult(
function_call_id=invocation.function_call_id,
ok=True,
output=output,
)
Destination Resolution
# From: src/apps/calls/tools/definitions/request_transfer.py:236-275
async def resolve_refer_target_uri(
    cfg: TenantConfig, destination_id: str
) -> str | list[str] | None:
    """Map a destination_id to its target URI for the tenant's refer feature.

    Returns the matching ``target_uri``, ``None`` when the refer feature is
    disabled, or the list of valid destination ids when no match is found.
    """
    log_event(
        logging.INFO,
        "request_transfer_destination_resolution_started",
        tenant_id=cfg.tenant_id,
        destination_id=destination_id,
    )
    refer = cfg.features.refer
    if not refer.enabled:
        log_event(
            logging.INFO,
            "request_transfer_refer_feature_disabled",
            tenant_id=cfg.tenant_id,
            destination_id=destination_id,
        )
        return None
    match = next(
        (d for d in refer.destinations if d.destination_id == destination_id),
        None,
    )
    if match is not None:
        log_event(
            logging.INFO,
            "request_transfer_destination_resolved",
            tenant_id=cfg.tenant_id,
            destination_id=destination_id,
            target_uri=match.target_uri,
        )
        return match.target_uri
    # No match: report the valid ids so the caller can surface options.
    valid_ids = [d.destination_id for d in refer.destinations]
    log_event(
        logging.INFO,
        "request_transfer_destination_not_found",
        tenant_id=cfg.tenant_id,
        destination_id=destination_id,
        valid_destination_ids=valid_ids,
    )
    return valid_ids
Tool Result Model
class ToolResult:
# Result returned by every tool callable and relayed back to the model.
function_call_id: str  # echoes the invocation's function_call_id
ok: bool  # True when the tool succeeded
error: str | None = None  # machine-readable error code when ok is False
output: str # JSON string sent back to AI
The output field must be a JSON string. The AI parses this to understand the result of the tool execution.
Error Handling
The tool system has comprehensive error handling.
Error Types
ToolNotFoundError
Thrown when the tool name doesn’t match any registered tool:return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="tool_not_found",
output=json.dumps({
"ok": False,
"error": "tool_not_found",
"tool": tool_name,
"message": "Requested tool is not available.",
}),
)
ToolArgsParseError
Thrown when arguments cannot be parsed:return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="tool_args_parse_error",
output=json.dumps({
"ok": False,
"error": "tool_args_parse_error",
"tool": tool_name,
"message": "Tool arguments could not be parsed.",
}),
)
ToolExecutionError
Thrown when tool execution fails:return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="tool_execution_failed",
output=json.dumps({
"ok": False,
"error": "tool_execution_failed",
"tool": tool_name,
"message": "Tool execution failed.",
}),
)
Graceful Degradation
When tools fail, the call continues:# From: src/apps/calls/app/call_session.py:449-473
except Exception as e:
try:
await self.ws_client.send_tool_output(
function_call_id=invocation.function_call_id,
result=None,
invocation=None,
)
log_event(
logging.INFO,
"tool_run_and_respond_fallback_output_sent",
call_id=self.call_id,
tenant_id=self.tenant_id,
function_call_id=invocation.function_call_id,
tool_name=invocation.name,
)
except Exception:
log_event(
logging.INFO,
"tool_run_and_respond_fallback_output_failed",
call_id=self.call_id,
tenant_id=self.tenant_id,
function_call_id=invocation.function_call_id,
tool_name=invocation.name,
)
Even if a tool fails, the AI receives a response and can continue the conversation.
Tool Cancellation
Tools can be cancelled when responses are interrupted.Response Cancellation
When a response is cancelled (e.g., user interrupts), associated tools are cancelled:# From: src/apps/calls/app/call_session.py:366-400
def cancel_tools_for_response(self, response_id: str) -> None:
    """Cancel every still-running tool task tied to the given response id."""
    if not response_id:
        log_event(
            logging.INFO,
            "tool_cancel_for_response_skipped_missing_response_id",
            call_id=self.call_id,
            tenant_id=self.tenant_id,
        )
        return
    cancelled_count = 0
    # Snapshot the pending items: cancellation may mutate the dict later.
    for fc_id, pending in list(self._pending_tool_calls.items()):
        if pending.response_id != response_id:
            continue
        tool_task = self._tool_tasks.get(fc_id)
        if tool_task and not tool_task.done():
            tool_task.cancel()
            cancelled_count += 1
            log_event(
                logging.INFO,
                "tool_cancelled_for_response",
                call_id=self.call_id,
                tenant_id=self.tenant_id,
                response_id=response_id,
                function_call_id=fc_id,
                tool_name=pending.name,
            )
    log_event(
        logging.INFO,
        "tool_cancel_for_response_finished",
        call_id=self.call_id,
        tenant_id=self.tenant_id,
        response_id=response_id,
        cancelled_count=cancelled_count,
    )
Session Cleanup
When a call ends, all pending tools are cancelled:# From: src/apps/calls/app/call_session.py:496-522
async def _cancel_all_tool_tasks(self) -> None:
"""Cancel and reap every outstanding tool task at session teardown."""
tasks = [task for task in self._tool_tasks.values() if not task.done()]
log_event(
logging.INFO,
"tool_cancel_all_started",
call_id=self.call_id,
tenant_id=self.tenant_id,
cancellable_tasks=len(tasks),
tracked_tasks=len(self._tool_tasks),
)
for task in tasks:
task.cancel()
if tasks:
# Wait for cancellations to settle; return_exceptions swallows the
# CancelledError each task raises instead of propagating it.
await asyncio.gather(*tasks, return_exceptions=True)
# Reap finished tasks and their pending-invocation bookkeeping entries.
for function_call_id, task in list(self._tool_tasks.items()):
if task.done():
self._tool_tasks.pop(function_call_id, None)
self._pending_tool_calls.pop(function_call_id, None)
log_event(
logging.INFO,
"tool_cancel_all_finished",
call_id=self.call_id,
tenant_id=self.tenant_id,
remaining_tasks=len(self._tool_tasks),
remaining_pending=len(self._pending_tool_calls),
)
Creating Custom Tools
You can add new tools to extend agent capabilities.
Step 1: Define Tool Configuration
Add feature configuration to TenantConfig model:class Features:
refer: ReferFeature
my_custom_feature: MyCustomFeature # Add your feature
Step 2: Create Tool Builder
Add a builder method to ToolBuilder:
def _build_my_custom_tool(self, cfg: TenantConfig) -> FunctionTool | None:
feature = cfg.features.my_custom_feature
if not feature.enabled:
return None
return FunctionTool(
name="my_custom_tool",
description="What this tool does",
parameters={
"type": "object",
"properties": {
"param1": {
"type": "string",
"description": "Description for AI",
},
},
"required": ["param1"],
},
)
Then register the builder in ToolBuilder.__init__:
self._builders = [
self._build_refer_tool,
self._build_my_custom_tool, # Add your builder
]
Step 3: Implement Tool Logic
Create the tool implementation:
async def my_custom_tool(
args: dict[str, Any],
invocation: ToolInvocation,
deps: dict[str, Any] | None = None
) -> ToolResult:
# Validate arguments
param1 = args.get("param1")
if not param1:
return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="missing_param",
output=json.dumps({"error": "param1 is required"}),
)
# Execute tool logic
try:
result = await do_something(param1)
return ToolResult(
function_call_id=invocation.function_call_id,
ok=True,
output=json.dumps({"result": result}),
)
except Exception as e:
return ToolResult(
function_call_id=invocation.function_call_id,
ok=False,
error="execution_failed",
output=json.dumps({"error": str(e)}),
)
Step 4: Register Tool
Register the tool with the executor:
tool_executor = ToolExecutor()
tool_executor.register("my_custom_tool", my_custom_tool)
Best Practices
Tool Design
- Keep tool interfaces simple and focused
- Use clear, descriptive parameter names
- Provide detailed descriptions for the AI
- Include examples in descriptions when helpful
Error Messages
- Return structured JSON errors the AI can understand
- Include enough detail for the AI to explain to the user
- Use consistent error formats across tools
Performance
- Keep tool execution fast (< 5 seconds ideal)
- Use async/await for I/O operations
- Consider timeouts for external API calls
- Cache frequently accessed data
Testing
- Test tools independently of the call system
- Verify error handling for all edge cases
- Test with invalid arguments
- Test concurrent tool executions
Monitoring
Key Metrics
- Tool Invocation Count: How often each tool is called
- Tool Success Rate: Percentage of successful executions
- Tool Latency: Time taken for tool execution
- Error Distribution: Breakdown of error types
Logging
All tool operations are extensively logged:
log_event(logging.INFO, "tool_executor_execute_started", tool_name=tool_name)
log_event(logging.INFO, "tool_executor_execute_finished", ok=result.ok)
log_event(logging.ERROR, "tool_execution_failed", error=str(e))
Next Steps
- Learn about Call Lifecycle to understand when tools execute
- Explore Multi-Tenancy for per-tenant tool configuration
- Review Instructions to guide the AI on when to use tools