Overview
Function calling (also called tool calling) allows LLMs to interact with external functions and APIs. LiteLLM standardizes function calling across 100+ providers using the OpenAI format.
Basic Usage
from litellm import completion
# JSON-Schema tool definition in the OpenAI function-calling format.
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "City name, e.g. San Francisco",
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                },
            },
            "required": ["location"],
        },
    },
}
tools = [weather_tool]

response = completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
)

# The model may answer directly instead of calling a tool, so check first.
message = response.choices[0].message
if message.tool_calls:
    tool_call = message.tool_calls[0]
    print(f"Function: {tool_call.function.name}")
    print(f"Arguments: {tool_call.function.arguments}")
Function Calling Parameters
`tools`: A list of tool definitions. Each tool must have:
type: Always "function"
function: Object with name, description, and parameters
`tool_choice`: Controls which tools the model can call:
"auto" (default): Model decides whether to call a function
"none": Model will not call any functions
{"type": "function", "function": {"name": "function_name"}}: Force specific function
"required": Model must call at least one function
`parallel_tool_calls`: Whether to allow multiple function calls in a single response. Default: True
# Anatomy of a tool definition (OpenAI function-calling schema).
_parameters = {  # Required: JSON Schema describing the arguments
    "type": "object",
    "properties": {
        "param1": {
            "type": "string",
            "description": "Parameter description",
        },
        "param2": {
            "type": "number",
            "description": "Another parameter",
        },
    },
    "required": ["param1"],  # Optional: list of mandatory parameters
}

tool = {
    "type": "function",
    "function": {
        "name": "function_name",        # Required: function identifier
        "description": "What it does",  # Required: helps the model decide when to call
        "parameters": _parameters,
    },
}
When a function is called, the response contains tool calls:
class Function:
    """The function payload of a tool call."""

    name: str       # Function name
    arguments: str  # JSON-encoded string of arguments


class ChatCompletionMessageToolCall:
    """A single tool call returned in an assistant message.

    ``Function`` must be defined first: class-body annotations are evaluated
    eagerly, so the original ordering (this class before ``Function``) raised
    ``NameError`` on the ``function: Function`` line at import time.
    """

    id: str    # Unique call ID
    type: str  # Always "function"
    function: Function
Examples
Single Function Call
from litellm import completion
import json
# Define function
def get_weather(location: str, unit: str = "fahrenheit") -> dict:
    """Stub implementation: return a canned weather report for *location*."""
    report = {
        "location": location,
        "temperature": 72,
        "unit": unit,
        "forecast": "Sunny",
    }
    return report
# Tool definition matching the get_weather() signature above.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]
# Ask the model; it should decide to call get_weather.
response = completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=tools,
)

# If the model requested a tool call, decode its arguments and dispatch.
message = response.choices[0].message
if message.tool_calls:
    call = message.tool_calls[0]
    name = call.function.name
    args = json.loads(call.function.arguments)  # arguments arrive as a JSON string
    if name == "get_weather":
        result = get_weather(**args)
        print(f"Weather: {result}")
Complete Conversation Flow
from litellm import completion
import json
def get_weather(location: str, unit: str = "fahrenheit") -> dict:
    """Stub weather lookup: ignores *location* and returns canned data."""
    forecast = {"temperature": 72, "unit": unit, "forecast": "Sunny"}
    return forecast
# Single weather tool used by the two-round conversation below.
_weather_params = {
    "type": "object",
    "properties": {
        "location": {"type": "string"},
        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
    },
    "required": ["location"],
}

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": _weather_params,
        },
    }
]
messages = [{"role": "user", "content": "What's the weather in Paris?"}]

# Round 1: the model decides to call the function.
response = completion(model="gpt-4", messages=messages, tools=tools)
assistant_message = response.choices[0].message
messages.append(assistant_message)  # keep the assistant turn in the history

# Execute each requested call and append its result as a "tool" message.
if assistant_message.tool_calls:
    for call in assistant_message.tool_calls:
        args = json.loads(call.function.arguments)
        result = get_weather(**args)
        messages.append(
            {
                "role": "tool",
                "tool_call_id": call.id,
                "content": json.dumps(result),
            }
        )

# Round 2: the model composes a final answer from the tool result.
final_response = completion(model="gpt-4", messages=messages, tools=tools)
print(final_response.choices[0].message.content)
# "The weather in Paris is 72°F and sunny."
Multiple Functions
from litellm import completion
import json
def _simple_tool(name: str, description: str, properties: dict) -> dict:
    """Build a function tool whose only required parameter is "location"."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": ["location"],
            },
        },
    }


# Three independent tools the model can mix and match per request.
tools = [
    _simple_tool(
        "get_weather",
        "Get weather for a location",
        {"location": {"type": "string"}},
    ),
    _simple_tool(
        "get_time",
        "Get current time for a location",
        {"location": {"type": "string"}},
    ),
    _simple_tool(
        "search_restaurants",
        "Search for restaurants",
        {"location": {"type": "string"}, "cuisine": {"type": "string"}},
    ),
]
response = completion(
    model="gpt-4",
    messages=[
        {
            "role": "user",
            "content": "What's the weather in Tokyo and find me Italian restaurants there?",
        }
    ],
    tools=tools,
)

# The model might call multiple functions — or none at all. `tool_calls`
# is None when the model answers directly, so guard before iterating (the
# original iterated unconditionally and raised TypeError in that case).
for tool_call in response.choices[0].message.tool_calls or []:
    print(f"Calling: {tool_call.function.name}")
    print(f"Arguments: {tool_call.function.arguments}")
Forcing Function Call
from litellm import completion
# A single calculator tool taking one required string argument.
tools = [
    {
        "type": "function",
        "function": {
            "name": "calculate",
            "description": "Perform calculation",
            "parameters": {
                "type": "object",
                "properties": {"expression": {"type": "string"}},
                "required": ["expression"],
            },
        },
    }
]
# tool_choice naming a specific function forces the model to call it.
forced_choice = {
    "type": "function",
    "function": {"name": "calculate"},
}
response = completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "What is 25 * 4?"}],
    tools=tools,
    tool_choice=forced_choice,
)
print(response.choices[0].message.tool_calls[0].function.arguments)
# {"expression": "25 * 4"}
Parallel Function Calls
from litellm import completion
import json
# Minimal weather tool for the parallel-calls demo.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }
]
response = completion(
    model="gpt-4",
    messages=[
        {
            "role": "user",
            "content": "What's the weather in London, Paris, and Tokyo?",
        }
    ],
    tools=tools,
    parallel_tool_calls=True,  # allow several calls in one response
)

# Expect one call per city, but guard anyway: `tool_calls` is None when the
# model answers without calling the tool (the original raised TypeError).
for tool_call in response.choices[0].message.tool_calls or []:
    args = json.loads(tool_call.function.arguments)
    print(f"Getting weather for {args['location']}")
Function Calling with Streaming
from litellm import completion
import json
# Web-search tool used by the streaming example.
tools = [
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search the web",
            "parameters": {
                "type": "object",
                "properties": {"query": {"type": "string"}},
                "required": ["query"],
            },
        },
    }
]
# Stream the response; the tool call's name and arguments arrive in pieces.
response = completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "Search for Python tutorials"}],
    tools=tools,
    stream=True,
)

# Accumulate streamed fragments (assumes a single tool call per response;
# parallel calls would need grouping by tool_call.index — TODO confirm).
function_name = ""
function_args = ""
for chunk in response:
    delta = chunk.choices[0].delta
    for partial_call in delta.tool_calls or []:
        name_piece = partial_call.function.name
        args_piece = partial_call.function.arguments
        if name_piece:
            function_name += name_piece
        if args_piece:
            function_args += args_piece
    # finish_reason == "tool_calls" marks the end of the streamed call.
    if chunk.choices[0].finish_reason == "tool_calls":
        print(f"Function: {function_name}")
        print(f"Arguments: {json.loads(function_args)}")
Provider-Specific Examples
OpenAI
Anthropic
Google
Azure OpenAI
from litellm import completion
# Skeleton snippet: the `...` tokens are placeholders, not runnable code.
# NOTE(review): as written, `{ ... }` is literally a set containing Ellipsis
# and `[ ... ]` a list containing Ellipsis — substitute a real function
# schema and a real message list before running.
tools = [{ "type" : "function" , "function" : { ... }}]
response = completion(
model = "gpt-4" ,
messages = [ ... ],
tools = tools
)
Advanced Usage
Function with Complex Parameters
# A tool whose schema nests objects and arrays inside the parameters.
_guests_schema = {
    "type": "object",
    "properties": {
        "adults": {"type": "integer"},
        "children": {"type": "integer"},
    },
    "required": ["adults"],
}

_booking_properties = {
    "hotel_name": {"type": "string"},
    "check_in": {
        "type": "string",
        "description": "Check-in date (YYYY-MM-DD)",
    },
    "check_out": {
        "type": "string",
        "description": "Check-out date (YYYY-MM-DD)",
    },
    "guests": _guests_schema,
    "preferences": {
        "type": "array",
        "items": {"type": "string"},
        "description": "Room preferences",
    },
}

tools = [
    {
        "type": "function",
        "function": {
            "name": "book_hotel",
            "description": "Book a hotel room",
            "parameters": {
                "type": "object",
                "properties": _booking_properties,
                "required": ["hotel_name", "check_in", "check_out"],
            },
        },
    }
]
Error Handling
from litellm import completion
from litellm.exceptions import BadRequestError
import json
# NOTE(review): assumes a `tools` list defined as in the earlier examples.
try:
    response = completion(
        model="gpt-4",
        messages=[{"role": "user", "content": "Get weather"}],
        tools=tools,
    )
    tool_calls = response.choices[0].message.tool_calls
    if tool_calls:
        for tool_call in tool_calls:
            # Arguments are model-generated JSON — treat them as untrusted.
            try:
                args = json.loads(tool_call.function.arguments)
                # Execute function
            except json.JSONDecodeError:
                print("Invalid function arguments")
            except Exception as e:
                print(f"Function execution failed: {e}")
except BadRequestError as e:
    print(f"Invalid request: {e}")
Function Calling with Pydantic
from litellm import completion
from pydantic import BaseModel, Field
import json
class WeatherParams(BaseModel):
    """Arguments accepted by the get_weather tool."""

    location: str = Field(description="City name")
    unit: str = Field(default="fahrenheit", description="Temperature unit")


# model_json_schema() emits a JSON Schema usable directly as `parameters`.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather",
            "parameters": WeatherParams.model_json_schema(),
        },
    }
]

response = completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "Weather in NYC?"}],
    tools=tools,
)

message = response.choices[0].message
if message.tool_calls:
    first_call = message.tool_calls[0]
    # Let Pydantic both parse and validate the JSON argument string.
    params = WeatherParams.model_validate_json(first_call.function.arguments)
    print(f"Location: {params.location}, Unit: {params.unit}")
Best Practices
Clear descriptions : Write detailed function and parameter descriptions
Use enums : Constrain parameter values when possible
Handle errors : Always validate function arguments before execution
Complete the loop : Send function results back to the model
Consider parallel calls : Enable for independent operations
Set tool_choice wisely: Use "auto" for flexibility; force a specific function only when needed
Troubleshooting
Function Not Called
Ensure the function description clearly states when to use it
Try setting tool_choice="required" to force a function call
Check if the model supports function calling
Invalid Arguments
Validate the JSON schema in your tool definition
Add detailed descriptions for each parameter
Use json.loads() with error handling
Multiple Unwanted Calls
Set parallel_tool_calls=False to limit to one call
Be specific in function descriptions about when to use each tool