curl --request GET \
--url https://api.example.com/v1/models/{model_id}
{
"id": "<string>",
"provider": "<string>",
"created_at": "<string>",
"display_name": "<string>",
"description": "<string>",
"version": "<string>",
"capabilities": {
"text": true,
"vision": true,
"audio": true,
"image_generation": true,
"tools": true,
"structured_output": true,
"streaming": true,
"thinking": true,
"input_token_limit": 123,
"output_token_limit": 123
},
"defaults": {
"temperature": 123,
"max_output_tokens": 123,
"top_p": 123,
"top_k": 123
},
"provider_declared_generation_methods": [
"<string>"
],
"provider_info": {}
}
Retrieve detailed information about a specific model
curl --request GET \
--url https://api.example.com/v1/models/{model_id}
{
"id": "<string>",
"provider": "<string>",
"created_at": "<string>",
"display_name": "<string>",
"description": "<string>",
"version": "<string>",
"capabilities": {
"text": true,
"vision": true,
"audio": true,
"image_generation": true,
"tools": true,
"structured_output": true,
"streaming": true,
"thinking": true,
"input_token_limit": 123,
"output_token_limit": 123
},
"defaults": {
"temperature": 123,
"max_output_tokens": 123,
"top_p": 123,
"top_k": 123
},
"provider_declared_generation_methods": [
"<string>"
],
"provider_info": {}
}
client.models.retrieve(modelId)
"openai/gpt-4", "anthropic/claude-3-5-sonnet-20241022")."openai/gpt-4").openai, anthropic, google, xai, mistral, groq, fireworks, deepseek.Show capabilities object
// Fetch a model's metadata and print its basic identity fields.
const { id, provider, display_name } = await client.models.retrieve("openai/gpt-4");
console.log(`Model: ${id}`);
console.log(`Provider: ${provider}`);
console.log(`Display Name: ${display_name}`);
// Inspect a model's capability flags and token limits.
const claude = await client.models.retrieve("anthropic/claude-3-5-sonnet-20241022");
const caps = claude.capabilities;
if (caps?.vision) {
  console.log("This model supports vision!");
}
if (caps?.tools) {
  console.log("This model supports function calling");
}
console.log(`Max input tokens: ${caps?.input_token_limit}`);
console.log(`Max output tokens: ${caps?.output_token_limit}`);
// Read the provider-declared default sampling parameters, when present.
const gpt4 = await client.models.retrieve("openai/gpt-4");
const { defaults } = gpt4;
if (defaults) {
  console.log(`Default temperature: ${defaults.temperature}`);
  console.log(`Default max tokens: ${defaults.max_output_tokens}`);
}
{
"id": "openai/gpt-4",
"provider": "openai",
"created_at": "2023-06-27T00:00:00Z",
"display_name": "GPT-4",
"description": "Most capable GPT-4 model for complex tasks",
"version": "gpt-4-0613",
"capabilities": {
"text": true,
"vision": false,
"audio": false,
"image_generation": false,
"tools": true,
"structured_output": true,
"streaming": true,
"thinking": false,
"input_token_limit": 8192,
"output_token_limit": 4096
},
"defaults": {
"temperature": 1.0,
"max_output_tokens": 4096,
"top_p": 1.0
},
"provider_declared_generation_methods": [
"generateContent",
"streamGenerateContent"
],
"provider_info": {
"openai_specific_field": "value"
}
}
{
"error": {
"message": "Model not found",
"type": "invalid_request_error",
"code": "model_not_found"
}
}
{
"error": {
"message": "Invalid API key",
"type": "invalid_request_error",
"code": "invalid_api_key"
}
}