LLM Gateway provides a unified API interface that allows you to access models from multiple providers (OpenAI, Anthropic, Google, AWS Bedrock, and more) using a single, consistent OpenAI-compatible API format. Instead of learning different APIs for each provider, you can use the familiar OpenAI SDK or API format for all your LLM requests.
# Send a chat-completion request through LLM Gateway using plain curl.
curl https://api.llmgateway.io/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{ "model": "anthropic/claude-3-5-sonnet-20241022", "messages": [ { "role": "user", "content": "What is the capital of France?" } ] }'
from openai import OpenAI

# Point the official OpenAI SDK at the LLM Gateway endpoint;
# the gateway accepts the standard OpenAI request format.
client = OpenAI(
    base_url="https://api.llmgateway.io/v1",
    api_key="YOUR_API_KEY",
)

# Use Anthropic's Claude
response = client.chat.completions.create(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[
        {"role": "user", "content": "What is the capital of France?"},
    ],
)

print(response.choices[0].message.content)
import OpenAI from 'openai';

// Point the official OpenAI SDK at the LLM Gateway endpoint;
// the gateway accepts the standard OpenAI request format.
const client = new OpenAI({
  baseURL: 'https://api.llmgateway.io/v1',
  apiKey: process.env.LLMGATEWAY_API_KEY,
});

// Use Google's Gemini
const response = await client.chat.completions.create({
  model: 'google-ai-studio/gemini-2.0-flash-001',
  messages: [
    { role: 'user', content: 'What is the capital of France?' },
  ],
});

console.log(response.choices[0].message.content);
# Stream tokens as they are generated instead of waiting for the full reply.
for chunk in client.chat.completions.create(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "Count to 10"}],
    stream=True,
):
    # delta.content is None on some chunks (e.g. the final stop chunk);
    # fall back to "" so the example doesn't print a literal "None".
    print(chunk.choices[0].delta.content or "", end="")