Make your first API call with the OpenAI Python SDK
This quickstart guide will help you make your first API call using the OpenAI Python SDK. We’ll cover both the Responses API (recommended) and the Chat Completions API.
# Quickstart: make a single Responses API call with the synchronous client.
import os

from openai import OpenAI

client = OpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)

response = client.responses.create(
    model="gpt-5.2",
    instructions="You are a coding assistant that talks like a pirate.",
    input="How do I check if a Python object is an instance of a class?",
)

# output_text is a convenience accessor for the concatenated text output.
print(response.output_text)
The api_key parameter is optional. If not provided, the client automatically reads from the OPENAI_API_KEY environment variable.
# The same pirate example using the Chat Completions API instead of Responses.
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5.2",
    messages=[
        # "developer" messages set behavior, analogous to `instructions`.
        {"role": "developer", "content": "Talk like a pirate."},
        {
            "role": "user",
            "content": "How do I check if a Python object is an instance of a class?",
        },
    ],
)

# Chat Completions returns a list of choices; take the first one's message text.
print(completion.choices[0].message.content)
# Multi-turn conversation: prior assistant/user turns are replayed in `messages`
# so the model has the full conversational context.
# NOTE: assumes `client` was created as in the previous example.
completion = client.chat.completions.create(
    model="gpt-5.2",
    messages=[
        {"role": "developer", "content": "You are a helpful Python tutor."},
        {"role": "user", "content": "What is a list comprehension?"},
        {"role": "assistant", "content": "A list comprehension is a concise way to create lists..."},
        {"role": "user", "content": "Can you show me an example?"},
    ],
)

print(completion.choices[0].message.content)
Stream responses in real-time for a better user experience:
# Streaming: with stream=True the call returns an iterator of server-sent
# events instead of a single response object.
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-5.2",
    input="Write a one-sentence bedtime story about a unicorn.",
    stream=True,
)

# Each event is emitted as it arrives; inspect event types to extract text deltas.
for event in stream:
    print(event)
For async applications, use AsyncOpenAI instead of OpenAI:
# Async variant: AsyncOpenAI mirrors the sync client, but every API method
# is a coroutine and must be awaited.
import asyncio
import os

from openai import AsyncOpenAI

client = AsyncOpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)


async def main() -> None:
    response = await client.responses.create(
        model="gpt-5.2",
        input="Explain disestablishmentarianism to a smart five year old.",
    )
    print(response.output_text)


# asyncio.run creates an event loop, runs main() to completion, and closes the loop.
asyncio.run(main())
The async client provides identical functionality to the sync client, but all methods use await.
# Error handling: catch the SDK's exception types from most specific to most
# general. APIStatusError is the base for non-2xx HTTP responses, so it goes last.
import openai
from openai import OpenAI

client = OpenAI()

try:
    response = client.responses.create(
        model="gpt-5.2",
        input="Hello, world!",
    )
    print(response.output_text)
except openai.APIConnectionError as e:
    # Network-level failure: no HTTP response was received at all.
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
except openai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
# Timeouts: a client-wide default can be overridden per request via with_options.
from openai import OpenAI

# Set default timeout for all requests (default is 10 minutes)
client = OpenAI(timeout=20.0)  # 20 seconds

# Override per-request
client.with_options(timeout=5.0).responses.create(
    model="gpt-5.2",
    input="Quick response please!",
)