Makes an API request to list the available models. Returns a paginated response with model information. By default, returns all available base models; it can be configured to list tuned models instead.
# Request the first page, capped at 5 models.
response = client.models.list(config={'page_size': 5})
print("First page:")
for model in response.page:
    print(f" {model.display_name}")

# A present next_page_token means more results remain; fetch the
# following page by passing the token back with the same page size.
if response.next_page_token:
    next_response = client.models.list(
        config={'page_size': 5, 'page_token': response.next_page_token}
    )
    print("\nNext page:")
    for model in next_response.page:
        print(f" {model.display_name}")
# List every model, then keep only those whose name mentions "gemini"
# (case-insensitive match on the resource name).
models = client.models.list()
gemini_models = [
    model
    for model in models
    if 'gemini' in model.name.lower()
]

# Print a short summary per matching model: display name, context
# window size, and the generation methods it supports.
print("Gemini models:")
for model in gemini_models:
    print(f" {model.display_name}")
    print(f" Context: {model.input_token_limit:,} tokens")
    print(f" Methods: {', '.join(model.supported_generation_methods)}")
# Materialize the listing once: client.models.list() returns a pager,
# and iterating it in two separate comprehensions could exhaust it
# before the second filter runs. A concrete list is safely re-iterable.
models = list(client.models.list())

# Find models supporting embeddings (those advertising 'embedContent'
# among their supported generation methods).
embedding_models = [
    model
    for model in models
    if 'embedContent' in model.supported_generation_methods
]
print("Embedding models:")
for model in embedding_models:
    print(f" {model.display_name}")

# Find models with large context windows. The truthiness check guards
# against models that report no input_token_limit at all.
large_context_models = [
    model
    for model in models
    if model.input_token_limit and model.input_token_limit > 100000
]
print("\nLarge context models (>100k tokens):")
for model in large_context_models:
    print(f" {model.display_name}: {model.input_token_limit:,} tokens")
# Render a fixed-width comparison table of Gemini-family models.
models = client.models.list()
print("Model Comparison:\n")
print(f"{'Model':<30} {'Input Tokens':<15} {'Output Tokens':<15}")
print("-" * 60)
for model in models:
    # Skip anything that is not a gemini model (guard clause).
    if 'gemini' not in model.name.lower():
        continue
    # Fall back to the last path segment when no display name is set.
    name = model.display_name or model.name.split('/')[-1]
    # Token limits may be absent; show "N/A" rather than failing.
    input_limit = f"{model.input_token_limit:,}" if model.input_token_limit else "N/A"
    output_limit = f"{model.output_token_limit:,}" if model.output_token_limit else "N/A"
    print(f"{name:<30} {input_limit:<15} {output_limit:<15}")
# The returned pager transparently fetches every page as you iterate,
# so a plain for-loop walks the complete model catalog.
models = client.models.list()
for model in models:
    print(model.name)
Manual pagination (more control):
# Fetch one page of up to 10 models.
response = client.models.list(config={'page_size': 10})

# Work through the current page only.
for model in response.page:
    print(model.name)

# When a continuation token is present, request the following page
# explicitly — this gives the caller full control over paging.
if response.next_page_token:
    next_response = client.models.list(
        config={'page_token': response.next_page_token}
    )