def create_llm_from_provider(
    provider_type: str,
    model_id: str,
    api_key: Optional[str] = None,
    base_url: Optional[str] = None,
    provider_config: Optional[Dict[str, Any]] = None,
    temperature: float = DEFAULT_TEMPERATURE,
    max_retries: int = DEFAULT_MAX_RETRIES,
) -> BaseChatModel:
    """Create a LangChain chat model from provider configuration.

    Args:
        provider_type: Type of provider (anthropic, openai, google, custom).
        model_id: Model identifier (e.g., 'claude-3-opus-20240229', 'gpt-4').
        api_key: API key for the provider (decrypted).
        base_url: Optional base URL for custom endpoints or proxies.
        provider_config: Additional provider-specific configuration.
        temperature: LLM temperature setting.
        max_retries: Maximum retry attempts for API calls.

    Returns:
        Configured BaseChatModel instance.

    Raises:
        ValueError: If provider type is unsupported, required dependencies
            are missing, or a mandatory API key / base URL is absent.
    """
    provider_config = provider_config or {}
    if provider_type == "anthropic":
        return _create_anthropic_llm(model_id, api_key, base_url, temperature, max_retries)
    if provider_type == "openai":
        return _create_openai_llm(
            model_id, api_key, base_url, provider_config, temperature, max_retries
        )
    if provider_type == "google":
        return _create_google_llm(model_id, api_key, temperature, max_retries)
    if provider_type == "custom":
        return _create_custom_llm(
            model_id, api_key, base_url, provider_config, temperature, max_retries
        )
    raise ValueError(f"Unsupported provider type: {provider_type}")


def _create_anthropic_llm(
    model_id: str,
    api_key: Optional[str],
    base_url: Optional[str],
    temperature: float,
    max_retries: int,
) -> BaseChatModel:
    """Build a ChatAnthropic model, validating the dependency and API key."""
    if not _anthropic_available:
        raise ValueError(
            "Anthropic provider requires langchain-anthropic package. "
            "Install with: pip install langchain-anthropic"
        )
    if not api_key:
        raise ValueError("Anthropic provider requires an API key")
    kwargs: Dict[str, Any] = {
        "model": model_id,
        "api_key": api_key,
        "temperature": temperature,
        "max_retries": max_retries,
    }
    if base_url:
        kwargs["base_url"] = base_url
    # Lazy %-formatting so the string is only built when INFO is enabled.
    logger.info("Creating Anthropic LLM with model: %s", model_id)
    return ChatAnthropic(**kwargs)


def _create_openai_llm(
    model_id: str,
    api_key: Optional[str],
    base_url: Optional[str],
    provider_config: Dict[str, Any],
    temperature: float,
    max_retries: int,
) -> BaseChatModel:
    """Build a ChatOpenAI model, validating the dependency and API key."""
    if not _openai_available:
        raise ValueError(
            "OpenAI provider requires langchain-openai package. "
            "Install with: pip install langchain-openai"
        )
    if not api_key:
        raise ValueError("OpenAI provider requires an API key")
    kwargs: Dict[str, Any] = {
        "model": model_id,
        "api_key": api_key,
        "temperature": temperature,
        "max_retries": max_retries,
    }
    if base_url:
        kwargs["base_url"] = base_url
    # Handle organization ID if provided
    if provider_config.get("organization_id"):
        kwargs["organization"] = provider_config["organization_id"]
    logger.info("Creating OpenAI LLM with model: %s", model_id)
    return ChatOpenAI(**kwargs)


def _create_google_llm(
    model_id: str,
    api_key: Optional[str],
    temperature: float,
    max_retries: int,
) -> BaseChatModel:
    """Build a ChatGoogleGenerativeAI model, validating dependency and key.

    Note: base_url is not supported by this provider and is intentionally
    not accepted here.
    """
    if not _google_available:
        raise ValueError(
            "Google provider requires langchain-google-genai package. "
            "Install with: pip install langchain-google-genai"
        )
    if not api_key:
        raise ValueError("Google provider requires an API key")
    logger.info("Creating Google LLM with model: %s", model_id)
    return ChatGoogleGenerativeAI(
        model=model_id,
        google_api_key=api_key,
        temperature=temperature,
        max_retries=max_retries,
    )


def _create_custom_llm(
    model_id: str,
    api_key: Optional[str],
    base_url: Optional[str],
    provider_config: Dict[str, Any],
    temperature: float,
    max_retries: int,
) -> BaseChatModel:
    """Build a chat model for a custom (self-hosted or proxied) endpoint.

    Uses the OpenAI client when provider_config marks the endpoint as
    OpenAI-compatible; otherwise defaults to the Ollama-compatible API.
    """
    # Custom provider uses Ollama-compatible or OpenAI-compatible API
    if not base_url:
        raise ValueError("Custom provider requires a base URL")
    auth_type = provider_config.get("auth_type", "none")
    # Check if it's OpenAI-compatible (has /v1/chat/completions endpoint)
    is_openai_compatible = provider_config.get("openai_compatible", False)
    if is_openai_compatible:
        # Fix: an explicitly OpenAI-compatible endpoint previously fell back
        # silently to the Ollama client when langchain-openai was missing,
        # producing a confusing downstream failure. Fail fast instead, matching
        # the missing-dependency errors raised by the other providers.
        if not _openai_available:
            raise ValueError(
                "OpenAI-compatible custom provider requires langchain-openai "
                "package. Install with: pip install langchain-openai"
            )
        kwargs: Dict[str, Any] = {
            "model": model_id,
            "base_url": base_url,
            "temperature": temperature,
            "max_retries": max_retries,
        }
        if auth_type == "bearer" and api_key:
            kwargs["api_key"] = api_key
        elif auth_type == "none":
            # Some local models don't need auth, but the OpenAI client still
            # expects a key, so a placeholder is supplied.
            kwargs["api_key"] = "not-needed"
        logger.info(
            "Creating OpenAI-compatible LLM at %s with model: %s", base_url, model_id
        )
        return ChatOpenAI(**kwargs)
    # Default to Ollama-compatible API (no auth handling needed).
    logger.info(
        "Creating Ollama-compatible LLM at %s with model: %s", base_url, model_id
    )
    return ChatOllama(
        model=model_id,
        base_url=base_url,
        temperature=temperature,
        max_retries=max_retries,
    )