# Files
# igny8/backend/igny8_core/ai/settings.py
#
# 116 lines
# 4.0 KiB
# Python
"""
AI Settings - Centralized model configurations and limits
Uses AISettings (system defaults) with optional per-account overrides via AccountSettings.
API keys are stored in IntegrationProvider.
"""
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
# Function name aliases (for backward compatibility).
# Legacy caller-facing names (left) map to the canonical function name (right).
FUNCTION_ALIASES = dict(
    cluster_keywords="auto_cluster",
    auto_cluster_keywords="auto_cluster",
    auto_generate_ideas="generate_ideas",
    auto_generate_content="generate_content",
    auto_generate_images="generate_images",
)
def get_model_config(function_name: str, account) -> Dict[str, Any]:
    """
    Get the model configuration for an AI function.

    Architecture:
        - API key: from IntegrationProvider (centralized, via ModelRegistry)
        - Model: from AIModelConfig (is_default=True)
        - Params: from AISettings with per-account AccountSettings overrides

    Args:
        function_name: Name of the AI function. Legacy names are resolved
            through FUNCTION_ALIASES.
        account: Account instance (required).

    Returns:
        dict: Model configuration with keys 'model', 'max_tokens',
        'temperature', 'response_format', and 'api_key'.

    Raises:
        ValueError: If account is not provided or settings are not configured.
    """
    if not account:
        raise ValueError("Account is required for model configuration")

    # Resolve legacy function-name aliases to the canonical name.
    # NOTE(review): actual_name is currently unused below — per-function
    # configuration lookup appears to be intended here; confirm with callers.
    actual_name = FUNCTION_ALIASES.get(function_name, function_name)

    try:
        from igny8_core.modules.system.ai_settings import AISettings
        from igny8_core.ai.model_registry import ModelRegistry

        # API key comes from the centralized IntegrationProvider.
        api_key = ModelRegistry.get_api_key('openai')
        if not api_key:
            raise ValueError(
                "Platform OpenAI API key not configured. "
                "Please configure IntegrationProvider in Django admin."
            )

        # Default text model from AIModelConfig, with a hard-coded fallback
        # so a missing default never leaves 'model' unset.
        model = ModelRegistry.get_default_model('text') or 'gpt-4o-mini'

        # System defaults with per-account overrides.
        temperature = AISettings.get_effective_temperature(account)
        max_tokens = AISettings.get_effective_max_tokens(account)

        # Prefer the model-specific output cap from AIModelConfig when present.
        try:
            from igny8_core.business.billing.models import AIModelConfig
            model_config = AIModelConfig.objects.filter(
                model_name=model,
                is_active=True
            ).first()
            if model_config and model_config.max_output_tokens:
                max_tokens = model_config.max_output_tokens
        except Exception as e:
            # Best-effort only: fall back to the AISettings value.
            logger.warning(
                "Could not load max_tokens from AIModelConfig for %s: %s",
                model, e
            )
    except ValueError:
        # Our own configuration errors (e.g. missing API key) are already
        # descriptive — do not swallow them into the generic message below.
        raise
    except Exception as e:
        logger.error(
            "Could not load OpenAI settings for account %s: %s", account.id, e
        )
        # Chain the original cause so the root failure stays debuggable.
        raise ValueError(
            f"Could not load OpenAI configuration for account {account.id}. "
            f"Please configure IntegrationProvider and AISettings."
        ) from e

    # Validate the model against the database-driven registry. This is
    # advisory only: an unsupported model logs a warning but is still used.
    try:
        if not ModelRegistry.validate_model(model):
            supported_models = [
                m.model_name
                for m in ModelRegistry.list_models(model_type='text')
            ]
            logger.warning(
                "Model '%s' for account %s is not in supported list. "
                "Supported models: %s",
                model, account.id, supported_models
            )
    except Exception:
        pass

    # Enable JSON mode only for models known to support it.
    response_format = None
    try:
        from igny8_core.ai.constants import JSON_MODE_MODELS
        if model in JSON_MODE_MODELS:
            response_format = {"type": "json_object"}
    except ImportError:
        pass

    return {
        'model': model,
        'max_tokens': max_tokens,
        'temperature': temperature,
        'response_format': response_format,
        # Fix: the docstring has always promised 'api_key', but it was
        # fetched and validated without being returned.
        'api_key': api_key,
    }