# File: igny8/backend/igny8_core/ai/settings.py
"""
AI Settings - Centralized model configurations and limits
"""
from typing import Dict, Any
def _chat_config(model: str, max_tokens: int, temperature: float = 0.7) -> Dict[str, Any]:
    """Build a JSON-mode chat-completion config (fresh dicts each call)."""
    return {
        "model": model,
        "max_tokens": max_tokens,
        "temperature": temperature,
        # JSON mode: forces the model to emit a single JSON object.
        "response_format": {"type": "json_object"},
    }


# Per-function model configurations and limits.
MODEL_CONFIG: Dict[str, Dict[str, Any]] = {
    "auto_cluster": _chat_config("gpt-4o-mini", 3000),
    "generate_ideas": _chat_config("gpt-4.1", 4000),
    "generate_content": _chat_config("gpt-4.1", 8000),
    # Image generation uses a different parameter set (no tokens/temperature).
    "generate_images": {
        "model": "dall-e-3",
        "size": "1024x1024",
        "provider": "openai",
    },
    "extract_image_prompts": _chat_config("gpt-4o-mini", 1000),
    "generate_image_prompts": _chat_config("gpt-4o-mini", 2000),
}
# Legacy function names mapped to their canonical MODEL_CONFIG keys
# (kept for backward compatibility with older callers).
FUNCTION_ALIASES: Dict[str, str] = dict(
    cluster_keywords="auto_cluster",
    auto_cluster_keywords="auto_cluster",
    auto_generate_ideas="generate_ideas",
    auto_generate_content="generate_content",
    auto_generate_images="generate_images",
)
def get_model_config(function_name: str, account=None) -> Dict[str, Any]:
    """
    Resolve the model configuration for an AI function.

    Looks up the per-function defaults in MODEL_CONFIG (resolving legacy
    names through FUNCTION_ALIASES first). When an account is supplied,
    the model name may be overridden by the account's active OpenAI
    IntegrationSettings entry — but only if that model is listed in
    MODEL_RATES (so we never bill against an unknown model). Any failure
    while reading settings is logged and the defaults are used; the
    override is strictly best-effort and never raises to the caller.

    Args:
        function_name: AI function name (e.g. 'auto_cluster',
            'generate_ideas'), or a legacy alias.
        account: Optional account object used to look up the account's
            IntegrationSettings model override.

    Returns:
        Dict with at least 'model', 'max_tokens', 'temperature' and
        'response_format' keys. Unknown function names yield the
        global defaults.
    """
    # Resolve legacy aliases to canonical function names.
    actual_name = FUNCTION_ALIASES.get(function_name, function_name)
    # Shallow copy so callers may mutate the result without touching MODEL_CONFIG.
    config = MODEL_CONFIG.get(actual_name, {}).copy()

    if account is not None:
        try:
            # Imported lazily to avoid a hard Django dependency at module import time.
            from igny8_core.modules.system.models import IntegrationSettings

            openai_settings = IntegrationSettings.objects.filter(
                integration_type='openai',
                account=account,
                is_active=True,
            ).first()

            model_from_settings = None
            if openai_settings and openai_settings.config:
                model_from_settings = openai_settings.config.get('model')

            if model_from_settings:
                # Only accept models we have pricing for.
                from igny8_core.utils.ai_processor import MODEL_RATES
                if model_from_settings in MODEL_RATES:
                    config['model'] = model_from_settings
        except Exception:
            # Best-effort override: log (with traceback) and fall back to defaults.
            # Plain message + exc_info instead of an eagerly-formatted f-string.
            import logging
            logging.getLogger(__name__).warning(
                "Could not load model from IntegrationSettings", exc_info=True
            )

    # Fill in anything the per-function config did not specify.
    default_config = {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": None,
    }
    return {**default_config, **config}
def get_model(function_name: str) -> str:
    """Return the resolved model name for *function_name*."""
    return get_model_config(function_name).get("model", "gpt-4.1")
def get_max_tokens(function_name: str) -> int:
    """Return the resolved max-token limit for *function_name*."""
    return get_model_config(function_name).get("max_tokens", 4000)
def get_temperature(function_name: str) -> float:
    """Return the resolved sampling temperature for *function_name*."""
    return get_model_config(function_name).get("temperature", 0.7)