Step 2: Remove MODEL_CONFIG and update get_model_config() to use IntegrationSettings only

- Remove MODEL_CONFIG dict with hardcoded defaults
- Update get_model_config() to require account parameter
- Remove default_config fallback
- Remove unused helper functions (get_model, get_max_tokens, get_temperature)
- Fix generate_images.py to pass account to get_model_config()
- Raise ValueError with clear messages when IntegrationSettings not configured
This commit is contained in:
IGNY8 VPS (Salman)
2025-11-16 09:17:17 +00:00
parent 60ffc12e8c
commit 6044fab57d
2 changed files with 73 additions and 96 deletions

View File

@@ -122,8 +122,10 @@ class GenerateImagesFunction(BaseAIFunction):
             }
         )
-        # Get model config
-        model_config = get_model_config('extract_image_prompts')
+        # Get model config (requires account)
+        if not account_obj:
+            raise ValueError("Account is required for model configuration")
+        model_config = get_model_config('extract_image_prompts', account=account_obj)
         # Call AI to extract prompts using centralized request handler
         result = ai_core.run_ai_request(

View File

@@ -1,46 +1,11 @@
 """
 AI Settings - Centralized model configurations and limits
+Uses IntegrationSettings only - no hardcoded defaults or fallbacks.
 """
 from typing import Dict, Any
+import logging
 
-# Model configurations for each AI function
-MODEL_CONFIG = {
-    "auto_cluster": {
-        "model": "gpt-4o-mini",
-        "max_tokens": 3000,
-        "temperature": 0.7,
-        "response_format": {"type": "json_object"},  # Auto-enabled for JSON mode models
-    },
-    "generate_ideas": {
-        "model": "gpt-4.1",
-        "max_tokens": 4000,
-        "temperature": 0.7,
-        "response_format": {"type": "json_object"},  # JSON output
-    },
-    "generate_content": {
-        "model": "gpt-4.1",
-        "max_tokens": 8000,
-        "temperature": 0.7,
-        "response_format": {"type": "json_object"},  # JSON output
-    },
-    "generate_images": {
-        "model": "dall-e-3",
-        "size": "1024x1024",
-        "provider": "openai",
-    },
-    "extract_image_prompts": {
-        "model": "gpt-4o-mini",
-        "max_tokens": 1000,
-        "temperature": 0.7,
-        "response_format": {"type": "json_object"},
-    },
-    "generate_image_prompts": {
-        "model": "gpt-4o-mini",
-        "max_tokens": 2000,
-        "temperature": 0.7,
-        "response_format": {"type": "json_object"},
-    },
-}
+logger = logging.getLogger(__name__)
 
 # Function name aliases (for backward compatibility)
 FUNCTION_ALIASES = {
@@ -52,71 +17,81 @@ FUNCTION_ALIASES = {
 }
 
-def get_model_config(function_name: str, account=None) -> Dict[str, Any]:
+def get_model_config(function_name: str, account) -> Dict[str, Any]:
     """
-    Get model configuration for an AI function.
-    Reads model from IntegrationSettings if account is provided, otherwise uses defaults.
+    Get model configuration from IntegrationSettings only.
+    No fallbacks - account must have IntegrationSettings configured.
 
     Args:
-        function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')
-        account: Optional account object to read model from IntegrationSettings
+        function_name: Name of the AI function
+        account: Account instance (required)
 
     Returns:
-        Dict with model, max_tokens, temperature, etc.
+        dict: Model configuration with 'model', 'max_tokens', 'temperature'
+
+    Raises:
+        ValueError: If account not provided or IntegrationSettings not configured
     """
-    # Check aliases first
+    if not account:
+        raise ValueError("Account is required for model configuration")
+
+    # Resolve function alias
     actual_name = FUNCTION_ALIASES.get(function_name, function_name)
 
-    # Get base config
-    config = MODEL_CONFIG.get(actual_name, {}).copy()
-
-    # Try to get model from IntegrationSettings if account is provided
-    model_from_settings = None
-    if account:
-        try:
-            from igny8_core.modules.system.models import IntegrationSettings
-            openai_settings = IntegrationSettings.objects.filter(
-                integration_type='openai',
-                account=account,
-                is_active=True
-            ).first()
-            if openai_settings and openai_settings.config:
-                model_from_settings = openai_settings.config.get('model')
-                if model_from_settings:
-                    # Validate model is in our supported list
-                    from igny8_core.utils.ai_processor import MODEL_RATES
-                    if model_from_settings in MODEL_RATES:
-                        config['model'] = model_from_settings
-        except Exception as e:
-            import logging
-            logger = logging.getLogger(__name__)
-            logger.warning(f"Could not load model from IntegrationSettings: {e}", exc_info=True)
-
-    # Merge with defaults
-    default_config = {
-        "model": "gpt-4.1",
-        "max_tokens": 4000,
-        "temperature": 0.7,
-        "response_format": None,
-    }
-    return {**default_config, **config}
-
-def get_model(function_name: str) -> str:
-    """Get model name for function"""
-    config = get_model_config(function_name)
-    return config.get("model", "gpt-4.1")
-
-def get_max_tokens(function_name: str) -> int:
-    """Get max tokens for function"""
-    config = get_model_config(function_name)
-    return config.get("max_tokens", 4000)
-
-def get_temperature(function_name: str) -> float:
-    """Get temperature for function"""
-    config = get_model_config(function_name)
-    return config.get("temperature", 0.7)
+    # Get IntegrationSettings for OpenAI
+    try:
+        from igny8_core.modules.system.models import IntegrationSettings
+        integration_settings = IntegrationSettings.objects.get(
+            integration_type='openai',
+            account=account,
+            is_active=True
+        )
+    except IntegrationSettings.DoesNotExist:
+        raise ValueError(
+            f"OpenAI IntegrationSettings not configured for account {account.id}. "
+            f"Please configure OpenAI settings in the integration page."
+        )
+
+    config = integration_settings.config or {}
+
+    # Get model from config
+    model = config.get('model')
+    if not model:
+        raise ValueError(
+            f"Model not configured in IntegrationSettings for account {account.id}. "
+            f"Please set 'model' in OpenAI integration settings."
+        )
+
+    # Validate model is in our supported list (optional validation)
+    try:
+        from igny8_core.utils.ai_processor import MODEL_RATES
+        if model not in MODEL_RATES:
+            logger.warning(
+                f"Model '{model}' for account {account.id} is not in supported list. "
+                f"Supported models: {list(MODEL_RATES.keys())}"
+            )
+    except ImportError:
+        # MODEL_RATES not available - skip validation
+        pass
+
+    # Get max_tokens and temperature from config (with reasonable defaults for API)
+    max_tokens = config.get('max_tokens', 4000)  # Reasonable default for API limits
+    temperature = config.get('temperature', 0.7)  # Reasonable default
+
+    # Build response format based on model (JSON mode for supported models)
+    response_format = None
+    try:
+        from igny8_core.ai.constants import JSON_MODE_MODELS
+        if model in JSON_MODE_MODELS:
+            response_format = {"type": "json_object"}
+    except ImportError:
+        # JSON_MODE_MODELS not available - skip
+        pass
+
+    return {
+        'model': model,
+        'max_tokens': max_tokens,
+        'temperature': temperature,
+        'response_format': response_format,
+    }