Step 3: Remove _default_model and require model parameter in run_ai_request()

- Remove _default_model initialization and attribute
- Remove Django settings fallback for model (lines 89-90)
- Remove model loading from IntegrationSettings in _load_account_settings()
- Update run_ai_request() to require model parameter (not Optional)
- Add model validation at start of run_ai_request()
- Deprecate get_model() method (raises error with helpful message)
- Update error handling to use provided model (no fallback)
- Simplify debug logging (remove model_from_settings comparison)
This commit is contained in:
IGNY8 VPS (Salman)
2025-11-16 09:20:13 +00:00
parent 6044fab57d
commit 793b64e437

View File

@@ -40,7 +40,6 @@ class AICore:
self.account = account
self._openai_api_key = None
self._runware_api_key = None
self._default_model = None
self._load_account_settings()
def _load_account_settings(self):
@@ -57,18 +56,6 @@ class AICore:
).first()
if openai_settings and openai_settings.config:
self._openai_api_key = openai_settings.config.get('apiKey')
model = openai_settings.config.get('model')
if model:
if model in MODEL_RATES:
self._default_model = model
logger.info(f"Loaded model '{model}' from IntegrationSettings for account {self.account.id}")
else:
error_msg = f"Model '{model}' from IntegrationSettings is not in supported models list. Supported models: {list(MODEL_RATES.keys())}"
logger.error(f"[AICore] {error_msg}")
logger.error(f"[AICore] Account {self.account.id} has invalid model configuration. Please update Integration Settings.")
# Don't set _default_model, will fall back to Django settings
else:
logger.warning(f"No model configured in IntegrationSettings for account {self.account.id}, will use fallback")
# Load Runware settings
runware_settings = IntegrationSettings.objects.filter(
@@ -81,13 +68,11 @@ class AICore:
except Exception as e:
logger.warning(f"Could not load account settings: {e}", exc_info=True)
# Fallback to Django settings
# Fallback to Django settings for API keys only (no model fallback)
if not self._openai_api_key:
self._openai_api_key = getattr(settings, 'OPENAI_API_KEY', None)
if not self._runware_api_key:
self._runware_api_key = getattr(settings, 'RUNWARE_API_KEY', None)
if not self._default_model:
self._default_model = getattr(settings, 'DEFAULT_AI_MODEL', DEFAULT_AI_MODEL)
def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
"""Get API key for integration type"""
@@ -98,15 +83,20 @@ class AICore:
return None
def get_model(self, integration_type: str = 'openai') -> str:
"""Get model for integration type"""
if integration_type == 'openai':
return self._default_model
return DEFAULT_AI_MODEL
"""
Get model for integration type.
DEPRECATED: Model should be passed directly to run_ai_request().
This method is kept for backward compatibility but raises an error.
"""
raise ValueError(
"get_model() is deprecated. Model must be passed directly to run_ai_request(). "
"Use get_model_config() from settings.py to get model from IntegrationSettings."
)
def run_ai_request(
self,
prompt: str,
model: Optional[str] = None,
model: str,
max_tokens: int = 4000,
temperature: float = 0.7,
response_format: Optional[Dict] = None,
@@ -121,7 +111,7 @@ class AICore:
Args:
prompt: Prompt text
model: Model name (defaults to account's default)
model: Model name (required - must be provided from IntegrationSettings)
max_tokens: Maximum tokens
temperature: Temperature (0-1)
response_format: Optional response format dict (for JSON mode)
@@ -132,6 +122,9 @@ class AICore:
Returns:
Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
'model', 'cost', 'error', 'api_id'
Raises:
ValueError: If model is not provided
"""
# Use provided tracker or create a new one
if tracker is None:
@@ -139,39 +132,11 @@ class AICore:
tracker.ai_call("Preparing request...")
# Step 1: Validate API key
api_key = api_key or self._openai_api_key
if not api_key:
error_msg = 'OpenAI API key not configured'
# Step 1: Validate model is provided
if not model:
error_msg = "Model is required. Ensure IntegrationSettings is configured for the account."
tracker.error('ConfigurationError', error_msg)
return {
'content': None,
'error': error_msg,
'input_tokens': 0,
'output_tokens': 0,
'total_tokens': 0,
'model': model or self._default_model,
'cost': 0.0,
'api_id': None,
}
# Step 2: Determine model
active_model = model or self._default_model
# Debug logging: Show model from settings vs model used
model_from_settings = self._default_model
model_used = active_model
logger.info(f"[AICore] Model Configuration Debug:")
logger.info(f" - Model from IntegrationSettings: {model_from_settings}")
logger.info(f" - Model parameter passed: {model}")
logger.info(f" - Model actually used in request: {model_used}")
tracker.ai_call(f"Model Debug - Settings: {model_from_settings}, Parameter: {model}, Using: {model_used}")
# Validate model is available and supported
if not active_model:
error_msg = 'No AI model configured. Please configure a model in Integration Settings or Django settings.'
logger.error(f"[AICore] {error_msg}")
tracker.error('ConfigurationError', error_msg)
return {
'content': None,
'error': error_msg,
@@ -183,6 +148,31 @@ class AICore:
'api_id': None,
}
# Step 2: Validate API key
api_key = api_key or self._openai_api_key
if not api_key:
error_msg = 'OpenAI API key not configured'
tracker.error('ConfigurationError', error_msg)
return {
'content': None,
'error': error_msg,
'input_tokens': 0,
'output_tokens': 0,
'total_tokens': 0,
'model': model,
'cost': 0.0,
'api_id': None,
}
# Step 3: Use provided model (no fallback)
active_model = model
# Debug logging: Show model used
logger.info(f"[AICore] Model Configuration:")
logger.info(f" - Model parameter passed: {model}")
logger.info(f" - Model used in request: {active_model}")
tracker.ai_call(f"Using model: {active_model}")
if active_model not in MODEL_RATES:
error_msg = f"Model '{active_model}' is not supported. Supported models: {list(MODEL_RATES.keys())}"
logger.error(f"[AICore] {error_msg}")