django admin Groups reorg, Frontend updates for site settings, #Migration runs

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-05 01:21:52 +00:00
parent 6e30d2d4e8
commit dc7a459ebb
39 changed files with 3142 additions and 1589 deletions

View File

@@ -13,8 +13,6 @@ from django.conf import settings
from .constants import (
DEFAULT_AI_MODEL,
JSON_MODE_MODELS,
MODEL_RATES,
IMAGE_MODEL_RATES,
VALID_OPENAI_IMAGE_MODELS,
VALID_SIZES_BY_MODEL,
DEBUG_MODE,
@@ -45,21 +43,18 @@ class AICore:
self._load_account_settings()
def _load_account_settings(self):
"""Load API keys from GlobalIntegrationSettings (platform-wide, used by ALL accounts)"""
"""Load API keys from IntegrationProvider (centralized provider config)"""
try:
from igny8_core.modules.system.global_settings_models import GlobalIntegrationSettings
from igny8_core.ai.model_registry import ModelRegistry
# Get global settings - single instance used by ALL accounts
global_settings = GlobalIntegrationSettings.get_instance()
# Load API keys from global settings (platform-wide)
self._openai_api_key = global_settings.openai_api_key
self._runware_api_key = global_settings.runware_api_key
self._bria_api_key = getattr(global_settings, 'bria_api_key', None)
self._anthropic_api_key = getattr(global_settings, 'anthropic_api_key', None)
# Load API keys from IntegrationProvider (centralized, platform-wide)
self._openai_api_key = ModelRegistry.get_api_key('openai')
self._runware_api_key = ModelRegistry.get_api_key('runware')
self._bria_api_key = ModelRegistry.get_api_key('bria')
self._anthropic_api_key = ModelRegistry.get_api_key('anthropic')
except Exception as e:
logger.error(f"Could not load GlobalIntegrationSettings: {e}", exc_info=True)
logger.error(f"Could not load API keys from IntegrationProvider: {e}", exc_info=True)
self._openai_api_key = None
self._runware_api_key = None
self._bria_api_key = None
@@ -169,24 +164,24 @@ class AICore:
logger.info(f" - Model used in request: {active_model}")
tracker.ai_call(f"Using model: {active_model}")
# Use ModelRegistry for validation with fallback to constants
# Use ModelRegistry for validation (database-driven)
from igny8_core.ai.model_registry import ModelRegistry
if not ModelRegistry.validate_model(active_model):
# Fallback check against constants for backward compatibility
if active_model not in MODEL_RATES:
error_msg = f"Model '{active_model}' is not supported. Supported models: {list(MODEL_RATES.keys())}"
logger.error(f"[AICore] {error_msg}")
tracker.error('ConfigurationError', error_msg)
return {
'content': None,
'error': error_msg,
'input_tokens': 0,
'output_tokens': 0,
'total_tokens': 0,
'model': active_model,
'cost': 0.0,
'api_id': None,
}
# Get list of supported models from database
supported_models = [m.model_name for m in ModelRegistry.list_models(model_type='text')]
error_msg = f"Model '{active_model}' is not supported. Supported models: {supported_models}"
logger.error(f"[AICore] {error_msg}")
tracker.error('ConfigurationError', error_msg)
return {
'content': None,
'error': error_msg,
'input_tokens': 0,
'output_tokens': 0,
'total_tokens': 0,
'model': active_model,
'cost': 0.0,
'api_id': None,
}
tracker.ai_call(f"Using model: {active_model}")
@@ -305,17 +300,13 @@ class AICore:
tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
tracker.parse(f"Content length: {len(content)} characters")
# Step 10: Calculate cost using ModelRegistry (with fallback to constants)
# Step 10: Calculate cost using ModelRegistry (database-driven)
from igny8_core.ai.model_registry import ModelRegistry
cost = float(ModelRegistry.calculate_cost(
active_model,
input_tokens=input_tokens,
output_tokens=output_tokens
))
# Fallback to constants if ModelRegistry returns 0
if cost == 0:
rates = MODEL_RATES.get(active_model, {'input': 2.00, 'output': 8.00})
cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
tracker.parse(f"Cost calculated: ${cost:.6f}")
tracker.done("Request completed successfully")
@@ -902,11 +893,9 @@ class AICore:
image_url = image_data.get('url')
revised_prompt = image_data.get('revised_prompt')
# Use ModelRegistry for image cost (with fallback to constants)
# Use ModelRegistry for image cost (database-driven)
from igny8_core.ai.model_registry import ModelRegistry
cost = float(ModelRegistry.calculate_cost(model, num_images=n))
if cost == 0:
cost = IMAGE_MODEL_RATES.get(model, 0.040) * n
print(f"[AI][{function_name}] Step 5: Image generated successfully")
print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
print(f"[AI][{function_name}][Success] Image generation completed")
@@ -1361,24 +1350,13 @@ class AICore:
}
def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
"""Calculate cost for API call using ModelRegistry with fallback to constants"""
"""Calculate cost for API call using ModelRegistry (database-driven)"""
from igny8_core.ai.model_registry import ModelRegistry
if model_type == 'text':
cost = float(ModelRegistry.calculate_cost(model, input_tokens=input_tokens, output_tokens=output_tokens))
if cost == 0:
# Fallback to constants
rates = MODEL_RATES.get(model, {'input': 2.00, 'output': 8.00})
input_cost = (input_tokens / 1_000_000) * rates['input']
output_cost = (output_tokens / 1_000_000) * rates['output']
return input_cost + output_cost
return cost
return float(ModelRegistry.calculate_cost(model, input_tokens=input_tokens, output_tokens=output_tokens))
elif model_type == 'image':
cost = float(ModelRegistry.calculate_cost(model, num_images=1))
if cost == 0:
rate = IMAGE_MODEL_RATES.get(model, 0.040)
return rate * 1
return cost
return float(ModelRegistry.calculate_cost(model, num_images=1))
return 0.0
# Legacy method names for backward compatibility

View File

@@ -1,7 +1,17 @@
"""
AI Constants - Model pricing, valid models, and configuration constants
AI Constants - Configuration constants for AI operations
NOTE: Model pricing (MODEL_RATES, IMAGE_MODEL_RATES) has been moved to the database
via AIModelConfig. Use ModelRegistry to get model pricing:
from igny8_core.ai.model_registry import ModelRegistry
cost = ModelRegistry.calculate_cost(model_id, input_tokens=N, output_tokens=N)
The constants below are DEPRECATED and kept only for reference/backward compatibility.
Do NOT use MODEL_RATES or IMAGE_MODEL_RATES in new code.
"""
# Model pricing (per 1M tokens) - EXACT from reference plugin model-rates-config.php
# DEPRECATED - Use AIModelConfig database table instead
# Model pricing (per 1M tokens) - kept for reference only
MODEL_RATES = {
'gpt-4.1': {'input': 2.00, 'output': 8.00},
'gpt-4o-mini': {'input': 0.15, 'output': 0.60},
@@ -10,7 +20,8 @@ MODEL_RATES = {
'gpt-5.2': {'input': 1.75, 'output': 14.00},
}
# Image model pricing (per image) - EXACT from reference plugin
# DEPRECATED - Use AIModelConfig database table instead
# Image model pricing (per image) - kept for reference only
IMAGE_MODEL_RATES = {
'dall-e-3': 0.040,
'dall-e-2': 0.020,

View File

@@ -219,32 +219,12 @@ class GenerateImagePromptsFunction(BaseAIFunction):
# Helper methods
def _get_max_in_article_images(self, account) -> int:
"""
Get max_in_article_images from settings.
Uses account's IntegrationSettings override, or GlobalIntegrationSettings.
Get max_in_article_images from AISettings (with account override).
"""
from igny8_core.modules.system.models import IntegrationSettings
from igny8_core.modules.system.global_settings_models import GlobalIntegrationSettings
from igny8_core.modules.system.ai_settings import AISettings
# Try account-specific override first
try:
settings = IntegrationSettings.objects.get(
account=account,
integration_type='image_generation',
is_active=True
)
max_images = settings.config.get('max_in_article_images')
if max_images is not None:
max_images = int(max_images)
logger.info(f"Using max_in_article_images={max_images} from account {account.id} IntegrationSettings override")
return max_images
except IntegrationSettings.DoesNotExist:
logger.debug(f"No IntegrationSettings override for account {account.id}, using GlobalIntegrationSettings")
# Use GlobalIntegrationSettings default
global_settings = GlobalIntegrationSettings.get_instance()
max_images = global_settings.max_in_article_images
logger.info(f"Using max_in_article_images={max_images} from GlobalIntegrationSettings (account {account.id})")
max_images = AISettings.get_effective_max_images(account)
logger.info(f"Using max_in_article_images={max_images} for account {account.id}")
return max_images
def _extract_content_elements(self, content: Content, max_images: int) -> Dict:

View File

@@ -67,40 +67,33 @@ class GenerateImagesFunction(BaseAIFunction):
if not tasks:
raise ValueError("No tasks found")
# Get image generation settings
# Try account-specific override, otherwise use GlobalIntegrationSettings
from igny8_core.modules.system.models import IntegrationSettings
from igny8_core.modules.system.global_settings_models import GlobalIntegrationSettings
# Get image generation settings from AISettings (with account overrides)
from igny8_core.modules.system.ai_settings import AISettings
from igny8_core.ai.model_registry import ModelRegistry
image_settings = {}
try:
integration = IntegrationSettings.objects.get(
account=account,
integration_type='image_generation',
is_active=True
)
image_settings = integration.config or {}
logger.info(f"Using image settings from account {account.id} IntegrationSettings override")
except IntegrationSettings.DoesNotExist:
logger.info(f"No IntegrationSettings override for account {account.id}, using GlobalIntegrationSettings")
# Get effective settings (AISettings + AccountSettings overrides)
image_style = AISettings.get_effective_image_style(account)
max_images = AISettings.get_effective_max_images(account)
# Use GlobalIntegrationSettings for missing values
global_settings = GlobalIntegrationSettings.get_instance()
# Extract settings with defaults from global settings
provider = image_settings.get('provider') or image_settings.get('service') or global_settings.default_image_service
if provider == 'runware':
model = image_settings.get('model') or image_settings.get('runwareModel') or global_settings.runware_model
# Get default image model and provider from database
default_model = ModelRegistry.get_default_model('image')
if default_model:
model_config = ModelRegistry.get_model(default_model)
provider = model_config.provider if model_config else 'openai'
model = default_model
else:
model = image_settings.get('model') or global_settings.dalle_model
provider = 'openai'
model = 'dall-e-3'
logger.info(f"Using image settings: provider={provider}, model={model}, style={image_style}, max={max_images}")
return {
'tasks': tasks,
'account': account,
'provider': provider,
'model': model,
'image_type': image_settings.get('image_type') or global_settings.image_style,
'max_in_article_images': int(image_settings.get('max_in_article_images') or global_settings.max_in_article_images),
'image_type': image_style,
'max_in_article_images': max_images,
}
def build_prompt(self, data: Dict, account=None) -> Dict:

View File

@@ -1,11 +1,10 @@
"""
Model Registry Service
Central registry for AI model configurations with caching.
Replaces hardcoded MODEL_RATES and IMAGE_MODEL_RATES from constants.py
This service provides:
- Database-driven model configuration (from AIModelConfig)
- Fallback to constants.py for backward compatibility
- Integration provider API key retrieval (from IntegrationProvider)
- Caching for performance
- Cost calculation methods
@@ -20,6 +19,9 @@ Usage:
# Calculate cost
cost = ModelRegistry.calculate_cost('gpt-4o-mini', input_tokens=1000, output_tokens=500)
# Get API key for a provider
api_key = ModelRegistry.get_api_key('openai')
"""
import logging
from decimal import Decimal
@@ -33,12 +35,14 @@ MODEL_CACHE_TTL = 300
# Cache key prefix
CACHE_KEY_PREFIX = 'ai_model_'
PROVIDER_CACHE_PREFIX = 'provider_'
class ModelRegistry:
"""
Central registry for AI model configurations with caching.
Uses AIModelConfig from database with fallback to constants.py
Uses AIModelConfig from database for model configs.
Uses IntegrationProvider for API keys.
"""
@classmethod
@@ -46,6 +50,11 @@ class ModelRegistry:
"""Generate cache key for model"""
return f"{CACHE_KEY_PREFIX}{model_id}"
@classmethod
def _get_provider_cache_key(cls, provider_id: str) -> str:
"""Generate cache key for provider"""
return f"{PROVIDER_CACHE_PREFIX}{provider_id}"
@classmethod
def _get_from_db(cls, model_id: str) -> Optional[Any]:
"""Get model config from database"""
@@ -59,46 +68,6 @@ class ModelRegistry:
logger.debug(f"Could not fetch model {model_id} from DB: {e}")
return None
@classmethod
def _get_from_constants(cls, model_id: str) -> Optional[Dict[str, Any]]:
"""
Get model config from constants.py as fallback.
Returns a dict mimicking AIModelConfig attributes.
"""
from igny8_core.ai.constants import MODEL_RATES, IMAGE_MODEL_RATES
# Check text models first
if model_id in MODEL_RATES:
rates = MODEL_RATES[model_id]
return {
'model_name': model_id,
'display_name': model_id,
'model_type': 'text',
'provider': 'openai',
'input_cost_per_1m': Decimal(str(rates.get('input', 0))),
'output_cost_per_1m': Decimal(str(rates.get('output', 0))),
'cost_per_image': None,
'is_active': True,
'_from_constants': True
}
# Check image models
if model_id in IMAGE_MODEL_RATES:
cost = IMAGE_MODEL_RATES[model_id]
return {
'model_name': model_id,
'display_name': model_id,
'model_type': 'image',
'provider': 'openai' if 'dall-e' in model_id else 'runware',
'input_cost_per_1m': None,
'output_cost_per_1m': None,
'cost_per_image': Decimal(str(cost)),
'is_active': True,
'_from_constants': True
}
return None
@classmethod
def get_model(cls, model_id: str) -> Optional[Any]:
"""
@@ -107,13 +76,12 @@ class ModelRegistry:
Order of lookup:
1. Cache
2. Database (AIModelConfig)
3. constants.py fallback
Args:
model_id: The model identifier (e.g., 'gpt-4o-mini', 'dall-e-3')
Returns:
AIModelConfig instance or dict with model config, None if not found
AIModelConfig instance, None if not found
"""
cache_key = cls._get_cache_key(model_id)
@@ -129,13 +97,7 @@ class ModelRegistry:
cache.set(cache_key, model_config, MODEL_CACHE_TTL)
return model_config
# Fallback to constants
fallback = cls._get_from_constants(model_id)
if fallback:
cache.set(cache_key, fallback, MODEL_CACHE_TTL)
return fallback
logger.warning(f"Model {model_id} not found in DB or constants")
logger.warning(f"Model {model_id} not found in database")
return None
@classmethod
@@ -154,16 +116,6 @@ class ModelRegistry:
if not model:
return Decimal('0')
# Handle dict (from constants fallback)
if isinstance(model, dict):
if rate_type == 'input':
return model.get('input_cost_per_1m') or Decimal('0')
elif rate_type == 'output':
return model.get('output_cost_per_1m') or Decimal('0')
elif rate_type == 'image':
return model.get('cost_per_image') or Decimal('0')
return Decimal('0')
# Handle AIModelConfig instance
if rate_type == 'input':
return model.input_cost_per_1m or Decimal('0')
@@ -195,8 +147,8 @@ class ModelRegistry:
if not model:
return Decimal('0')
# Determine model type
model_type = model.get('model_type') if isinstance(model, dict) else model.model_type
# Get model type from AIModelConfig
model_type = model.model_type
if model_type == 'text':
input_rate = cls.get_rate(model_id, 'input')
@@ -218,7 +170,7 @@ class ModelRegistry:
@classmethod
def get_default_model(cls, model_type: str = 'text') -> Optional[str]:
"""
Get the default model for a given type.
Get the default model for a given type from database.
Args:
model_type: 'text' or 'image'
@@ -236,32 +188,33 @@ class ModelRegistry:
if default:
return default.model_name
# If no default is set, return first active model of this type
first_active = AIModelConfig.objects.filter(
model_type=model_type,
is_active=True
).order_by('model_name').first()
if first_active:
return first_active.model_name
except Exception as e:
logger.debug(f"Could not get default {model_type} model from DB: {e}")
# Fallback to constants
from igny8_core.ai.constants import DEFAULT_AI_MODEL
if model_type == 'text':
return DEFAULT_AI_MODEL
elif model_type == 'image':
return 'dall-e-3'
logger.error(f"Could not get default {model_type} model from DB: {e}")
return None
@classmethod
def list_models(cls, model_type: Optional[str] = None, provider: Optional[str] = None) -> list:
"""
List all available models, optionally filtered by type or provider.
List all available models from database, optionally filtered by type or provider.
Args:
model_type: Filter by 'text', 'image', or 'embedding'
provider: Filter by 'openai', 'anthropic', 'runware', etc.
Returns:
List of model configs
List of AIModelConfig instances
"""
models = []
try:
from igny8_core.business.billing.models import AIModelConfig
queryset = AIModelConfig.objects.filter(is_active=True)
@@ -271,27 +224,10 @@ class ModelRegistry:
if provider:
queryset = queryset.filter(provider=provider)
models = list(queryset.order_by('sort_order', 'model_name'))
return list(queryset.order_by('model_name'))
except Exception as e:
logger.debug(f"Could not list models from DB: {e}")
# Add models from constants if not in DB
if not models:
from igny8_core.ai.constants import MODEL_RATES, IMAGE_MODEL_RATES
if model_type in (None, 'text'):
for model_id in MODEL_RATES:
fallback = cls._get_from_constants(model_id)
if fallback:
models.append(fallback)
if model_type in (None, 'image'):
for model_id in IMAGE_MODEL_RATES:
fallback = cls._get_from_constants(model_id)
if fallback:
models.append(fallback)
return models
logger.error(f"Could not list models from DB: {e}")
return []
@classmethod
def clear_cache(cls, model_id: Optional[str] = None):
@@ -311,10 +247,10 @@ class ModelRegistry:
if hasattr(default_cache, 'delete_pattern'):
default_cache.delete_pattern(f"{CACHE_KEY_PREFIX}*")
else:
# Fallback: clear known models
from igny8_core.ai.constants import MODEL_RATES, IMAGE_MODEL_RATES
for model_id in list(MODEL_RATES.keys()) + list(IMAGE_MODEL_RATES.keys()):
cache.delete(cls._get_cache_key(model_id))
# Fallback: clear all known models from DB
from igny8_core.business.billing.models import AIModelConfig
for model in AIModelConfig.objects.values_list('model_name', flat=True):
cache.delete(cls._get_cache_key(model))
except Exception as e:
logger.warning(f"Could not clear all model caches: {e}")
@@ -332,8 +268,110 @@ class ModelRegistry:
model = cls.get_model(model_id)
if not model:
return False
# Check if active
if isinstance(model, dict):
return model.get('is_active', True)
return model.is_active
# ========== IntegrationProvider methods ==========
@classmethod
def get_provider(cls, provider_id: str) -> Optional[Any]:
"""
Get IntegrationProvider by provider_id.
Args:
provider_id: The provider identifier (e.g., 'openai', 'stripe', 'resend')
Returns:
IntegrationProvider instance, None if not found
"""
cache_key = cls._get_provider_cache_key(provider_id)
# Try cache first
cached = cache.get(cache_key)
if cached is not None:
return cached
try:
from igny8_core.modules.system.models import IntegrationProvider
provider = IntegrationProvider.objects.filter(
provider_id=provider_id,
is_active=True
).first()
if provider:
cache.set(cache_key, provider, MODEL_CACHE_TTL)
return provider
except Exception as e:
logger.error(f"Could not fetch provider {provider_id} from DB: {e}")
return None
@classmethod
def get_api_key(cls, provider_id: str) -> Optional[str]:
"""
Get API key for a provider.
Args:
provider_id: The provider identifier (e.g., 'openai', 'anthropic', 'runware')
Returns:
API key string, None if not found or provider is inactive
"""
provider = cls.get_provider(provider_id)
if provider and provider.api_key:
return provider.api_key
return None
@classmethod
def get_api_secret(cls, provider_id: str) -> Optional[str]:
"""
Get API secret for a provider (for OAuth, Stripe secret key, etc.).
Args:
provider_id: The provider identifier
Returns:
API secret string, None if not found
"""
provider = cls.get_provider(provider_id)
if provider and provider.api_secret:
return provider.api_secret
return None
@classmethod
def get_webhook_secret(cls, provider_id: str) -> Optional[str]:
"""
Get webhook secret for a provider (for Stripe, PayPal webhooks).
Args:
provider_id: The provider identifier
Returns:
Webhook secret string, None if not found
"""
provider = cls.get_provider(provider_id)
if provider and provider.webhook_secret:
return provider.webhook_secret
return None
@classmethod
def clear_provider_cache(cls, provider_id: Optional[str] = None):
"""
Clear provider cache.
Args:
provider_id: Clear specific provider cache, or all if None
"""
if provider_id:
cache.delete(cls._get_provider_cache_key(provider_id))
else:
try:
from django.core.cache import caches
default_cache = caches['default']
if hasattr(default_cache, 'delete_pattern'):
default_cache.delete_pattern(f"{PROVIDER_CACHE_PREFIX}*")
else:
from igny8_core.modules.system.models import IntegrationProvider
for pid in IntegrationProvider.objects.values_list('provider_id', flat=True):
cache.delete(cls._get_provider_cache_key(pid))
except Exception as e:
logger.warning(f"Could not clear provider caches: {e}")

View File

@@ -1,6 +1,7 @@
"""
AI Settings - Centralized model configurations and limits
Uses global settings with optional per-account overrides.
Uses AISettings (system defaults) with optional per-account overrides via AccountSettings.
API keys are stored in IntegrationProvider.
"""
from typing import Dict, Any
import logging
@@ -22,10 +23,9 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
Get model configuration for AI function.
Architecture:
- API keys: ALWAYS from GlobalIntegrationSettings (platform-wide)
- Model/params: From IntegrationSettings if account has override, else from global
- Free plan: Cannot override, uses global defaults
- Starter/Growth/Scale: Can override model, temperature, max_tokens, etc.
- API keys: From IntegrationProvider (centralized)
- Model: From AIModelConfig (is_default=True)
- Params: From AISettings with AccountSettings overrides
Args:
function_name: Name of the AI function
@@ -44,25 +44,30 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
actual_name = FUNCTION_ALIASES.get(function_name, function_name)
try:
from igny8_core.modules.system.global_settings_models import GlobalIntegrationSettings
from igny8_core.modules.system.models import IntegrationSettings
from igny8_core.modules.system.ai_settings import AISettings
from igny8_core.ai.model_registry import ModelRegistry
# Get global settings (for API keys and defaults)
global_settings = GlobalIntegrationSettings.get_instance()
# Get API key from IntegrationProvider
api_key = ModelRegistry.get_api_key('openai')
if not global_settings.openai_api_key:
if not api_key:
raise ValueError(
"Platform OpenAI API key not configured. "
"Please configure GlobalIntegrationSettings in Django admin."
"Please configure IntegrationProvider in Django admin."
)
# Start with global defaults
model = global_settings.openai_model
temperature = global_settings.openai_temperature
api_key = global_settings.openai_api_key # ALWAYS from global
# Get default text model from AIModelConfig
default_model = ModelRegistry.get_default_model('text')
if not default_model:
default_model = 'gpt-4o-mini' # Ultimate fallback
# Get max_tokens from AIModelConfig for the selected model
max_tokens = global_settings.openai_max_tokens # Fallback
model = default_model
# Get settings with account overrides
temperature = AISettings.get_effective_temperature(account)
max_tokens = AISettings.get_effective_max_tokens(account)
# Get max_tokens from AIModelConfig if available
try:
from igny8_core.business.billing.models import AIModelConfig
model_config = AIModelConfig.objects.filter(
@@ -74,60 +79,22 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
except Exception as e:
logger.warning(f"Could not load max_tokens from AIModelConfig for {model}: {e}")
# Check if account has overrides (only for Starter/Growth/Scale plans)
# Free plan users cannot create IntegrationSettings records
try:
account_settings = IntegrationSettings.objects.get(
account=account,
integration_type='openai',
is_active=True
)
config = account_settings.config or {}
# Override model if specified (NULL = use global)
if config.get('model'):
model = config['model']
# Also update max_tokens for the overridden model
try:
from igny8_core.business.billing.models import AIModelConfig
override_config = AIModelConfig.objects.filter(
model_name=model,
is_active=True
).first()
if override_config and override_config.max_output_tokens:
max_tokens = override_config.max_output_tokens
except Exception:
pass
# Override temperature if specified
if config.get('temperature') is not None:
temperature = config['temperature']
# Override max_tokens if explicitly specified (rare case)
if config.get('max_tokens'):
max_tokens = config['max_tokens']
except IntegrationSettings.DoesNotExist:
# No account override, use global defaults (already set above)
pass
except Exception as e:
logger.error(f"Could not load OpenAI settings for account {account.id}: {e}")
raise ValueError(
f"Could not load OpenAI configuration for account {account.id}. "
f"Please configure GlobalIntegrationSettings."
f"Please configure IntegrationProvider and AISettings."
)
# Validate model is in our supported list (optional validation)
# Validate model is in our supported list using ModelRegistry (database-driven)
try:
from igny8_core.utils.ai_processor import MODEL_RATES
if model not in MODEL_RATES:
if not ModelRegistry.validate_model(model):
supported_models = [m.model_name for m in ModelRegistry.list_models(model_type='text')]
logger.warning(
f"Model '{model}' for account {account.id} is not in supported list. "
f"Supported models: {list(MODEL_RATES.keys())}"
f"Supported models: {supported_models}"
)
except ImportError:
except Exception:
pass
# Build response format based on model (JSON mode for supported models)

View File

@@ -181,42 +181,26 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
failed = 0
results = []
# Get image generation settings
# Try account-specific override, otherwise use GlobalIntegrationSettings
# Get image generation settings from AISettings (with account overrides)
logger.info("[process_image_generation_queue] Step 1: Loading image generation settings")
from igny8_core.modules.system.global_settings_models import GlobalIntegrationSettings
from igny8_core.modules.system.ai_settings import AISettings
from igny8_core.ai.model_registry import ModelRegistry
config = {}
try:
image_settings = IntegrationSettings.objects.get(
account=account,
integration_type='image_generation',
is_active=True
)
logger.info(f"[process_image_generation_queue] Using account {account.id} IntegrationSettings override")
config = image_settings.config or {}
except IntegrationSettings.DoesNotExist:
logger.info(f"[process_image_generation_queue] No IntegrationSettings override for account {account.id}, using GlobalIntegrationSettings")
except Exception as e:
logger.error(f"[process_image_generation_queue] ERROR loading image generation settings: {e}", exc_info=True)
return {'success': False, 'error': f'Error loading image generation settings: {str(e)}'}
# Get effective settings
image_type = AISettings.get_effective_image_style(account)
image_format = 'webp' # Default format
# Use GlobalIntegrationSettings for missing values
global_settings = GlobalIntegrationSettings.get_instance()
logger.info(f"[process_image_generation_queue] Image generation settings loaded. Config keys: {list(config.keys())}")
logger.info(f"[process_image_generation_queue] Full config: {config}")
# Get provider and model from config with global fallbacks
provider = config.get('provider') or global_settings.default_image_service
if provider == 'runware':
model = config.get('model') or config.get('imageModel') or global_settings.runware_model
# Get default image model from database
default_model = ModelRegistry.get_default_model('image')
if default_model:
model_config = ModelRegistry.get_model(default_model)
provider = model_config.provider if model_config else 'openai'
model = default_model
else:
model = config.get('model') or config.get('imageModel') or global_settings.dalle_model
provider = 'openai'
model = 'dall-e-3'
logger.info(f"[process_image_generation_queue] Using PROVIDER: {provider}, MODEL: {model} from settings")
image_type = config.get('image_type') or global_settings.image_style
image_format = config.get('image_format', 'webp')
# Style to prompt enhancement mapping
# These style descriptors are added to the image prompt for better results
@@ -268,22 +252,15 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
logger.info(f" - Featured image size: {featured_image_size}")
logger.info(f" - In-article square: {in_article_square_size}, landscape: {in_article_landscape_size}")
# Get provider API key
# API keys are ALWAYS from GlobalIntegrationSettings (accounts cannot override API keys)
# Account IntegrationSettings only store provider preference, NOT API keys
logger.info(f"[process_image_generation_queue] Step 2: Loading {provider.upper()} API key from GlobalIntegrationSettings")
# Get provider API key from IntegrationProvider (centralized)
logger.info(f"[process_image_generation_queue] Step 2: Loading {provider.upper()} API key from IntegrationProvider")
# Get API key from GlobalIntegrationSettings
if provider == 'runware':
api_key = global_settings.runware_api_key
elif provider == 'openai':
api_key = global_settings.dalle_api_key or global_settings.openai_api_key
else:
api_key = None
# Get API key from IntegrationProvider (centralized)
api_key = ModelRegistry.get_api_key(provider)
if not api_key:
logger.error(f"[process_image_generation_queue] {provider.upper()} API key not configured in GlobalIntegrationSettings")
return {'success': False, 'error': f'{provider.upper()} API key not configured in GlobalIntegrationSettings'}
logger.error(f"[process_image_generation_queue] {provider.upper()} API key not configured in IntegrationProvider")
return {'success': False, 'error': f'{provider.upper()} API key not configured'}
# Log API key presence (but not the actual key for security)
api_key_preview = f"{api_key[:10]}...{api_key[-4:]}" if len(api_key) > 14 else "***"

View File

@@ -145,7 +145,7 @@ def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
Dict with 'valid' (bool) and optional 'error' (str)
"""
try:
# Try database first
# Use database-driven validation via AIModelConfig
from igny8_core.business.billing.models import AIModelConfig
exists = AIModelConfig.objects.filter(
@@ -169,29 +169,20 @@ def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
else:
return {
'valid': False,
'error': f'Model "{model}" is not found in database'
'error': f'No {model_type} models configured in database'
}
return {'valid': True}
except Exception:
# Fallback to constants if database fails
from .constants import MODEL_RATES, VALID_OPENAI_IMAGE_MODELS
if model_type == 'text':
if model not in MODEL_RATES:
return {
'valid': False,
'error': f'Model "{model}" is not in supported models list'
}
elif model_type == 'image':
if model not in VALID_OPENAI_IMAGE_MODELS:
return {
'valid': False,
'error': f'Model "{model}" is not valid for OpenAI image generation. Only {", ".join(VALID_OPENAI_IMAGE_MODELS)} are supported.'
}
return {'valid': True}
except Exception as e:
# Log error but don't fallback to constants - DB is authoritative
import logging
logger = logging.getLogger(__name__)
logger.error(f"Error validating model {model}: {e}")
return {
'valid': False,
'error': f'Error validating model: {e}'
}
def validate_image_size(size: str, model: str) -> Dict[str, Any]: