working models and image generation model and admin pages
This commit is contained in:
@@ -161,6 +161,7 @@ class Igny8AdminSite(UnfoldAdminSite):
|
||||
'models': [
|
||||
('system', 'GlobalIntegrationSettings'),
|
||||
('system', 'GlobalModuleSettings'),
|
||||
('billing', 'AIModelConfig'),
|
||||
('system', 'GlobalAIPrompt'),
|
||||
('system', 'GlobalAuthorProfile'),
|
||||
('system', 'GlobalStrategy'),
|
||||
|
||||
@@ -59,9 +59,21 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
# Start with global defaults
|
||||
model = global_settings.openai_model
|
||||
temperature = global_settings.openai_temperature
|
||||
max_tokens = global_settings.openai_max_tokens
|
||||
api_key = global_settings.openai_api_key # ALWAYS from global
|
||||
|
||||
# Get max_tokens from AIModelConfig for the selected model
|
||||
max_tokens = global_settings.openai_max_tokens # Fallback
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
model_config = AIModelConfig.objects.filter(
|
||||
model_name=model,
|
||||
is_active=True
|
||||
).first()
|
||||
if model_config and model_config.max_output_tokens:
|
||||
max_tokens = model_config.max_output_tokens
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load max_tokens from AIModelConfig for {model}: {e}")
|
||||
|
||||
# Check if account has overrides (only for Starter/Growth/Scale plans)
|
||||
# Free plan users cannot create IntegrationSettings records
|
||||
try:
|
||||
@@ -76,12 +88,23 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
# Override model if specified (NULL = use global)
|
||||
if config.get('model'):
|
||||
model = config['model']
|
||||
# Also update max_tokens for the overridden model
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
override_config = AIModelConfig.objects.filter(
|
||||
model_name=model,
|
||||
is_active=True
|
||||
).first()
|
||||
if override_config and override_config.max_output_tokens:
|
||||
max_tokens = override_config.max_output_tokens
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Override temperature if specified
|
||||
if config.get('temperature') is not None:
|
||||
temperature = config['temperature']
|
||||
|
||||
# Override max_tokens if specified
|
||||
# Override max_tokens if explicitly specified (rare case)
|
||||
if config.get('max_tokens'):
|
||||
max_tokens = config['max_tokens']
|
||||
|
||||
|
||||
@@ -124,12 +124,22 @@ class IsEditorOrAbove(permissions.BasePermission):
|
||||
class IsAdminOrOwner(permissions.BasePermission):
|
||||
"""
|
||||
Permission class that requires admin or owner role only
|
||||
OR user belongs to aws-admin account
|
||||
For settings, keys, billing operations
|
||||
"""
|
||||
def has_permission(self, request, view):
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
return False
|
||||
|
||||
# Check if user belongs to aws-admin account (case-insensitive)
|
||||
if hasattr(request.user, 'account') and request.user.account:
|
||||
account_name = getattr(request.user.account, 'name', None)
|
||||
account_slug = getattr(request.user.account, 'slug', None)
|
||||
if account_name and account_name.lower() == 'aws admin':
|
||||
return True
|
||||
if account_slug == 'aws-admin':
|
||||
return True
|
||||
|
||||
# Check user role
|
||||
if hasattr(request.user, 'role'):
|
||||
role = request.user.role
|
||||
|
||||
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
Migration: Update Runware model configurations in AIModelConfig
|
||||
|
||||
This migration:
|
||||
1. Updates runware:97@1 to have display_name "Hi Dream Full - Standard"
|
||||
2. Adds Bria 3.2 model as civitai:618692@691639
|
||||
"""
|
||||
from decimal import Decimal
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def update_runware_models(apps, schema_editor):
    """Forward data migration: refresh the Runware image-model catalogue.

    - Updates runware:97@1 to display as "Hi Dream Full - Standard" and
      marks it as the default Runware model.
    - Registers the Bria 3.2 premium model under civitai:618692@691639.
    - Deactivates the superseded runware:100@1 / runware:101@1 entries.
    """
    AIModelConfig = apps.get_model('billing', 'AIModelConfig')

    # Capability/size settings shared by both Runware-served image models.
    shared_defaults = {
        'model_type': 'image',
        'provider': 'runware',
        'valid_sizes': ['512x512', '768x768', '1024x1024', '1024x1792', '1792x1024'],
        'supports_json_mode': False,
        'supports_vision': False,
        'supports_function_calling': False,
        'is_active': True,
    }

    # Hi Dream Full becomes the default Runware model.
    AIModelConfig.objects.update_or_create(
        model_name='runware:97@1',
        defaults=dict(
            shared_defaults,
            display_name='Hi Dream Full - Standard',
            cost_per_image=Decimal('0.008'),
            is_default=True,
            sort_order=10,
            description='Hi Dream Full - Standard quality image generation via Runware',
        ),
    )

    # Bria 3.2 is offered as the premium option (not the default).
    AIModelConfig.objects.update_or_create(
        model_name='civitai:618692@691639',
        defaults=dict(
            shared_defaults,
            display_name='Bria 3.2 - Premium',
            cost_per_image=Decimal('0.012'),
            is_default=False,
            sort_order=11,
            description='Bria 3.2 - Premium quality image generation via Runware/Civitai',
        ),
    )

    # Retire the old Runware models rather than deleting their rows.
    AIModelConfig.objects.filter(
        model_name__in=['runware:100@1', 'runware:101@1']
    ).update(is_active=False)
|
||||
|
||||
|
||||
def reverse_migration(apps, schema_editor):
    """Reverse the migration.

    Undoes update_runware_models: restores runware:97@1's previous display
    name and default flag, removes the Bria 3.2 entry, and re-activates the
    legacy Runware models that the forward migration deactivated.
    """
    AIModelConfig = apps.get_model('billing', 'AIModelConfig')

    # Restore old display name and clear the default flag set forward.
    AIModelConfig.objects.filter(model_name='runware:97@1').update(
        display_name='Runware Standard',
        is_default=False,
    )

    # Remove the Bria 3.2 model row added by the forward migration.
    AIModelConfig.objects.filter(model_name='civitai:618692@691639').delete()

    # Re-activate the models the forward migration deactivated.
    AIModelConfig.objects.filter(
        model_name__in=['runware:100@1', 'runware:101@1']
    ).update(is_active=True)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    # Data-only migration: no schema changes, only AIModelConfig rows.

    dependencies = [
        ('billing', '0022_fix_historical_calculation_mode_null'),
    ]

    operations = [
        # Forward and reverse handlers are defined above in this module.
        migrations.RunPython(update_runware_models, reverse_migration),
    ]
|
||||
@@ -2,6 +2,7 @@
|
||||
System Module Admin
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django import forms
|
||||
from unfold.admin import ModelAdmin
|
||||
from igny8_core.admin.base import AccountAdminMixin, Igny8ModelAdmin
|
||||
from .models import AIPrompt, IntegrationSettings, AuthorProfile, Strategy
|
||||
@@ -333,16 +334,61 @@ class StrategyAdmin(ImportExportMixin, AccountAdminMixin, Igny8ModelAdmin):
|
||||
# GLOBAL SETTINGS ADMIN - Platform-wide defaults
|
||||
# =============================================================================
|
||||
|
||||
class GlobalIntegrationSettingsForm(forms.ModelForm):
    """ModelForm for GlobalIntegrationSettings whose model-selection fields
    are populated at runtime from the AIModelConfig catalogue instead of
    hardcoded choice lists."""

    class Meta:
        model = GlobalIntegrationSettings
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Choice helpers live in the global-settings models module; imported
        # lazily so form declaration does not depend on app-registry order.
        from igny8_core.modules.system.global_settings_models import (
            get_text_model_choices,
            get_image_model_choices,
            get_provider_choices,
        )

        # Text models: keep only entries that look like OpenAI/GPT models.
        gpt_like = [
            (name, label)
            for name, label in get_text_model_choices()
            if 'gpt' in name.lower() or 'openai' in name.lower()
        ]
        if gpt_like:
            self.fields['openai_model'].choices = gpt_like

        # OpenAI image (DALL-E) models.
        dalle = get_image_model_choices(provider='openai')
        if dalle:
            self.fields['dalle_model'].choices = dalle

        # Runware image models.
        runware = get_image_model_choices(provider='runware')
        if runware:
            self.fields['runware_model'].choices = runware

        # Image service providers, restricted to the two supported backends.
        supported = [
            (code, label)
            for code, label in get_provider_choices(model_type='image')
            if code in ('openai', 'runware')
        ]
        if supported:
            self.fields['default_image_service'].choices = supported
|
||||
|
||||
|
||||
@admin.register(GlobalIntegrationSettings)
|
||||
class GlobalIntegrationSettingsAdmin(Igny8ModelAdmin):
|
||||
"""Admin for global integration settings (singleton)"""
|
||||
form = GlobalIntegrationSettingsForm
|
||||
list_display = ["id", "is_active", "last_updated", "updated_by"]
|
||||
readonly_fields = ["last_updated"]
|
||||
readonly_fields = ["last_updated", "openai_max_tokens", "anthropic_max_tokens"]
|
||||
|
||||
fieldsets = (
|
||||
("OpenAI Settings", {
|
||||
"fields": ("openai_api_key", "openai_model", "openai_temperature", "openai_max_tokens"),
|
||||
"description": "Global OpenAI configuration used by all accounts (unless overridden)"
|
||||
"description": "Global OpenAI configuration used by all accounts (unless overridden). Max tokens is loaded from AI Model Configuration."
|
||||
}),
|
||||
("Image Generation - Default Service", {
|
||||
"fields": ("default_image_service",),
|
||||
@@ -365,6 +411,49 @@ class GlobalIntegrationSettingsAdmin(Igny8ModelAdmin):
|
||||
}),
|
||||
)
|
||||
|
||||
def get_readonly_fields(self, request, obj=None):
    """Force the derived max-token display fields to be readonly.

    Their values are sourced from the AI Model Configuration, so admins
    must not edit them directly on this form.
    """
    readonly = list(super().get_readonly_fields(request, obj))
    for field in ('openai_max_tokens', 'anthropic_max_tokens'):
        if field not in readonly:
            readonly.append(field)
    return readonly
|
||||
|
||||
def openai_max_tokens(self, obj):
    """Readonly display: max output tokens for the configured OpenAI model.

    Prefers the value from AI Model Configuration; otherwise falls back to
    the value stored on the settings record, or a default label on the add
    form (obj is None).
    """
    from igny8_core.modules.system.global_settings_models import get_model_max_tokens

    if obj is None:
        return "8192 (default)"
    configured = get_model_max_tokens(obj.openai_model)
    if configured:
        return f"{configured:,} (from AI Model Configuration)"
    return obj.openai_max_tokens
openai_max_tokens.short_description = "Max Output Tokens"
|
||||
|
||||
def anthropic_max_tokens(self, obj):
    """Readonly display: max output tokens for the configured Anthropic model.

    Prefers the value from AI Model Configuration; otherwise falls back to
    the value stored on the settings record, or a default label on the add
    form (obj is None).
    """
    from igny8_core.modules.system.global_settings_models import get_model_max_tokens

    if obj is None:
        return "8192 (default)"
    configured = get_model_max_tokens(obj.anthropic_model)
    if configured:
        return f"{configured:,} (from AI Model Configuration)"
    return obj.anthropic_max_tokens
anthropic_max_tokens.short_description = "Max Output Tokens"
|
||||
|
||||
def save_model(self, request, obj, form, change):
    """Sync stored max-token fields from AI Model Configuration on save.

    For each provider, if the selected model has a configured
    max_output_tokens, copy it onto the settings record before saving;
    otherwise the existing stored value is left untouched.
    """
    from igny8_core.modules.system.global_settings_models import get_model_max_tokens

    for model_field, tokens_field in (
        ('openai_model', 'openai_max_tokens'),
        ('anthropic_model', 'anthropic_max_tokens'),
    ):
        configured = get_model_max_tokens(getattr(obj, model_field))
        if configured:
            setattr(obj, tokens_field, configured)

    super().save_model(request, obj, form, change)
|
||||
|
||||
def has_add_permission(self, request):
    """Enforce the singleton pattern: allow adding a record only while no
    GlobalIntegrationSettings row exists yet."""
    return not GlobalIntegrationSettings.objects.exists()
|
||||
|
||||
@@ -5,6 +5,125 @@ Accounts can override model selection and parameters (but NOT API keys).
|
||||
"""
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_text_model_choices():
    """Return (model_name, display_name) choices for active text models.

    Reads from the AIModelConfig table, ordered by sort_order then name.
    If the table has no matching rows or the database/app registry is
    unavailable, returns a small hardcoded fallback list so forms still
    render.
    """
    try:
        from igny8_core.business.billing.models import AIModelConfig

        active_text = (
            AIModelConfig.objects
            .filter(model_type='text', is_active=True)
            .order_by('sort_order', 'model_name')
        )
        choices = [(cfg.model_name, cfg.display_name) for cfg in active_text]
        if choices:
            return choices
    except Exception as e:
        logger.warning(f"Could not load text models from database: {e}")

    # Fallback to hardcoded defaults
    return [
        ('gpt-4o-mini', 'GPT-4o mini - $0.15 / $0.60 per 1M tokens'),
        ('gpt-4o', 'GPT-4o - $2.50 / $10.00 per 1M tokens'),
    ]
|
||||
|
||||
|
||||
def get_image_model_choices(provider=None):
    """Return (model_name, display_name) choices for active image models.

    Args:
        provider: optional provider slug ('openai', 'runware', ...) used to
            narrow the query.

    When the database is unavailable or has no matching rows, falls back to
    a small hardcoded per-provider list ([] for unknown providers).
    """
    try:
        from igny8_core.business.billing.models import AIModelConfig

        query = AIModelConfig.objects.filter(model_type='image', is_active=True)
        if provider:
            query = query.filter(provider=provider)
        choices = [
            (cfg.model_name, cfg.display_name)
            for cfg in query.order_by('sort_order', 'model_name')
        ]
        if choices:
            return choices
    except Exception as e:
        logger.warning(f"Could not load image models from database: {e}")

    # Fallback based on provider
    fallbacks = {
        'openai': [
            ('dall-e-3', 'DALL·E 3 - $0.040 per image'),
            ('dall-e-2', 'DALL·E 2 - $0.020 per image'),
        ],
        'runware': [
            ('runware:97@1', 'Hi Dream Full - Standard'),
            ('civitai:618692@691639', 'Bria 3.2 - Premium'),
        ],
    }
    return fallbacks.get(provider, [])
|
||||
|
||||
|
||||
def get_provider_choices(model_type='text'):
    """Return (provider, display_name) choices for the given model type.

    Providers are the distinct values on active AIModelConfig rows of that
    type, mapped to human-readable labels (unknown providers fall back to a
    title-cased slug).  If the database is unavailable or has no matching
    rows, a hardcoded default list is returned per model type.

    Args:
        model_type: 'text' or 'image'; other values yield [] on fallback.
    """
    try:
        from igny8_core.business.billing.models import AIModelConfig
        providers = list(AIModelConfig.objects.filter(
            model_type=model_type,
            is_active=True
        ).values_list('provider', flat=True).distinct())

        provider_display = {
            'openai': 'OpenAI DALL-E' if model_type == 'image' else 'OpenAI',
            'anthropic': 'Anthropic (Claude)',
            'runware': 'Runware',
            'google': 'Google',
        }

        if providers:
            # .distinct() already guarantees uniqueness, so no manual dedup
            # pass is needed; just attach display labels in query order.
            return [(p, provider_display.get(p, p.title())) for p in providers]
    except Exception as e:
        logger.warning(f"Could not load providers from database: {e}")

    # Fallback when the database is unavailable or empty.
    if model_type == 'text':
        return [('openai', 'OpenAI (GPT)'), ('anthropic', 'Anthropic (Claude)')]
    elif model_type == 'image':
        return [('openai', 'OpenAI DALL-E'), ('runware', 'Runware')]
    return []
|
||||
|
||||
|
||||
def get_model_max_tokens(model_name):
    """Look up max_output_tokens for *model_name* in AIModelConfig.

    Returns:
        The configured max_output_tokens of the first active row matching
        the name, or None when the model is unknown, inactive, has no
        limit set, or the database cannot be reached.
    """
    try:
        from igny8_core.business.billing.models import AIModelConfig

        config = (
            AIModelConfig.objects
            .filter(model_name=model_name, is_active=True)
            .first()
        )
        if config and config.max_output_tokens:
            return config.max_output_tokens
    except Exception as e:
        logger.warning(f"Could not get max tokens for model {model_name}: {e}")

    return None
|
||||
|
||||
|
||||
class GlobalIntegrationSettings(models.Model):
|
||||
@@ -52,9 +171,8 @@ class GlobalIntegrationSettings(models.Model):
|
||||
]
|
||||
|
||||
RUNWARE_MODEL_CHOICES = [
|
||||
('runware:97@1', 'Runware 97@1 - Versatile Model'),
|
||||
('runware:100@1', 'Runware 100@1 - High Quality'),
|
||||
('runware:101@1', 'Runware 101@1 - Fast Generation'),
|
||||
('runware:97@1', 'Hi Dream Full - Standard'),
|
||||
('civitai:618692@691639', 'Bria 3.2 - Premium'),
|
||||
]
|
||||
|
||||
BRIA_MODEL_CHOICES = [
|
||||
@@ -79,7 +197,6 @@ class GlobalIntegrationSettings(models.Model):
|
||||
IMAGE_SERVICE_CHOICES = [
|
||||
('openai', 'OpenAI DALL-E'),
|
||||
('runware', 'Runware'),
|
||||
('bria', 'Bria AI'),
|
||||
]
|
||||
|
||||
ANTHROPIC_MODEL_CHOICES = [
|
||||
|
||||
@@ -795,12 +795,25 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
|
||||
|
||||
# Build response with global defaults
|
||||
if integration_type == 'openai':
|
||||
# Get max_tokens from AIModelConfig for the selected model
|
||||
max_tokens = global_settings.openai_max_tokens # Fallback
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
model_config = AIModelConfig.objects.filter(
|
||||
model_name=global_settings.openai_model,
|
||||
is_active=True
|
||||
).first()
|
||||
if model_config and model_config.max_output_tokens:
|
||||
max_tokens = model_config.max_output_tokens
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
response_data = {
|
||||
'id': 'openai',
|
||||
'enabled': True, # Always enabled (platform-wide)
|
||||
'model': global_settings.openai_model,
|
||||
'temperature': global_settings.openai_temperature,
|
||||
'max_tokens': global_settings.openai_max_tokens,
|
||||
'max_tokens': max_tokens,
|
||||
'using_global': True, # Flag to show it's using global
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user