AI MODELS & final updates - feat: Implement AI Model Configuration with dynamic pricing and REST API

- Added AIModelConfig model to manage AI model configurations in the database.
- Created serializers and views for AI model configurations, enabling read-only access via REST API.
- Implemented filtering capabilities for model type, provider, and default status in the API.
- Seeded initial data for text and image models, including pricing and capabilities.
- Updated Django Admin interface for managing AI models with enhanced features and bulk actions.
- Added validation methods for model and image size checks.
- Comprehensive migration created to establish the AIModelConfig model and seed initial data.
- Documented implementation and validation results in summary and report files.
This commit is contained in:
IGNY8 VPS (Salman)
2025-12-24 13:37:36 +00:00
parent 355b0ac897
commit 02d4f1fa46
9 changed files with 1531 additions and 28 deletions

View File

@@ -135,7 +135,7 @@ def validate_api_key(api_key: Optional[str], integration_type: str = 'openai') -
def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
    """
    Validate that model is in supported list using database.

    The database (AIModelConfig) is the source of truth; if the lookup
    fails for any reason (e.g. table not migrated yet, DB outage) the
    function falls back to the hardcoded constants so validation keeps
    working.

    Args:
        model: Model name to validate
        model_type: Type of model to validate against ('text' or 'image')

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    try:
        # Try database first
        from igny8_core.business.billing.models import AIModelConfig
        exists = AIModelConfig.objects.filter(
            model_name=model,
            model_type=model_type,
            is_active=True
        ).exists()
        if not exists:
            # Get available models for a more actionable error message
            available = list(AIModelConfig.objects.filter(
                model_type=model_type,
                is_active=True
            ).values_list('model_name', flat=True))
            if available:
                return {
                    'valid': False,
                    'error': f'Model "{model}" is not active or not found. Available {model_type} models: {", ".join(available)}'
                }
            else:
                return {
                    'valid': False,
                    'error': f'Model "{model}" is not found in database'
                }
        return {'valid': True}
    except Exception:
        # Fallback to constants if database fails (keeps validation alive
        # during migrations or DB outages)
        from .constants import MODEL_RATES, VALID_OPENAI_IMAGE_MODELS
        if model_type == 'text':
            if model not in MODEL_RATES:
                return {
                    'valid': False,
                    'error': f'Model "{model}" is not in supported models list'
                }
        elif model_type == 'image':
            if model not in VALID_OPENAI_IMAGE_MODELS:
                return {
                    'valid': False,
                    'error': f'Model "{model}" is not valid for OpenAI image generation. Only {", ".join(VALID_OPENAI_IMAGE_MODELS)} are supported.'
                }
        # Unknown model_type values fall through as valid, matching the
        # pre-change behavior.
        return {'valid': True}
def validate_image_size(size: str, model: str) -> Dict[str, Any]:
    """
    Validate that image size is valid for the selected model using database.

    Looks up the image model in AIModelConfig and delegates to its
    validate_size() method; falls back to the hardcoded size table if
    the database lookup fails.

    Args:
        size: Image size (e.g., '1024x1024')
        model: Image model name

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    try:
        # Try database first
        from igny8_core.business.billing.models import AIModelConfig
        model_config = AIModelConfig.objects.filter(
            model_name=model,
            model_type='image',
            is_active=True
        ).first()
        if model_config:
            if not model_config.validate_size(size):
                # valid_sizes may be NULL in the DB; treat as empty list
                valid_sizes = model_config.valid_sizes or []
                return {
                    'valid': False,
                    'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(valid_sizes)}'
                }
            return {'valid': True}
        else:
            return {
                'valid': False,
                'error': f'Image model "{model}" not found in database'
            }
    except Exception:
        # Fallback to constants if database fails (keeps validation alive
        # during migrations or DB outages)
        from .constants import VALID_SIZES_BY_MODEL
        valid_sizes = VALID_SIZES_BY_MODEL.get(model, [])
        if size not in valid_sizes:
            return {
                'valid': False,
                'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(valid_sizes)}'
            }
        return {'valid': True}

View File

@@ -687,3 +687,238 @@ class AccountPaymentMethod(AccountBaseModel):
def __str__(self):
    """Human-readable label: '<account_id> - <display_name> (<type>)'."""
    label = f"{self.account_id} - {self.display_name} ({self.type})"
    return label
class AIModelConfig(models.Model):
    """
    AI Model Configuration - Database-driven model pricing and capabilities.

    Replaces hardcoded MODEL_RATES and IMAGE_MODEL_RATES from constants.py

    Two pricing models:
    - Text models: Cost per 1M tokens (input/output), credits calculated AFTER AI call
    - Image models: Cost per image, credits calculated BEFORE AI call
    """

    MODEL_TYPE_CHOICES = [
        ('text', 'Text Generation'),
        ('image', 'Image Generation'),
        ('embedding', 'Embedding'),
    ]

    PROVIDER_CHOICES = [
        ('openai', 'OpenAI'),
        ('anthropic', 'Anthropic'),
        ('runware', 'Runware'),
        ('google', 'Google'),
    ]

    # Basic Information
    model_name = models.CharField(
        max_length=100,
        unique=True,
        db_index=True,
        help_text="Model identifier used in API calls (e.g., 'gpt-4o-mini', 'dall-e-3')"
    )
    display_name = models.CharField(
        max_length=200,
        help_text="Human-readable name shown in UI (e.g., 'GPT-4o mini - Fast & Affordable')"
    )
    model_type = models.CharField(
        max_length=20,
        choices=MODEL_TYPE_CHOICES,
        db_index=True,
        help_text="Type of model - determines which pricing fields are used"
    )
    provider = models.CharField(
        max_length=50,
        choices=PROVIDER_CHOICES,
        db_index=True,
        help_text="AI provider (OpenAI, Anthropic, etc.)"
    )

    # Text Model Pricing (Only for model_type='text')
    input_cost_per_1m = models.DecimalField(
        max_digits=10,
        decimal_places=4,
        null=True,
        blank=True,
        validators=[MinValueValidator(Decimal('0.0001'))],
        help_text="Cost per 1 million input tokens (USD). For text models only."
    )
    output_cost_per_1m = models.DecimalField(
        max_digits=10,
        decimal_places=4,
        null=True,
        blank=True,
        validators=[MinValueValidator(Decimal('0.0001'))],
        help_text="Cost per 1 million output tokens (USD). For text models only."
    )
    context_window = models.IntegerField(
        null=True,
        blank=True,
        validators=[MinValueValidator(1)],
        help_text="Maximum input tokens (context length). For text models only."
    )
    max_output_tokens = models.IntegerField(
        null=True,
        blank=True,
        validators=[MinValueValidator(1)],
        help_text="Maximum output tokens per request. For text models only."
    )

    # Image Model Pricing (Only for model_type='image')
    cost_per_image = models.DecimalField(
        max_digits=10,
        decimal_places=4,
        null=True,
        blank=True,
        validators=[MinValueValidator(Decimal('0.0001'))],
        help_text="Fixed cost per image generation (USD). For image models only."
    )
    valid_sizes = models.JSONField(
        null=True,
        blank=True,
        help_text='Array of valid image sizes (e.g., ["1024x1024", "1024x1792"]). For image models only.'
    )

    # Capabilities
    supports_json_mode = models.BooleanField(
        default=False,
        help_text="True for models with JSON response format support"
    )
    supports_vision = models.BooleanField(
        default=False,
        help_text="True for models that can analyze images"
    )
    supports_function_calling = models.BooleanField(
        default=False,
        help_text="True for models with function calling capability"
    )

    # Status & Configuration
    is_active = models.BooleanField(
        default=True,
        db_index=True,
        help_text="Enable/disable model without deleting"
    )
    is_default = models.BooleanField(
        default=False,
        db_index=True,
        help_text="Mark as default model for its type (only one per type)"
    )
    sort_order = models.IntegerField(
        default=0,
        help_text="Control order in dropdown lists (lower numbers first)"
    )

    # Metadata
    description = models.TextField(
        blank=True,
        help_text="Admin notes about model usage, strengths, limitations"
    )
    release_date = models.DateField(
        null=True,
        blank=True,
        help_text="When model was released/added"
    )
    deprecation_date = models.DateField(
        null=True,
        blank=True,
        help_text="When model will be removed"
    )

    # Audit Fields
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    updated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='ai_model_updates',
        help_text="Admin who last updated"
    )

    # History tracking (django-simple-history)
    history = HistoricalRecords()

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_ai_model_config'
        verbose_name = 'AI Model Configuration'
        verbose_name_plural = 'AI Model Configurations'
        ordering = ['model_type', 'sort_order', 'model_name']
        indexes = [
            models.Index(fields=['model_type', 'is_active']),
            models.Index(fields=['provider', 'is_active']),
            models.Index(fields=['is_default', 'model_type']),
        ]

    def __str__(self):
        return self.display_name

    def save(self, *args, **kwargs):
        """Ensure only one is_default per model_type"""
        if self.is_default:
            # Unset other defaults for same model_type before persisting.
            # NOTE(review): this two-step swap is not wrapped in a
            # transaction; concurrent saves could briefly leave two
            # defaults - acceptable for admin-driven edits, confirm if
            # this is ever called from request handlers.
            AIModelConfig.objects.filter(
                model_type=self.model_type,
                is_default=True
            ).exclude(pk=self.pk).update(is_default=False)
        super().save(*args, **kwargs)

    def get_cost_for_tokens(self, input_tokens, output_tokens):
        """Calculate cost (USD, Decimal) for text models based on token usage.

        Args:
            input_tokens: Number of prompt tokens consumed.
            output_tokens: Number of completion tokens generated.

        Raises:
            ValueError: If called on a non-text model or pricing is unset.
        """
        if self.model_type != 'text':
            raise ValueError("get_cost_for_tokens only applies to text models")
        # Explicit None checks: `not x` would also reject a legitimate
        # Decimal('0') (free) rate, since Decimal('0') is falsy.
        if self.input_cost_per_1m is None or self.output_cost_per_1m is None:
            raise ValueError(f"Model {self.model_name} missing cost_per_1m values")
        # Rates are per 1M tokens, so scale the summed cost down once.
        cost = (
            (Decimal(input_tokens) * self.input_cost_per_1m) +
            (Decimal(output_tokens) * self.output_cost_per_1m)
        ) / Decimal('1000000')
        return cost

    def get_cost_for_images(self, num_images):
        """Calculate cost (USD, Decimal) for image models: flat rate per image.

        Raises:
            ValueError: If called on a non-image model or pricing is unset.
        """
        if self.model_type != 'image':
            raise ValueError("get_cost_for_images only applies to image models")
        # Explicit None check for the same Decimal('0') reason as above.
        if self.cost_per_image is None:
            raise ValueError(f"Model {self.model_name} missing cost_per_image")
        return self.cost_per_image * Decimal(num_images)

    def validate_size(self, size):
        """Check if size is valid for this image model"""
        if self.model_type != 'image':
            raise ValueError("validate_size only applies to image models")
        if not self.valid_sizes:
            return True  # No size restrictions configured
        return size in self.valid_sizes

    def get_display_with_pricing(self):
        """For dropdowns: show model with pricing"""
        if self.model_type == 'text':
            return f"{self.display_name} - ${self.input_cost_per_1m}/${self.output_cost_per_1m} per 1M"
        elif self.model_type == 'image':
            return f"{self.display_name} - ${self.cost_per_image} per image"
        return self.display_name

View File

@@ -13,6 +13,7 @@ from igny8_core.modules.billing.views import (
CreditBalanceViewSet,
CreditUsageViewSet,
CreditTransactionViewSet,
AIModelConfigViewSet,
)
router = DefaultRouter()
@@ -21,6 +22,8 @@ router.register(r'admin', BillingViewSet, basename='billing-admin')
router.register(r'credits/balance', CreditBalanceViewSet, basename='credit-balance')
router.register(r'credits/usage', CreditUsageViewSet, basename='credit-usage')
router.register(r'credits/transactions', CreditTransactionViewSet, basename='credit-transactions')
# AI Models endpoint
router.register(r'ai/models', AIModelConfigViewSet, basename='ai-models')
# User-facing billing endpoints
router.register(r'invoices', InvoiceViewSet, basename='invoices')
router.register(r'payments', PaymentViewSet, basename='payments')

View File

@@ -15,6 +15,7 @@ from igny8_core.business.billing.models import (
CreditPackage,
PaymentMethodConfig,
PlanLimitUsage,
AIModelConfig,
)
from .models import CreditTransaction, CreditUsageLog, AccountPaymentMethod
from import_export.admin import ExportMixin, ImportExportMixin
@@ -744,3 +745,209 @@ class BillingConfigurationAdmin(Igny8ModelAdmin):
"""Track who made the change"""
obj.updated_by = request.user
super().save_model(request, obj, form, change)
@admin.register(AIModelConfig)
class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
    """
    Admin for AI Model Configuration - Database-driven model pricing
    Replaces hardcoded MODEL_RATES and IMAGE_MODEL_RATES
    """
    # Changelist columns; *_badge / *_icon entries are the custom display
    # methods defined below.
    list_display = [
        'model_name',
        'display_name_short',
        'model_type_badge',
        'provider_badge',
        'pricing_display',
        'is_active_icon',
        'is_default_icon',
        'sort_order',
        'updated_at',
    ]
    list_filter = [
        'model_type',
        'provider',
        'is_active',
        'is_default',
        'supports_json_mode',
        'supports_vision',
        'supports_function_calling',
    ]
    search_fields = ['model_name', 'display_name', 'description']
    ordering = ['model_type', 'sort_order', 'model_name']
    # Audit fields are system-maintained; never hand-edited.
    readonly_fields = ['created_at', 'updated_at', 'updated_by']
    fieldsets = (
        ('Basic Information', {
            'fields': ('model_name', 'display_name', 'model_type', 'provider', 'description'),
            'description': 'Core model identification and classification'
        }),
        ('Text Model Pricing', {
            'fields': ('input_cost_per_1m', 'output_cost_per_1m', 'context_window', 'max_output_tokens'),
            'description': 'Pricing and limits for TEXT models only (leave blank for image models)',
            'classes': ('collapse',)
        }),
        ('Image Model Pricing', {
            'fields': ('cost_per_image', 'valid_sizes'),
            'description': 'Pricing and configuration for IMAGE models only (leave blank for text models)',
            'classes': ('collapse',)
        }),
        ('Capabilities', {
            'fields': ('supports_json_mode', 'supports_vision', 'supports_function_calling'),
            'description': 'Model features and capabilities'
        }),
        ('Status & Display', {
            'fields': ('is_active', 'is_default', 'sort_order'),
            'description': 'Control model availability and ordering in dropdowns'
        }),
        ('Lifecycle', {
            'fields': ('release_date', 'deprecation_date'),
            'description': 'Model release and deprecation dates',
            'classes': ('collapse',)
        }),
        ('Audit Trail', {
            'fields': ('created_at', 'updated_at', 'updated_by'),
            'classes': ('collapse',)
        }),
    )

    # Custom display methods
    def display_name_short(self, obj):
        """Truncated display name for list view (max 50 chars incl. ellipsis)"""
        if len(obj.display_name) > 50:
            return obj.display_name[:47] + '...'
        return obj.display_name
    display_name_short.short_description = 'Display Name'

    def model_type_badge(self, obj):
        """Colored badge for model type"""
        colors = {
            'text': '#3498db',  # Blue
            'image': '#e74c3c',  # Red
            'embedding': '#2ecc71',  # Green
        }
        # Grey fallback for any type added later without a color entry.
        color = colors.get(obj.model_type, '#95a5a6')
        return format_html(
            '<span style="background-color: {}; color: white; padding: 3px 10px; '
            'border-radius: 3px; font-weight: bold;">{}</span>',
            color,
            obj.get_model_type_display()
        )
    model_type_badge.short_description = 'Type'

    def provider_badge(self, obj):
        """Colored badge for provider"""
        colors = {
            'openai': '#10a37f',  # OpenAI green
            'anthropic': '#d97757',  # Anthropic orange
            'runware': '#6366f1',  # Purple
            'google': '#4285f4',  # Google blue
        }
        color = colors.get(obj.provider, '#95a5a6')
        return format_html(
            '<span style="background-color: {}; color: white; padding: 3px 10px; '
            'border-radius: 3px; font-weight: bold;">{}</span>',
            color,
            obj.get_provider_display()
        )
    provider_badge.short_description = 'Provider'

    def pricing_display(self, obj):
        """Format pricing based on model type ('-' for types with no pricing)"""
        if obj.model_type == 'text':
            return format_html(
                '<span style="color: #2c3e50; font-family: monospace;">'
                '${} / ${} per 1M</span>',
                obj.input_cost_per_1m,
                obj.output_cost_per_1m
            )
        elif obj.model_type == 'image':
            return format_html(
                '<span style="color: #2c3e50; font-family: monospace;">'
                '${} per image</span>',
                obj.cost_per_image
            )
        return '-'
    pricing_display.short_description = 'Pricing'

    def is_active_icon(self, obj):
        """Active status icon (green/red dot)"""
        if obj.is_active:
            return format_html(
                '<span style="color: green; font-size: 18px;" title="Active">●</span>'
            )
        return format_html(
            '<span style="color: red; font-size: 18px;" title="Inactive">●</span>'
        )
    is_active_icon.short_description = 'Active'

    def is_default_icon(self, obj):
        """Default status icon (filled/hollow star)"""
        if obj.is_default:
            return format_html(
                '<span style="color: gold; font-size: 18px;" title="Default">★</span>'
            )
        return format_html(
            '<span style="color: #ddd; font-size: 18px;" title="Not Default">☆</span>'
        )
    is_default_icon.short_description = 'Default'

    # Admin actions
    actions = ['bulk_activate', 'bulk_deactivate', 'set_as_default']

    def bulk_activate(self, request, queryset):
        """Enable selected models"""
        count = queryset.update(is_active=True)
        self.message_user(
            request,
            f'{count} model(s) activated successfully.',
            messages.SUCCESS
        )
    bulk_activate.short_description = 'Activate selected models'

    def bulk_deactivate(self, request, queryset):
        """Disable selected models"""
        count = queryset.update(is_active=False)
        self.message_user(
            request,
            f'{count} model(s) deactivated successfully.',
            messages.WARNING
        )
    bulk_deactivate.short_description = 'Deactivate selected models'

    def set_as_default(self, request, queryset):
        """Set one model as default for its type (requires a single selection)"""
        if queryset.count() != 1:
            self.message_user(
                request,
                'Please select exactly one model to set as default.',
                messages.ERROR
            )
            return
        model = queryset.first()
        # Unset other defaults for same type
        AIModelConfig.objects.filter(
            model_type=model.model_type,
            is_default=True
        ).exclude(pk=model.pk).update(is_default=False)
        # Set this as default
        model.is_default = True
        model.save()
        self.message_user(
            request,
            f'{model.model_name} is now the default {model.get_model_type_display()} model.',
            messages.SUCCESS
        )
    set_as_default.short_description = 'Set as default model (for its type)'

    def save_model(self, request, obj, form, change):
        """Track who made the change"""
        # Stamp the acting admin before delegating to the normal save.
        obj.updated_by = request.user
        super().save_model(request, obj, form, change)

View File

@@ -0,0 +1,264 @@
# Generated by Django 5.2.9 on 2025-12-24 01:20
import django.core.validators
import django.db.models.deletion
import simple_history.models
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
def seed_ai_models(apps, schema_editor):
    """Seed AIModelConfig with data from constants.py

    Uses the historical (migration-state) model via apps.get_model, so the
    custom save() on the real model does NOT run here - the single
    is_default per type is guaranteed only by this data itself.
    """
    AIModelConfig = apps.get_model('billing', 'AIModelConfig')

    # Text Models (from MODEL_RATES in constants.py)
    text_models = [
        {
            'model_name': 'gpt-4o-mini',
            'display_name': 'GPT-4o mini - Fast & Affordable',
            'model_type': 'text',
            'provider': 'openai',
            'input_cost_per_1m': Decimal('0.1500'),
            'output_cost_per_1m': Decimal('0.6000'),
            'context_window': 128000,
            'max_output_tokens': 16000,
            'supports_json_mode': True,
            'supports_vision': False,
            'supports_function_calling': True,
            'is_active': True,
            'is_default': True,  # Default text model
            'sort_order': 1,
            'description': 'Fast and cost-effective model for most tasks. Best balance of speed and quality.',
        },
        {
            'model_name': 'gpt-4.1',
            'display_name': 'GPT-4.1 - Legacy Model',
            'model_type': 'text',
            'provider': 'openai',
            'input_cost_per_1m': Decimal('2.0000'),
            'output_cost_per_1m': Decimal('8.0000'),
            'context_window': 8192,
            'max_output_tokens': 4096,
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': True,
            'is_default': False,
            'sort_order': 10,
            'description': 'Legacy GPT-4 model. Higher cost but reliable.',
        },
        {
            'model_name': 'gpt-4o',
            'display_name': 'GPT-4o - High Quality with Vision',
            'model_type': 'text',
            'provider': 'openai',
            'input_cost_per_1m': Decimal('2.5000'),
            'output_cost_per_1m': Decimal('10.0000'),
            'context_window': 128000,
            'max_output_tokens': 4096,
            'supports_json_mode': True,
            'supports_vision': True,
            'supports_function_calling': True,
            'is_active': True,
            'is_default': False,
            'sort_order': 5,
            'description': 'Most capable GPT-4 variant with vision capabilities. Best for complex tasks.',
        },
        {
            'model_name': 'gpt-5.1',
            'display_name': 'GPT-5.1 - Advanced (16K context)',
            'model_type': 'text',
            'provider': 'openai',
            'input_cost_per_1m': Decimal('1.2500'),
            'output_cost_per_1m': Decimal('10.0000'),
            'context_window': 16000,
            'max_output_tokens': 16000,
            'supports_json_mode': True,
            'supports_vision': False,
            'supports_function_calling': True,
            'is_active': True,
            'is_default': False,
            'sort_order': 20,
            'description': 'Advanced GPT-5 model with 16K context window.',
        },
        {
            'model_name': 'gpt-5.2',
            'display_name': 'GPT-5.2 - Most Advanced (16K context)',
            'model_type': 'text',
            'provider': 'openai',
            'input_cost_per_1m': Decimal('1.7500'),
            'output_cost_per_1m': Decimal('14.0000'),
            'context_window': 16000,
            'max_output_tokens': 16000,
            'supports_json_mode': True,
            'supports_vision': False,
            'supports_function_calling': True,
            'is_active': True,
            'is_default': False,
            'sort_order': 30,
            'description': 'Most advanced GPT-5 variant. Highest quality output.',
        },
    ]

    # Image Models (from IMAGE_MODEL_RATES in constants.py)
    image_models = [
        {
            'model_name': 'dall-e-3',
            'display_name': 'DALL-E 3 - High Quality Images',
            'model_type': 'image',
            'provider': 'openai',
            'cost_per_image': Decimal('0.0400'),
            'valid_sizes': ['1024x1024', '1024x1792', '1792x1024'],
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': True,
            'is_default': True,  # Default image model
            'sort_order': 1,
            'description': 'Latest DALL-E model with best quality and prompt adherence.',
        },
        {
            'model_name': 'dall-e-2',
            'display_name': 'DALL-E 2 - Standard Quality',
            'model_type': 'image',
            'provider': 'openai',
            'cost_per_image': Decimal('0.0200'),
            'valid_sizes': ['256x256', '512x512', '1024x1024'],
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': True,
            'is_default': False,
            'sort_order': 10,
            'description': 'Cost-effective image generation with good quality.',
        },
        {
            'model_name': 'gpt-image-1',
            'display_name': 'GPT Image 1 (Not compatible with OpenAI)',
            'model_type': 'image',
            'provider': 'openai',
            'cost_per_image': Decimal('0.0420'),
            'valid_sizes': ['1024x1024'],
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': False,  # Not valid for OpenAI endpoint
            'is_default': False,
            'sort_order': 20,
            'description': 'Not compatible with OpenAI /v1/images/generations endpoint.',
        },
        {
            'model_name': 'gpt-image-1-mini',
            'display_name': 'GPT Image 1 Mini (Not compatible with OpenAI)',
            'model_type': 'image',
            'provider': 'openai',
            'cost_per_image': Decimal('0.0110'),
            'valid_sizes': ['1024x1024'],
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': False,  # Not valid for OpenAI endpoint
            'is_default': False,
            'sort_order': 30,
            'description': 'Not compatible with OpenAI /v1/images/generations endpoint.',
        },
    ]

    # Create all models (per-row create keeps simple_history rows in sync)
    for model_data in text_models + image_models:
        AIModelConfig.objects.create(**model_data)
def reverse_seed(apps, schema_editor):
    """Reverse of seed_ai_models: delete every AIModelConfig row."""
    config_model = apps.get_model('billing', 'AIModelConfig')
    config_model.objects.all().delete()
class Migration(migrations.Migration):
    # Auto-generated migration: creates the concrete AIModelConfig table,
    # its simple_history shadow table, and seeds initial model data.
    # Do not edit operations by hand after it has been applied anywhere.

    dependencies = [
        ('billing', '0019_populate_token_based_config'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # History table first (no unique constraint on model_name; many
        # rows per record, one per change).
        migrations.CreateModel(
            name='HistoricalAIModelConfig',
            fields=[
                ('id', models.BigIntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('model_name', models.CharField(db_index=True, help_text="Model identifier used in API calls (e.g., 'gpt-4o-mini', 'dall-e-3')", max_length=100)),
                ('display_name', models.CharField(help_text="Human-readable name shown in UI (e.g., 'GPT-4o mini - Fast & Affordable')", max_length=200)),
                ('model_type', models.CharField(choices=[('text', 'Text Generation'), ('image', 'Image Generation'), ('embedding', 'Embedding')], db_index=True, help_text='Type of model - determines which pricing fields are used', max_length=20)),
                ('provider', models.CharField(choices=[('openai', 'OpenAI'), ('anthropic', 'Anthropic'), ('runware', 'Runware'), ('google', 'Google')], db_index=True, help_text='AI provider (OpenAI, Anthropic, etc.)', max_length=50)),
                ('input_cost_per_1m', models.DecimalField(blank=True, decimal_places=4, help_text='Cost per 1 million input tokens (USD). For text models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('output_cost_per_1m', models.DecimalField(blank=True, decimal_places=4, help_text='Cost per 1 million output tokens (USD). For text models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('context_window', models.IntegerField(blank=True, help_text='Maximum input tokens (context length). For text models only.', null=True, validators=[django.core.validators.MinValueValidator(1)])),
                ('max_output_tokens', models.IntegerField(blank=True, help_text='Maximum output tokens per request. For text models only.', null=True, validators=[django.core.validators.MinValueValidator(1)])),
                ('cost_per_image', models.DecimalField(blank=True, decimal_places=4, help_text='Fixed cost per image generation (USD). For image models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('valid_sizes', models.JSONField(blank=True, help_text='Array of valid image sizes (e.g., ["1024x1024", "1024x1792"]). For image models only.', null=True)),
                ('supports_json_mode', models.BooleanField(default=False, help_text='True for models with JSON response format support')),
                ('supports_vision', models.BooleanField(default=False, help_text='True for models that can analyze images')),
                ('supports_function_calling', models.BooleanField(default=False, help_text='True for models with function calling capability')),
                ('is_active', models.BooleanField(db_index=True, default=True, help_text='Enable/disable model without deleting')),
                ('is_default', models.BooleanField(db_index=True, default=False, help_text='Mark as default model for its type (only one per type)')),
                ('sort_order', models.IntegerField(default=0, help_text='Control order in dropdown lists (lower numbers first)')),
                ('description', models.TextField(blank=True, help_text='Admin notes about model usage, strengths, limitations')),
                ('release_date', models.DateField(blank=True, help_text='When model was released/added', null=True)),
                ('deprecation_date', models.DateField(blank=True, help_text='When model will be removed', null=True)),
                ('created_at', models.DateTimeField(blank=True, editable=False)),
                ('updated_at', models.DateTimeField(blank=True, editable=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField(db_index=True)),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('updated_by', models.ForeignKey(blank=True, db_constraint=False, help_text='Admin who last updated', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical AI Model Configuration',
                'verbose_name_plural': 'historical AI Model Configurations',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': ('history_date', 'history_id'),
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        # Concrete table, matching the AIModelConfig model definition.
        migrations.CreateModel(
            name='AIModelConfig',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model_name', models.CharField(db_index=True, help_text="Model identifier used in API calls (e.g., 'gpt-4o-mini', 'dall-e-3')", max_length=100, unique=True)),
                ('display_name', models.CharField(help_text="Human-readable name shown in UI (e.g., 'GPT-4o mini - Fast & Affordable')", max_length=200)),
                ('model_type', models.CharField(choices=[('text', 'Text Generation'), ('image', 'Image Generation'), ('embedding', 'Embedding')], db_index=True, help_text='Type of model - determines which pricing fields are used', max_length=20)),
                ('provider', models.CharField(choices=[('openai', 'OpenAI'), ('anthropic', 'Anthropic'), ('runware', 'Runware'), ('google', 'Google')], db_index=True, help_text='AI provider (OpenAI, Anthropic, etc.)', max_length=50)),
                ('input_cost_per_1m', models.DecimalField(blank=True, decimal_places=4, help_text='Cost per 1 million input tokens (USD). For text models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('output_cost_per_1m', models.DecimalField(blank=True, decimal_places=4, help_text='Cost per 1 million output tokens (USD). For text models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('context_window', models.IntegerField(blank=True, help_text='Maximum input tokens (context length). For text models only.', null=True, validators=[django.core.validators.MinValueValidator(1)])),
                ('max_output_tokens', models.IntegerField(blank=True, help_text='Maximum output tokens per request. For text models only.', null=True, validators=[django.core.validators.MinValueValidator(1)])),
                ('cost_per_image', models.DecimalField(blank=True, decimal_places=4, help_text='Fixed cost per image generation (USD). For image models only.', max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))])),
                ('valid_sizes', models.JSONField(blank=True, help_text='Array of valid image sizes (e.g., ["1024x1024", "1024x1792"]). For image models only.', null=True)),
                ('supports_json_mode', models.BooleanField(default=False, help_text='True for models with JSON response format support')),
                ('supports_vision', models.BooleanField(default=False, help_text='True for models that can analyze images')),
                ('supports_function_calling', models.BooleanField(default=False, help_text='True for models with function calling capability')),
                ('is_active', models.BooleanField(db_index=True, default=True, help_text='Enable/disable model without deleting')),
                ('is_default', models.BooleanField(db_index=True, default=False, help_text='Mark as default model for its type (only one per type)')),
                ('sort_order', models.IntegerField(default=0, help_text='Control order in dropdown lists (lower numbers first)')),
                ('description', models.TextField(blank=True, help_text='Admin notes about model usage, strengths, limitations')),
                ('release_date', models.DateField(blank=True, help_text='When model was released/added', null=True)),
                ('deprecation_date', models.DateField(blank=True, help_text='When model will be removed', null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('updated_by', models.ForeignKey(blank=True, help_text='Admin who last updated', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ai_model_updates', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'AI Model Configuration',
                'verbose_name_plural': 'AI Model Configurations',
                'db_table': 'igny8_ai_model_config',
                'ordering': ['model_type', 'sort_order', 'model_name'],
                'indexes': [models.Index(fields=['model_type', 'is_active'], name='igny8_ai_mo_model_t_1eef71_idx'), models.Index(fields=['provider', 'is_active'], name='igny8_ai_mo_provide_fbda6c_idx'), models.Index(fields=['is_default', 'model_type'], name='igny8_ai_mo_is_defa_95bfb9_idx')],
            },
        ),
        # Seed initial model data
        migrations.RunPython(seed_ai_models, reverse_seed),
    ]

View File

@@ -142,3 +142,59 @@ class UsageLimitsSerializer(serializers.Serializer):
"""Serializer for usage limits response"""
limits: LimitCardSerializer = LimitCardSerializer(many=True)
class AIModelConfigSerializer(serializers.Serializer):
    """
    Serializer for AI Model Configuration (Read-Only API)

    Provides model information for frontend dropdowns and displays.
    All fields are read-only; pricing fields are null for the model
    type they don't apply to.
    """
    model_name = serializers.CharField(read_only=True)
    display_name = serializers.CharField(read_only=True)
    model_type = serializers.CharField(read_only=True)
    provider = serializers.CharField(read_only=True)

    # Text model fields (null for image models)
    input_cost_per_1m = serializers.DecimalField(
        max_digits=10,
        decimal_places=4,
        read_only=True,
        allow_null=True
    )
    output_cost_per_1m = serializers.DecimalField(
        max_digits=10,
        decimal_places=4,
        read_only=True,
        allow_null=True
    )
    context_window = serializers.IntegerField(read_only=True, allow_null=True)
    max_output_tokens = serializers.IntegerField(read_only=True, allow_null=True)

    # Image model fields (null for text models)
    cost_per_image = serializers.DecimalField(
        max_digits=10,
        decimal_places=4,
        read_only=True,
        allow_null=True
    )
    valid_sizes = serializers.ListField(read_only=True, allow_null=True)

    # Capabilities
    supports_json_mode = serializers.BooleanField(read_only=True)
    supports_vision = serializers.BooleanField(read_only=True)
    supports_function_calling = serializers.BooleanField(read_only=True)

    # Status
    is_default = serializers.BooleanField(read_only=True)
    sort_order = serializers.IntegerField(read_only=True)

    # Computed field
    pricing_display = serializers.SerializerMethodField()

    def get_pricing_display(self, obj):
        """Generate pricing display string based on model type.

        Fix: '$' was missing before the output cost, which was
        inconsistent with AIModelConfig.get_display_with_pricing().
        """
        if obj.model_type == 'text':
            return f"${obj.input_cost_per_1m}/${obj.output_cost_per_1m} per 1M"
        elif obj.model_type == 'image':
            return f"${obj.cost_per_image} per image"
        return ""

View File

@@ -751,3 +751,75 @@ class AdminBillingViewSet(viewsets.ViewSet):
return Response({'error': 'Method not found'}, status=404)
@extend_schema_view(
    list=extend_schema(tags=['AI Models'], summary='List available AI models'),
    retrieve=extend_schema(tags=['AI Models'], summary='Get AI model details'),
)
class AIModelConfigViewSet(viewsets.ReadOnlyModelViewSet):
    """
    ViewSet for AI Model Configuration (Read-Only)

    Provides model information for frontend dropdowns and displays.
    Supports ?type=, ?provider= and ?default= query-string filters.
    """
    permission_classes = [IsAuthenticatedAndActive]
    authentication_classes = [JWTAuthentication, CSRFExemptSessionAuthentication]
    throttle_scope = 'billing'
    throttle_classes = [DebugScopedRateThrottle]
    pagination_class = None  # No pagination for model lists (small set)
    lookup_field = 'model_name'

    def get_queryset(self):
        """Active AIModelConfig rows, narrowed by optional query params."""
        from igny8_core.business.billing.models import AIModelConfig
        queryset = AIModelConfig.objects.filter(is_active=True)
        # Filter by model type (?type=text|image|embedding)
        model_type = self.request.query_params.get('type')
        if model_type:
            queryset = queryset.filter(model_type=model_type)
        # Filter by provider (?provider=openai|anthropic|...)
        provider = self.request.query_params.get('provider')
        if provider:
            queryset = queryset.filter(provider=provider)
        # Filter by default flag (?default=true|1|yes)
        is_default = self.request.query_params.get('default')
        if is_default is not None:
            is_default_bool = is_default.lower() in ['true', '1', 'yes']
            queryset = queryset.filter(is_default=is_default_bool)
        return queryset.order_by('model_type', 'sort_order', 'model_name')

    def get_serializer_class(self):
        """Return serializer class"""
        from .serializers import AIModelConfigSerializer
        return AIModelConfigSerializer

    def list(self, request, *args, **kwargs):
        """List all available models with filters"""
        queryset = self.get_queryset()
        serializer = self.get_serializer(queryset, many=True)
        return success_response(
            data=serializer.data,
            message='AI models retrieved successfully'
        )

    def retrieve(self, request, *args, **kwargs):
        """Get details for a specific model.

        Fix: previously caught bare Exception, which turned genuine server
        errors (DB outage, serializer bugs) into 404s and leaked the
        exception text to clients. Only a missing row is a 404 now.
        """
        from igny8_core.business.billing.models import AIModelConfig
        try:
            instance = self.get_queryset().get(model_name=kwargs.get('model_name'))
        except AIModelConfig.DoesNotExist:
            return error_response(
                message='Model not found',
                errors={'model_name': ['No active model with this name']},
                status_code=status.HTTP_404_NOT_FOUND
            )
        serializer = self.get_serializer(instance)
        return success_response(
            data=serializer.data,
            message='AI model details retrieved successfully'
        )