#!/usr/bin/env python
"""
Seed AIModelConfig with the CORRECT models from GlobalIntegrationSettings choices.

These are the models that should be available in the dropdowns.
"""
import os
import sys

import django

# Setup Django: make the app package importable and point at the project
# settings BEFORE django.setup() and before any model imports below.
sys.path.insert(0, '/app')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

# These imports must stay below django.setup() — importing a models module
# before the app registry is ready raises AppRegistryNotReady.
from decimal import Decimal

from igny8_core.business.billing.models import AIModelConfig
def seed_models():
    """Create AIModelConfig records for all models that were in GlobalIntegrationSettings.

    Idempotent: uses update_or_create keyed on (model_name, provider), so
    re-running refreshes pricing/flags instead of duplicating rows. After
    seeding, marks gpt-4o-mini as the default text model and dall-e-3 as the
    default image model, then prints a summary of active models by type.
    """
    models_to_create = [
        # OpenAI Text Models (from OPENAI_MODEL_CHOICES)
        {
            'model_name': 'gpt-4.1',
            'display_name': 'GPT-4.1',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.002'),  # $2.00 per 1M = $0.002 per 1K
            'cost_per_1k_output_tokens': Decimal('0.008'),  # $8.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4o-mini',
            'display_name': 'GPT-4o Mini',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00015'),  # $0.15 per 1M
            'cost_per_1k_output_tokens': Decimal('0.0006'),  # $0.60 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4o',
            'display_name': 'GPT-4o',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.0025'),  # $2.50 per 1M
            'cost_per_1k_output_tokens': Decimal('0.01'),  # $10.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4-turbo-preview',
            'display_name': 'GPT-4 Turbo Preview',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.01'),  # $10.00 per 1M
            'cost_per_1k_output_tokens': Decimal('0.03'),  # $30.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-5.1',
            'display_name': 'GPT-5.1 (16K)',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00125'),  # $1.25 per 1M
            'cost_per_1k_output_tokens': Decimal('0.01'),  # $10.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-5.2',
            'display_name': 'GPT-5.2 (16K)',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00175'),  # $1.75 per 1M
            'cost_per_1k_output_tokens': Decimal('0.014'),  # $14.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },

        # OpenAI Image Models (from DALLE_MODEL_CHOICES)
        {
            'model_name': 'dall-e-3',
            'display_name': 'DALL·E 3',
            'provider': 'openai',
            'model_type': 'image',
            # Image models repurpose the token-cost fields: the "input" cost
            # holds the per-image price and output cost is zero.
            'cost_per_1k_input_tokens': Decimal('0.04'),  # $0.040 per image
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,  # 1 image = 1 unit
            'is_active': True,
        },
        {
            'model_name': 'dall-e-2',
            'display_name': 'DALL·E 2',
            'provider': 'openai',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.02'),  # $0.020 per image
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },

        # Runware Image Models (from RUNWARE_MODEL_CHOICES)
        {
            'model_name': 'runware:97@1',
            'display_name': 'Runware 97@1 (Versatile)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.005'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
        {
            'model_name': 'runware:100@1',
            'display_name': 'Runware 100@1 (High Quality)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.008'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
        {
            'model_name': 'runware:101@1',
            'display_name': 'Runware 101@1 (Fast)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.003'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
    ]

    print("Seeding AIModelConfig with correct models...")
    print("=" * 70)

    created_count = 0
    updated_count = 0

    for model_data in models_to_create:
        # (model_name, provider) is the natural key; everything else is
        # refreshed from model_data on every run.
        model, created = AIModelConfig.objects.update_or_create(
            model_name=model_data['model_name'],
            provider=model_data['provider'],
            defaults=model_data
        )

        if created:
            created_count += 1
            print(f"✓ Created: {model.display_name} ({model.model_name})")
        else:
            updated_count += 1
            print(f"↻ Updated: {model.display_name} ({model.model_name})")

    print("=" * 70)
    print(f"Summary: {created_count} created, {updated_count} updated")

    # Set default models
    print("\nSetting default models...")

    # Default text model: gpt-4o-mini — clear any previous text default first
    # so exactly one text model carries is_default=True.
    default_text = AIModelConfig.objects.filter(model_name='gpt-4o-mini').first()
    if default_text:
        AIModelConfig.objects.filter(model_type='text').update(is_default=False)
        default_text.is_default = True
        default_text.save()
        print(f"✓ Default text model: {default_text.display_name}")

    # Default image model: dall-e-3 — same single-default invariant for images.
    default_image = AIModelConfig.objects.filter(model_name='dall-e-3').first()
    if default_image:
        AIModelConfig.objects.filter(model_type='image').update(is_default=False)
        default_image.is_default = True
        default_image.save()
        print(f"✓ Default image model: {default_image.display_name}")

    print("\n✅ Seeding complete!")

    # Show summary
    print("\nActive models by type:")
    print("-" * 70)
    for model_type in ['text', 'image']:
        models = AIModelConfig.objects.filter(model_type=model_type, is_active=True)
        print(f"\n{model_type.upper()}: {models.count()} models")
        for m in models:
            default = " [DEFAULT]" if m.is_default else ""
            print(f" - {m.display_name} ({m.model_name}) - {m.provider}{default}")
# Script entry point: run the seeder only when executed directly, not on import.
if __name__ == '__main__':
    seed_models()