Success Rate
diff --git a/backend/igny8_core/utils/debug.py b/backend/igny8_core/utils/debug.py
new file mode 100644
index 00000000..bbdc6589
--- /dev/null
+++ b/backend/igny8_core/utils/debug.py
@@ -0,0 +1,128 @@
+"""
+Debug logging utilities
+Fast checks with minimal overhead when debug is disabled.
+"""
+from django.core.cache import cache
+import os
+
+
def is_debug_enabled():
    """
    Fast check whether debug logging is enabled.

    Uses the Django cache to avoid a database query on every call and
    returns False immediately when the flag cannot be determined.

    Returns:
        bool: True if debug logging is enabled, False otherwise.
    """
    # Check cache first (fastest path). A cached False is a valid hit,
    # so compare against None instead of relying on truthiness.
    cache_key = 'debug_enabled'
    enabled = cache.get(cache_key)

    if enabled is not None:
        return bool(enabled)

    # Cache miss - fall back to the database.
    try:
        # Imported lazily so this module can be loaded before the app
        # registry (and the DebugConfiguration model) is ready.
        from igny8_core.business.system.models import DebugConfiguration
        config = DebugConfiguration.get_config()
        enabled = config.enable_debug_logging

        # Cache the actual boolean value for one minute.
        cache.set(cache_key, enabled, 60)

        return bool(enabled)
    except Exception:
        # DB not ready or model doesn't exist: default to False and cache
        # that briefly to avoid hammering the database with failures.
        # (The exception object was previously bound but never used.)
        cache.set(cache_key, False, 10)
        return False
+
+
def _should_log_in_this_worker():
    """
    Decide whether this worker process should emit debug logs.

    The first worker PID to call this claims a cache slot for five
    minutes; only that PID logs afterwards, so multi-worker deployments
    do not produce duplicate log lines.
    """
    current_pid = os.getpid()

    claimed_pid = cache.get('debug_first_worker_pid')
    if claimed_pid is None:
        # No worker has claimed the logging slot yet - claim it for us.
        cache.set('debug_first_worker_pid', current_pid, 300)
        return True

    # Someone already claimed it; log only if that someone is us.
    return current_pid == claimed_pid
+
+
def debug_log(message, category='general'):
    """
    Print a debug message to stdout when debug logging is enabled.

    Skips all work when debug is disabled, and emits from only one
    worker process to avoid duplicate output.

    Args:
        message: Message to log.
        category: Log category (ai_steps, api_requests, db_queries,
            celery_tasks, or the default 'general').
    """
    # Fast exits: globally disabled, or not the designated worker.
    if not is_debug_enabled():
        return
    if not _should_log_in_this_worker():
        return

    # Honour per-category flags on the configuration, when available.
    try:
        from igny8_core.business.system.models import DebugConfiguration
        config = DebugConfiguration.get_config()

        if category == 'ai_steps' and not config.log_ai_steps:
            return
        elif category == 'api_requests' and not config.log_api_requests:
            return
        elif category == 'db_queries' and not config.log_database_queries:
            return
        elif category == 'celery_tasks' and not config.log_celery_tasks:
            return
    except Exception:
        # Config unavailable - fall through and log anyway.
        pass

    # Debug is enabled - emit to stdout with timestamp/PID/category prefix.
    import sys
    import datetime

    now = datetime.datetime.now().strftime("%H:%M:%S")
    pid = os.getpid()
    header = f"[{now}] [PID:{pid}] [DEBUG:{category.upper()}]"
    print(f"{header} {message}", file=sys.stdout, flush=True)
+
+
def debug_log_ai_step(step_name, message, **kwargs):
    """
    Log an AI execution step, but only when debug logging is enabled.

    All formatting is skipped entirely when debug is disabled.

    Args:
        step_name: Name of the step (INIT, PREPARE, AI_CALL, etc.).
        message: Step message.
        **kwargs: Additional key=value context appended to the message.
    """
    # Bail out before doing any string work if logging is off or this
    # is not the designated logging worker.
    if not is_debug_enabled():
        return
    if not _should_log_in_this_worker():
        return

    # Append any extra context as " | k1=v1, k2=v2".
    suffix = ""
    if kwargs:
        pairs = (f"{key}={value}" for key, value in kwargs.items())
        suffix = f" | {', '.join(pairs)}"

    debug_log(f"[{step_name}] {message}{suffix}", category='ai_steps')
diff --git a/backend/seed_ai_models.py b/backend/seed_ai_models.py
new file mode 100644
index 00000000..8b486cf7
--- /dev/null
+++ b/backend/seed_ai_models.py
@@ -0,0 +1,119 @@
#!/usr/bin/env python
"""
Seed AI model configurations.

Standalone script: bootstraps Django, then declares the catalogue of
AIModelConfig rows (per-model token pricing and token-to-credit
conversion) that this script upserts into the database.
"""
import os
import sys
import django

# Django must be configured before any project models are imported.
sys.path.insert(0, '/app')  # container path where the backend is mounted
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from django.db import transaction
from igny8_core.business.billing.models import AIModelConfig

# Model catalogue. Token costs are USD per 1K tokens; for image models the
# input cost is zero and tokens_per_credit is 1 (1 image = 1 credit unit).
# Exactly one model per model_type carries is_default=True.
# NOTE(review): prices are hard-coded — confirm against the providers'
# current published pricing before relying on them for billing.
models_data = [
    {
        'model_name': 'gpt-4o-mini',
        'provider': 'openai',
        'model_type': 'text',
        'cost_per_1k_input_tokens': 0.000150,
        'cost_per_1k_output_tokens': 0.000600,
        'tokens_per_credit': 50,
        'display_name': 'GPT-4o Mini',
        'is_active': True,
        'is_default': True,  # default text model
    },
    {
        'model_name': 'gpt-4-turbo-2024-04-09',
        'provider': 'openai',
        'model_type': 'text',
        'cost_per_1k_input_tokens': 0.010000,
        'cost_per_1k_output_tokens': 0.030000,
        'tokens_per_credit': 30,
        'display_name': 'GPT-4 Turbo',
        'is_active': True,
        'is_default': False,
    },
    {
        'model_name': 'gpt-3.5-turbo',
        'provider': 'openai',
        'model_type': 'text',
        'cost_per_1k_input_tokens': 0.000500,
        'cost_per_1k_output_tokens': 0.001500,
        'tokens_per_credit': 200,
        'display_name': 'GPT-3.5 Turbo',
        'is_active': True,
        'is_default': False,
    },
    {
        'model_name': 'claude-3-5-sonnet-20241022',
        'provider': 'anthropic',
        'model_type': 'text',
        'cost_per_1k_input_tokens': 0.003000,
        'cost_per_1k_output_tokens': 0.015000,
        'tokens_per_credit': 40,
        'display_name': 'Claude 3.5 Sonnet',
        'is_active': True,
        'is_default': False,
    },
    {
        'model_name': 'claude-3-haiku-20240307',
        'provider': 'anthropic',
        'model_type': 'text',
        'cost_per_1k_input_tokens': 0.000250,
        'cost_per_1k_output_tokens': 0.001250,
        'tokens_per_credit': 150,
        'display_name': 'Claude 3 Haiku',
        'is_active': True,
        'is_default': False,
    },
    {
        'model_name': 'runware-flux-1.1-pro',
        'provider': 'runware',
        'model_type': 'image',
        'cost_per_1k_input_tokens': 0.000000,
        'cost_per_1k_output_tokens': 0.040000,  # per-image cost
        'tokens_per_credit': 1,
        'display_name': 'Runware FLUX 1.1 Pro',
        'is_active': True,
        'is_default': True,  # default image model
    },
    {
        'model_name': 'dall-e-3',
        'provider': 'openai',
        'model_type': 'image',
        'cost_per_1k_input_tokens': 0.000000,
        'cost_per_1k_output_tokens': 0.040000,  # per-image cost
        'tokens_per_credit': 1,
        'display_name': 'DALL-E 3',
        'is_active': True,
        'is_default': False,
    },
]
+
print('Seeding AI model configurations...')

# Counters for the summary printed after the upsert pass.
created_count = 0
updated_count = 0

# All upserts happen in one transaction so a partial seed never lands.
with transaction.atomic():
    for entry in models_data:
        record, was_created = AIModelConfig.objects.update_or_create(
            model_name=entry['model_name'],
            defaults=entry,
        )

        if was_created:
            created_count += 1
            print(f'✓ Created: {record.display_name}')
        else:
            updated_count += 1
            print(f'↻ Updated: {record.display_name}')

print('\n' + '='*60)
print(f'✓ Successfully processed {len(models_data)} AI models')
print(f' - Created: {created_count}')
print(f' - Updated: {updated_count}')
print('='*60)
diff --git a/backend/seed_correct_ai_models.py b/backend/seed_correct_ai_models.py
new file mode 100644
index 00000000..0945b4cd
--- /dev/null
+++ b/backend/seed_correct_ai_models.py
@@ -0,0 +1,194 @@
#!/usr/bin/env python
"""
Seed AIModelConfig with the CORRECT models from GlobalIntegrationSettings choices.
These are the models that should be available in the dropdowns.
"""
import os
import sys
import django

# Django must be configured before any project models are imported.
sys.path.insert(0, '/app')  # container path where the backend is mounted
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from decimal import Decimal
from igny8_core.business.billing.models import AIModelConfig
+
def seed_models():
    """Upsert AIModelConfig records for all models that were in
    GlobalIntegrationSettings, then mark the default text and image models.

    Keyed on (model_name, provider); safe to re-run. Costs are Decimal USD
    per 1K tokens (per image for image models).
    """

    # NOTE(review): several image-model prices below are marked "Estimated"
    # — confirm against provider pricing before using them for billing.
    models_to_create = [
        # OpenAI Text Models (from OPENAI_MODEL_CHOICES)
        {
            'model_name': 'gpt-4.1',
            'display_name': 'GPT-4.1',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.002'),  # $2.00 per 1M = $0.002 per 1K
            'cost_per_1k_output_tokens': Decimal('0.008'),  # $8.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4o-mini',
            'display_name': 'GPT-4o Mini',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00015'),  # $0.15 per 1M
            'cost_per_1k_output_tokens': Decimal('0.0006'),  # $0.60 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4o',
            'display_name': 'GPT-4o',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.0025'),  # $2.50 per 1M
            'cost_per_1k_output_tokens': Decimal('0.01'),  # $10.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-4-turbo-preview',
            'display_name': 'GPT-4 Turbo Preview',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.01'),  # $10.00 per 1M
            'cost_per_1k_output_tokens': Decimal('0.03'),  # $30.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-5.1',
            'display_name': 'GPT-5.1 (16K)',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00125'),  # $1.25 per 1M
            'cost_per_1k_output_tokens': Decimal('0.01'),  # $10.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },
        {
            'model_name': 'gpt-5.2',
            'display_name': 'GPT-5.2 (16K)',
            'provider': 'openai',
            'model_type': 'text',
            'cost_per_1k_input_tokens': Decimal('0.00175'),  # $1.75 per 1M
            'cost_per_1k_output_tokens': Decimal('0.014'),  # $14.00 per 1M
            'tokens_per_credit': 100,
            'is_active': True,
        },

        # OpenAI Image Models (from DALLE_MODEL_CHOICES)
        {
            'model_name': 'dall-e-3',
            'display_name': 'DALL·E 3',
            'provider': 'openai',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.04'),  # $0.040 per image
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,  # 1 image = 1 unit
            'is_active': True,
        },
        {
            'model_name': 'dall-e-2',
            'display_name': 'DALL·E 2',
            'provider': 'openai',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.02'),  # $0.020 per image
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },

        # Runware Image Models (from RUNWARE_MODEL_CHOICES)
        {
            'model_name': 'runware:97@1',
            'display_name': 'Runware 97@1 (Versatile)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.005'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
        {
            'model_name': 'runware:100@1',
            'display_name': 'Runware 100@1 (High Quality)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.008'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
        {
            'model_name': 'runware:101@1',
            'display_name': 'Runware 101@1 (Fast)',
            'provider': 'runware',
            'model_type': 'image',
            'cost_per_1k_input_tokens': Decimal('0.003'),  # Estimated
            'cost_per_1k_output_tokens': Decimal('0.00'),
            'tokens_per_credit': 1,
            'is_active': True,
        },
    ]

    print("Seeding AIModelConfig with correct models...")
    print("=" * 70)

    created_count = 0
    updated_count = 0

    # Upsert each record, keyed on (model_name, provider).
    for model_data in models_to_create:
        model, created = AIModelConfig.objects.update_or_create(
            model_name=model_data['model_name'],
            provider=model_data['provider'],
            defaults=model_data
        )

        if created:
            created_count += 1
            print(f"✓ Created: {model.display_name} ({model.model_name})")
        else:
            updated_count += 1
            print(f"↻ Updated: {model.display_name} ({model.model_name})")

    print("=" * 70)
    print(f"Summary: {created_count} created, {updated_count} updated")

    # Set default models: clear the flag across the type first so exactly
    # one model per type ends up as default.
    print("\nSetting default models...")

    # Default text model: gpt-4o-mini
    default_text = AIModelConfig.objects.filter(model_name='gpt-4o-mini').first()
    if default_text:
        AIModelConfig.objects.filter(model_type='text').update(is_default=False)
        default_text.is_default = True
        default_text.save()
        print(f"✓ Default text model: {default_text.display_name}")

    # Default image model: dall-e-3
    default_image = AIModelConfig.objects.filter(model_name='dall-e-3').first()
    if default_image:
        AIModelConfig.objects.filter(model_type='image').update(is_default=False)
        default_image.is_default = True
        default_image.save()
        print(f"✓ Default image model: {default_image.display_name}")

    print("\n✅ Seeding complete!")

    # Show a final summary of active models grouped by type.
    print("\nActive models by type:")
    print("-" * 70)
    for model_type in ['text', 'image']:
        models = AIModelConfig.objects.filter(model_type=model_type, is_active=True)
        print(f"\n{model_type.upper()}: {models.count()} models")
        for m in models:
            default = " [DEFAULT]" if m.is_default else ""
            print(f" - {m.display_name} ({m.model_name}) - {m.provider}{default}")

if __name__ == '__main__':
    seed_models()
diff --git a/backend/test_system.py b/backend/test_system.py
new file mode 100644
index 00000000..f0fe9707
--- /dev/null
+++ b/backend/test_system.py
@@ -0,0 +1,109 @@
#!/usr/bin/env python
"""Comprehensive smoke test of the AI and billing system at commit #10.

Standalone script: bootstraps Django, then exercises credit-config saves,
credit checks, AICore/AIEngine initialisation and credit deduction against
the live database. Each section prints ✓ on success or ✗ with the error.
"""
import django
import os

# Configure Django before importing any project models.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

print('='*70)
print('FINAL COMPREHENSIVE TEST - COMMIT #10 STATE')
print('='*70)

# Test 1: Credit Cost Config Save — change the clustering cost, then
# revert it so the database is left in its original state.
print('\n1. Testing CreditCostConfig Save:')
try:
    from igny8_core.business.billing.models import CreditCostConfig
    obj = CreditCostConfig.objects.get(operation_type='clustering')
    original_cost = obj.credits_cost
    obj.credits_cost = 5
    obj.save()
    print(f' ✓ Save successful: clustering cost changed to {obj.credits_cost}')
    obj.credits_cost = original_cost
    obj.save()
    print(f' ✓ Reverted to original: {obj.credits_cost}')
except Exception as e:
    print(f' ✗ ERROR: {e}')

# Test 2: Credit Check — verify check_credits passes for several
# operation types on the first account (raises if insufficient).
print('\n2. Testing Credit Check:')
try:
    from igny8_core.business.billing.services.credit_service import CreditService
    from igny8_core.auth.models import Account

    acc = Account.objects.first()
    print(f' Account: {acc.name} with {acc.credits} credits')

    CreditService.check_credits(acc, 'clustering')
    print(f' ✓ Credit check passed for clustering')

    CreditService.check_credits(acc, 'idea_generation')
    print(f' ✓ Credit check passed for idea_generation')

    # Third argument is a size parameter (word count) for this operation.
    CreditService.check_credits(acc, 'content_generation', 1000)
    print(f' ✓ Credit check passed for content_generation (1000 words)')
except Exception as e:
    print(f' ✗ ERROR: {e}')

# Test 3: AICore — construct it for an account and report whether an
# OpenAI key was resolved (without printing the key itself).
print('\n3. Testing AICore Initialization:')
try:
    from igny8_core.ai.ai_core import AICore
    from igny8_core.auth.models import Account

    acc = Account.objects.first()
    ai_core = AICore(account=acc)
    print(f' ✓ AICore initialized for account: {acc.name}')
    has_key = "SET" if ai_core._openai_api_key else "NOT SET"
    print(f' - OpenAI key: {has_key}')
except Exception as e:
    print(f' ✗ ERROR: {e}')

# Test 4: AIEngine — construct it and verify the internal operation-type
# mapping resolves a known operation.
print('\n4. Testing AIEngine:')
try:
    from igny8_core.ai.engine import AIEngine
    from igny8_core.auth.models import Account

    acc = Account.objects.first()
    engine = AIEngine(account=acc)
    print(f' ✓ AIEngine initialized')

    # Test operation type mapping
    op_type = engine._get_operation_type('auto_cluster')
    print(f' ✓ Operation mapping: auto_cluster → {op_type}')
except Exception as e:
    print(f' ✗ ERROR: {e}')

# Test 5: Credit Deduction — deduct inside an atomic block that is then
# rolled back, so the account balance is left untouched.
print('\n5. Testing Credit Deduction:')
try:
    from igny8_core.business.billing.services.credit_service import CreditService
    from igny8_core.auth.models import Account
    from django.db import transaction

    acc = Account.objects.first()
    original_credits = acc.credits
    print(f' Before: {original_credits} credits')

    with transaction.atomic():
        CreditService.deduct_credits(
            account=acc,
            operation_type='clustering',
            tokens_input=100,
            tokens_output=200
        )
        acc.refresh_from_db()
        print(f' After deduction: {acc.credits} credits')
        print(f' ✓ Deducted: {original_credits - acc.credits} credits')

        # Rollback so the deduction never commits.
        # NOTE(review): set_rollback must run inside the atomic block;
        # indentation reconstructed from a whitespace-mangled source —
        # confirm against the original file.
        transaction.set_rollback(True)

    acc.refresh_from_db()
    print(f' After rollback: {acc.credits} credits')
except Exception as e:
    print(f' ✗ ERROR: {e}')

print('\n' + '='*70)
print('ALL TESTS COMPLETE - System is healthy!')
print('='*70)
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index ba6e3b30..ec5620e5 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -4,6 +4,8 @@ import { HelmetProvider } from "react-helmet-async";
import AppLayout from "./layout/AppLayout";
import { ScrollToTop } from "./components/common/ScrollToTop";
import ProtectedRoute from "./components/auth/ProtectedRoute";
+import ModuleGuard from "./components/common/ModuleGuard";
+import { AwsAdminGuard } from "./components/auth/AwsAdminGuard";
import GlobalErrorDisplay from "./components/common/GlobalErrorDisplay";
import LoadingStateMonitor from "./components/common/LoadingStateMonitor";
import { useAuthStore } from "./store/authStore";
@@ -67,6 +69,9 @@ const AccountSettingsPage = lazy(() => import("./pages/account/AccountSettingsPa
const TeamManagementPage = lazy(() => import("./pages/account/TeamManagementPage"));
const UsageAnalyticsPage = lazy(() => import("./pages/account/UsageAnalyticsPage"));
+// Admin Module - Only dashboard for aws-admin users
+const AdminSystemDashboard = lazy(() => import("./pages/admin/AdminSystemDashboard"));
+
// Reference Data - Lazy loaded
const SeedKeywords = lazy(() => import("./pages/Reference/SeedKeywords"));
const ReferenceIndustries = lazy(() => import("./pages/Reference/Industries"));
@@ -81,6 +86,7 @@ const Users = lazy(() => import("./pages/Settings/Users"));
const Subscriptions = lazy(() => import("./pages/Settings/Subscriptions"));
const SystemSettings = lazy(() => import("./pages/Settings/System"));
const AccountSettings = lazy(() => import("./pages/Settings/Account"));
+const ModuleSettings = lazy(() => import("./pages/Settings/Modules"));
const AISettings = lazy(() => import("./pages/Settings/AI"));
const Plans = lazy(() => import("./pages/Settings/Plans"));
const Industries = lazy(() => import("./pages/Settings/Industries"));
@@ -147,42 +153,115 @@ export default function App() {
{/* Planner Module - Redirect dashboard to keywords */}
} />
-
} />
-
} />
-
} />
-
} />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
{/* Writer Module - Redirect dashboard to tasks */}
} />
- } />
+
+
+
+ } />
{/* Writer Content Routes - Order matters: list route must come before detail route */}
- } />
+
+
+
+ } />
{/* Content detail view - matches /writer/content/:id (e.g., /writer/content/10) */}
- } />
+
+
+
+ } />
} />
- } />
- } />
- } />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
{/* Automation Module */}
} />
{/* Linker Module - Redirect dashboard to content */}
} />
- } />
+
+
+
+ } />
{/* Optimizer Module - Redirect dashboard to content */}
} />
- } />
- } />
+
+
+
+ } />
+
+
+
+ } />
+ {/* Thinker Module */}
{/* Thinker Module - Redirect dashboard to prompts */}
} />
- } />
- } />
- } />
- } />
- } />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
+
+
+
+ } />
{/* Billing Module */}
} />
@@ -198,6 +277,13 @@ export default function App() {
} />
} />
+ {/* Admin Routes - Only Dashboard for aws-admin users */}
+
+
+
+ } />
+
{/* Reference Data */}
} />
} />
@@ -215,6 +301,7 @@ export default function App() {
} />
} />
} />
+ } />
} />
} />
} />
diff --git a/frontend/src/components/auth/AwsAdminGuard.tsx b/frontend/src/components/auth/AwsAdminGuard.tsx
new file mode 100644
index 00000000..b1033e2c
--- /dev/null
+++ b/frontend/src/components/auth/AwsAdminGuard.tsx
@@ -0,0 +1,31 @@
+import { Navigate } from 'react-router-dom';
+import { useAuthStore } from '../../store/authStore';
+
+interface AwsAdminGuardProps {
+ children: React.ReactNode;
+}
+
+/**
+ * Route guard that only allows access to users of the aws-admin account
+ * Used for the single remaining admin dashboard page
+ */
+export const AwsAdminGuard: React.FC = ({ children }) => {
+ const { user, loading } = useAuthStore();
+
+ if (loading) {
+ return (
+
+ );
+ }
+
+ // Check if user belongs to aws-admin account
+ const isAwsAdmin = user?.account?.slug === 'aws-admin';
+
+ if (!isAwsAdmin) {
+ return ;
+ }
+
+ return <>{children}>;
+};
diff --git a/frontend/src/components/common/ImageServiceCard.tsx b/frontend/src/components/common/ImageServiceCard.tsx
index 5559b18d..a4e49892 100644
--- a/frontend/src/components/common/ImageServiceCard.tsx
+++ b/frontend/src/components/common/ImageServiceCard.tsx
@@ -1,7 +1,6 @@
import { ReactNode, useState, useEffect } from 'react';
import Switch from '../form/switch/Switch';
import Button from '../ui/button/Button';
-import { usePersistentToggle } from '../../hooks/usePersistentToggle';
import { useToast } from '../ui/toast/ToastContainer';
type ValidationStatus = 'not_configured' | 'pending' | 'success' | 'error';
@@ -13,12 +12,12 @@ interface ImageServiceCardProps {
validationStatus: ValidationStatus;
onSettings: () => void;
onDetails: () => void;
+ onToggleSuccess?: (enabled: boolean, data?: any) => void; // Callback when toggle succeeds
}
/**
* Image Generation Service Card Component
- * Manages default image generation service and model selection app-wide
- * This is separate from individual API integrations (OpenAI/Runware)
+ * Manages default image generation service enable/disable state
*/
export default function ImageServiceCard({
icon,
@@ -27,32 +26,20 @@ export default function ImageServiceCard({
validationStatus,
onSettings,
onDetails,
+ onToggleSuccess,
}: ImageServiceCardProps) {
const toast = useToast();
-
- // Use built-in persistent toggle for image generation service
- const persistentToggle = usePersistentToggle({
- resourceId: 'image_generation',
- getEndpoint: '/v1/system/settings/integrations/{id}/',
- saveEndpoint: '/v1/system/settings/integrations/{id}/save/',
- initialEnabled: false,
- onToggleSuccess: (enabled) => {
- toast.success(`Image generation service ${enabled ? 'enabled' : 'disabled'}`);
- },
- onToggleError: (error) => {
- toast.error(`Failed to update image generation service: ${error.message}`);
- },
- });
-
- const enabled = persistentToggle.enabled;
- const isToggling = persistentToggle.loading;
- const [imageSettings, setImageSettings] = useState<{ service?: string; model?: string; runwareModel?: string }>({});
+ const [enabled, setEnabled] = useState(false);
+ const [loading, setLoading] = useState(true);
+ const [isSaving, setIsSaving] = useState(false);
+ const [imageSettings, setImageSettings] = useState<{ service?: string; provider?: string; model?: string; imageModel?: string; runwareModel?: string }>({});
const API_BASE_URL = import.meta.env.VITE_BACKEND_URL || 'https://api.igny8.com/api';
- // Load image settings to get provider and model
+ // Load image settings
useEffect(() => {
const loadSettings = async () => {
+ setLoading(true);
try {
const response = await fetch(
`${API_BASE_URL}/v1/system/settings/integrations/image_generation/`,
@@ -62,38 +49,67 @@ export default function ImageServiceCard({
const data = await response.json();
if (data.success && data.data) {
setImageSettings(data.data);
+ setEnabled(data.data.enabled || false);
}
}
} catch (error) {
console.error('Error loading image settings:', error);
+ } finally {
+ setLoading(false);
}
};
loadSettings();
- }, [API_BASE_URL, enabled]); // Reload when enabled changes
+ }, [API_BASE_URL]);
- const handleToggle = (newEnabled: boolean) => {
- persistentToggle.toggle(newEnabled);
+ // Handle toggle
+ const handleToggle = async (newEnabled: boolean) => {
+ setIsSaving(true);
+ try {
+ const response = await fetch(
+ `${API_BASE_URL}/v1/system/settings/integrations/image_generation/save/`,
+ {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ credentials: 'include',
+ body: JSON.stringify({ ...imageSettings, enabled: newEnabled }),
+ }
+ );
+ if (response.ok) {
+ setEnabled(newEnabled);
+ toast.success(`Image generation service ${newEnabled ? 'enabled' : 'disabled'}`);
+
+ // Call onToggleSuccess callback with enabled state and settings data
+ if (onToggleSuccess) {
+ onToggleSuccess(newEnabled, imageSettings);
+ }
+ } else {
+ toast.error('Failed to update image generation service');
+ }
+ } catch (error) {
+ console.error('Error toggling image generation:', error);
+ toast.error('Failed to update image generation service');
+ } finally {
+ setIsSaving(false);
+ }
};
// Get provider and model display text
const getProviderModelText = () => {
- const service = imageSettings.service || 'openai';
+ const service = imageSettings.service || imageSettings.provider || 'openai';
if (service === 'openai') {
- const model = imageSettings.model || 'dall-e-3';
+ const model = imageSettings.model || imageSettings.imageModel || 'dall-e-3';
const modelNames: Record = {
'dall-e-3': 'DALL·E 3',
'dall-e-2': 'DALL·E 2',
- 'gpt-image-1': 'GPT Image 1 (Full)',
- 'gpt-image-1-mini': 'GPT Image 1 Mini',
};
return `OpenAI ${modelNames[model] || model}`;
} else if (service === 'runware') {
- const model = imageSettings.runwareModel || 'runware:97@1';
+ const model = imageSettings.runwareModel || imageSettings.model || 'runware:97@1';
// Map model ID to display name
const modelDisplayNames: Record = {
'runware:97@1': 'HiDream-I1 Full',
- 'runware:gen3a_turbo': 'Gen3a Turbo',
- 'runware:gen3a': 'Gen3a',
+ 'runware:100@1': 'Runware 100@1',
+ 'runware:101@1': 'Runware 101@1',
};
const displayName = modelDisplayNames[model] || model;
return `Runware ${displayName}`;
@@ -177,7 +193,7 @@ export default function ImageServiceCard({
diff --git a/frontend/src/pages/Settings/Integration.tsx b/frontend/src/pages/Settings/Integration.tsx
index ed20db2d..c93c350f 100644
--- a/frontend/src/pages/Settings/Integration.tsx
+++ b/frontend/src/pages/Settings/Integration.tsx
@@ -47,11 +47,7 @@ const GSCIcon = () => (
interface IntegrationConfig {
id: string;
enabled: boolean;
- apiKey?: string;
- clientId?: string;
- clientSecret?: string;
- authBaseUri?: string;
- appName?: string;
+ // Note: API keys are configured platform-wide in GlobalIntegrationSettings (not user-editable)
model?: string;
// Image generation service settings (separate from API integrations)
service?: string; // 'openai' or 'runware'
@@ -74,13 +70,12 @@ export default function Integration() {
openai: {
id: 'openai',
enabled: false,
- apiKey: '',
- model: 'gpt-4.1',
+ model: 'gpt-4o-mini',
},
runware: {
id: 'runware',
enabled: false,
- apiKey: '',
+ model: 'runware:97@1',
},
image_generation: {
id: 'image_generation',
@@ -105,6 +100,17 @@ export default function Integration() {
const [isSaving, setIsSaving] = useState(false);
const [isTesting, setIsTesting] = useState(false);
+ // Available models from AIModelConfig
+ const [availableModels, setAvailableModels] = useState<{
+ openai_text: Array<{ value: string; label: string }>;
+ openai_image: Array<{ value: string; label: string }>;
+ runware_image: Array<{ value: string; label: string }>;
+ }>({
+ openai_text: [],
+ openai_image: [],
+ runware_image: [],
+ });
+
// Validation status for each integration: 'not_configured' | 'pending' | 'success' | 'error'
const [validationStatuses, setValidationStatuses] = useState