Merge remote changes and add SEO fields to Tasks model, improve content generation response handling, and enhance progress bar animation

This commit is contained in:
Gitea Deploy
2025-11-09 21:25:11 +00:00
17 changed files with 199 additions and 1173 deletions

View File

@@ -269,6 +269,7 @@ class AICore:
'cost': cost, 'cost': cost,
'error': None, 'error': None,
'api_id': api_id, 'api_id': api_id,
'duration': request_duration, # Add duration tracking
} }
else: else:
error_msg = 'No content in OpenAI response' error_msg = 'No content in OpenAI response'
@@ -315,8 +316,9 @@ class AICore:
} }
except Exception as e: except Exception as e:
error_msg = f'Unexpected error: {str(e)}' error_msg = f'Unexpected error: {str(e)}'
print(f"[AI][{function_name}][Error] {error_msg}") logger.error(f"[AI][{function_name}][Error] {error_msg}", exc_info=True)
logger.error(error_msg, exc_info=True) if tracker:
tracker.error('UnexpectedError', error_msg, e)
return { return {
'content': None, 'content': None,
'error': error_msg, 'error': error_msg,

View File

@@ -34,16 +34,14 @@ class BaseAIFunction(ABC):
def validate(self, payload: dict, account=None) -> Dict[str, Any]: def validate(self, payload: dict, account=None) -> Dict[str, Any]:
""" """
Validate input payload. Validate input payload.
Default: checks for 'ids' array, max_items limit. Default: checks for 'ids' array.
Override for custom validation. Override for custom validation.
""" """
ids = payload.get('ids', []) ids = payload.get('ids', [])
if not ids: if not ids:
return {'valid': False, 'error': 'No IDs provided'} return {'valid': False, 'error': 'No IDs provided'}
max_items = self.get_max_items() # Removed max_items limit check - no limits enforced
if max_items and len(ids) > max_items:
return {'valid': False, 'error': f'Maximum {max_items} items allowed'}
return {'valid': True} return {'valid': True}

View File

@@ -2,14 +2,16 @@
AI Function implementations AI Function implementations
""" """
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction, generate_ideas_core # REMOVED: generate_ideas function removed
# from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction, generate_ideas_core
from igny8_core.ai.functions.generate_content import GenerateContentFunction, generate_content_core from igny8_core.ai.functions.generate_content import GenerateContentFunction, generate_content_core
from igny8_core.ai.functions.generate_images import GenerateImagesFunction, generate_images_core from igny8_core.ai.functions.generate_images import GenerateImagesFunction, generate_images_core
__all__ = [ __all__ = [
'AutoClusterFunction', 'AutoClusterFunction',
'GenerateIdeasFunction', # REMOVED: generate_ideas function removed
'generate_ideas_core', # 'GenerateIdeasFunction',
# 'generate_ideas_core',
'GenerateContentFunction', 'GenerateContentFunction',
'generate_content_core', 'generate_content_core',
'GenerateImagesFunction', 'GenerateImagesFunction',

View File

@@ -34,14 +34,15 @@ class AutoClusterFunction(BaseAIFunction):
} }
def get_max_items(self) -> int: def get_max_items(self) -> int:
return 20 # No limit - return None
return None
def validate(self, payload: dict, account=None) -> Dict: def validate(self, payload: dict, account=None) -> Dict:
"""Custom validation for clustering with plan limit checks""" """Custom validation for clustering"""
from igny8_core.ai.validators import validate_ids, validate_keywords_exist, validate_cluster_limits from igny8_core.ai.validators import validate_ids, validate_keywords_exist
# Base validation # Base validation (no max_items limit)
result = validate_ids(payload, max_items=self.get_max_items()) result = validate_ids(payload, max_items=None)
if not result['valid']: if not result['valid']:
return result return result
@@ -51,10 +52,7 @@ class AutoClusterFunction(BaseAIFunction):
if not keywords_result['valid']: if not keywords_result['valid']:
return keywords_result return keywords_result
# Check plan limits # Removed plan limits check
limit_result = validate_cluster_limits(account, operation_type='cluster')
if not limit_result['valid']:
return limit_result
return {'valid': True} return {'valid': True}

View File

@@ -36,7 +36,8 @@ class GenerateImagesFunction(BaseAIFunction):
} }
def get_max_items(self) -> int: def get_max_items(self) -> int:
return 20 # Max tasks per batch # No limit - return None
return None
def validate(self, payload: dict, account=None) -> Dict: def validate(self, payload: dict, account=None) -> Dict:
"""Validate task IDs""" """Validate task IDs"""

View File

@@ -117,7 +117,8 @@ Make sure each prompt is detailed enough for image generation, describing the vi
# Mapping from function names to prompt types # Mapping from function names to prompt types
FUNCTION_TO_PROMPT_TYPE = { FUNCTION_TO_PROMPT_TYPE = {
'auto_cluster': 'clustering', 'auto_cluster': 'clustering',
'generate_ideas': 'ideas', # REMOVED: generate_ideas function removed
# 'generate_ideas': 'ideas',
'generate_content': 'content_generation', 'generate_content': 'content_generation',
'generate_images': 'image_prompt_extraction', 'generate_images': 'image_prompt_extraction',
'extract_image_prompts': 'image_prompt_extraction', 'extract_image_prompts': 'image_prompt_extraction',

View File

@@ -66,10 +66,11 @@ def _load_auto_cluster():
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
return AutoClusterFunction return AutoClusterFunction
def _load_generate_ideas(): # REMOVED: generate_ideas function removed
"""Lazy loader for generate_ideas function""" # def _load_generate_ideas():
from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction # """Lazy loader for generate_ideas function"""
return GenerateIdeasFunction # from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction
# return GenerateIdeasFunction
def _load_generate_content(): def _load_generate_content():
"""Lazy loader for generate_content function""" """Lazy loader for generate_content function"""
@@ -82,7 +83,8 @@ def _load_generate_images():
return GenerateImagesFunction return GenerateImagesFunction
register_lazy_function('auto_cluster', _load_auto_cluster) register_lazy_function('auto_cluster', _load_auto_cluster)
register_lazy_function('generate_ideas', _load_generate_ideas) # REMOVED: generate_ideas function removed
# register_lazy_function('generate_ideas', _load_generate_ideas)
register_lazy_function('generate_content', _load_generate_content) register_lazy_function('generate_content', _load_generate_content)
register_lazy_function('generate_images', _load_generate_images) register_lazy_function('generate_images', _load_generate_images)

View File

@@ -11,12 +11,13 @@ MODEL_CONFIG = {
"temperature": 0.7, "temperature": 0.7,
"response_format": {"type": "json_object"}, # Auto-enabled for JSON mode models "response_format": {"type": "json_object"}, # Auto-enabled for JSON mode models
}, },
"generate_ideas": { # REMOVED: generate_ideas function removed
"model": "gpt-4.1", # "generate_ideas": {
"max_tokens": 4000, # "model": "gpt-4.1",
"temperature": 0.7, # "max_tokens": 4000,
"response_format": {"type": "json_object"}, # "temperature": 0.7,
}, # "response_format": {"type": "json_object"},
# },
"generate_content": { "generate_content": {
"model": "gpt-4.1", "model": "gpt-4.1",
"max_tokens": 8000, "max_tokens": 8000,
@@ -40,7 +41,8 @@ MODEL_CONFIG = {
FUNCTION_ALIASES = { FUNCTION_ALIASES = {
"cluster_keywords": "auto_cluster", "cluster_keywords": "auto_cluster",
"auto_cluster_keywords": "auto_cluster", "auto_cluster_keywords": "auto_cluster",
"auto_generate_ideas": "generate_ideas", # REMOVED: generate_ideas function removed
# "auto_generate_ideas": "generate_ideas",
"auto_generate_content": "generate_content", "auto_generate_content": "generate_content",
"auto_generate_images": "generate_images", "auto_generate_images": "generate_images",
} }

View File

@@ -12,7 +12,8 @@ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8.settings')
django.setup() django.setup()
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
from igny8_core.ai.functions.generate_ideas import generate_ideas_core # REMOVED: generate_ideas function removed
# from igny8_core.ai.functions.generate_ideas import generate_ideas_core
from igny8_core.ai.functions.generate_content import generate_content_core from igny8_core.ai.functions.generate_content import generate_content_core
from igny8_core.ai.functions.generate_images import generate_images_core from igny8_core.ai.functions.generate_images import generate_images_core
from igny8_core.ai.ai_core import AICore from igny8_core.ai.ai_core import AICore
@@ -122,7 +123,8 @@ if __name__ == '__main__':
test_ai_core() test_ai_core()
test_json_extraction() test_json_extraction()
test_auto_cluster() test_auto_cluster()
test_generate_ideas() # REMOVED: generate_ideas function removed
# test_generate_ideas()
test_generate_content() test_generate_content()
test_generate_images() test_generate_images()

View File

@@ -236,21 +236,40 @@ class ConsoleStepTracker:
self.start_time = time.time() self.start_time = time.time()
self.steps = [] self.steps = []
self.current_phase = None self.current_phase = None
# Debug: Verify DEBUG_MODE is enabled
import sys
if DEBUG_MODE:
init_msg = f"[DEBUG] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is ENABLED"
logger.info(init_msg)
print(init_msg, flush=True, file=sys.stdout)
else:
init_msg = f"[WARNING] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is DISABLED"
logger.warning(init_msg)
print(init_msg, flush=True, file=sys.stdout)
def _log(self, phase: str, message: str, status: str = 'info'): def _log(self, phase: str, message: str, status: str = 'info'):
"""Internal logging method that checks DEBUG_MODE""" """Internal logging method that checks DEBUG_MODE"""
if not DEBUG_MODE: if not DEBUG_MODE:
return return
import sys
timestamp = datetime.now().strftime('%H:%M:%S') timestamp = datetime.now().strftime('%H:%M:%S')
phase_label = phase.upper() phase_label = phase.upper()
if status == 'error': if status == 'error':
print(f"[{timestamp}] [{self.function_name}] [{phase_label}] [ERROR] {message}") log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] [ERROR] {message}"
# Use logger.error for errors so they're always visible
logger.error(log_msg)
elif status == 'success': elif status == 'success':
print(f"[{timestamp}] [{self.function_name}] [{phase_label}] ✅ {message}") log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] ✅ {message}"
logger.info(log_msg)
else: else:
print(f"[{timestamp}] [{self.function_name}] [{phase_label}] {message}") log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] {message}"
logger.info(log_msg)
# Also print to stdout for immediate visibility (works in Celery worker logs)
print(log_msg, flush=True, file=sys.stdout)
self.steps.append({ self.steps.append({
'timestamp': timestamp, 'timestamp': timestamp,
@@ -285,7 +304,10 @@ class ConsoleStepTracker:
duration = time.time() - self.start_time duration = time.time() - self.start_time
self._log('DONE', f"{message} (Duration: {duration:.2f}s)", status='success') self._log('DONE', f"{message} (Duration: {duration:.2f}s)", status='success')
if DEBUG_MODE: if DEBUG_MODE:
print(f"[{self.function_name}] === AI Task Complete ===") import sys
complete_msg = f"[{self.function_name}] === AI Task Complete ==="
logger.info(complete_msg)
print(complete_msg, flush=True, file=sys.stdout)
def error(self, error_type: str, message: str, exception: Exception = None): def error(self, error_type: str, message: str, exception: Exception = None):
"""Log error with standardized format""" """Log error with standardized format"""
@@ -294,9 +316,12 @@ class ConsoleStepTracker:
error_msg += f" ({type(exception).__name__})" error_msg += f" ({type(exception).__name__})"
self._log(self.current_phase or 'ERROR', error_msg, status='error') self._log(self.current_phase or 'ERROR', error_msg, status='error')
if DEBUG_MODE and exception: if DEBUG_MODE and exception:
import sys
import traceback import traceback
print(f"[{self.function_name}] [ERROR] Stack trace:") error_trace_msg = f"[{self.function_name}] [ERROR] Stack trace:"
traceback.print_exc() logger.error(error_trace_msg, exc_info=exception)
print(error_trace_msg, flush=True, file=sys.stdout)
traceback.print_exc(file=sys.stdout)
def retry(self, attempt: int, max_attempts: int, reason: str = ""): def retry(self, attempt: int, max_attempts: int, reason: str = ""):
"""Log retry attempt""" """Log retry attempt"""

View File

@@ -10,11 +10,11 @@ logger = logging.getLogger(__name__)
def validate_ids(payload: dict, max_items: Optional[int] = None) -> Dict[str, Any]: def validate_ids(payload: dict, max_items: Optional[int] = None) -> Dict[str, Any]:
""" """
Base validation: checks for 'ids' array and max_items limit. Base validation: checks for 'ids' array.
Args: Args:
payload: Request payload containing 'ids' array payload: Request payload containing 'ids' array
max_items: Maximum number of items allowed (None = no limit) max_items: Maximum number of items allowed (deprecated - no longer enforced)
Returns: Returns:
Dict with 'valid' (bool) and optional 'error' (str) Dict with 'valid' (bool) and optional 'error' (str)
@@ -23,8 +23,7 @@ def validate_ids(payload: dict, max_items: Optional[int] = None) -> Dict[str, An
if not ids: if not ids:
return {'valid': False, 'error': 'No IDs provided'} return {'valid': False, 'error': 'No IDs provided'}
if max_items and len(ids) > max_items: # Removed max_items limit check - no limits enforced
return {'valid': False, 'error': f'Maximum {max_items} items allowed'}
return {'valid': True} return {'valid': True}
@@ -55,46 +54,16 @@ def validate_keywords_exist(ids: list, account=None) -> Dict[str, Any]:
def validate_cluster_limits(account, operation_type: str = 'cluster') -> Dict[str, Any]: def validate_cluster_limits(account, operation_type: str = 'cluster') -> Dict[str, Any]:
""" """
Validate plan limits for cluster operations. Validate plan limits for cluster operations.
DISABLED: All limits have been removed.
Args: Args:
account: Account object account: Account object
operation_type: Type of operation ('cluster', 'idea', etc.) operation_type: Type of operation ('cluster', 'idea', etc.)
Returns: Returns:
Dict with 'valid' (bool) and optional 'error' (str) Dict with 'valid' (bool) - always returns valid
""" """
if not account: # All limits removed - always return valid
return {'valid': False, 'error': 'Account is required'}
plan = getattr(account, 'plan', None)
if not plan:
return {'valid': False, 'error': 'Account does not have an active plan'}
if operation_type == 'cluster':
from igny8_core.modules.planner.models import Clusters
# Check daily cluster limit
now = timezone.now()
start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
clusters_today = Clusters.objects.filter(
account=account,
created_at__gte=start_of_day
).count()
if plan.daily_cluster_limit and clusters_today >= plan.daily_cluster_limit:
return {
'valid': False,
'error': f'Daily cluster limit reached ({plan.daily_cluster_limit} clusters per day). Please try again tomorrow.'
}
# Check max clusters limit
total_clusters = Clusters.objects.filter(account=account).count()
if plan.max_clusters and total_clusters >= plan.max_clusters:
return {
'valid': False,
'error': f'Maximum cluster limit reached ({plan.max_clusters} clusters). Please upgrade your plan or delete existing clusters.'
}
return {'valid': True} return {'valid': True}

View File

@@ -7,7 +7,6 @@ from typing import List
from django.db import transaction from django.db import transaction
from igny8_core.modules.planner.models import Keywords, Clusters, ContentIdeas from igny8_core.modules.planner.models import Keywords, Clusters, ContentIdeas
from igny8_core.utils.ai_processor import ai_processor from igny8_core.utils.ai_processor import ai_processor
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
from igny8_core.ai.tracker import ConsoleStepTracker from igny8_core.ai.tracker import ConsoleStepTracker
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -729,554 +728,8 @@ def auto_cluster_keywords_task(self, keyword_ids: List[int], sector_id: int = No
return error_dict return error_dict
@shared_task(bind=True, max_retries=3) # REMOVED: All idea generation functions removed
def auto_generate_ideas_task(self, cluster_ids: List[int], account_id: int = None): # - auto_generate_ideas_task
""" # - _generate_single_idea_core
Celery task to generate content ideas for clusters using AI. # - generate_single_idea_task
Args:
cluster_ids: List of cluster IDs
account_id: Account ID for account isolation
"""
account_id = account_id
logger.info("=" * 80)
logger.info("auto_generate_ideas_task STARTED")
logger.info(f" - cluster_ids: {cluster_ids}")
logger.info(f" - account_id: {account_id}")
logger.info("=" * 80)
try:
from django.db import models
from django.db import connection
# Log database connection status
try:
connection.ensure_connection()
logger.info("Database connection: OK")
except Exception as db_error:
logger.error(f"Database connection error: {type(db_error).__name__}: {str(db_error)}")
raise
# Initialize progress
logger.info("Initializing task progress state...")
self.update_state(
state='PROGRESS',
meta={
'current': 0,
'total': len(cluster_ids),
'percentage': 0,
'message': 'Initializing content ideas generation...',
'phase': 'initializing'
}
)
# Get clusters with keywords and relationships (including site)
logger.info(f"Querying clusters with IDs: {cluster_ids}")
try:
clusters_queryset = Clusters.objects.filter(id__in=cluster_ids)
logger.info(f"Initial queryset count: {clusters_queryset.count()}")
if account_id:
clusters_queryset = clusters_queryset.filter(account_id=account_id)
logger.info(f"After account filter count: {clusters_queryset.count()}")
logger.info("Loading clusters with select_related...")
clusters = list(clusters_queryset.select_related('sector', 'account', 'site', 'sector__site'))
logger.info(f"Successfully loaded {len(clusters)} clusters")
# Log each cluster's details
for c in clusters:
account = getattr(c, 'account', None)
logger.info(f" Cluster {c.id}: name='{c.name}', account_id={account.id if account else 'None'}, site_id={c.site_id if c.site else 'None'}, sector_id={c.sector_id if c.sector else 'None'}")
except Exception as query_error:
logger.error(f"Error querying clusters: {type(query_error).__name__}: {str(query_error)}", exc_info=True)
raise
if not clusters:
logger.warning(f"No clusters found: {cluster_ids}")
return {'success': False, 'error': 'No clusters found'}
total_clusters = len(clusters)
# Update progress: Preparing clusters (0-10%)
self.update_state(
state='PROGRESS',
meta={
'current': 0,
'total': total_clusters,
'percentage': 5,
'message': f'Preparing {total_clusters} clusters for idea generation...',
'phase': 'preparing'
}
)
# Format cluster data for AI
cluster_data = []
for idx, cluster in enumerate(clusters):
# Get keywords for this cluster
keywords = Keywords.objects.filter(cluster=cluster).values_list('keyword', flat=True)
keywords_list = list(keywords)
cluster_item = {
'id': cluster.id,
'name': cluster.name,
'description': cluster.description or '',
'keywords': keywords_list,
}
cluster_data.append(cluster_item)
# Log cluster data being sent to AI
logger.info(f"Cluster {idx + 1}/{total_clusters} data for AI:")
logger.info(f" - ID: {cluster_item['id']}")
logger.info(f" - Name: {cluster_item['name']}")
logger.info(f" - Description: {cluster_item['description'][:100] if cluster_item['description'] else '(empty)'}...")
logger.info(f" - Keywords count: {len(keywords_list)}")
logger.info(f" - Keywords: {keywords_list[:5]}{'...' if len(keywords_list) > 5 else ''}")
account = getattr(cluster, 'account', None)
logger.info(f" - Cluster account: {account.id if account else 'None'}")
logger.info(f" - Cluster site: {cluster.site_id if cluster.site else 'None'}")
logger.info(f" - Cluster sector: {cluster.sector_id if cluster.sector else 'None'}")
# Update progress for each cluster preparation
progress_pct = 5 + int((idx / total_clusters) * 5)
self.update_state(
state='PROGRESS',
meta={
'current': idx + 1,
'total': total_clusters,
'percentage': progress_pct,
'message': f"Preparing cluster '{cluster.name}' ({idx + 1} of {total_clusters})...",
'phase': 'preparing',
'current_item': cluster.name
}
)
# Log clean request data before sending to AI
logger.info("=" * 80)
logger.info("CLEAN REQUEST DATA FOR AI (before sending request):")
logger.info("=" * 80)
import json
clean_data = {
'total_clusters': len(cluster_data),
'clusters': [
{
'id': c['id'],
'name': c['name'],
'description': c['description'][:200] if c['description'] else '(empty)',
'keywords_count': len(c['keywords']),
'keywords': c['keywords'],
}
for c in cluster_data
]
}
logger.info(json.dumps(clean_data, indent=2))
logger.info("=" * 80)
# Update progress: Generating ideas with AI (10-80%)
self.update_state(
state='PROGRESS',
meta={
'current': 0,
'total': total_clusters,
'percentage': 10,
'message': 'Generating content ideas with AI...',
'phase': 'generating'
}
)
# Create AIProcessor instance with account to load API keys from IntegrationSettings
account = clusters[0].account if clusters else None
from igny8_core.utils.ai_processor import AIProcessor
processor = AIProcessor(account=account)
logger.info(f"Calling AIProcessor.generate_ideas with {len(cluster_data)} clusters, account_id={account.id if account else None}")
result = processor.generate_ideas(cluster_data, account=account)
# Log AI response
logger.info("=" * 80)
logger.info("AI RESPONSE RECEIVED:")
logger.info("=" * 80)
if result.get('error'):
logger.error(f"AI Error: {result['error']}")
else:
ideas = result.get('ideas', [])
logger.info(f"Total ideas received: {len(ideas)}")
for idx, idea in enumerate(ideas[:3]): # Log first 3 ideas
logger.info(f"Idea {idx + 1}:")
logger.info(f" - Title: {idea.get('title', 'N/A')}")
logger.info(f" - Content Type: {idea.get('content_type', 'N/A')}")
logger.info(f" - Content Structure: {idea.get('content_structure', 'N/A')}")
logger.info(f" - Cluster Name: {idea.get('cluster_name', 'N/A')}")
logger.info(f" - Cluster ID: {idea.get('cluster_id', 'N/A')}")
logger.info(f" - Target Keywords: {idea.get('target_keywords', idea.get('covered_keywords', 'N/A'))}")
logger.info(f" - Description type: {type(idea.get('description', '')).__name__}")
if idx < 2: # Only show full description for first 2
desc = idea.get('description', '')
if isinstance(desc, str):
logger.info(f" - Description (first 200 chars): {desc[:200]}...")
else:
logger.info(f" - Description (dict): {str(desc)[:200]}...")
logger.info("=" * 80)
if result.get('error'):
logger.error(f"AI ideas generation error: {result['error']}")
self.update_state(
state='FAILURE',
meta={
'error': result['error'],
'message': f"Error: {result['error']}"
}
)
return {'success': False, 'error': result['error']}
# Update progress: Saving ideas (80-95%)
ideas_data = result.get('ideas', [])
self.update_state(
state='PROGRESS',
meta={
'current': 0,
'total': len(ideas_data),
'percentage': 80,
'message': f'Saving {len(ideas_data)} generated ideas...',
'phase': 'saving'
}
)
ideas_created = 0
# Create ContentIdeas records
with transaction.atomic():
for idx, idea_data in enumerate(ideas_data):
logger.info(f"Processing idea {idx + 1}/{len(ideas_data)}: {idea_data.get('title', 'Untitled')}")
cluster_name = idea_data.get('cluster_name', '')
cluster_id_from_ai = idea_data.get('cluster_id')
logger.info(f" - Looking for cluster: name='{cluster_name}', id_from_ai={cluster_id_from_ai}")
logger.info(f" - Available clusters: {[(c.id, c.name) for c in clusters]}")
# Find cluster - try by ID first, then by name
cluster = None
if cluster_id_from_ai:
for c in clusters:
if c.id == cluster_id_from_ai:
cluster = c
logger.info(f" - Found cluster by ID: {c.id} - {c.name}")
break
# Fallback to name matching if ID didn't work
if not cluster and cluster_name:
for c in clusters:
if c.name == cluster_name:
cluster = c
logger.info(f" - Found cluster by name: {c.id} - {c.name}")
break
# If still no cluster, try to match by position (first idea goes to first cluster, etc.)
if not cluster and len(clusters) > 0:
# Use modulo to distribute ideas across clusters
cluster_index = idx % len(clusters)
cluster = clusters[cluster_index]
logger.info(f" - Cluster not found by name/ID, using cluster at index {cluster_index}: {cluster.id} - {cluster.name}")
if not cluster:
logger.warning(f"Cluster not found for idea: {cluster_name or cluster_id_from_ai}, skipping")
continue
# Ensure site is available (extract from cluster or sector)
site = cluster.site
if not site and cluster.sector:
site = cluster.sector.site
logger.info(f" - Cluster details:")
logger.info(f" - ID: {cluster.id}")
logger.info(f" - Name: {cluster.name}")
account = getattr(cluster, 'account', None)
logger.info(f" - Account ID: {account.id if account else 'None'}")
logger.info(f" - Site ID: {cluster.site_id if cluster.site else 'None'}")
logger.info(f" - Site object: {site.id if site else 'None'}")
logger.info(f" - Sector ID: {cluster.sector_id if cluster.sector else 'None'}")
if not site:
logger.error(f"Site not found for cluster {cluster.id} (site_id={cluster.site_id}, sector.site_id={cluster.sector.site_id if cluster.sector and cluster.sector.site else 'None'}), cannot create ContentIdeas")
continue
# Update progress for each idea
progress_pct = 80 + int((idx / len(ideas_data)) * 15)
self.update_state(
state='PROGRESS',
meta={
'current': idx + 1,
'total': len(ideas_data),
'percentage': progress_pct,
'message': f"Saving idea '{idea_data.get('title', 'Untitled')}' ({idx + 1} of {len(ideas_data)})...",
'phase': 'saving',
'current_item': idea_data.get('title', 'Untitled')
}
)
# Handle description - it might be a dict (structured outline) or string
description = idea_data.get('description', '')
if isinstance(description, dict):
# Convert structured outline to JSON string
import json
description = json.dumps(description)
logger.info(f" - Description converted from dict to JSON (length: {len(description)})")
elif not isinstance(description, str):
description = str(description)
logger.info(f" - Description converted to string (type was {type(idea_data.get('description', '')).__name__})")
# Handle target_keywords - might be in covered_keywords or target_keywords
target_keywords = idea_data.get('covered_keywords', '') or idea_data.get('target_keywords', '')
# Prepare ContentIdeas record data
# Get account
account = getattr(cluster, 'account', None)
idea_record_data = {
'idea_title': idea_data.get('title', 'Untitled Idea'),
'description': description,
'content_type': idea_data.get('content_type', 'blog_post'),
'content_structure': idea_data.get('content_structure', 'supporting_page'),
'target_keywords': target_keywords,
'keyword_cluster': cluster,
'estimated_word_count': idea_data.get('estimated_word_count', 1500),
'status': 'new',
'account': account, # Use account field
'site': site,
'sector': cluster.sector,
}
logger.info(f" - Creating ContentIdeas record with:")
logger.info(f" - idea_title: {idea_record_data['idea_title'][:50]}...")
logger.info(f" - content_type: {idea_record_data['content_type']}")
logger.info(f" - content_structure: {idea_record_data['content_structure']}")
logger.info(f" - account_id: {idea_record_data['account'].id if idea_record_data['account'] else 'None'}")
logger.info(f" - site_id: {idea_record_data['site'].id if idea_record_data['site'] else 'None'}")
logger.info(f" - sector_id: {idea_record_data['sector'].id if idea_record_data['sector'] else 'None'}")
logger.info(f" - keyword_cluster_id: {cluster.id}")
try:
# Create ContentIdeas record
ContentIdeas.objects.create(**idea_record_data)
ideas_created += 1
logger.info(f" - ✓ Successfully created ContentIdeas record")
except Exception as create_error:
logger.error(f" - ✗ Failed to create ContentIdeas record: {type(create_error).__name__}: {str(create_error)}")
logger.error(f" - Error details: {create_error}", exc_info=True)
raise # Re-raise to see the full traceback
# Final progress update
final_message = f"Ideas generation complete: {ideas_created} ideas created for {total_clusters} clusters"
logger.info(final_message)
return {
'success': True,
'ideas_created': ideas_created,
'message': final_message,
}
except Exception as e:
logger.error(f"Error in auto_generate_ideas_task: {str(e)}", exc_info=True)
self.update_state(
state='FAILURE',
meta={
'error': str(e),
'message': f'Error: {str(e)}'
}
)
raise
def _generate_single_idea_core(cluster_id: int, account_id: int = None, progress_callback=None):
"""
Core logic for generating a single content idea for a cluster. Can be called with or without Celery.
Args:
cluster_id: Cluster ID to generate idea for
account_id: Account ID for account isolation
progress_callback: Optional function to call for progress updates (for Celery tasks)
"""
account_id = account_id
try:
# Initialize progress if callback provided
if progress_callback:
progress_callback(
state='PROGRESS',
meta={
'current': 0,
'total': 1,
'percentage': 0,
'message': 'Initializing single idea generation...',
'phase': 'initializing'
}
)
# Get cluster with keywords and relationships
clusters_queryset = Clusters.objects.filter(id=cluster_id)
if account_id:
clusters_queryset = clusters_queryset.filter(account_id=account_id)
clusters = list(clusters_queryset.select_related('sector', 'account', 'site').prefetch_related('keywords'))
if not clusters:
logger.warning(f"Cluster not found: {cluster_id}")
return {'success': False, 'error': 'Cluster not found'}
cluster = clusters[0]
# Update progress: Preparing cluster (0-10%)
if progress_callback:
progress_callback(
state='PROGRESS',
meta={
'current': 0,
'total': 1,
'percentage': 5,
'message': f'Preparing cluster "{cluster.name}"...',
'phase': 'preparing',
'current_item': cluster.name
}
)
# Get keywords for this cluster
keywords = Keywords.objects.filter(cluster=cluster).values_list('keyword', flat=True)
# Format cluster data for AI
cluster_data = [{
'id': cluster.id,
'name': cluster.name,
'description': cluster.description or '',
'keywords': list(keywords),
}]
# Update progress: Generating idea with AI (10-80%)
if progress_callback:
progress_callback(
state='PROGRESS',
meta={
'current': 0,
'total': 1,
'percentage': 10,
'message': 'Generating content idea with AI...',
'phase': 'generating'
}
)
# Create AIProcessor instance with account to load API keys from IntegrationSettings
account = getattr(cluster, 'account', None)
from igny8_core.utils.ai_processor import AIProcessor
processor = AIProcessor(account=account)
result = processor.generate_ideas(cluster_data, account=account)
if result.get('error'):
logger.error(f"AI idea generation error: {result['error']}")
return {'success': False, 'error': result['error']}
# Update progress: Saving idea (80-95%)
ideas_data = result.get('ideas', [])
if not ideas_data:
logger.warning(f"No ideas generated for cluster: {cluster.name}")
return {'success': False, 'error': 'No ideas generated by AI'}
# Take the first idea (since we're generating for a single cluster)
idea_data = ideas_data[0]
if progress_callback:
progress_callback(
state='PROGRESS',
meta={
'current': 1,
'total': 1,
'percentage': 80,
'message': f"Saving idea '{idea_data.get('title', 'Untitled')}'...",
'phase': 'saving',
'current_item': idea_data.get('title', 'Untitled')
}
)
idea_created = 0
# Create ContentIdeas record
with transaction.atomic():
# Handle description - it might be a dict (structured outline) or string
description = idea_data.get('description', '')
if isinstance(description, dict):
# Convert structured outline to JSON string
import json
description = json.dumps(description)
elif not isinstance(description, str):
description = str(description)
# Handle target_keywords - might be in covered_keywords or target_keywords
target_keywords = idea_data.get('covered_keywords', '') or idea_data.get('target_keywords', '')
# Get account
account = getattr(cluster, 'account', None)
# Create ContentIdeas record
ContentIdeas.objects.create(
idea_title=idea_data.get('title', 'Untitled Idea'),
description=description,
content_type=idea_data.get('content_type', 'blog_post'),
content_structure=idea_data.get('content_structure', 'supporting_page'),
target_keywords=target_keywords,
keyword_cluster=cluster,
estimated_word_count=idea_data.get('estimated_word_count', 1500),
status='new',
account=account, # Use account field
site=cluster.site,
sector=cluster.sector,
)
idea_created = 1
# Final progress update
final_message = f"Idea generation complete: '{idea_data.get('title', 'Untitled Idea')}' created"
logger.info(final_message)
if progress_callback:
progress_callback(
state='SUCCESS',
meta={
'current': 1,
'total': 1,
'percentage': 100,
'message': final_message,
'phase': 'completed'
}
)
return {
'success': True,
'idea_created': idea_created,
'message': final_message,
}
except Exception as e:
logger.error(f"Error in _generate_single_idea_core: {str(e)}", exc_info=True)
if progress_callback:
progress_callback(
state='FAILURE',
meta={
'error': str(e),
'message': f'Error: {str(e)}'
}
)
return {'success': False, 'error': str(e)}
@shared_task(bind=True, max_retries=3)
def generate_single_idea_task(self, cluster_id: int, account_id: int = None):
    """Celery entry point: generate one AI content idea for a cluster.

    Thin wrapper around ``_generate_single_idea_core`` that relays the core
    function's progress updates into Celery's result backend, so a polling
    client can render a live progress bar for this task.

    Args:
        cluster_id: Primary key of the cluster to generate an idea for.
        account_id: Optional account primary key used for account isolation.

    Returns:
        The result dict produced by ``_generate_single_idea_core``
        (``{'success': bool, ...}``).
    """
    def _forward_progress(state, meta):
        # Mirror the core function's progress into this task's state so
        # AsyncResult consumers see PROGRESS / SUCCESS / FAILURE metadata.
        self.update_state(state=state, meta=meta)

    return _generate_single_idea_core(cluster_id, account_id, _forward_progress)

View File

@@ -714,102 +714,7 @@ class ClusterViewSet(SiteSectorModelViewSet):
# Save with all required fields explicitly # Save with all required fields explicitly
serializer.save(account=account, site=site, sector=sector) serializer.save(account=account, site=site, sector=sector)
@action(detail=False, methods=['post'], url_path='auto_generate_ideas', url_name='auto_generate_ideas') # REMOVED: auto_generate_ideas action - idea generation function removed
def auto_generate_ideas(self, request):
"""Generate content ideas for clusters using AI - New unified framework"""
import logging
from igny8_core.ai.tasks import run_ai_task
from kombu.exceptions import OperationalError as KombuOperationalError
logger = logging.getLogger(__name__)
try:
# Get account
account = getattr(request, 'account', None)
account_id = account.id if account else None
# Prepare payload
payload = {
'ids': request.data.get('ids', [])
}
logger.info(f"auto_generate_ideas called with ids={payload['ids']}, account_id={account_id}")
# Validate basic input
if not payload['ids']:
return Response({
'success': False,
'error': 'No cluster IDs provided'
}, status=status.HTTP_400_BAD_REQUEST)
if len(payload['ids']) > 10:
return Response({
'success': False,
'error': 'Maximum 10 clusters allowed for idea generation'
}, status=status.HTTP_400_BAD_REQUEST)
# Try to queue Celery task
try:
if hasattr(run_ai_task, 'delay'):
task = run_ai_task.delay(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
logger.info(f"Task queued: {task.id}")
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Idea generation started'
}, status=status.HTTP_200_OK)
else:
# Celery not available - execute synchronously
logger.warning("Celery not available, executing synchronously")
result = run_ai_task(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
**result
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except (KombuOperationalError, ConnectionError) as e:
# Broker connection failed - fall back to synchronous execution
logger.warning(f"Celery broker unavailable, falling back to synchronous execution: {str(e)}")
result = run_ai_task(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
**result
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"Error in auto_generate_ideas: {str(e)}", exc_info=True)
return Response({
'success': False,
'error': str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"Unexpected error in auto_generate_ideas: {str(e)}", exc_info=True)
return Response({
'success': False,
'error': f'Unexpected error: {str(e)}'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def list(self, request, *args, **kwargs): def list(self, request, *args, **kwargs):
""" """
@@ -941,95 +846,4 @@ class ContentIdeasViewSet(SiteSectorModelViewSet):
'message': f'Successfully queued {len(created_tasks)} ideas to writer' 'message': f'Successfully queued {len(created_tasks)} ideas to writer'
}, status=status.HTTP_200_OK) }, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'], url_path='generate_idea', url_name='generate_idea') # REMOVED: generate_idea action - idea generation function removed
def generate_idea(self, request, pk=None):
"""Generate a single content idea for a cluster using AI"""
import logging
logger = logging.getLogger(__name__)
try:
cluster_id = request.data.get('cluster_id')
if not cluster_id:
return Response({'error': 'cluster_id is required'}, status=status.HTTP_400_BAD_REQUEST)
# Get account - handle RelatedObjectDoesNotExist
account = None
account_id = None
try:
account = getattr(request, 'account', None)
if account:
account_id = getattr(account, 'pk', None) or getattr(account, 'id', None)
except Exception as e:
logger.error(f"Error getting account: {type(e).__name__}: {e}", exc_info=True)
account_id = None
# Try to queue Celery task, fall back to synchronous if Celery not available
try:
from .tasks import generate_single_idea_task
from kombu.exceptions import OperationalError as KombuOperationalError
if hasattr(generate_single_idea_task, 'delay'):
try:
# Celery is available - queue async task
task = generate_single_idea_task.delay(cluster_id, account_id=account_id)
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Idea generation started'
}, status=status.HTTP_200_OK)
except (KombuOperationalError, ConnectionError) as e:
# Celery connection failed - execute synchronously
logger.warning(f"Celery connection failed, executing synchronously: {e}")
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
result = generate_ideas_core(cluster_id, account_id=account_id, progress_callback=None)
if result.get('success'):
return Response({
'success': True,
'idea_created': result.get('idea_created', 0),
'message': 'Idea generated successfully'
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
# Celery not available - execute synchronously
logger.info("Celery not available, executing synchronously")
from .tasks import _generate_single_idea_core
result = _generate_single_idea_core(cluster_id, account_id=account_id, progress_callback=None)
if result.get('success'):
return Response({
'success': True,
'idea_created': result.get('idea_created', 0),
'message': 'Idea generated successfully'
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except ImportError as e:
error_type = type(e).__name__
error_msg = str(e)
logger.error(f"Error importing tasks module: {error_type}: {error_msg}", exc_info=True)
return Response({
'success': False,
'error': 'AI tasks module not available'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
error_type = type(e).__name__
error_msg = str(e)
logger.error(f"Error in generate_idea: {error_type}: {error_msg}", exc_info=True)
return Response({
'success': False,
'error': f'Unexpected error: {error_msg}'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
error_type = type(e).__name__
error_msg = str(e)
logger.error(f"Unexpected error in generate_idea: {error_type}: {error_msg}", exc_info=True)
return Response({
'success': False,
'error': f'Unexpected error: {error_msg}'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

View File

@@ -1025,15 +1025,30 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
'meta': response_meta 'meta': response_meta
}) })
elif task_state == 'FAILURE': elif task_state == 'FAILURE':
# Try to get error from task.info meta first (this is where run_ai_task sets it)
if not error_message and isinstance(task_info, dict):
error_message = task_info.get('error') or task_info.get('message', '')
error_type = task_info.get('error_type', 'UnknownError')
# Also check if message contains error info
if not error_message and 'message' in task_info:
msg = task_info.get('message', '')
if msg and 'Error:' in msg:
error_message = msg.replace('Error: ', '')
# Use extracted error_message if available, otherwise try to get from error_info # Use extracted error_message if available, otherwise try to get from error_info
if not error_message: if not error_message:
error_info = task_info error_info = task_info
if isinstance(error_info, Exception): if isinstance(error_info, Exception):
error_message = str(error_info) error_message = str(error_info)
elif isinstance(error_info, dict): elif isinstance(error_info, dict):
error_message = error_info.get('error', str(error_info)) error_message = error_info.get('error') or error_info.get('message', '') or str(error_info)
else: elif error_info:
error_message = str(error_info) if error_info else 'Task failed' error_message = str(error_info)
# Final fallback - ensure we always have an error message
if not error_message or error_message.strip() == '':
error_message = f'Task execution failed - check Celery worker logs for task {task_id}'
error_type = 'ExecutionError'
# If still no error message, try to get from task backend directly # If still no error message, try to get from task backend directly
if not error_message: if not error_message:

View File

@@ -128,70 +128,60 @@ export default function ResourceDebugOverlay({ enabled }: ResourceDebugOverlayPr
headers['Authorization'] = `Bearer ${token}`; headers['Authorization'] = `Bearer ${token}`;
} }
const response = await nativeFetch.call(window, `${API_BASE_URL}/v1/system/request-metrics/${requestId}/`, { // Silently handle 404s and other errors - metrics might not exist for all requests
method: 'GET', try {
headers, const response = await nativeFetch.call(window, `${API_BASE_URL}/v1/system/request-metrics/${requestId}/`, {
credentials: 'include', // Include session cookies for authentication method: 'GET',
}); headers,
credentials: 'include', // Include session cookies for authentication
if (response.ok) { });
const data = await response.json();
// Only log in debug mode to reduce console noise if (response.ok) {
if (import.meta.env.DEV) { const data = await response.json();
console.debug('Fetched metrics for request:', requestId, data); // Only log in debug mode to reduce console noise
} if (import.meta.env.DEV) {
metricsRef.current = [...metricsRef.current, data]; console.debug('Fetched metrics for request:', requestId, data);
setMetrics([...metricsRef.current]);
} else if (response.status === 401) {
// Token might be expired - try to refresh and retry once
try {
await useAuthStore.getState().refreshToken();
const newToken = useAuthStore.getState().token;
if (newToken) {
const retryHeaders: HeadersInit = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${newToken}`,
};
const retryResponse = await nativeFetch.call(window, `${API_BASE_URL}/v1/system/request-metrics/${requestId}/`, {
method: 'GET',
headers: retryHeaders,
credentials: 'include',
});
if (retryResponse.ok) {
const data = await retryResponse.json();
metricsRef.current = [...metricsRef.current, data];
setMetrics([...metricsRef.current]);
return;
}
} }
} catch (refreshError) { metricsRef.current = [...metricsRef.current, data];
// Refresh failed - user needs to re-login setMetrics([...metricsRef.current]);
console.warn('Token refresh failed, user may need to re-authenticate'); } else if (response.status === 401) {
} // Token might be expired - try to refresh and retry once
// Silently ignore 401 errors - user might not be authenticated try {
} else if (response.status === 404) { await useAuthStore.getState().refreshToken();
// Metrics not found - could be race condition, retry once after short delay const newToken = useAuthStore.getState().token;
if (retryCount === 0) { if (newToken) {
// First attempt failed, retry once after 200ms (middleware might still be storing) const retryHeaders: HeadersInit = {
setTimeout(() => fetchRequestMetrics(requestId, 1), 200); 'Content-Type': 'application/json',
'Authorization': `Bearer ${newToken}`,
};
const retryResponse = await nativeFetch.call(window, `${API_BASE_URL}/v1/system/request-metrics/${requestId}/`, {
method: 'GET',
headers: retryHeaders,
credentials: 'include',
});
if (retryResponse.ok) {
const data = await retryResponse.json();
metricsRef.current = [...metricsRef.current, data];
setMetrics([...metricsRef.current]);
return;
}
}
} catch (refreshError) {
// Refresh failed - silently ignore
}
// Silently ignore 401 errors - user might not be authenticated
} else if (response.status === 404) {
// Metrics not found - silently ignore (metrics might not exist for all requests)
return;
} else {
// Other errors - silently ignore
return; return;
} }
// Second attempt also failed - metrics truly not available } catch (error) {
// This is expected: metrics expired (5min TTL), request wasn't tracked, or middleware error // Silently ignore all fetch errors (network errors, etc.)
// Silently ignore - no need to log or show error // Metrics are optional and not critical for functionality
return;
} else {
// Only log non-404/401 errors (500, 403, etc.)
console.warn('Failed to fetch metrics:', response.status, response.statusText, 'for request:', requestId);
}
} catch (error) {
// Only log non-network errors
if (error instanceof TypeError && error.message.includes('fetch')) {
// Network error - silently ignore
return; return;
} }
console.error('Failed to fetch request metrics:', error);
}
}; };
// Calculate page load time // Calculate page load time

View File

@@ -430,11 +430,14 @@ export function useProgressModal(): UseProgressModalReturn {
} }
} else if (response.state === 'FAILURE') { } else if (response.state === 'FAILURE') {
const meta = response.meta || {}; const meta = response.meta || {};
const errorMsg = meta.error || 'Task failed'; // Try multiple error message sources
const errorMsg = meta.error || meta.message || response.error || 'Task failed - exception details unavailable';
const errorType = meta.error_type || 'Error';
setProgress({ setProgress({
percentage: 0, percentage: 0,
message: `Error: ${errorMsg}`, message: errorMsg.includes('exception details unavailable') ? errorMsg : `Error: ${errorMsg}`,
status: 'error', status: 'error',
details: meta.error_type ? `${errorType}: ${errorMsg}` : errorMsg,
}); });
// Update step logs from failure response // Update step logs from failure response

View File

@@ -54,21 +54,6 @@ export default function Clusters() {
const [totalPages, setTotalPages] = useState(1); const [totalPages, setTotalPages] = useState(1);
const [totalCount, setTotalCount] = useState(0); const [totalCount, setTotalCount] = useState(0);
// AI Function logging state
const [aiLogs, setAiLogs] = useState<Array<{
timestamp: string;
type: 'request' | 'success' | 'error' | 'step';
action: string;
data: any;
stepName?: string;
percentage?: number;
}>>([]);
// Track last logged step to avoid duplicates
const lastLoggedStepRef = useRef<string | null>(null);
const lastLoggedPercentageRef = useRef<number>(-1);
const hasReloadedRef = useRef<boolean>(false);
// Sorting state // Sorting state
const [sortBy, setSortBy] = useState<string>('name'); const [sortBy, setSortBy] = useState<string>('name');
const [sortDirection, setSortDirection] = useState<'asc' | 'desc'>('asc'); const [sortDirection, setSortDirection] = useState<'asc' | 'desc'>('asc');
@@ -86,6 +71,7 @@ export default function Clusters() {
// Progress modal for AI functions // Progress modal for AI functions
const progressModal = useProgressModal(); const progressModal = useProgressModal();
const hasReloadedRef = useRef(false);
// Load clusters - wrapped in useCallback to prevent infinite loops // Load clusters - wrapped in useCallback to prevent infinite loops
const loadClusters = useCallback(async () => { const loadClusters = useCallback(async () => {
@@ -216,62 +202,20 @@ export default function Clusters() {
// Row action handler // Row action handler
const handleRowAction = useCallback(async (action: string, row: Cluster) => { const handleRowAction = useCallback(async (action: string, row: Cluster) => {
if (action === 'generate_ideas') { if (action === 'generate_ideas') {
const requestData = {
ids: [row.id],
cluster_name: row.name,
cluster_id: row.id,
};
// Log request
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'request',
action: 'generate_ideas (Row Action)',
data: requestData,
}]);
try { try {
const result = await autoGenerateIdeas([row.id]); const result = await autoGenerateIdeas([row.id]);
if (result.success && result.task_id) { if (result.success && result.task_id) {
// Log success with task_id
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'success',
action: 'generate_ideas (Row Action)',
data: { task_id: result.task_id, message: result.message },
}]);
// Async task - show progress modal // Async task - show progress modal
progressModal.openModal(result.task_id, 'Generating Ideas'); progressModal.openModal(result.task_id, 'Generating Ideas');
} else if (result.success && result.ideas_created) { } else if (result.success && result.ideas_created) {
// Log success with ideas_created
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'success',
action: 'generate_ideas (Row Action)',
data: { ideas_created: result.ideas_created, message: result.message },
}]);
// Synchronous completion // Synchronous completion
toast.success(result.message || 'Ideas generated successfully'); toast.success(result.message || 'Ideas generated successfully');
await loadClusters(); await loadClusters();
} else { } else {
// Log error
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'error',
action: 'generate_ideas (Row Action)',
data: { error: result.error || 'Failed to generate ideas' },
}]);
toast.error(result.error || 'Failed to generate ideas'); toast.error(result.error || 'Failed to generate ideas');
} }
} catch (error: any) { } catch (error: any) {
// Log error
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'error',
action: 'generate_ideas (Row Action)',
data: { error: error.message || 'Unknown error occurred' },
}]);
toast.error(`Failed to generate ideas: ${error.message}`); toast.error(`Failed to generate ideas: ${error.message}`);
} }
} }
@@ -284,192 +228,58 @@ export default function Clusters() {
toast.error('Please select at least one cluster to generate ideas'); toast.error('Please select at least one cluster to generate ideas');
return; return;
} }
if (ids.length > 10) { if (ids.length > 5) {
toast.error('Maximum 10 clusters allowed for idea generation'); toast.error('Maximum 5 clusters allowed for idea generation');
return; return;
} }
const numIds = ids.map(id => parseInt(id));
const selectedClusters = clusters.filter(c => numIds.includes(c.id));
const requestData = {
ids: numIds,
cluster_count: numIds.length,
cluster_names: selectedClusters.map(c => c.name),
};
// Log request
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'request',
action: 'auto_generate_ideas (Bulk Action)',
data: requestData,
}]);
try { try {
const numIds = ids.map(id => parseInt(id));
const result = await autoGenerateIdeas(numIds); const result = await autoGenerateIdeas(numIds);
if (result.success) {
// Check if result has success field - if false, it's an error response
if (result && result.success === false) {
// Error response from API
const errorMsg = result.error || 'Failed to generate ideas';
toast.error(errorMsg);
return;
}
if (result && result.success) {
if (result.task_id) { if (result.task_id) {
// Log success with task_id // Async task - open progress modal
setAiLogs(prev => [...prev, { hasReloadedRef.current = false;
timestamp: new Date().toISOString(),
type: 'success',
action: 'auto_generate_ideas (Bulk Action)',
data: { task_id: result.task_id, message: result.message, cluster_count: numIds.length },
}]);
// Async task - show progress modal
progressModal.openModal(result.task_id, 'Generating Content Ideas'); progressModal.openModal(result.task_id, 'Generating Content Ideas');
// Don't show toast - progress modal will show status // Don't show toast - progress modal will show status
} else { } else {
// Log success with ideas_created
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: 'success',
action: 'auto_generate_ideas (Bulk Action)',
data: { ideas_created: result.ideas_created || 0, message: result.message, cluster_count: numIds.length },
}]);
// Synchronous completion // Synchronous completion
toast.success(`Ideas generation complete: ${result.ideas_created || 0} ideas created`); toast.success(`Ideas generation complete: ${result.ideas_created || 0} ideas created`);
await loadClusters(); if (!hasReloadedRef.current) {
hasReloadedRef.current = true;
loadClusters();
}
} }
} else { } else {
// Log error // Unexpected response format - show error
setAiLogs(prev => [...prev, { const errorMsg = result?.error || 'Unexpected response format';
timestamp: new Date().toISOString(), toast.error(errorMsg);
type: 'error',
action: 'auto_generate_ideas (Bulk Action)',
data: { error: result.error || 'Failed to generate ideas', cluster_count: numIds.length },
}]);
toast.error(result.error || 'Failed to generate ideas');
} }
} catch (error: any) { } catch (error: any) {
// Log error // API error (network error, parse error, etc.)
setAiLogs(prev => [...prev, { let errorMsg = 'Failed to generate ideas';
timestamp: new Date().toISOString(), if (error.message) {
type: 'error', // Extract clean error message from API error format
action: 'auto_generate_ideas (Bulk Action)', errorMsg = error.message.replace(/^API Error \(\d+\): [^-]+ - /, '').trim();
data: { error: error.message || 'Unknown error occurred', cluster_count: numIds.length }, if (!errorMsg || errorMsg === error.message) {
}]); errorMsg = error.message;
toast.error(`Failed to generate ideas: ${error.message}`); }
}
toast.error(errorMsg);
} }
} else { } else {
toast.info(`Bulk action "${action}" for ${ids.length} items`); toast.info(`Bulk action "${action}" for ${ids.length} items`);
} }
}, [toast, loadClusters, progressModal, clusters]); }, [toast, loadClusters, progressModal]);
// Log AI function progress steps
useEffect(() => {
if (!progressModal.taskId || !progressModal.isOpen) {
return;
}
const progress = progressModal.progress;
const currentStep = progress.details?.phase || '';
const currentPercentage = progress.percentage;
const currentMessage = progress.message;
const currentStatus = progress.status;
// Log step changes
if (currentStep && currentStep !== lastLoggedStepRef.current) {
const stepType = currentStatus === 'error' ? 'error' :
currentStatus === 'completed' ? 'success' : 'step';
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: stepType,
action: progressModal.title || 'AI Function',
stepName: currentStep,
percentage: currentPercentage,
data: {
step: currentStep,
message: currentMessage,
percentage: currentPercentage,
status: currentStatus,
details: progress.details,
},
}]);
lastLoggedStepRef.current = currentStep;
lastLoggedPercentageRef.current = currentPercentage;
}
// Log percentage changes for same step (if significant change)
else if (currentStep && Math.abs(currentPercentage - lastLoggedPercentageRef.current) >= 10) {
const stepType = currentStatus === 'error' ? 'error' :
currentStatus === 'completed' ? 'success' : 'step';
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: stepType,
action: progressModal.title || 'AI Function',
stepName: currentStep,
percentage: currentPercentage,
data: {
step: currentStep,
message: currentMessage,
percentage: currentPercentage,
status: currentStatus,
details: progress.details,
},
}]);
lastLoggedPercentageRef.current = currentPercentage;
}
// Log status changes (error, completed)
else if (currentStatus === 'error' || currentStatus === 'completed') {
// Only log if we haven't already logged this status for this step
if (currentStep !== lastLoggedStepRef.current ||
(currentStatus === 'error' && lastLoggedStepRef.current !== 'error') ||
(currentStatus === 'completed' && lastLoggedStepRef.current !== 'completed')) {
const stepType = currentStatus === 'error' ? 'error' : 'success';
setAiLogs(prev => [...prev, {
timestamp: new Date().toISOString(),
type: stepType,
action: progressModal.title || 'AI Function',
stepName: currentStep || 'Final',
percentage: currentPercentage,
data: {
step: currentStep || 'Final',
message: currentMessage,
percentage: currentPercentage,
status: currentStatus,
details: progress.details,
},
}]);
lastLoggedStepRef.current = currentStep || currentStatus;
}
}
}, [progressModal.progress, progressModal.taskId, progressModal.isOpen, progressModal.title]);
// Reset step tracking when modal closes or opens
useEffect(() => {
if (!progressModal.isOpen) {
lastLoggedStepRef.current = null;
lastLoggedPercentageRef.current = -1;
hasReloadedRef.current = false; // Reset reload flag when modal closes
} else {
// Reset reload flag when modal opens for a new task
hasReloadedRef.current = false;
}
}, [progressModal.isOpen, progressModal.taskId]);
// Handle modal close - memoized to prevent repeated calls
const handleProgressModalClose = useCallback(() => {
const wasCompleted = progressModal.progress.status === 'completed';
progressModal.closeModal();
// Reload data after modal closes (if completed) - only once
if (wasCompleted && !hasReloadedRef.current) {
hasReloadedRef.current = true;
// Use setTimeout to ensure modal is fully closed before reloading
setTimeout(() => {
loadClusters();
// Reset the flag after a delay to allow for future reloads
setTimeout(() => {
hasReloadedRef.current = false;
}, 1000);
}, 100);
}
}, [progressModal.progress.status, progressModal.closeModal, loadClusters]);
// Close volume dropdown when clicking outside // Close volume dropdown when clicking outside
useEffect(() => { useEffect(() => {
@@ -671,77 +481,16 @@ export default function Clusters() {
message={progressModal.progress.message} message={progressModal.progress.message}
details={progressModal.progress.details} details={progressModal.progress.details}
taskId={progressModal.taskId || undefined} taskId={progressModal.taskId || undefined}
onClose={handleProgressModalClose} onClose={() => {
progressModal.closeModal();
// Reload once when modal closes if task was completed
if (progressModal.progress.status === 'completed' && !hasReloadedRef.current) {
hasReloadedRef.current = true;
loadClusters();
}
}}
/> />
{/* AI Function Logs - Display below table */}
{aiLogs.length > 0 && (
<div className="mt-6 bg-gray-50 dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700 p-4">
<div className="flex items-center justify-between mb-3">
<h3 className="text-sm font-semibold text-gray-900 dark:text-gray-100">
AI Function Logs
</h3>
<button
onClick={() => setAiLogs([])}
className="text-xs text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200"
>
Clear Logs
</button>
</div>
<div className="space-y-2 max-h-96 overflow-y-auto">
{aiLogs.slice().reverse().map((log, index) => (
<div
key={index}
className={`p-3 rounded border text-xs font-mono ${
log.type === 'request'
? 'bg-blue-50 dark:bg-blue-900/20 border-blue-200 dark:border-blue-800'
: log.type === 'success'
? 'bg-green-50 dark:bg-green-900/20 border-green-200 dark:border-green-800'
: log.type === 'error'
? 'bg-red-50 dark:bg-red-900/20 border-red-200 dark:border-red-800'
: 'bg-purple-50 dark:bg-purple-900/20 border-purple-200 dark:border-purple-800'
}`}
>
<div className="flex items-center justify-between mb-1">
<div className="flex items-center gap-2 flex-wrap">
<span className={`font-semibold ${
log.type === 'request'
? 'text-blue-700 dark:text-blue-300'
: log.type === 'success'
? 'text-green-700 dark:text-green-300'
: log.type === 'error'
? 'text-red-700 dark:text-red-300'
: 'text-purple-700 dark:text-purple-300'
}`}>
[{log.type.toUpperCase()}]
</span>
<span className="text-gray-700 dark:text-gray-300">
{log.action}
</span>
{log.stepName && (
<span className="text-xs px-2 py-0.5 rounded bg-gray-200 dark:bg-gray-700 text-gray-600 dark:text-gray-400">
{log.stepName}
</span>
)}
{log.percentage !== undefined && (
<span className="text-xs text-gray-500 dark:text-gray-400">
{log.percentage}%
</span>
)}
</div>
<span className="text-gray-500 dark:text-gray-400">
{new Date(log.timestamp).toLocaleTimeString()}
</span>
</div>
<pre className="text-xs text-gray-700 dark:text-gray-300 whitespace-pre-wrap break-words">
{JSON.stringify(log.data, null, 2)}
</pre>
</div>
))}
</div>
</div>
)}
{/* Create/Edit Modal */} {/* Create/Edit Modal */}
<FormModal <FormModal
isOpen={isModalOpen} isOpen={isModalOpen}