ai functions and other automation fixes

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-14 23:08:48 +00:00
parent cb2d109593
commit 6bb3dd3df4
11 changed files with 1289 additions and 197 deletions

View File

@@ -432,7 +432,8 @@ class AIEngine:
final_save_msg = save_msg
# Phase 5.5: DEDUCT CREDITS - Deduct credits after successful save
if self.account and raw_response:
logger.info(f"[AIEngine] Credit deduction check: account={self.account is not None}, raw_response={raw_response is not None}")
if self.account and raw_response is not None:
try:
from igny8_core.business.billing.services.credit_service import CreditService
from igny8_core.business.billing.exceptions import InsufficientCreditsError
@@ -444,6 +445,12 @@ class AIEngine:
tokens_input = raw_response.get('input_tokens', 0)
tokens_output = raw_response.get('output_tokens', 0)
logger.info(
f"[AIEngine] Deducting credits: operation={operation_type}, "
f"tokens_in={tokens_input}, tokens_out={tokens_output}, "
f"model={raw_response.get('model', 'unknown')}"
)
# Extract site_id from save_result (could be from content, cluster, or task)
site_id = save_result.get('site_id') or save_result.get('site')
@@ -468,15 +475,17 @@ class AIEngine:
)
logger.info(
f"[AIEngine] Credits deducted: {operation_type}, "
f"tokens: {tokens_input + tokens_output} ({tokens_input} in, {tokens_output} out)"
f"[AIEngine] Credits deducted successfully: {operation_type}, "
f"total tokens: {tokens_input + tokens_output} ({tokens_input} in, {tokens_output} out)"
)
except InsufficientCreditsError as e:
# This shouldn't happen since we checked before, but log it
logger.error(f"[AIEngine] Insufficient credits during deduction: {e}")
except Exception as e:
logger.warning(f"[AIEngine] Failed to deduct credits: {e}", exc_info=True)
logger.error(f"[AIEngine] Failed to deduct credits: {e}", exc_info=True)
# Don't fail the operation if credit deduction fails (for backward compatibility)
else:
logger.warning(f"[AIEngine] Skipping credit deduction: account={self.account is not None}, raw_response={raw_response is not None}")
# Phase 6: DONE - Finalization (98-100%)
done_msg = self._get_done_message(function_name, save_result)

View File

@@ -68,7 +68,22 @@ class AutoClusterFunction(BaseAIFunction):
f"[AutoCluster] Validation passed: {min_validation['count']} keywords available (min: {min_validation['required']})"
)
# Removed plan limits check
# Validate single sector - keywords must all belong to the same sector
keywords = Keywords.objects.filter(id__in=ids)
if account:
keywords = keywords.filter(account=account)
sector_ids = set(keywords.values_list('sector_id', flat=True))
# Remove None values
sector_ids.discard(None)
if len(sector_ids) > 1:
logger.warning(f"[AutoCluster] Validation failed: keywords span {len(sector_ids)} sectors")
return {
'valid': False,
'error': f'Keywords must be from a single sector. Selected keywords span {len(sector_ids)} different sectors. Please filter by sector first.',
'sector_count': len(sector_ids)
}
return {'valid': True}
@@ -216,23 +231,23 @@ class AutoClusterFunction(BaseAIFunction):
if not keywords:
raise ValueError("No keywords available for saving")
# Get context from first keyword (account/site/sector already validated at page level)
# Get context from first keyword (account/site already validated at page level)
first_keyword = keywords[0]
account = account or first_keyword.account
site = first_keyword.site
# Get sector if needed
from igny8_core.auth.models import Sector
sector = first_keyword.sector
if not sector and sector_id:
try:
sector = Sector.objects.get(id=sector_id)
except Sector.DoesNotExist:
sector = None
if not account:
raise ValueError("Account is required for cluster creation")
# Build a lookup of keyword text -> keyword object for matching
# Keywords may span multiple sectors, so don't filter by sector here
keyword_by_text = {
kw_obj.keyword.strip().lower(): kw_obj
for kw_obj in keywords
}
logger.info(f"[save_output] Processing {len(parsed)} clusters for {len(keywords)} keywords")
clusters_created = 0
keywords_updated = 0
@@ -253,74 +268,88 @@ class AutoClusterFunction(BaseAIFunction):
cluster_keywords = cluster_data.get('keywords', [])
if not cluster_name or not cluster_keywords:
logger.warning(f"[save_output] Skipping cluster with empty name or keywords: {cluster_data}")
continue
# Get or create cluster
if sector:
cluster, created = Clusters.objects.get_or_create(
name=cluster_name,
# Match keywords from AI response to actual keyword objects
matched_keyword_objects = []
for kw_text in cluster_keywords:
kw_normalized = kw_text.strip().lower()
if kw_normalized in keyword_by_text:
matched_keyword_objects.append(keyword_by_text[kw_normalized])
if not matched_keyword_objects:
logger.warning(f"[save_output] No keywords matched for cluster '{cluster_name}': {cluster_keywords}")
continue
# Determine sector for cluster from the matched keywords
# Use the sector from the first matched keyword (all should ideally be same sector)
cluster_sector = matched_keyword_objects[0].sector
# Try to find existing cluster by name (case-insensitive) in same site/sector
# This allows reusing clusters even if AI generates slightly different casing
existing_cluster = None
if cluster_sector:
existing_cluster = Clusters.objects.filter(
account=account,
site=site,
sector=sector,
defaults={
'description': cluster_data.get('description', ''),
'status': 'new', # FIXED: Changed from 'active' to 'new'
}
)
sector=cluster_sector,
name__iexact=cluster_name,
deleted_at__isnull=True # Exclude soft-deleted clusters
).first()
else:
cluster, created = Clusters.objects.get_or_create(
name=cluster_name,
existing_cluster = Clusters.objects.filter(
account=account,
site=site,
sector__isnull=True,
defaults={
'description': cluster_data.get('description', ''),
'status': 'new', # FIXED: Changed from 'active' to 'new'
'sector': None,
}
name__iexact=cluster_name,
deleted_at__isnull=True
).first()
if existing_cluster:
cluster = existing_cluster
created = False
logger.info(f"[save_output] Found existing cluster '{cluster.name}' (id={cluster.id})")
else:
# Create new cluster
cluster = Clusters.objects.create(
name=cluster_name,
account=account,
site=site,
sector=cluster_sector,
description=cluster_data.get('description', ''),
status='new',
)
created = True
if created:
clusters_created += 1
logger.info(f"[save_output] Created cluster '{cluster_name}' (id={cluster.id}) in sector {cluster_sector.id if cluster_sector else 'None'}")
# Match and assign keywords (case-insensitive)
cluster_keywords_normalized = {kw.strip().lower(): kw.strip() for kw in cluster_keywords}
available_keywords_normalized = {
kw_obj.keyword.strip().lower(): kw_obj
for kw_obj in keywords
}
matched_keyword_objects = []
for cluster_kw_normalized, cluster_kw_original in cluster_keywords_normalized.items():
if cluster_kw_normalized in available_keywords_normalized:
matched_keyword_objects.append(available_keywords_normalized[cluster_kw_normalized])
# Update matched keywords
if matched_keyword_objects:
matched_ids = [kw.id for kw in matched_keyword_objects]
keyword_filter = Keywords.objects.filter(
id__in=matched_ids,
account=account
)
if sector:
keyword_filter = keyword_filter.filter(sector=sector)
else:
keyword_filter = keyword_filter.filter(sector__isnull=True)
# FIXED: Ensure keywords status updates from 'new' to 'mapped'
updated_count = keyword_filter.update(
cluster=cluster,
status='mapped' # Status changes from 'new' to 'mapped'
)
keywords_updated += updated_count
# Update matched keywords - directly by their IDs, no sector filtering needed
# since we already matched them from the input keywords list
matched_ids = [kw.id for kw in matched_keyword_objects]
updated_count = Keywords.objects.filter(
id__in=matched_ids,
account=account
).update(
cluster=cluster,
status='mapped'
)
keywords_updated += updated_count
logger.info(f"[save_output] Cluster '{cluster_name}': matched {len(matched_keyword_objects)} keywords, updated {updated_count}")
# Recalculate cluster metrics
# Recalculate cluster metrics for all clusters in this site
from django.db.models import Sum, Case, When, F, IntegerField
cluster_filter = Clusters.objects.filter(account=account)
if sector:
cluster_filter = cluster_filter.filter(sector=sector)
else:
cluster_filter = cluster_filter.filter(sector__isnull=True)
# Get all cluster IDs that were created/updated in this batch
updated_cluster_ids = set()
for kw in keywords:
if kw.cluster_id:
updated_cluster_ids.add(kw.cluster_id)
# Also include newly created clusters
cluster_filter = Clusters.objects.filter(account=account, site=site)
for cluster in cluster_filter:
cluster.keywords_count = Keywords.objects.filter(cluster=cluster).count()

View File

@@ -175,7 +175,7 @@ class SiteSerializer(serializers.ModelSerializer):
site=obj,
platform='wordpress',
is_active=True
).exists() or bool(obj.wp_url)
).exists()
class IndustrySectorSerializer(serializers.ModelSerializer):

View File

@@ -172,6 +172,22 @@ class AutomationService:
total_count = pending_keywords.count()
# IMPORTANT: Group keywords by sector to avoid mixing sectors in clustering
# Each sector's keywords must be processed separately
from collections import defaultdict
keywords_by_sector = defaultdict(list)
for kw_id, sector_id in pending_keywords.values_list('id', 'sector_id'):
# Use sector_id or 'no_sector' for keywords without a sector
key = sector_id if sector_id else 'no_sector'
keywords_by_sector[key].append(kw_id)
sector_count = len(keywords_by_sector)
if sector_count > 1:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Keywords span {sector_count} sectors - will process each sector separately"
)
# NEW: Pre-stage validation for minimum keywords
from igny8_core.ai.validators.cluster_validators import validate_minimum_keywords
@@ -229,20 +245,19 @@ class AutomationService:
# Process in batches with dynamic sizing
batch_size = self.config.stage_1_batch_size
# FIXED: Use min() for dynamic batch sizing
actual_batch_size = min(total_count, batch_size)
keywords_processed = 0
clusters_created = 0
batches_run = 0
credits_before = self._get_credits_used()
keyword_ids = list(pending_keywords.values_list('id', flat=True))
# Get total keyword count for progress tracking
total_keyword_count = sum(len(ids) for ids in keywords_by_sector.values())
# INITIAL SAVE: Set keywords_total immediately so frontend shows accurate counts from start
self.run.stage_1_result = {
'keywords_processed': 0,
'keywords_total': len(keyword_ids),
'keywords_total': total_keyword_count,
'clusters_created': 0,
'batches_run': 0,
'credits_used': 0,
@@ -251,17 +266,28 @@ class AutomationService:
}
self.run.save(update_fields=['stage_1_result'])
for i in range(0, len(keyword_ids), actual_batch_size):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
if should_stop:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Stage {reason} - saving progress ({keywords_processed} keywords processed)"
)
# Save current progress
credits_used = self._get_credits_used() - credits_before
time_elapsed = self._format_time_elapsed(start_time)
# Process each sector's keywords separately to avoid mixing sectors
for sector_idx, (sector_key, sector_keyword_ids) in enumerate(keywords_by_sector.items()):
sector_name = f"Sector {sector_key}" if sector_key != 'no_sector' else "No Sector"
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing {sector_name} ({len(sector_keyword_ids)} keywords) [{sector_idx + 1}/{len(keywords_by_sector)}]"
)
# Dynamic batch sizing per sector
actual_batch_size = min(len(sector_keyword_ids), batch_size)
for i in range(0, len(sector_keyword_ids), actual_batch_size):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
if should_stop:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Stage {reason} - saving progress ({keywords_processed} keywords processed)"
)
# Save current progress
credits_used = self._get_credits_used() - credits_before
time_elapsed = self._format_time_elapsed(start_time)
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'clusters_created': clusters_created,
@@ -275,92 +301,92 @@ class AutomationService:
self.run.save()
return
try:
batch = keyword_ids[i:i + actual_batch_size]
batch_num = (i // actual_batch_size) + 1
total_batches = (len(keyword_ids) + actual_batch_size - 1) // actual_batch_size
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing batch {batch_num}/{total_batches} ({len(batch)} keywords)"
)
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=AutoClusterFunction(),
payload={'ids': batch}
)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Clustering failed for batch {batch_num}: {error_msg}")
# Continue to next batch
keywords_processed += len(batch)
batches_run += 1
# Log progress
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Batch {batch_num} complete"
)
# INCREMENTAL SAVE: Update stage result after each batch for real-time UI progress
clusters_so_far = Clusters.objects.filter(
site=self.site,
created_at__gte=self.run.started_at
).count()
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'keywords_total': len(keyword_ids),
'clusters_created': clusters_so_far,
'batches_run': batches_run,
'credits_used': self._get_credits_used() - credits_before,
'time_elapsed': self._format_time_elapsed(start_time),
'in_progress': True
}
self.run.save(update_fields=['stage_1_result'])
# Emit per-item trace event for UI progress tracking
try:
self.logger.append_trace(self.account.id, self.site.id, self.run.run_id, {
'event': 'stage_item_processed',
'run_id': self.run.run_id,
'stage': stage_number,
'processed': keywords_processed,
'total': len(keyword_ids),
'batch_num': batch_num,
'timestamp': datetime.now().isoformat()
})
except Exception:
pass
except Exception as e:
# FIXED: Log error but continue processing remaining batches
error_msg = f"Failed to process batch {batch_num}: {str(e)}"
logger.error(f"[AutomationService] {error_msg}", exc_info=True)
self.logger.log_stage_error(
self.run.run_id, self.account.id, self.site.id,
stage_number, error_msg
)
# Continue to next batch
continue
# ADDED: Within-stage delay (between batches)
if i + actual_batch_size < len(keyword_ids): # Not the last batch
delay = self.config.within_stage_delay
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Waiting {delay} seconds before next batch..."
)
time.sleep(delay)
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, "Delay complete, resuming processing"
)
batch = sector_keyword_ids[i:i + actual_batch_size]
batch_num = (i // actual_batch_size) + 1
total_batches = (len(sector_keyword_ids) + actual_batch_size - 1) // actual_batch_size
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing {sector_name} batch {batch_num}/{total_batches} ({len(batch)} keywords)"
)
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=AutoClusterFunction(),
payload={'ids': batch}
)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Clustering failed for {sector_name} batch {batch_num}: {error_msg}")
# Continue to next batch
keywords_processed += len(batch)
batches_run += 1
# Log progress
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"{sector_name} batch {batch_num} complete"
)
# INCREMENTAL SAVE: Update stage result after each batch for real-time UI progress
clusters_so_far = Clusters.objects.filter(
site=self.site,
created_at__gte=self.run.started_at
).count()
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'keywords_total': total_keyword_count,
'clusters_created': clusters_so_far,
'batches_run': batches_run,
'credits_used': self._get_credits_used() - credits_before,
'time_elapsed': self._format_time_elapsed(start_time),
'in_progress': True
}
self.run.save(update_fields=['stage_1_result'])
# Emit per-item trace event for UI progress tracking
try:
self.logger.append_trace(self.account.id, self.site.id, self.run.run_id, {
'event': 'stage_item_processed',
'run_id': self.run.run_id,
'stage': stage_number,
'processed': keywords_processed,
'total': total_keyword_count,
'batch_num': batch_num,
'timestamp': datetime.now().isoformat()
})
except Exception:
pass
except Exception as e:
# FIXED: Log error but continue processing remaining batches
error_msg = f"Failed to process {sector_name} batch {batch_num}: {str(e)}"
logger.error(f"[AutomationService] {error_msg}", exc_info=True)
self.logger.log_stage_error(
self.run.run_id, self.account.id, self.site.id,
stage_number, error_msg
)
# Continue to next batch
continue
# ADDED: Within-stage delay (between batches)
if i + actual_batch_size < len(sector_keyword_ids): # Not the last batch in this sector
delay = self.config.within_stage_delay
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Waiting {delay} seconds before next batch..."
)
time.sleep(delay)
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, "Delay complete, resuming processing"
)
# Get clusters created count
clusters_created = Clusters.objects.filter(

View File

@@ -156,7 +156,7 @@ class CreditService:
raise CreditCalculationError(f"Error calculating credits: {e}")
@staticmethod
def calculate_credits_from_tokens(operation_type, tokens_input, tokens_output):
def calculate_credits_from_tokens(operation_type, tokens_input, tokens_output, model_name=None):
"""
Calculate credits from actual token usage using configured ratio.
This is the ONLY way credits are calculated in the system.
@@ -165,6 +165,7 @@ class CreditService:
operation_type: Type of operation
tokens_input: Input tokens used
tokens_output: Output tokens used
model_name: Optional AI model name (e.g., 'gpt-4o') for model-specific tokens_per_credit
Returns:
int: Credits to deduct
@@ -174,7 +175,7 @@ class CreditService:
"""
import logging
import math
from igny8_core.business.billing.models import CreditCostConfig, BillingConfiguration
from igny8_core.business.billing.models import CreditCostConfig, BillingConfiguration, AIModelConfig
logger = logging.getLogger(__name__)
@@ -184,15 +185,32 @@ class CreditService:
is_active=True
).first()
if not config:
# Use global billing config as fallback
billing_config = BillingConfiguration.get_config()
# Get tokens_per_credit from AIModelConfig if model_name provided
billing_config = BillingConfiguration.get_config()
tokens_per_credit = None
if model_name:
# Try to get model-specific tokens_per_credit from AIModelConfig
model_config = AIModelConfig.objects.filter(
model_name=model_name,
is_active=True
).first()
if model_config and model_config.tokens_per_credit:
tokens_per_credit = model_config.tokens_per_credit
logger.info(f"Using model-specific tokens_per_credit: {tokens_per_credit} for {model_name}")
# Fallback to global billing config
if tokens_per_credit is None:
tokens_per_credit = billing_config.default_tokens_per_credit
logger.info(f"Using global default tokens_per_credit: {tokens_per_credit}")
if not config:
min_credits = 1
logger.info(f"No config for {operation_type}, using default: {tokens_per_credit} tokens/credit")
logger.info(f"No config for {operation_type}, using default: {tokens_per_credit} tokens/credit, min 1 credit")
else:
tokens_per_credit = config.tokens_per_credit
min_credits = config.min_credits
# Use base_credits as minimum for this operation
min_credits = config.base_credits
logger.info(f"Config for {operation_type}: {tokens_per_credit} tokens/credit, min {min_credits} credits")
# Calculate total tokens
total_tokens = (tokens_input or 0) + (tokens_output or 0)
@@ -250,8 +268,8 @@ class CreditService:
).first()
if config:
# Use minimum credits as estimate for token-based operations
required = config.min_credits
# Use base_credits as estimate for token-based operations
required = config.base_credits
else:
# Fallback to constants
required = CREDIT_COSTS.get(operation_type, 1)
@@ -377,10 +395,22 @@ class CreditService:
metadata=metadata or {}
)
# Convert site_id to Site instance if needed
site_instance = None
if site is not None:
from igny8_core.auth.models import Site
if isinstance(site, int):
try:
site_instance = Site.objects.get(id=site)
except Site.DoesNotExist:
logger.warning(f"Site with id {site} not found for credit usage log")
else:
site_instance = site
# Create CreditUsageLog
CreditUsageLog.objects.create(
account=account,
site=site,
site=site_instance,
operation_type=operation_type,
credits_used=amount,
cost_usd=cost_usd,
@@ -442,9 +472,9 @@ class CreditService:
f"Got: tokens_input={tokens_input}, tokens_output={tokens_output}"
)
# Calculate credits from actual token usage
# Calculate credits from actual token usage (pass model_used for model-specific rate)
credits_required = CreditService.calculate_credits_from_tokens(
operation_type, tokens_input, tokens_output
operation_type, tokens_input, tokens_output, model_name=model_used
)
# Check sufficient credits

View File

@@ -711,6 +711,113 @@ class KeywordViewSet(SiteSectorModelViewSet):
request=request
)
@action(detail=False, methods=['get'], url_path='stats', url_name='stats')
def stats(self, request):
"""
Get aggregate statistics for keywords.
Returns total keywords count and total volume across all keywords for the current site.
Used for header metrics display.
"""
from django.db.models import Sum, Count, Case, When, F, IntegerField
import logging
logger = logging.getLogger(__name__)
try:
queryset = self.get_queryset()
# Aggregate keyword stats
keyword_stats = queryset.aggregate(
total_keywords=Count('id'),
total_volume=Sum(
Case(
When(volume_override__isnull=False, then=F('volume_override')),
default=F('seed_keyword__volume'),
output_field=IntegerField()
)
)
)
return success_response(
data={
'total_keywords': keyword_stats['total_keywords'] or 0,
'total_volume': keyword_stats['total_volume'] or 0,
},
request=request
)
except Exception as e:
logger.error(f"Error in keywords stats: {str(e)}", exc_info=True)
return error_response(
error=f'Failed to fetch keyword stats: {str(e)}',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
@action(detail=False, methods=['get'], url_path='filter_options', url_name='filter_options')
def filter_options(self, request):
"""
Get distinct filter values from current data.
Returns only countries and statuses that exist in the current site's keywords.
"""
import logging
logger = logging.getLogger(__name__)
try:
queryset = self.get_queryset()
# Get distinct countries from seed_keyword (use set for proper deduplication)
countries = list(set(queryset.values_list('seed_keyword__country', flat=True)))
countries = sorted([c for c in countries if c]) # Sort and filter nulls
# Map country codes to display names
from igny8_core.auth.models import SeedKeyword
country_choices = dict(SeedKeyword.COUNTRY_CHOICES)
country_options = [
{'value': c, 'label': country_choices.get(c, c)}
for c in countries
]
# Get distinct statuses (use set for proper deduplication)
statuses = list(set(queryset.values_list('status', flat=True)))
statuses = sorted([s for s in statuses if s]) # Sort and filter nulls
status_labels = {
'new': 'New',
'mapped': 'Mapped',
}
status_options = [
{'value': s, 'label': status_labels.get(s, s.title())}
for s in statuses
]
# Get distinct clusters (use set for proper deduplication)
cluster_ids = list(set(
queryset.exclude(cluster_id__isnull=True)
.values_list('cluster_id', flat=True)
))
clusters = Clusters.objects.filter(id__in=cluster_ids).values('id', 'name').order_by('name')
cluster_options = [
{'value': str(c['id']), 'label': c['name']}
for c in clusters
]
return success_response(
data={
'countries': country_options,
'statuses': status_options,
'clusters': cluster_options,
},
request=request
)
except Exception as e:
logger.error(f"Error in filter_options: {str(e)}", exc_info=True)
return error_response(
error=f'Failed to fetch filter options: {str(e)}',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
@action(detail=False, methods=['post'], url_path='auto_cluster', url_name='auto_cluster')
def auto_cluster(self, request):
"""Auto-cluster keywords using ClusteringService"""