AI functions and other automation fixes

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-14 23:08:48 +00:00
parent cb2d109593
commit 6bb3dd3df4
11 changed files with 1289 additions and 197 deletions

View File

@@ -432,7 +432,8 @@ class AIEngine:
final_save_msg = save_msg
# Phase 5.5: DEDUCT CREDITS - Deduct credits after successful save
if self.account and raw_response:
logger.info(f"[AIEngine] Credit deduction check: account={self.account is not None}, raw_response={raw_response is not None}")
if self.account and raw_response is not None:
try:
from igny8_core.business.billing.services.credit_service import CreditService
from igny8_core.business.billing.exceptions import InsufficientCreditsError
@@ -444,6 +445,12 @@ class AIEngine:
tokens_input = raw_response.get('input_tokens', 0)
tokens_output = raw_response.get('output_tokens', 0)
logger.info(
f"[AIEngine] Deducting credits: operation={operation_type}, "
f"tokens_in={tokens_input}, tokens_out={tokens_output}, "
f"model={raw_response.get('model', 'unknown')}"
)
# Extract site_id from save_result (could be from content, cluster, or task)
site_id = save_result.get('site_id') or save_result.get('site')
@@ -468,15 +475,17 @@ class AIEngine:
)
logger.info(
f"[AIEngine] Credits deducted: {operation_type}, "
f"tokens: {tokens_input + tokens_output} ({tokens_input} in, {tokens_output} out)"
f"[AIEngine] Credits deducted successfully: {operation_type}, "
f"total tokens: {tokens_input + tokens_output} ({tokens_input} in, {tokens_output} out)"
)
except InsufficientCreditsError as e:
# This shouldn't happen since we checked before, but log it
logger.error(f"[AIEngine] Insufficient credits during deduction: {e}")
except Exception as e:
logger.warning(f"[AIEngine] Failed to deduct credits: {e}", exc_info=True)
logger.error(f"[AIEngine] Failed to deduct credits: {e}", exc_info=True)
# Don't fail the operation if credit deduction fails (for backward compatibility)
else:
logger.warning(f"[AIEngine] Skipping credit deduction: account={self.account is not None}, raw_response={raw_response is not None}")
# Phase 6: DONE - Finalization (98-100%)
done_msg = self._get_done_message(function_name, save_result)

View File

@@ -68,7 +68,22 @@ class AutoClusterFunction(BaseAIFunction):
f"[AutoCluster] Validation passed: {min_validation['count']} keywords available (min: {min_validation['required']})"
)
# Removed plan limits check
# Validate single sector - keywords must all belong to the same sector
keywords = Keywords.objects.filter(id__in=ids)
if account:
keywords = keywords.filter(account=account)
sector_ids = set(keywords.values_list('sector_id', flat=True))
# Remove None values
sector_ids.discard(None)
if len(sector_ids) > 1:
logger.warning(f"[AutoCluster] Validation failed: keywords span {len(sector_ids)} sectors")
return {
'valid': False,
'error': f'Keywords must be from a single sector. Selected keywords span {len(sector_ids)} different sectors. Please filter by sector first.',
'sector_count': len(sector_ids)
}
return {'valid': True}
@@ -216,23 +231,23 @@ class AutoClusterFunction(BaseAIFunction):
if not keywords:
raise ValueError("No keywords available for saving")
# Get context from first keyword (account/site/sector already validated at page level)
# Get context from first keyword (account/site already validated at page level)
first_keyword = keywords[0]
account = account or first_keyword.account
site = first_keyword.site
# Get sector if needed
from igny8_core.auth.models import Sector
sector = first_keyword.sector
if not sector and sector_id:
try:
sector = Sector.objects.get(id=sector_id)
except Sector.DoesNotExist:
sector = None
if not account:
raise ValueError("Account is required for cluster creation")
# Build a lookup of keyword text -> keyword object for matching
# Keywords may span multiple sectors, so don't filter by sector here
keyword_by_text = {
kw_obj.keyword.strip().lower(): kw_obj
for kw_obj in keywords
}
logger.info(f"[save_output] Processing {len(parsed)} clusters for {len(keywords)} keywords")
clusters_created = 0
keywords_updated = 0
@@ -253,74 +268,88 @@ class AutoClusterFunction(BaseAIFunction):
cluster_keywords = cluster_data.get('keywords', [])
if not cluster_name or not cluster_keywords:
logger.warning(f"[save_output] Skipping cluster with empty name or keywords: {cluster_data}")
continue
# Get or create cluster
if sector:
cluster, created = Clusters.objects.get_or_create(
name=cluster_name,
# Match keywords from AI response to actual keyword objects
matched_keyword_objects = []
for kw_text in cluster_keywords:
kw_normalized = kw_text.strip().lower()
if kw_normalized in keyword_by_text:
matched_keyword_objects.append(keyword_by_text[kw_normalized])
if not matched_keyword_objects:
logger.warning(f"[save_output] No keywords matched for cluster '{cluster_name}': {cluster_keywords}")
continue
# Determine sector for cluster from the matched keywords
# Use the sector from the first matched keyword (all should ideally be same sector)
cluster_sector = matched_keyword_objects[0].sector
# Try to find existing cluster by name (case-insensitive) in same site/sector
# This allows reusing clusters even if AI generates slightly different casing
existing_cluster = None
if cluster_sector:
existing_cluster = Clusters.objects.filter(
account=account,
site=site,
sector=sector,
defaults={
'description': cluster_data.get('description', ''),
'status': 'new', # FIXED: Changed from 'active' to 'new'
}
)
sector=cluster_sector,
name__iexact=cluster_name,
deleted_at__isnull=True # Exclude soft-deleted clusters
).first()
else:
cluster, created = Clusters.objects.get_or_create(
name=cluster_name,
existing_cluster = Clusters.objects.filter(
account=account,
site=site,
sector__isnull=True,
defaults={
'description': cluster_data.get('description', ''),
'status': 'new', # FIXED: Changed from 'active' to 'new'
'sector': None,
}
name__iexact=cluster_name,
deleted_at__isnull=True
).first()
if existing_cluster:
cluster = existing_cluster
created = False
logger.info(f"[save_output] Found existing cluster '{cluster.name}' (id={cluster.id})")
else:
# Create new cluster
cluster = Clusters.objects.create(
name=cluster_name,
account=account,
site=site,
sector=cluster_sector,
description=cluster_data.get('description', ''),
status='new',
)
created = True
if created:
clusters_created += 1
logger.info(f"[save_output] Created cluster '{cluster_name}' (id={cluster.id}) in sector {cluster_sector.id if cluster_sector else 'None'}")
# Match and assign keywords (case-insensitive)
cluster_keywords_normalized = {kw.strip().lower(): kw.strip() for kw in cluster_keywords}
available_keywords_normalized = {
kw_obj.keyword.strip().lower(): kw_obj
for kw_obj in keywords
}
# Update matched keywords - directly by their IDs, no sector filtering needed
# since we already matched them from the input keywords list
matched_ids = [kw.id for kw in matched_keyword_objects]
updated_count = Keywords.objects.filter(
id__in=matched_ids,
account=account
).update(
cluster=cluster,
status='mapped'
)
keywords_updated += updated_count
logger.info(f"[save_output] Cluster '{cluster_name}': matched {len(matched_keyword_objects)} keywords, updated {updated_count}")
matched_keyword_objects = []
for cluster_kw_normalized, cluster_kw_original in cluster_keywords_normalized.items():
if cluster_kw_normalized in available_keywords_normalized:
matched_keyword_objects.append(available_keywords_normalized[cluster_kw_normalized])
# Update matched keywords
if matched_keyword_objects:
matched_ids = [kw.id for kw in matched_keyword_objects]
keyword_filter = Keywords.objects.filter(
id__in=matched_ids,
account=account
)
if sector:
keyword_filter = keyword_filter.filter(sector=sector)
else:
keyword_filter = keyword_filter.filter(sector__isnull=True)
# FIXED: Ensure keywords status updates from 'new' to 'mapped'
updated_count = keyword_filter.update(
cluster=cluster,
status='mapped' # Status changes from 'new' to 'mapped'
)
keywords_updated += updated_count
# Recalculate cluster metrics
# Recalculate cluster metrics for all clusters in this site
from django.db.models import Sum, Case, When, F, IntegerField
cluster_filter = Clusters.objects.filter(account=account)
if sector:
cluster_filter = cluster_filter.filter(sector=sector)
else:
cluster_filter = cluster_filter.filter(sector__isnull=True)
# Get all cluster IDs that were created/updated in this batch
updated_cluster_ids = set()
for kw in keywords:
if kw.cluster_id:
updated_cluster_ids.add(kw.cluster_id)
# Also include newly created clusters
cluster_filter = Clusters.objects.filter(account=account, site=site)
for cluster in cluster_filter:
cluster.keywords_count = Keywords.objects.filter(cluster=cluster).count()

View File

@@ -175,7 +175,7 @@ class SiteSerializer(serializers.ModelSerializer):
site=obj,
platform='wordpress',
is_active=True
).exists() or bool(obj.wp_url)
).exists()
class IndustrySectorSerializer(serializers.ModelSerializer):

View File

@@ -172,6 +172,22 @@ class AutomationService:
total_count = pending_keywords.count()
# IMPORTANT: Group keywords by sector to avoid mixing sectors in clustering
# Each sector's keywords must be processed separately
from collections import defaultdict
keywords_by_sector = defaultdict(list)
for kw_id, sector_id in pending_keywords.values_list('id', 'sector_id'):
# Use sector_id or 'no_sector' for keywords without a sector
key = sector_id if sector_id else 'no_sector'
keywords_by_sector[key].append(kw_id)
sector_count = len(keywords_by_sector)
if sector_count > 1:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Keywords span {sector_count} sectors - will process each sector separately"
)
# NEW: Pre-stage validation for minimum keywords
from igny8_core.ai.validators.cluster_validators import validate_minimum_keywords
@@ -229,20 +245,19 @@ class AutomationService:
# Process in batches with dynamic sizing
batch_size = self.config.stage_1_batch_size
# FIXED: Use min() for dynamic batch sizing
actual_batch_size = min(total_count, batch_size)
keywords_processed = 0
clusters_created = 0
batches_run = 0
credits_before = self._get_credits_used()
keyword_ids = list(pending_keywords.values_list('id', flat=True))
# Get total keyword count for progress tracking
total_keyword_count = sum(len(ids) for ids in keywords_by_sector.values())
# INITIAL SAVE: Set keywords_total immediately so frontend shows accurate counts from start
self.run.stage_1_result = {
'keywords_processed': 0,
'keywords_total': len(keyword_ids),
'keywords_total': total_keyword_count,
'clusters_created': 0,
'batches_run': 0,
'credits_used': 0,
@@ -251,17 +266,28 @@ class AutomationService:
}
self.run.save(update_fields=['stage_1_result'])
for i in range(0, len(keyword_ids), actual_batch_size):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
if should_stop:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Stage {reason} - saving progress ({keywords_processed} keywords processed)"
)
# Save current progress
credits_used = self._get_credits_used() - credits_before
time_elapsed = self._format_time_elapsed(start_time)
# Process each sector's keywords separately to avoid mixing sectors
for sector_idx, (sector_key, sector_keyword_ids) in enumerate(keywords_by_sector.items()):
sector_name = f"Sector {sector_key}" if sector_key != 'no_sector' else "No Sector"
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing {sector_name} ({len(sector_keyword_ids)} keywords) [{sector_idx + 1}/{len(keywords_by_sector)}]"
)
# Dynamic batch sizing per sector
actual_batch_size = min(len(sector_keyword_ids), batch_size)
for i in range(0, len(sector_keyword_ids), actual_batch_size):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
if should_stop:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Stage {reason} - saving progress ({keywords_processed} keywords processed)"
)
# Save current progress
credits_used = self._get_credits_used() - credits_before
time_elapsed = self._format_time_elapsed(start_time)
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'clusters_created': clusters_created,
@@ -275,92 +301,92 @@ class AutomationService:
self.run.save()
return
try:
batch = keyword_ids[i:i + actual_batch_size]
batch_num = (i // actual_batch_size) + 1
total_batches = (len(keyword_ids) + actual_batch_size - 1) // actual_batch_size
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing batch {batch_num}/{total_batches} ({len(batch)} keywords)"
)
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=AutoClusterFunction(),
payload={'ids': batch}
)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Clustering failed for batch {batch_num}: {error_msg}")
# Continue to next batch
keywords_processed += len(batch)
batches_run += 1
# Log progress
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Batch {batch_num} complete"
)
# INCREMENTAL SAVE: Update stage result after each batch for real-time UI progress
clusters_so_far = Clusters.objects.filter(
site=self.site,
created_at__gte=self.run.started_at
).count()
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'keywords_total': len(keyword_ids),
'clusters_created': clusters_so_far,
'batches_run': batches_run,
'credits_used': self._get_credits_used() - credits_before,
'time_elapsed': self._format_time_elapsed(start_time),
'in_progress': True
}
self.run.save(update_fields=['stage_1_result'])
# Emit per-item trace event for UI progress tracking
try:
self.logger.append_trace(self.account.id, self.site.id, self.run.run_id, {
'event': 'stage_item_processed',
'run_id': self.run.run_id,
'stage': stage_number,
'processed': keywords_processed,
'total': len(keyword_ids),
'batch_num': batch_num,
'timestamp': datetime.now().isoformat()
})
except Exception:
pass
except Exception as e:
# FIXED: Log error but continue processing remaining batches
error_msg = f"Failed to process batch {batch_num}: {str(e)}"
logger.error(f"[AutomationService] {error_msg}", exc_info=True)
self.logger.log_stage_error(
self.run.run_id, self.account.id, self.site.id,
stage_number, error_msg
)
# Continue to next batch
continue
batch = sector_keyword_ids[i:i + actual_batch_size]
batch_num = (i // actual_batch_size) + 1
total_batches = (len(sector_keyword_ids) + actual_batch_size - 1) // actual_batch_size
# ADDED: Within-stage delay (between batches)
if i + actual_batch_size < len(keyword_ids): # Not the last batch
delay = self.config.within_stage_delay
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Waiting {delay} seconds before next batch..."
)
time.sleep(delay)
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, "Delay complete, resuming processing"
)
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Processing {sector_name} batch {batch_num}/{total_batches} ({len(batch)} keywords)"
)
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=AutoClusterFunction(),
payload={'ids': batch}
)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Clustering failed for {sector_name} batch {batch_num}: {error_msg}")
# Continue to next batch
keywords_processed += len(batch)
batches_run += 1
# Log progress
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"{sector_name} batch {batch_num} complete"
)
# INCREMENTAL SAVE: Update stage result after each batch for real-time UI progress
clusters_so_far = Clusters.objects.filter(
site=self.site,
created_at__gte=self.run.started_at
).count()
self.run.stage_1_result = {
'keywords_processed': keywords_processed,
'keywords_total': total_keyword_count,
'clusters_created': clusters_so_far,
'batches_run': batches_run,
'credits_used': self._get_credits_used() - credits_before,
'time_elapsed': self._format_time_elapsed(start_time),
'in_progress': True
}
self.run.save(update_fields=['stage_1_result'])
# Emit per-item trace event for UI progress tracking
try:
self.logger.append_trace(self.account.id, self.site.id, self.run.run_id, {
'event': 'stage_item_processed',
'run_id': self.run.run_id,
'stage': stage_number,
'processed': keywords_processed,
'total': total_keyword_count,
'batch_num': batch_num,
'timestamp': datetime.now().isoformat()
})
except Exception:
pass
except Exception as e:
# FIXED: Log error but continue processing remaining batches
error_msg = f"Failed to process {sector_name} batch {batch_num}: {str(e)}"
logger.error(f"[AutomationService] {error_msg}", exc_info=True)
self.logger.log_stage_error(
self.run.run_id, self.account.id, self.site.id,
stage_number, error_msg
)
# Continue to next batch
continue
# ADDED: Within-stage delay (between batches)
if i + actual_batch_size < len(sector_keyword_ids): # Not the last batch in this sector
delay = self.config.within_stage_delay
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Waiting {delay} seconds before next batch..."
)
time.sleep(delay)
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, "Delay complete, resuming processing"
)
# Get clusters created count
clusters_created = Clusters.objects.filter(

View File

@@ -156,7 +156,7 @@ class CreditService:
raise CreditCalculationError(f"Error calculating credits: {e}")
@staticmethod
def calculate_credits_from_tokens(operation_type, tokens_input, tokens_output):
def calculate_credits_from_tokens(operation_type, tokens_input, tokens_output, model_name=None):
"""
Calculate credits from actual token usage using configured ratio.
This is the ONLY way credits are calculated in the system.
@@ -165,6 +165,7 @@ class CreditService:
operation_type: Type of operation
tokens_input: Input tokens used
tokens_output: Output tokens used
model_name: Optional AI model name (e.g., 'gpt-4o') for model-specific tokens_per_credit
Returns:
int: Credits to deduct
@@ -174,7 +175,7 @@ class CreditService:
"""
import logging
import math
from igny8_core.business.billing.models import CreditCostConfig, BillingConfiguration
from igny8_core.business.billing.models import CreditCostConfig, BillingConfiguration, AIModelConfig
logger = logging.getLogger(__name__)
@@ -184,15 +185,32 @@ class CreditService:
is_active=True
).first()
if not config:
# Use global billing config as fallback
billing_config = BillingConfiguration.get_config()
# Get tokens_per_credit from AIModelConfig if model_name provided
billing_config = BillingConfiguration.get_config()
tokens_per_credit = None
if model_name:
# Try to get model-specific tokens_per_credit from AIModelConfig
model_config = AIModelConfig.objects.filter(
model_name=model_name,
is_active=True
).first()
if model_config and model_config.tokens_per_credit:
tokens_per_credit = model_config.tokens_per_credit
logger.info(f"Using model-specific tokens_per_credit: {tokens_per_credit} for {model_name}")
# Fallback to global billing config
if tokens_per_credit is None:
tokens_per_credit = billing_config.default_tokens_per_credit
logger.info(f"Using global default tokens_per_credit: {tokens_per_credit}")
if not config:
min_credits = 1
logger.info(f"No config for {operation_type}, using default: {tokens_per_credit} tokens/credit")
logger.info(f"No config for {operation_type}, using default: {tokens_per_credit} tokens/credit, min 1 credit")
else:
tokens_per_credit = config.tokens_per_credit
min_credits = config.min_credits
# Use base_credits as minimum for this operation
min_credits = config.base_credits
logger.info(f"Config for {operation_type}: {tokens_per_credit} tokens/credit, min {min_credits} credits")
# Calculate total tokens
total_tokens = (tokens_input or 0) + (tokens_output or 0)
@@ -250,8 +268,8 @@ class CreditService:
).first()
if config:
# Use minimum credits as estimate for token-based operations
required = config.min_credits
# Use base_credits as estimate for token-based operations
required = config.base_credits
else:
# Fallback to constants
required = CREDIT_COSTS.get(operation_type, 1)
@@ -377,10 +395,22 @@ class CreditService:
metadata=metadata or {}
)
# Convert site_id to Site instance if needed
site_instance = None
if site is not None:
from igny8_core.auth.models import Site
if isinstance(site, int):
try:
site_instance = Site.objects.get(id=site)
except Site.DoesNotExist:
logger.warning(f"Site with id {site} not found for credit usage log")
else:
site_instance = site
# Create CreditUsageLog
CreditUsageLog.objects.create(
account=account,
site=site,
site=site_instance,
operation_type=operation_type,
credits_used=amount,
cost_usd=cost_usd,
@@ -442,9 +472,9 @@ class CreditService:
f"Got: tokens_input={tokens_input}, tokens_output={tokens_output}"
)
# Calculate credits from actual token usage
# Calculate credits from actual token usage (pass model_used for model-specific rate)
credits_required = CreditService.calculate_credits_from_tokens(
operation_type, tokens_input, tokens_output
operation_type, tokens_input, tokens_output, model_name=model_used
)
# Check sufficient credits

View File

@@ -711,6 +711,113 @@ class KeywordViewSet(SiteSectorModelViewSet):
request=request
)
@action(detail=False, methods=['get'], url_path='stats', url_name='stats')
def stats(self, request):
"""
Get aggregate statistics for keywords.
Returns total keywords count and total volume across all keywords for the current site.
Used for header metrics display.
"""
from django.db.models import Sum, Count, Case, When, F, IntegerField
import logging
logger = logging.getLogger(__name__)
try:
queryset = self.get_queryset()
# Aggregate keyword stats
keyword_stats = queryset.aggregate(
total_keywords=Count('id'),
total_volume=Sum(
Case(
When(volume_override__isnull=False, then=F('volume_override')),
default=F('seed_keyword__volume'),
output_field=IntegerField()
)
)
)
return success_response(
data={
'total_keywords': keyword_stats['total_keywords'] or 0,
'total_volume': keyword_stats['total_volume'] or 0,
},
request=request
)
except Exception as e:
logger.error(f"Error in keywords stats: {str(e)}", exc_info=True)
return error_response(
error=f'Failed to fetch keyword stats: {str(e)}',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
@action(detail=False, methods=['get'], url_path='filter_options', url_name='filter_options')
def filter_options(self, request):
"""
Get distinct filter values from current data.
Returns only countries and statuses that exist in the current site's keywords.
"""
import logging
logger = logging.getLogger(__name__)
try:
queryset = self.get_queryset()
# Get distinct countries from seed_keyword (use set for proper deduplication)
countries = list(set(queryset.values_list('seed_keyword__country', flat=True)))
countries = sorted([c for c in countries if c]) # Sort and filter nulls
# Map country codes to display names
from igny8_core.auth.models import SeedKeyword
country_choices = dict(SeedKeyword.COUNTRY_CHOICES)
country_options = [
{'value': c, 'label': country_choices.get(c, c)}
for c in countries
]
# Get distinct statuses (use set for proper deduplication)
statuses = list(set(queryset.values_list('status', flat=True)))
statuses = sorted([s for s in statuses if s]) # Sort and filter nulls
status_labels = {
'new': 'New',
'mapped': 'Mapped',
}
status_options = [
{'value': s, 'label': status_labels.get(s, s.title())}
for s in statuses
]
# Get distinct clusters (use set for proper deduplication)
cluster_ids = list(set(
queryset.exclude(cluster_id__isnull=True)
.values_list('cluster_id', flat=True)
))
clusters = Clusters.objects.filter(id__in=cluster_ids).values('id', 'name').order_by('name')
cluster_options = [
{'value': str(c['id']), 'label': c['name']}
for c in clusters
]
return success_response(
data={
'countries': country_options,
'statuses': status_options,
'clusters': cluster_options,
},
request=request
)
except Exception as e:
logger.error(f"Error in filter_options: {str(e)}", exc_info=True)
return error_response(
error=f'Failed to fetch filter options: {str(e)}',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
@action(detail=False, methods=['post'], url_path='auto_cluster', url_name='auto_cluster')
def auto_cluster(self, request):
"""Auto-cluster keywords using ClusteringService"""

View File

@@ -0,0 +1,744 @@
# Free Account Options - Architecture Analysis
**Date:** January 14, 2026
**Status:** Planning Phase
**Purpose:** Compare two approaches for free user onboarding with limited AI operations
---
## Current System Architecture
### 1. **Account & Plan System**
```python
Account Model:
- plan (FK to Plan)
- credits (Integer, current balance)
- status (trial, active, suspended, pending_payment, cancelled)
- payment_method
- usage_ahrefs_queries (monthly counter)
- usage_period_start/end
Plan Model:
- name, slug, price, billing_cycle
- is_internal (hide from public listings)
- max_sites, max_keywords, max_users, max_author_profiles
- included_credits (monthly allocation)
- extra_credit_price
- allow_credit_topup
- max_ahrefs_queries (monthly limit)
```
### 2. **AI Configuration System**
```python
AIModelConfig (Global - Single Source of Truth):
- model_name (e.g., 'gpt-4o-mini', 'hidream-full')
- model_type (text/image)
- provider (openai, runware, etc.)
- is_default (one default per type)
- is_active
- cost_per_1k_input/output (text models)
- credits_per_image (image models)
- tokens_per_credit (text models)
AISettings (Per-Account Overrides):
- account (FK)
- integration_type (openai, runware)
- config (API keys, settings)
- model_preferences (per operation type)
- cost_limits (budgets)
```
### 3. **Credit Tracking System**
```python
CreditTransaction:
- transaction_type (purchase, subscription, deduction, adjustment)
- amount (positive/negative)
- balance_after
- description, metadata
CreditUsageLog (Per AI Operation):
- operation_type (clustering, idea_generation, content_generation, image_generation)
- credits_used
- cost_usd
- model_used
- tokens_input/output
- site (FK for filtering)
- related_object_type/id
```
### 4. **Current Registration Flow**
1. User registers → `RegisterSerializer.create()`
2. If `plan_slug` not provided or = 'free':
- Assigns Plan.slug='free' (must exist)
- Account.status = 'trial'
- Account.credits = plan.included_credits
- Creates CreditTransaction (initial allocation)
3. User can perform AI operations until credits exhausted
---
## 📊 Option 1: Individual Free Accounts (Isolated)
### **Concept**
Each user gets their own free account with:
- Fixed cheaper AI models (GPT-4o mini, Hidream-full)
- Low credit allocation (50-100 operations)
- Own isolated data/workspace
- Ability to upgrade to paid plan
### **Implementation Plan**
#### **Step 1: Create Free Plan**
```sql
-- Admin action via Django Admin
INSERT INTO igny8_plans (
name, slug, price, billing_cycle,
is_featured, is_internal, is_active,
max_sites, max_users, max_keywords,
included_credits, allow_credit_topup,
max_ahrefs_queries
) VALUES (
'Free Starter', 'free', 0.00, 'monthly',
false, true, true,
1, -- max_sites: 1 site only
1, -- max_users: owner only
100, -- max_keywords: 100
100, -- included_credits: 100 credits (~50 operations)
false, -- No credit topup for free
0 -- No Ahrefs access
);
```
#### **Step 2: Create AI Model Configs (If Not Exist)**
```sql
-- GPT-4o Mini (cheaper text model)
INSERT INTO igny8_ai_model_config (
model_name, model_type, provider, display_name,
is_default, is_active,
cost_per_1k_input, cost_per_1k_output,
tokens_per_credit, max_tokens, context_window
) VALUES (
'gpt-4o-mini', 'text', 'openai', 'GPT-4o Mini (Fast & Efficient)',
false, true,
0.00015, 0.0006, -- Cheaper than GPT-4
1000, 16384, 128000
);
-- Hidream Full (cheaper image model)
INSERT INTO igny8_ai_model_config (
model_name, model_type, provider, display_name,
is_default, is_active,
credits_per_image, quality_tier,
square_size, landscape_size
) VALUES (
'hidream-full', 'image', 'runware', 'Hidream Full (Standard Quality)',
false, true,
1, -- 1 credit per image (cheapest)
'basic',
'1024x1024', '1280x768'
);
```
#### **Step 3: Update Registration Logic**
```python
# In auth/serializers.py RegisterSerializer.create()
# No changes needed! Current logic already handles this:
if not plan_slug or plan_slug == 'free':
plan = Plan.objects.get(slug='free', is_active=True)
account_status = 'trial'
initial_credits = plan.included_credits # 100 credits
```
#### **Step 4: Force Free Plan AI Models**
**Option A: Global Default (Simplest)**
```sql
-- Set GPT-4o Mini and Hidream as defaults
UPDATE igny8_ai_model_config
SET is_default = false
WHERE model_type = 'text';
UPDATE igny8_ai_model_config
SET is_default = true
WHERE model_name = 'gpt-4o-mini';
UPDATE igny8_ai_model_config
SET is_default = false
WHERE model_type = 'image';
UPDATE igny8_ai_model_config
SET is_default = true
WHERE model_name = 'hidream-full';
```
**Pros:** Zero code changes, all free accounts inherit defaults
**Cons:** Affects ALL accounts (paid users too)
**Option B: Per-Account AI Settings (Recommended)**
```python
# In auth/serializers.py RegisterSerializer.create()
# After account creation:
if account_status == 'trial': # Free accounts only
from igny8_core.modules.system.settings_models import AISettings
# Create AI settings for OpenAI (text)
AISettings.objects.create(
account=account,
integration_type='openai',
model_preferences={
'clustering': 'gpt-4o-mini',
'idea_generation': 'gpt-4o-mini',
'content_generation': 'gpt-4o-mini',
'optimization': 'gpt-4o-mini',
},
is_active=True
)
# Create AI settings for Runware (images)
AISettings.objects.create(
account=account,
integration_type='runware',
model_preferences={
'image_generation': 'hidream-full',
},
is_active=True
)
```
**Pros:** Free accounts locked to cheap models, paid accounts unaffected
**Cons:** Requires code change in registration flow
**Option C: Plan-Level AI Model Configuration**
```python
# Add new field to Plan model (migration required)
class Plan(models.Model):
# ... existing fields ...
allowed_text_models = models.JSONField(
default=list,
help_text="Allowed text AI models (empty = all)"
)
allowed_image_models = models.JSONField(
default=list,
help_text="Allowed image AI models (empty = all)"
)
force_default_models = models.BooleanField(
default=False,
help_text="Force plan defaults, ignore user overrides"
)
# Update Free plan:
plan = Plan.objects.get(slug='free')
plan.allowed_text_models = ['gpt-4o-mini']
plan.allowed_image_models = ['hidream-full']
plan.force_default_models = True
plan.save()
# In AI operation logic (ai/services.py or similar):
def get_ai_model_for_account(account, operation_type):
plan = account.plan
if plan.force_default_models:
if operation_type in ['clustering', 'idea_generation', 'content_generation']:
return 'gpt-4o-mini'
elif operation_type == 'image_generation':
return 'hidream-full'
# ... existing logic for paid accounts
```
**Pros:** Centralized plan-based control, scalable
**Cons:** Requires migration + AI operation logic changes
#### **Step 5: Frontend Restrictions**
```typescript
// In frontend, check plan limits
if (user.account.plan.slug === 'free') {
// Hide model selector (force defaults)
// Show "Upgrade for more models" message
// Disable credit topup
// Disable Ahrefs research
}
```
### **✅ Pros: Individual Free Accounts**
1. **Full User Experience** - Users get their own workspace, test all features
2. **Data Isolation** - Private data, no cross-contamination
3. **Smooth Upgrade Path** - Existing account → upgrade plan → keep data
4. **Proper Multi-Tenancy** - Each account is isolated, secure
5. **Credit Tracking** - Accurate per-user usage analytics
6. **Marketing Value** - "100 Free Credits" sounds generous
### **❌ Cons: Individual Free Accounts**
1. **Database Growth** - Each user = new Account + User + potential Sites/Keywords
2. **Abuse Potential** - Users can create multiple emails for free credits
3. **Complex Enforcement** - Need to enforce model restrictions per account
4. **Storage Costs** - Each account stores independent data
5. **Migration Complexity** - If user upgrades, need to handle plan transition
### **Effort Estimate: Individual Free Accounts**
- **Minimal Approach** (Option A): **1 hour**
- Create free plan via admin
- Set default models globally
- Update frontend to hide topup for free users
- **Recommended Approach** (Option B): **4-6 hours**
- Create free plan via admin
- Update registration to create AISettings per free account
- Update AI operation logic to read account-specific models
- Frontend: Hide model selector for free users
- Testing across all AI operations
- **Enterprise Approach** (Option C): **1-2 days**
- Migration: Add allowed_models fields to Plan
- Update registration flow
- Refactor AI operation logic (all modules)
- Admin UI for plan model management
- Comprehensive testing
---
## 🎭 Option 2: Shared Demo Account (Multi-User)
### **Concept**
One demo account shared by multiple users:
- Users provide email → get "demo access" token
- Limited operations pool (50-100 per user, tracked separately)
- Shared data (users see what others created)
- Pre-configured cheaper AI models
- No upgrade path (must create new account)
### **Implementation Plan**
#### **Step 1: Create Demo Account**
```sql
-- Create demo plan (internal)
INSERT INTO igny8_plans (
name, slug, price, billing_cycle,
is_internal, is_active,
max_sites, max_users, max_keywords,
included_credits, allow_credit_topup
) VALUES (
'Demo Access', 'demo', 0.00, 'monthly',
true, true,
1, -- 1 demo site
999, -- Unlimited demo users
50, -- Limited keywords
10000, -- Large shared pool
false
);
-- Create demo account
INSERT INTO igny8_tenants (
name, slug, owner_id, plan_id,
credits, status
) VALUES (
'IGNY8 Demo Workspace', 'igny8-demo', 1, -- owner = admin
(SELECT id FROM igny8_plans WHERE slug='demo'),
10000, 'active'
);
-- Create demo site
INSERT INTO igny8_sites (
name, url, account_id, is_active
) VALUES (
'Demo Content Site', 'https://demo.example.com',
(SELECT id FROM igny8_tenants WHERE slug='igny8-demo'),
true
);
```
#### **Step 2: Create DemoUserAccess Model**
```python
# In auth/models.py
class DemoUserAccess(models.Model):
    """Track individual demo user access and limits.

    One row per email address that has requested demo access. All demo
    users share a single demo Account (``demo_account``); this model
    tracks each user's *personal* operation quota within that shared
    workspace, independently of the account-level credit pool.
    """

    # Email is the identity for demo users — no full User record exists.
    email = models.EmailField(unique=True, db_index=True)
    demo_account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        related_name='demo_users'
    )
    # Opaque bearer token issued once (on first request) and returned to the client.
    access_token = models.CharField(max_length=255, unique=True)
    # Per-user quota counters; enforced via has_operations_remaining().
    operations_used = models.IntegerField(default=0)
    operations_limit = models.IntegerField(default=50)
    created_at = models.DateTimeField(auto_now_add=True)
    last_accessed = models.DateTimeField(auto_now=True)
    # Soft kill-switch: suspended users are rejected with 403.
    is_active = models.BooleanField(default=True)

    class Meta:
        db_table = 'igny8_demo_user_access'
        indexes = [
            models.Index(fields=['email', 'is_active']),
            models.Index(fields=['access_token']),
        ]

    def __str__(self):
        return f"Demo: {self.email} ({self.operations_used}/{self.operations_limit})"

    def has_operations_remaining(self):
        # True while the user still has quota left (strict: used < limit).
        return self.operations_used < self.operations_limit
```
#### **Step 3: Migration**
```python
# migrations/0014_demo_user_access.py
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``igny8_demo_user_access`` table for DemoUserAccess.

    NOTE(review): the model's ``Meta.indexes`` declares two indexes
    (``email``+``is_active`` and ``access_token``) that are not reproduced
    here — confirm they are added via ``migrations.AddIndex`` here or in a
    follow-up migration, otherwise ``makemigrations`` will flag drift.
    """

    dependencies = [
        ('igny8_core_auth', '0013_add_plan_is_internal'),
    ]

    operations = [
        migrations.CreateModel(
            name='DemoUserAccess',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('email', models.EmailField(unique=True, db_index=True)),
                ('demo_account', models.ForeignKey(
                    on_delete=models.CASCADE,
                    to='igny8_core_auth.Account',
                    related_name='demo_users'
                )),
                ('access_token', models.CharField(max_length=255, unique=True)),
                ('operations_used', models.IntegerField(default=0)),
                ('operations_limit', models.IntegerField(default=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_accessed', models.DateTimeField(auto_now=True)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={'db_table': 'igny8_demo_user_access'},
        ),
    ]
```
#### **Step 4: Demo Access Endpoint**
```python
# In auth/views.py AuthViewSet
@action(detail=False, methods=['post'], permission_classes=[])
def request_demo_access(self, request):
    """Request demo account access with email only.

    Unauthenticated endpoint. Flow:
      1. Validate the email format.
      2. Look up the shared demo Account (slug ``igny8-demo``).
      3. Get-or-create a DemoUserAccess row for this email.
      4. Reject suspended users (403) or exhausted quotas (429).
      5. Return a temporary demo JWT plus remaining-quota info.

    Returns the same payload for new and returning demo users; only the
    message differs ("Demo access granted" vs "Welcome back to demo").
    """
    email = request.data.get('email')
    if not email:
        return error_response(
            error='Email is required',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )

    # Validate email format with Django's built-in validator.
    from django.core.validators import validate_email
    from django.core.exceptions import ValidationError
    try:
        validate_email(email)
    except ValidationError:
        return error_response(
            error='Invalid email format',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )

    # Get demo account — the single shared workspace all demo users join.
    try:
        demo_account = Account.objects.get(slug='igny8-demo', status='active')
    except Account.DoesNotExist:
        # Deployment/configuration problem, not a user error → 500.
        return error_response(
            error='Demo account not configured',
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            request=request
        )

    # Get or create demo user access; the token and quota defaults are
    # only applied on first creation (get_or_create semantics).
    from .models import DemoUserAccess
    import secrets
    demo_user, created = DemoUserAccess.objects.get_or_create(
        email=email,
        demo_account=demo_account,
        defaults={
            'access_token': secrets.token_urlsafe(32),
            'operations_limit': 50,
        }
    )

    if not demo_user.is_active:
        return error_response(
            error='Demo access suspended. Please contact support.',
            status_code=status.HTTP_403_FORBIDDEN,
            request=request
        )

    if not demo_user.has_operations_remaining():
        return error_response(
            error='Demo operation limit reached. Please sign up for a full account.',
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            request=request
        )

    # Generate temporary JWT for demo account
    # (Custom token that includes demo_user_id)
    access_token = generate_demo_access_token(demo_account, demo_user)

    return success_response(
        data={
            'access_token': access_token,
            'demo_user': {
                'email': demo_user.email,
                'operations_remaining': demo_user.operations_limit - demo_user.operations_used,
                'operations_limit': demo_user.operations_limit,
            },
            'account': {
                'id': demo_account.id,
                'name': demo_account.name,
                'is_demo': True,
            }
        },
        message='Demo access granted' if created else 'Welcome back to demo',
        request=request
    )
```
#### **Step 5: Custom JWT with Demo Context**
```python
# In auth/utils.py
def generate_demo_access_token(account, demo_user):
    """Generate a 24-hour JWT for demo access with demo_user context.

    The payload carries both the shared demo account identity
    (``account_id``/``account_slug``) and the individual demo user
    (``demo_user_id``/``demo_user_email``) so per-user quota enforcement
    can happen downstream, plus an ``is_demo`` flag for feature gating.

    Args:
        account: the shared demo Account instance.
        demo_user: the DemoUserAccess row for this email.

    Returns:
        str: HS256-signed JWT encoded with ``settings.SECRET_KEY``.
    """
    import jwt
    from datetime import datetime, timedelta, timezone
    from django.conf import settings

    # Use a timezone-aware "now": datetime.utcnow() is deprecated and
    # produces naive datetimes. PyJWT converts aware datetimes in
    # exp/iat to POSIX timestamps automatically.
    now = datetime.now(timezone.utc)
    expiry = now + timedelta(hours=24)  # 24-hour demo session

    payload = {
        'account_id': account.id,
        'account_slug': account.slug,
        'demo_user_id': demo_user.id,
        'demo_user_email': demo_user.email,
        'is_demo': True,
        'exp': expiry,
        'iat': now,
    }
    return jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
```
#### **Step 6: Demo Operation Tracking Middleware**
```python
# In middleware/demo_tracking.py
class DemoOperationTrackingMiddleware:
    """Track demo user operations and enforce per-user limits.

    Expects an upstream auth layer to set ``request.demo_user_id`` when a
    demo JWT is presented. For demo requests this middleware loads the
    DemoUserAccess row, rejects the request with 429 once the quota is
    exhausted, and attaches the row as ``request.demo_user`` so downstream
    operation logging can increment the counter. Non-demo requests pass
    through untouched.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Only demo-authenticated requests carry demo_user_id.
        if hasattr(request, 'demo_user_id'):
            from igny8_core.auth.models import DemoUserAccess
            try:
                # Plain get(): select_for_update() is only valid inside a
                # transaction.atomic() block and raises
                # TransactionManagementError in autocommit mode. Any row
                # locking belongs with the code that actually increments
                # operations_used.
                demo_user = DemoUserAccess.objects.get(id=request.demo_user_id)
            except DemoUserAccess.DoesNotExist:
                # Stale/forged token pointing at a deleted demo row.
                return JsonResponse({
                    'success': False,
                    'error': 'Demo access not found. Please request demo access again.',
                }, status=401)

            # Check limit before processing the request at all.
            if not demo_user.has_operations_remaining():
                return JsonResponse({
                    'success': False,
                    'error': 'Demo operation limit reached. Please sign up for a full account.',
                    'upgrade_url': '/pricing'
                }, status=429)

            # Store demo_user in request for operation tracking downstream.
            request.demo_user = demo_user

        response = self.get_response(request)
        return response
# Add to settings.py MIDDLEWARE
```
#### **Step 7: Update AI Operation Logic**
```python
# In ai/services.py or wherever AI operations are tracked
def log_ai_operation(account, operation_type, credits_used, demo_user=None, **kwargs):
    """Log an AI operation, deduct account credits, and track demo usage.

    All three writes (usage log, account credit deduction, demo counter)
    happen in one atomic transaction so a failure leaves no partial state.

    Args:
        account: Account to bill; its ``credits`` field is decremented.
        operation_type: operation identifier stored on the usage log.
        credits_used: number of credits to deduct.
        demo_user: optional DemoUserAccess row (e.g. ``getattr(request,
            'demo_user', None)`` set by DemoOperationTrackingMiddleware);
            when given, its ``operations_used`` counter is incremented.
        **kwargs: extra fields forwarded to CreditUsageLog.objects.create.

    BUG FIX: the previous implementation tried to recover the request via
    ``getattr(threading.local(), 'request', None)`` — but ``local()``
    returns a *brand-new* thread-local object on every call, so the
    attribute was never present and the demo counter was never
    incremented. Callers must now pass ``demo_user`` explicitly; existing
    callers that omit it are unaffected.
    """
    from igny8_core.business.billing.models import CreditUsageLog
    from django.db import transaction

    with transaction.atomic():
        # Create credit usage log entry.
        CreditUsageLog.objects.create(
            account=account,
            operation_type=operation_type,
            credits_used=credits_used,
            **kwargs
        )

        # Deduct credits from the (shared, for demo) account pool.
        # NOTE(review): read-modify-write can race under concurrency —
        # consider F('credits') - credits_used if that matters here.
        account.credits -= credits_used
        account.save()

        # If this request came from a demo user, charge their personal quota.
        if demo_user is not None:
            demo_user.operations_used += 1
            demo_user.save()
```
#### **Step 8: Frontend Demo Flow**
```typescript
// New demo signup flow
/**
 * Request demo access for the given email and, on success, persist the
 * demo token locally, show the per-user quota banner, and route the
 * visitor into the demo workspace.
 */
async function requestDemoAccess(email: string) {
  const response = await api.post('/v1/auth/request-demo-access/', { email });

  if (response.success) {
    // Store demo token so subsequent API calls authenticate as the demo user.
    localStorage.setItem('demo_token', response.data.access_token);
    localStorage.setItem('is_demo', 'true');

    // Show demo banner with the remaining/total operation quota.
    showDemoBanner({
      operationsRemaining: response.data.demo_user.operations_remaining,
      operationsLimit: response.data.demo_user.operations_limit,
    });

    // Redirect to demo workspace
    router.push('/dashboard');
  }
}
// Demo banner component
<DemoBanner>
<p>🎭 You're in Demo Mode - {operationsRemaining} operations remaining</p>
<Button onClick={() => router.push('/pricing')}>
Upgrade for Full Access
</Button>
</DemoBanner>
// Disable certain features in demo mode
if (isDemo) {
disableFeatures(['integrations', 'automation', 'wordpress_sync']);
showSharedDataWarning();
}
```
### **✅ Pros: Shared Demo Account**
1. **Zero Database Growth** - One account, minimal new records
2. **Instant Access** - No account creation, just email → token
3. **Showcase Content** - Users see real AI-generated examples from others
4. **Anti-Abuse** - Email-based tracking, hard limits per email
5. **Conversion Pressure** - "See others creating, sign up for your own workspace"
6. **Cost Efficient** - Shared credit pool, bulk tracking
### **❌ Cons: Shared Demo Account**
1. **No Data Privacy** - All users see shared workspace (could be feature or bug)
2. **Complex Access Control** - Need custom JWT + middleware + tracking
3. **No Upgrade Path** - Demo token ≠ real account, must register separately
4. **Shared Credit Pool** - If pool exhausted, demo is down for everyone
5. **Feature Limitations** - Can't show integrations, automation, publishing
6. **User Confusion** - "Why do I see others' content?" + "Lost my demo data!"
7. **Backend Complexity** - New model, middleware, JWT type, operation tracking
### **Effort Estimate: Shared Demo Account**
**Full Implementation**: **2-3 days**
- Create demo plan + account + site (1 hour)
- Create DemoUserAccess model + migration (2 hours)
- Build request_demo_access endpoint (2 hours)
- Custom JWT generation with demo context (2 hours)
- Middleware for demo tracking + limits (3 hours)
- Update AI operation logging (2 hours)
- Frontend: Demo flow + banner + restrictions (4 hours)
- Admin: Dashboard to manage demo users (2 hours)
- Testing: Edge cases, limits, shared data (4 hours)
---
## 🎯 Recommendation
### **🏆 Winner: Option 1 - Individual Free Accounts (Option B)**
**Rationale:**
1. **Simpler Architecture** - Leverages existing multi-tenancy, no custom JWT/middleware
2. **Better UX** - Private workspace, smooth upgrade path, feels like real product
3. **Faster Implementation** - 4-6 hours vs 2-3 days
4. **Lower Risk** - No shared data confusion, no new access control layer
5. **Marketing Win** - "100 Free Credits" > "Demo Access with Shared Data"
6. **Scalable** - If abuse becomes issue, add email verification or captcha
**Implementation Checklist:**
```markdown
- [ ] Create 'free' plan via Django Admin
- [ ] Set: included_credits=100, max_sites=1, max_keywords=100
- [ ] Set: is_internal=true, allow_credit_topup=false
- [ ] Verify AI Model Configs exist
- [ ] GPT-4o Mini (text, cheap)
- [ ] Hidream Full (image, cheap)
- [ ] Update RegisterSerializer (auth/serializers.py)
- [ ] After account creation, when the new account is on the free/trial plan:
- [ ] Create AISettings for openai (text) → gpt-4o-mini
- [ ] Create AISettings for runware (images) → hidream-full
- [ ] Update Frontend
- [ ] Hide model selector for free plan
- [ ] Disable credit topup for free plan
- [ ] Show "Upgrade for more models" CTA
- [ ] Testing
- [ ] Register new free account
- [ ] Run text AI operation → verify gpt-4o-mini used
- [ ] Run image AI operation → verify hidream-full used
- [ ] Verify 100 credits allocated
- [ ] Verify upgrade flow works
```
---
## 🔮 Future Enhancements (Optional)
### For Option 1 (Individual Free Accounts):
1. **Email Verification** - Require verified email to prevent abuse
2. **Captcha** - Add reCAPTCHA on free signups
3. **Usage Analytics** - Track free-to-paid conversion rates
4. **Referral Credits** - Give 50 bonus credits for referrals
5. **Time-Limited Trial** - 30-day access instead of credit-limited
### For Option 2 (Shared Demo - If Pursued):
1. **Demo Content Curation** - Pre-seed with high-quality examples
2. **Demo Reset** - Daily reset to clean state
3. **Anonymous Mode** - Show "User A, User B" instead of emails
4. **Live Activity Feed** - "User just generated an article about X"
5. **Demo Leaderboard** - Gamify the experience
---
## 📚 Reference Files
**Models:**
- `/backend/igny8_core/auth/models.py` - Account, Plan, User
- `/backend/igny8_core/business/billing/models.py` - AIModelConfig, CreditTransaction, CreditUsageLog
- `/backend/igny8_core/modules/system/settings_models.py` - AISettings
**Registration:**
- `/backend/igny8_core/auth/serializers.py` - RegisterSerializer
- `/backend/igny8_core/auth/views.py` - AuthViewSet.register()
**AI Operations:**
- Check modules: clustering, ideas, content, images for credit deduction logic
---
## ✅ Decision
**Recommended:** Proceed with **Option 1 - Individual Free Accounts (Option B)**
**Estimated Time:** 4-6 hours
**Risk Level:** Low
**User Experience:** Excellent
Consider **Option 2** only if:
- Need to showcase "collaborative" aspect
- Want zero database growth (high traffic expected)
- Marketing wants "see what others create" feature

View File

@@ -136,6 +136,10 @@ export const createKeywordsPageConfig = (
volumeDropdownRef: React.RefObject<HTMLDivElement | null>;
setCurrentPage: (page: number) => void;
loadKeywords: () => Promise<void>;
// Dynamic filter options
countryOptions?: Array<{ value: string; label: string }>;
statusOptions?: Array<{ value: string; label: string }>;
clusterOptions?: Array<{ value: string; label: string }>;
}
): KeywordsPageConfig => {
const showSectorColumn = !handlers.activeSector; // Show when viewing all sectors
@@ -267,8 +271,14 @@ export const createKeywordsPageConfig = (
type: 'select',
options: [
{ value: '', label: 'All Status' },
{ value: 'new', label: 'New' },
{ value: 'mapped', label: 'Mapped' },
// Use dynamic options if available, otherwise show default options
...(handlers.statusOptions && handlers.statusOptions.length > 0
? handlers.statusOptions
: [
{ value: 'new', label: 'New' },
{ value: 'mapped', label: 'Mapped' },
]
),
],
},
{
@@ -277,13 +287,19 @@ export const createKeywordsPageConfig = (
type: 'select',
options: [
{ value: '', label: 'All Countries' },
{ value: 'US', label: 'United States' },
{ value: 'CA', label: 'Canada' },
{ value: 'GB', label: 'United Kingdom' },
{ value: 'AE', label: 'United Arab Emirates' },
{ value: 'AU', label: 'Australia' },
{ value: 'IN', label: 'India' },
{ value: 'PK', label: 'Pakistan' },
// Use dynamic options if available, otherwise show default options
...(handlers.countryOptions && handlers.countryOptions.length > 0
? handlers.countryOptions
: [
{ value: 'US', label: 'United States' },
{ value: 'CA', label: 'Canada' },
{ value: 'GB', label: 'United Kingdom' },
{ value: 'AE', label: 'United Arab Emirates' },
{ value: 'AU', label: 'Australia' },
{ value: 'IN', label: 'India' },
{ value: 'PK', label: 'Pakistan' },
]
),
],
},
{
@@ -299,6 +315,20 @@ export const createKeywordsPageConfig = (
{ value: '5', label: '5 - Very Hard' },
],
},
{
key: 'cluster',
label: 'Cluster',
type: 'select',
dynamicOptions: 'clusters', // Flag for dynamic option loading
options: [
{ value: '', label: 'All Clusters' },
// Use dynamic cluster options if available
...(handlers.clusterOptions && handlers.clusterOptions.length > 0
? handlers.clusterOptions
: handlers.clusters.map(c => ({ value: String(c.id), label: c.name }))
),
],
},
{
key: 'volume',
label: 'Volume Range',

View File

@@ -85,6 +85,47 @@ const AutomationPage: React.FC = () => {
// Track site ID to avoid duplicate calls when activeSite object reference changes
const siteId = activeSite?.id;
/**
 * Calculate time remaining until the next scheduled run.
 * Returns a formatted string like "in 5h 23m" or "in 2d 3h", or '' when
 * the automation is disabled or the schedule string can't be parsed.
 *
 * NOTE(review): assumes scheduled_time is "HH:MM" in UTC and that a
 * weekly run recurs exactly 7 days after today's slot — confirm whether
 * weekly configs also carry a day-of-week that should be honored here.
 */
const getNextRunTime = (config: AutomationConfig): string => {
  if (!config.is_enabled || !config.scheduled_time) return '';

  const [schedHours, schedMinutes] = config.scheduled_time.split(':').map(Number);
  // Malformed time strings would otherwise propagate NaN into the output.
  if (Number.isNaN(schedHours) || Number.isNaN(schedMinutes)) return '';

  const now = new Date();
  const nextRun = new Date();
  nextRun.setUTCHours(schedHours, schedMinutes, 0, 0);

  // If today's slot has already passed, advance to the next occurrence.
  // Previously only 'daily'/'weekly' advanced, so any other frequency
  // left nextRun in the past and rendered a negative countdown.
  if (nextRun <= now) {
    nextRun.setUTCDate(nextRun.getUTCDate() + (config.frequency === 'weekly' ? 7 : 1));
  }

  const diff = nextRun.getTime() - now.getTime();
  // Defensive: never render a negative countdown.
  if (diff <= 0) return 'now';

  const totalMinutes = Math.floor(diff / (1000 * 60));
  const totalHours = Math.floor(totalMinutes / 60);
  const days = Math.floor(totalHours / 24);
  const remainingHours = totalHours % 24;
  const remainingMinutes = totalMinutes % 60;

  // Largest-unit-first formatting.
  if (days > 0) return `in ${days}d ${remainingHours}h`;
  if (remainingHours > 0) return `in ${remainingHours}h ${remainingMinutes}m`;
  return `in ${remainingMinutes}m`;
};
useEffect(() => {
if (!siteId) return;
// Reset state when site changes
@@ -547,6 +588,14 @@ const AutomationPage: React.FC = () => {
<div className="text-sm text-white/80">
Last: <span className="font-medium">{config.last_run_at ? new Date(config.last_run_at).toLocaleDateString() : 'Never'}</span>
</div>
{config.is_enabled && (
<>
<div className="h-4 w-px bg-white/25"></div>
<div className="text-sm text-white/90">
Next: <span className="font-medium">{getNextRunTime(config)}</span>
</div>
</>
)}
<div className="h-4 w-px bg-white/25"></div>
<div className="text-sm text-white/90">
<span className="font-medium">Est:</span>{' '}

View File

@@ -14,6 +14,9 @@ import {
deleteKeyword,
bulkDeleteKeywords,
bulkUpdateKeywordsStatus,
fetchPlannerKeywordStats,
fetchPlannerKeywordFilterOptions,
FilterOption,
Keyword,
KeywordFilters,
KeywordCreateData,
@@ -53,6 +56,11 @@ export default function Keywords() {
const [totalVolume, setTotalVolume] = useState(0);
const [totalImagesCount, setTotalImagesCount] = useState(0);
// Dynamic filter options (loaded from backend based on current data)
const [countryOptions, setCountryOptions] = useState<FilterOption[]>([]);
const [statusOptions, setStatusOptions] = useState<FilterOption[]>([]);
const [clusterOptions, setClusterOptions] = useState<FilterOption[]>([]);
// Filter state - match Keywords.tsx
const [searchTerm, setSearchTerm] = useState('');
const [statusFilter, setStatusFilter] = useState('');
@@ -115,13 +123,32 @@ export default function Keywords() {
loadClusters();
}, []);
// Load dynamic filter options based on current site's data
const loadFilterOptions = useCallback(async () => {
if (!activeSite) return;
try {
const options = await fetchPlannerKeywordFilterOptions(activeSite.id);
setCountryOptions(options.countries || []);
setStatusOptions(options.statuses || []);
setClusterOptions(options.clusters || []);
} catch (error) {
console.error('Error loading filter options:', error);
}
}, [activeSite]);
// Load filter options when site changes
useEffect(() => {
loadFilterOptions();
}, [loadFilterOptions]);
// Load total metrics for footer widget (site-wide totals, no sector filter)
const loadTotalMetrics = useCallback(async () => {
if (!activeSite) return;
try {
// Batch all API calls in parallel for better performance
const [allRes, mappedRes, newRes, imagesRes] = await Promise.all([
const [allRes, mappedRes, newRes, imagesRes, statsRes] = await Promise.all([
// Get total keywords count (site-wide)
fetchKeywords({
page_size: 1,
@@ -141,17 +168,15 @@ export default function Keywords() {
}),
// Get actual total images count
fetchImages({ page_size: 1 }),
// Get total volume from stats endpoint
fetchPlannerKeywordStats(activeSite.id),
]);
setTotalCount(allRes.count || 0);
setTotalClustered(mappedRes.count || 0);
setTotalUnmapped(newRes.count || 0);
setTotalImagesCount(imagesRes.count || 0);
// Get total volume across all keywords (we need to fetch all or rely on backend aggregation)
// For now, we'll just calculate from current data or set to 0
// TODO: Backend should provide total volume as an aggregated metric
setTotalVolume(0);
setTotalVolume(statsRes.total_volume || 0);
} catch (error) {
console.error('Error loading total metrics:', error);
}
@@ -353,6 +378,14 @@ export default function Keywords() {
const numIds = ids.map(id => parseInt(id));
const sectorId = activeSector?.id;
const selectedKeywords = keywords.filter(k => numIds.includes(k.id));
// Validate single sector - keywords must all be from the same sector
const uniqueSectors = new Set(selectedKeywords.map(k => k.sector_id).filter(Boolean));
if (uniqueSectors.size > 1) {
toast.error(`Selected keywords span ${uniqueSectors.size} different sectors. Please select keywords from a single sector only.`);
return;
}
try {
const result = await autoClusterKeywords(numIds, sectorId);
@@ -516,6 +549,10 @@ export default function Keywords() {
volumeDropdownRef,
setCurrentPage,
loadKeywords,
// Dynamic filter options
countryOptions,
statusOptions,
clusterOptions,
});
}, [
clusters,
@@ -533,6 +570,9 @@ export default function Keywords() {
tempVolumeMax,
loadKeywords,
activeSite,
countryOptions,
statusOptions,
clusterOptions,
]);
// Calculate header metrics - use totalClustered/totalUnmapped from API calls (not page data)

View File

@@ -730,6 +730,34 @@ export async function bulkUpdateKeywordsStatus(ids: number[], status: string): P
});
}
// Planner keyword stats interface and function
export interface PlannerKeywordStats {
total_keywords: number;
total_volume: number;
}
/** Fetch aggregated keyword stats (total keywords + total volume), optionally scoped to one site. */
export async function fetchPlannerKeywordStats(siteId?: number): Promise<PlannerKeywordStats> {
  // Site-scoped when a (truthy) siteId is provided; account-wide otherwise.
  const url = siteId
    ? `/v1/planner/keywords/stats/?site_id=${siteId}`
    : '/v1/planner/keywords/stats/';
  return fetchAPI(url);
}
// Planner keyword filter options interface and function
export interface FilterOption {
value: string;
label: string;
}
export interface PlannerKeywordFilterOptions {
countries: FilterOption[];
statuses: FilterOption[];
clusters: FilterOption[];
}
/** Fetch dynamic filter options (countries/statuses/clusters) for the keywords page, optionally scoped to one site. */
export async function fetchPlannerKeywordFilterOptions(siteId?: number): Promise<PlannerKeywordFilterOptions> {
  // Site-scoped when a (truthy) siteId is provided; account-wide otherwise.
  const url = siteId
    ? `/v1/planner/keywords/filter_options/?site_id=${siteId}`
    : '/v1/planner/keywords/filter_options/';
  return fetchAPI(url);
}
// Clusters-specific API functions
export interface ClusterFilters {
search?: string;