# Source: igny8/backend/igny8_core/modules/planner/views.py
# Snapshot: 2026-02-19 07:38:56 +00:00 (1959 lines, 82 KiB, Python)
from rest_framework import viewsets, filters, status
from rest_framework.decorators import action
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from django.db import transaction
from django.db.models import Max, Count, Sum, Q
from django.http import HttpResponse
from django.conf import settings
import csv
import json
import time
from drf_spectacular.utils import extend_schema, extend_schema_view
from igny8_core.api.base import SiteSectorModelViewSet
from igny8_core.api.pagination import CustomPageNumberPagination
from igny8_core.api.response import success_response, error_response
from igny8_core.api.throttles import DebugScopedRateThrottle
from igny8_core.api.permissions import IsAuthenticatedAndActive, IsViewerOrAbove, IsEditorOrAbove
from .models import Keywords, Clusters, ContentIdeas
from .serializers import KeywordSerializer, ContentIdeasSerializer
from .cluster_serializers import ClusterSerializer
from igny8_core.business.planning.services.clustering_service import ClusteringService
from igny8_core.business.planning.services.ideas_service import IdeasService
from igny8_core.business.billing.exceptions import InsufficientCreditsError
# Custom FilterSets with date range filtering support
class KeywordsFilter(django_filters.FilterSet):
    """Custom filter for Keywords with date range support"""
    # Inclusive ISO-8601 datetime bounds on created_at.
    created_at__gte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='gte')
    created_at__lte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='lte')

    class Meta:
        model = Keywords
        # seed_keyword__country / seed_keyword_id filter through the FK to SeedKeyword.
        fields = ['status', 'cluster_id', 'seed_keyword__country', 'seed_keyword_id', 'created_at__gte', 'created_at__lte']
class ClustersFilter(django_filters.FilterSet):
    """Custom filter for Clusters with date range support"""
    # Inclusive ISO-8601 datetime bounds on created_at.
    created_at__gte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='gte')
    created_at__lte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='lte')

    class Meta:
        model = Clusters
        fields = ['status', 'created_at__gte', 'created_at__lte']
class ContentIdeasFilter(django_filters.FilterSet):
    """Custom filter for ContentIdeas with date range support.

    Uses CharFilter for content_type and content_structure to accept any value
    (database may have legacy values not in current model choices).
    """
    # Inclusive ISO-8601 datetime bounds on created_at.
    created_at__gte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='gte')
    created_at__lte = django_filters.IsoDateTimeFilter(field_name='created_at', lookup_expr='lte')
    # CharFilter (not ChoiceFilter) so legacy DB values still match.
    content_type = django_filters.CharFilter(field_name='content_type')
    content_structure = django_filters.CharFilter(field_name='content_structure')

    class Meta:
        model = ContentIdeas
        fields = ['status', 'keyword_cluster_id', 'content_type', 'content_structure', 'created_at__gte', 'created_at__lte']
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class KeywordViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing keywords with CRUD operations
    Provides list, create, retrieve, update, and destroy actions
    Unified API Standard v1.0 compliant
    """
    queryset = Keywords.objects.all()
    serializer_class = KeywordSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    # Throttle bucket shared by planner endpoints (scope 'planner').
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by seed_keyword's keyword field
    search_fields = ['seed_keyword__keyword']
    # Ordering configuration - allow ordering by created_at, volume, difficulty (from seed_keyword)
    ordering_fields = ['created_at', 'seed_keyword__volume', 'seed_keyword__difficulty']
    ordering = ['-created_at']  # Default ordering (newest first)
    # Filter configuration - use custom filterset for date range filtering
    filterset_class = KeywordsFilter
def _filter_override_range(self, queryset, raw_value, override_field, seed_field, lookup):
    """Apply one bound of an override-aware numeric range filter.

    Matches rows where the override field satisfies the bound, or where
    the override is NULL and the seed_keyword field satisfies it.
    Unparsable values make the filter a no-op, matching the lenient
    query-param handling used elsewhere in this viewset.
    """
    try:
        value = int(raw_value)
    except (ValueError, TypeError):
        return queryset
    return queryset.filter(
        Q(**{f'{override_field}__{lookup}': value}) |
        Q(**{f'{override_field}__isnull': True, f'{seed_field}__{lookup}': value})
    )

def get_queryset(self):
    """
    Override to support custom difficulty/volume range filtering.

    Uses parent's get_queryset() which properly handles developer role
    and site/sector filtering. Range params (difficulty_min/max,
    volume_min/max) consult the per-keyword override fields first and
    fall back to the seed_keyword values. On unexpected errors an empty
    queryset is returned instead of raising.
    """
    import logging
    logger = logging.getLogger(__name__)
    try:
        # Parent handles developer role and site/sector filtering correctly.
        queryset = super().get_queryset()
        # Safely access query_params (DRF wraps request with Request class)
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                # Fallback for non-DRF requests
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        # (param, override field, seed field, lookup) for each range bound.
        range_specs = [
            ('difficulty_min', 'difficulty_override', 'seed_keyword__difficulty', 'gte'),
            ('difficulty_max', 'difficulty_override', 'seed_keyword__difficulty', 'lte'),
            ('volume_min', 'volume_override', 'seed_keyword__volume', 'gte'),
            ('volume_max', 'volume_override', 'seed_keyword__volume', 'lte'),
        ]
        for param, override_field, seed_field, lookup in range_specs:
            raw_value = query_params.get(param)
            if raw_value is not None:
                queryset = self._filter_override_range(
                    queryset, raw_value, override_field, seed_field, lookup
                )
        return queryset
    except Exception as e:
        logger.error(f"Error in KeywordViewSet.get_queryset(): {type(e).__name__}: {str(e)}", exc_info=True)
        # Return empty queryset instead of raising exception
        return Keywords.objects.none()
def list(self, request, *args, **kwargs):
    """List keywords, returning a 500 error envelope on unexpected failures."""
    import logging
    logger = logging.getLogger(__name__)
    try:
        filtered = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(filtered)
        if page is None:
            # Pagination disabled for this request: return the full set.
            return success_response(
                data=self.get_serializer(filtered, many=True).data,
                request=request
            )
        return self.get_paginated_response(self.get_serializer(page, many=True).data)
    except Exception as exc:
        logger.error(f"Error in KeywordViewSet.list(): {type(exc).__name__}: {str(exc)}", exc_info=True)
        return error_response(
            error=f'Error loading keywords: {str(exc)}',
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            request=request
        )
def perform_create(self, serializer):
    """Require explicit site_id and sector_id - no defaults.

    site_id/sector_id may come from the serializer payload or query
    params. Both are validated against the database, the sector must
    belong to the site, and the account's hard keyword limit is
    enforced before saving.

    Raises:
        ValidationError: missing/unknown site or sector, mismatched
            sector, or hard limit exceeded.
    """
    user = getattr(self.request, 'user', None)
    # Get site_id and sector_id from validated_data or query params
    # Safely access query_params
    try:
        query_params = getattr(self.request, 'query_params', None)
        if query_params is None:
            query_params = getattr(self.request, 'GET', {})
    except AttributeError:
        query_params = {}
    site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
    sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
    # Import here to avoid circular imports
    from igny8_core.auth.models import Site, Sector
    from rest_framework.exceptions import ValidationError
    # Site ID is REQUIRED
    if not site_id:
        raise ValidationError("site_id is required. Please select a site.")
    try:
        site = Site.objects.get(id=site_id)
    except Site.DoesNotExist:
        raise ValidationError(f"Site with id {site_id} does not exist")
    # Sector ID is REQUIRED
    if not sector_id:
        raise ValidationError("sector_id is required. Please select a sector.")
    try:
        sector = Sector.objects.get(id=sector_id)
    except Sector.DoesNotExist:
        raise ValidationError(f"Sector with id {sector_id} does not exist")
    # BUGFIX: site_id can be a string when it comes from query params,
    # so comparing it to the integer FK sector.site_id falsely rejected
    # valid sectors. Compare against the fetched site's integer PK.
    if sector.site_id != site.id:
        raise ValidationError(f"Sector '{sector.name}' does not belong to the selected site")
    # Remove site_id and sector_id from validated_data as they're not model fields
    serializer.validated_data.pop('site_id', None)
    serializer.validated_data.pop('sector_id', None)
    # Get account from site or user
    account = getattr(self.request, 'account', None)
    if not account and user and user.is_authenticated:
        account = getattr(user, 'account', None)
    if not account:
        account = getattr(site, 'account', None)
    # Check hard limit for keywords (enforces ALL entry points: manual, import, AI, automation)
    from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
    try:
        LimitService.check_hard_limit(account, 'keywords', additional_count=1)
    except HardLimitExceededError as e:
        raise ValidationError(str(e))
    # Save with all required fields explicitly
    serializer.save(account=account, site=site, sector=sector)
def destroy(self, request, *args, **kwargs):
    """Hard-delete a single keyword (bypasses soft delete) and return 204."""
    keyword = self.get_object()
    keyword.hard_delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
def bulk_delete(self, request):
    """Bulk delete keywords - uses hard delete to avoid unique constraint issues"""
    ids = request.data.get('ids', [])
    if not ids:
        return error_response(
            error='No IDs provided',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    doomed = self.get_queryset().filter(id__in=ids)
    deleted_count = doomed.count()
    # Hard delete to avoid unique constraint violations when re-adding same keywords
    for keyword in doomed:
        keyword.hard_delete()
    return success_response(data={'deleted_count': deleted_count}, request=request)
@action(detail=False, methods=['post'], url_path='bulk_update', url_name='bulk_update')
def bulk_update(self, request):
    """Bulk update keyword status for the given IDs.

    BUGFIX: this method was defined twice with identical decorators and
    bodies (the first copy's docstring said "cluster status" - a
    copy/paste error); the second definition silently shadowed the
    first. Collapsed into a single definition.

    Body: {'ids': [...], 'status': <new status>}. Returns the number of
    rows updated.
    """
    ids = request.data.get('ids', [])
    status_value = request.data.get('status')
    if not ids:
        return error_response(
            error='No IDs provided',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    if not status_value:
        return error_response(
            error='No status provided',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Single UPDATE over the site/sector-scoped queryset.
    updated_count = self.get_queryset().filter(id__in=ids).update(status=status_value)
    return success_response(data={'updated_count': updated_count}, request=request)
@action(detail=False, methods=['post'], url_path='bulk_add_from_seed', url_name='bulk_add_from_seed')
def bulk_add_from_seed(self, request):
    """Bulk add SeedKeywords to workflow (create Keywords records).

    Body:
        seed_keyword_ids: list of SeedKeyword PKs (required)
        site_id, sector_id: target site/sector (required; sector must
            belong to the site)

    Each active seed keyword is validated against the site's industry
    and the sector's industry_sector before a Keywords row is created
    (status 'new'); existing or failing rows are counted as skipped.
    Enforces the account's hard keyword limit before writing.
    Returns {'created', 'skipped', 'errors'} with errors capped at 10.
    """
    from igny8_core.auth.models import SeedKeyword, Site, Sector
    seed_keyword_ids = request.data.get('seed_keyword_ids', [])
    site_id = request.data.get('site_id')
    sector_id = request.data.get('sector_id')
    if not seed_keyword_ids:
        return error_response(
            error='No seed keyword IDs provided',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    if not site_id:
        return error_response(
            error='site_id is required',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    if not sector_id:
        return error_response(
            error='sector_id is required',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    try:
        site = Site.objects.get(id=site_id)
        sector = Sector.objects.get(id=sector_id)
    except (Site.DoesNotExist, Sector.DoesNotExist) as e:
        return error_response(
            error=f'Invalid site or sector: {str(e)}',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Validate sector belongs to site
    if sector.site != site:
        return error_response(
            error='Sector does not belong to the specified site',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Get account from site
    account = site.account
    if not account:
        return error_response(
            error='Site has no account assigned',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Get SeedKeywords; inactive seeds are silently excluded.
    seed_keywords = SeedKeyword.objects.filter(id__in=seed_keyword_ids, is_active=True)
    if not seed_keywords.exists():
        return error_response(
            error='No valid seed keywords found',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Check hard limit BEFORE bulk operation (check max potential additions)
    from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
    try:
        LimitService.check_hard_limit(account, 'keywords', additional_count=len(seed_keyword_ids))
    except HardLimitExceededError as e:
        return error_response(
            error=str(e),
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    created_count = 0
    skipped_count = 0
    errors = []
    with transaction.atomic():
        for seed_keyword in seed_keywords:
            try:
                # Validate industry/sector match
                if site.industry != seed_keyword.industry:
                    errors.append(
                        f"Keyword '{seed_keyword.keyword}': industry mismatch "
                        f"(site={site.industry.name if site.industry else 'None'}, "
                        f"seed={seed_keyword.industry.name if seed_keyword.industry else 'None'})"
                    )
                    skipped_count += 1
                    continue
                # Check if sector has industry_sector set
                if not sector.industry_sector:
                    errors.append(
                        f"Keyword '{seed_keyword.keyword}': sector '{sector.name}' has no industry_sector set. "
                        f"Please update the sector to reference an industry sector."
                    )
                    skipped_count += 1
                    continue
                if sector.industry_sector != seed_keyword.sector:
                    errors.append(
                        f"Keyword '{seed_keyword.keyword}': sector mismatch "
                        f"(sector={sector.industry_sector.name if sector.industry_sector else 'None'}, "
                        f"seed={seed_keyword.sector.name if seed_keyword.sector else 'None'})"
                    )
                    skipped_count += 1
                    continue
                # Create Keyword if it doesn't exist
                # New keywords should default to status 'new' (per updated workflow plan)
                keyword, created = Keywords.objects.get_or_create(
                    seed_keyword=seed_keyword,
                    site=site,
                    sector=sector,
                    defaults={
                        'status': 'new',
                        'account': account
                    }
                )
                # Ensure status is explicitly set to 'new' for newly created keywords
                if created:
                    if getattr(keyword, 'status', None) != 'new':
                        keyword.status = 'new'
                        keyword.save(update_fields=['status'])
                    created_count += 1
                else:
                    skipped_count += 1
            except Exception as e:
                errors.append(f"Error adding '{seed_keyword.keyword}': {str(e)}")
                skipped_count += 1
    # Create notification if keywords were added
    if created_count > 0:
        try:
            from igny8_core.business.notifications.services import NotificationService
            NotificationService.notify_keywords_imported(
                account=account,
                site=site,
                count=created_count
            )
        except Exception as e:
            # Don't fail the request if notification fails
            import logging
            logger = logging.getLogger(__name__)
            logger.warning(f"Failed to create notification for keywords import: {e}")
    return success_response(
        data={
            'created': created_count,
            'skipped': skipped_count,
            'errors': errors[:10] if errors else []  # Limit errors to first 10
        },
        request=request
    )
@action(detail=False, methods=['get'], url_path='export', url_name='export')
def export(self, request):
    """
    Export keywords to CSV.

    Query params: search, status, cluster_id, ids (comma-separated).
    Always exports as CSV; the 'format' parameter is ignored to avoid
    DRF format suffix conflicts. If 'ids' is provided, ONLY those IDs
    are exported (other filters are ignored).
    """
    import logging
    logger = logging.getLogger(__name__)
    # Base queryset with site/sector/account filtering
    queryset = self.get_queryset()
    # If IDs are provided, ONLY export those IDs and ignore all other filters
    ids_param = request.query_params.get('ids', '')
    if ids_param:
        try:
            ids_list = [int(id_str.strip()) for id_str in ids_param.split(',') if id_str.strip()]
            if ids_list:
                queryset = queryset.filter(id__in=ids_list)
        except (ValueError, TypeError):
            # If IDs parsing fails, fall through to regular filtering
            queryset = self.filter_queryset(queryset)
    else:
        # Apply search/status/cluster_id filters when no IDs specified
        queryset = self.filter_queryset(queryset)
    keywords = queryset.all()
    # BUGFIX: removed leftover print() debugging (which dumped the full
    # CSV body to stdout on every export); use debug-level logging.
    logger.debug("Exporting %d keyword records to CSV", keywords.count())
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="keywords.csv"'
    writer = csv.writer(response)
    # Header row
    writer.writerow(['ID', 'Keyword', 'Volume', 'Difficulty', 'Country', 'Status', 'Cluster ID', 'Created At'])
    # Data rows
    for keyword in keywords:
        writer.writerow([
            keyword.id,
            keyword.keyword,
            keyword.volume,
            keyword.difficulty,
            keyword.country,
            keyword.status,
            keyword.cluster_id or '',
            keyword.created_at.isoformat() if keyword.created_at else '',
        ])
    return response
@action(detail=False, methods=['post'], url_path='import_keywords', url_name='import_keywords')
def import_keywords(self, request):
    """
    Import keywords from CSV file.

    Expects a multipart upload under 'file' (.csv) plus site_id and
    sector_id query params. Rows are counted first to enforce the
    account's hard keyword limit, then created inside a transaction;
    blank keywords and existing site/sector/account duplicates are
    skipped. Returns {'imported', 'skipped', 'errors'} (errors capped
    at 10).
    """
    if 'file' not in request.FILES:
        return error_response(
            error='No file provided',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    file = request.FILES['file']
    if not file.name.endswith('.csv'):
        return error_response(
            error='File must be a CSV',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    user = getattr(request, 'user', None)
    # Get site_id and sector_id from query params or use active site
    try:
        query_params = getattr(request, 'query_params', None)
        if query_params is None:
            query_params = getattr(request, 'GET', {})
    except AttributeError:
        query_params = {}
    site_id = query_params.get('site_id')
    sector_id = query_params.get('sector_id')
    # Import here to avoid circular imports
    from igny8_core.auth.models import Site, Sector
    # Site ID is REQUIRED
    if not site_id:
        return error_response(
            error='site_id is required',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    try:
        site = Site.objects.get(id=site_id)
    except Site.DoesNotExist:
        return error_response(
            error=f'Site with id {site_id} does not exist',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Sector ID is REQUIRED
    if not sector_id:
        return error_response(
            error='sector_id is required',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    try:
        sector = Sector.objects.get(id=sector_id)
        # BUGFIX: site_id is a *string* from query params; comparing it
        # to the integer sector.site_id always mismatched. Compare
        # against the fetched site's integer PK instead.
        if sector.site_id != site.id:
            return error_response(
                error='Sector does not belong to the selected site',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
    except Sector.DoesNotExist:
        return error_response(
            error=f'Sector with id {sector_id} does not exist',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
    # Resolve account: request, then authenticated user, then site.
    account = getattr(request, 'account', None)
    if not account and user and user.is_authenticated:
        account = getattr(user, 'account', None)
    if not account:
        account = getattr(site, 'account', None)
    # Parse CSV and count rows first for limit check
    try:
        decoded_file = file.read().decode('utf-8')
        lines = decoded_file.splitlines()
        csv_reader = csv.DictReader(lines)
        # Count non-empty keyword rows
        row_count = sum(1 for row in csv_reader if row.get('keyword', '').strip())
        # Check hard limit BEFORE importing
        from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
        try:
            LimitService.check_hard_limit(account, 'keywords', additional_count=row_count)
        except HardLimitExceededError as e:
            return error_response(
                error=str(e),
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Reset reader for actual import
        csv_reader = csv.DictReader(lines)
        imported_count = 0
        skipped_count = 0
        errors = []
        with transaction.atomic():
            for row_num, row in enumerate(csv_reader, start=2):  # Start at 2 (header is row 1)
                try:
                    keyword_text = row.get('keyword', '').strip()
                    if not keyword_text:
                        skipped_count += 1
                        continue
                    # Check if keyword already exists for this site/sector
                    existing = Keywords.objects.filter(
                        keyword=keyword_text,
                        site=site,
                        sector=sector,
                        account=account
                    ).first()
                    if existing:
                        skipped_count += 1
                        continue
                    # Note: This direct creation bypasses seed_keyword linkage
                    # Keywords should ideally be created through seed_keyword FK
                    # Country comes from seed_keyword.country property
                    Keywords.objects.create(
                        keyword=keyword_text,
                        volume=int(row.get('volume', 0) or 0),
                        difficulty=int(row.get('difficulty', 0) or 0),
                        status=row.get('status', 'new') or 'new',
                        site=site,
                        sector=sector,
                        account=account
                    )
                    imported_count += 1
                except Exception as e:
                    errors.append(f"Row {row_num}: {str(e)}")
                    continue
        return success_response(
            data={
                'imported': imported_count,
                'skipped': skipped_count,
                'errors': errors[:10] if errors else []  # Limit errors to first 10
            },
            request=request
        )
    except Exception as e:
        return error_response(
            error=f'Failed to parse CSV: {str(e)}',
            status_code=status.HTTP_400_BAD_REQUEST,
            request=request
        )
@action(detail=False, methods=['get'], url_path='stats', url_name='stats')
def stats(self, request):
    """
    Aggregate keyword statistics for the current site.

    Returns the total keyword count and summed volume (volume_override
    when set, otherwise the seed keyword's volume). Used for header
    metrics display.
    """
    from django.db.models import Sum, Count, Case, When, F, IntegerField
    import logging
    logger = logging.getLogger(__name__)
    try:
        # Per-row effective volume: override wins over the seed value.
        effective_volume = Case(
            When(volume_override__isnull=False, then=F('volume_override')),
            default=F('seed_keyword__volume'),
            output_field=IntegerField()
        )
        aggregates = self.get_queryset().aggregate(
            total_keywords=Count('id'),
            total_volume=Sum(effective_volume)
        )
        return success_response(
            data={
                'total_keywords': aggregates['total_keywords'] or 0,
                'total_volume': aggregates['total_volume'] or 0,
            },
            request=request
        )
    except Exception as e:
        logger.error(f"Error in keywords stats: {str(e)}", exc_info=True)
        return error_response(
            error=f'Failed to fetch keyword stats: {str(e)}',
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            request=request
        )
def _apply_cascading_filters(self, queryset, params, skip):
    """Apply every cascading filter-option constraint except *skip*.

    Each dropdown's options are computed from rows matching all OTHER
    active filters, so *skip* names the dimension being computed:
    'status', 'country', 'cluster' or 'difficulty'. Unparsable numeric
    bounds are silently ignored, matching the rest of this viewset.
    """
    if skip != 'status':
        status_filter = params.get('status')
        if status_filter:
            queryset = queryset.filter(status=status_filter)
    if skip != 'country':
        country_filter = params.get('country')
        if country_filter:
            queryset = queryset.filter(seed_keyword__country=country_filter)
    if skip != 'cluster':
        cluster_filter = params.get('cluster_id')
        if cluster_filter:
            queryset = queryset.filter(cluster_id=cluster_filter)
    if skip != 'difficulty':
        for param, lookup in (('difficulty_min', 'gte'), ('difficulty_max', 'lte')):
            raw = params.get(param)
            if raw is None:
                continue
            try:
                value = int(raw)
            except (ValueError, TypeError):
                continue
            # Difficulty is override-aware: per-keyword override wins,
            # otherwise fall back to the seed keyword's difficulty.
            queryset = queryset.filter(
                Q(**{f'difficulty_override__{lookup}': value}) |
                Q(difficulty_override__isnull=True,
                  **{f'seed_keyword__difficulty__{lookup}': value})
            )
    # NOTE(review): volume bounds deliberately mirror the original code
    # here and consult only seed_keyword__volume (unlike get_queryset,
    # which also checks volume_override) - confirm before changing.
    for param, lookup in (('volume_min', 'gte'), ('volume_max', 'lte')):
        raw = params.get(param)
        if raw is None:
            continue
        try:
            queryset = queryset.filter(**{f'seed_keyword__volume__{lookup}': int(raw)})
        except (ValueError, TypeError):
            pass
    search_term = params.get('search')
    if search_term:
        queryset = queryset.filter(
            Q(seed_keyword__keyword__icontains=search_term) |
            Q(cluster__name__icontains=search_term)
        )
    return queryset

@action(detail=False, methods=['get'], url_path='filter_options', url_name='filter_options')
def filter_options(self, request):
    """
    Get distinct filter values from current data.

    Returns only values that exist in the filtered queryset. Supports
    cascading filters - pass current filter values to get remaining
    options. (The four near-identical filter chains were collapsed into
    _apply_cascading_filters.)
    """
    import logging
    from django.db.models.functions import Coalesce
    logger = logging.getLogger(__name__)
    try:
        queryset = self.get_queryset()
        params = request.query_params
        countries_qs = self._apply_cascading_filters(queryset, params, skip='country')
        statuses_qs = self._apply_cascading_filters(queryset, params, skip='status')
        clusters_qs = self._apply_cascading_filters(queryset, params, skip='cluster')
        difficulties_qs = self._apply_cascading_filters(queryset, params, skip='difficulty')
        # Distinct countries, labelled via SeedKeyword.COUNTRY_CHOICES
        countries = sorted({c for c in countries_qs.values_list('seed_keyword__country', flat=True) if c})
        from igny8_core.auth.models import SeedKeyword
        country_choices = dict(SeedKeyword.COUNTRY_CHOICES)
        country_options = [
            {'value': c, 'label': country_choices.get(c, c)}
            for c in countries
        ]
        # Distinct statuses from the filtered queryset
        statuses = sorted({s for s in statuses_qs.values_list('status', flat=True) if s})
        status_labels = {
            'new': 'New',
            'mapped': 'Mapped',
        }
        status_options = [
            {'value': s, 'label': status_labels.get(s, s.title())}
            for s in statuses
        ]
        # Distinct clusters from the filtered queryset
        cluster_ids = list(set(
            clusters_qs.exclude(cluster_id__isnull=True)
            .values_list('cluster_id', flat=True)
        ))
        clusters = Clusters.objects.filter(id__in=cluster_ids).values('id', 'name').order_by('name')
        cluster_options = [
            {'value': str(c['id']), 'label': c['name']}
            for c in clusters
        ]
        # Distinct difficulty levels: raw 0-100 (override or seed value)
        # mapped onto the 1-5 scale used by the UI.
        difficulty_values = difficulties_qs.annotate(
            effective_difficulty=Coalesce('difficulty_override', 'seed_keyword__difficulty')
        ).exclude(effective_difficulty__isnull=True).values_list('effective_difficulty', flat=True)
        difficulty_levels = set()
        for d in difficulty_values:
            if d is None:
                continue
            if d <= 10:
                difficulty_levels.add(1)
            elif d <= 30:
                difficulty_levels.add(2)
            elif d <= 50:
                difficulty_levels.add(3)
            elif d <= 70:
                difficulty_levels.add(4)
            else:
                difficulty_levels.add(5)
        difficulty_labels = {
            1: '1 - Very Easy',
            2: '2 - Easy',
            3: '3 - Medium',
            4: '4 - Hard',
            5: '5 - Very Hard',
        }
        difficulty_options = [
            {'value': str(d), 'label': difficulty_labels[d]}
            for d in sorted(difficulty_levels)
        ]
        return success_response(
            data={
                'countries': country_options,
                'statuses': status_options,
                'clusters': cluster_options,
                'difficulties': difficulty_options,
            },
            request=request
        )
    except Exception as e:
        logger.error(f"Error in filter_options: {str(e)}", exc_info=True)
        return error_response(
            error=f'Failed to fetch filter options: {str(e)}',
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            request=request
        )
@action(detail=False, methods=['post'], url_path='auto_cluster', url_name='auto_cluster')
def auto_cluster(self, request):
"""Auto-cluster keywords using ClusteringService"""
import logging
from igny8_core.ai.validators.cluster_validators import validate_minimum_keywords
logger = logging.getLogger(__name__)
try:
keyword_ids = request.data.get('ids', [])
sector_id = request.data.get('sector_id')
# Get account
account = getattr(request, 'account', None)
if not account:
return error_response(
error='Account is required',
status_code=status.HTTP_400_BAD_REQUEST,
request=request
)
# NEW: Validate minimum keywords BEFORE queuing task
if not keyword_ids:
return error_response(
error='No keyword IDs provided',
status_code=status.HTTP_400_BAD_REQUEST,
request=request
)
validation = validate_minimum_keywords(
keyword_ids=keyword_ids,
account=account,
min_required=5
)
if not validation['valid']:
return error_response(
error=validation['error'],
status_code=status.HTTP_400_BAD_REQUEST,
request=request,
debug_info={
'count': validation.get('count'),
'required': validation.get('required')
} if settings.DEBUG else None
)
# Validation passed - proceed with clustering
# Use service to cluster keywords
service = ClusteringService()
try:
result = service.cluster_keywords(keyword_ids, account, sector_id)
if result.get('success'):
if 'task_id' in result:
# Async task queued
return success_response(
data={'task_id': result['task_id']},
message=f'Clustering started with {validation["count"]} keywords',
request=request
)
else:
# Synchronous execution
return success_response(
data=result,
request=request
)
else:
return error_response(
error=result.get('error', 'Clustering failed'),
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
except InsufficientCreditsError as e:
return error_response(
error=str(e),
status_code=status.HTTP_402_PAYMENT_REQUIRED,
request=request
)
except Exception as e:
logger.error(f"Error in auto_cluster: {str(e)}", exc_info=True)
return error_response(
error=str(e),
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
except Exception as e:
logger.error(f"Unexpected error in auto_cluster: {str(e)}", exc_info=True)
return error_response(
error=f'Unexpected error: {str(e)}',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class ClusterViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing clusters with CRUD operations
    Unified API Standard v1.0 compliant
    """
    queryset = Clusters.objects.all()
    serializer_class = ClusterSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by name
    search_fields = ['name']
    # Ordering configuration - volume and difficulty are model fields kept in sync
    ordering_fields = ['name', 'created_at', 'keywords_count', 'volume', 'difficulty']
    ordering = ['name']  # Default ordering
    # Filter configuration - use custom filterset for date range filtering
    filterset_class = ClustersFilter

    def get_queryset(self):
        """
        Get all clusters - keywords_count, volume, and difficulty are calculated in the serializer
        since there's no ForeignKey relationship between Clusters and Keywords
        Uses parent's get_queryset for filtering
        Annotates queryset with volume and difficulty for filtering
        """
        queryset = super().get_queryset()
        # Annotate queryset with aggregated volume and difficulty for filtering
        from django.db.models import Sum, Avg, Case, When, F, IntegerField
        # Since volume and difficulty are properties (not DB fields), we need to use
        # COALESCE to check volume_override/difficulty_override first, then fallback to seed_keyword
        # Volume: COALESCE(volume_override, seed_keyword__volume)
        # Difficulty: COALESCE(difficulty_override, seed_keyword__difficulty)
        queryset = queryset.annotate(
            _annotated_volume=Sum(
                Case(
                    When(keywords__volume_override__isnull=False, then=F('keywords__volume_override')),
                    default=F('keywords__seed_keyword__volume'),
                    output_field=IntegerField()
                )
            ),
            _annotated_difficulty=Avg(
                Case(
                    When(keywords__difficulty_override__isnull=False, then=F('keywords__difficulty_override')),
                    default=F('keywords__seed_keyword__difficulty'),
                    output_field=IntegerField()
                )
            )
        )
        # Apply volume range filtering (non-numeric values are silently ignored)
        query_params = getattr(self.request, 'query_params', {})
        volume_min = query_params.get('volume_min')
        volume_max = query_params.get('volume_max')
        if volume_min is not None:
            try:
                queryset = queryset.filter(_annotated_volume__gte=int(volume_min))
            except (ValueError, TypeError):
                pass
        if volume_max is not None:
            try:
                queryset = queryset.filter(_annotated_volume__lte=int(volume_max))
            except (ValueError, TypeError):
                pass
        # Apply difficulty range filtering (non-numeric values are silently ignored)
        difficulty_min = query_params.get('difficulty_min')
        difficulty_max = query_params.get('difficulty_max')
        if difficulty_min is not None:
            try:
                queryset = queryset.filter(_annotated_difficulty__gte=float(difficulty_min))
            except (ValueError, TypeError):
                pass
        if difficulty_max is not None:
            try:
                queryset = queryset.filter(_annotated_difficulty__lte=float(difficulty_max))
            except (ValueError, TypeError):
                pass
        return queryset

    def perform_create(self, serializer):
        """Require explicit site_id and sector_id - no defaults."""
        user = getattr(self.request, 'user', None)
        # Get site_id and sector_id from validated_data or query params
        # Safely access query_params
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                # Fallback for non-DRF requests
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
        sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
        # Import here to avoid circular imports
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            raise ValidationError("site_id is required. Please select a site.")
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            raise ValidationError(f"Site with id {site_id} does not exist")
        # Sector ID is REQUIRED
        if not sector_id:
            raise ValidationError("sector_id is required. Please select a sector.")
        try:
            sector = Sector.objects.get(id=sector_id)
            # Verify sector belongs to the site.
            # BUGFIX: compare against site.id (int) instead of the raw site_id input,
            # which is a string when taken from query params and would never compare
            # equal to the integer FK, wrongly rejecting a matching sector.
            if sector.site_id != site.id:
                raise ValidationError(f"Sector '{sector.name}' does not belong to the selected site")
        except Sector.DoesNotExist:
            raise ValidationError(f"Sector with id {sector_id} does not exist")
        # Remove site_id and sector_id from validated_data as they're not model fields
        serializer.validated_data.pop('site_id', None)
        serializer.validated_data.pop('sector_id', None)
        # Get account from request, then user, then the site itself
        account = getattr(self.request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        # Check hard limit for clusters (enforces ALL entry points: manual, import, AI, automation)
        from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
        try:
            LimitService.check_hard_limit(account, 'clusters', additional_count=1)
        except HardLimitExceededError as e:
            raise ValidationError(str(e))
        # Save with all required fields explicitly
        serializer.save(account=account, site=site, sector=sector)

    @action(detail=False, methods=['get'], url_path='summary', url_name='summary')
    def summary(self, request):
        """
        Get aggregate summary metrics for clusters.
        Returns total keywords count and total volume across all clusters visible
        in the current site/sector scope (query-param volume/difficulty filters
        from get_queryset also apply).
        Used for header metrics display.
        """
        from django.db.models import Sum, Count, Case, When, F, IntegerField
        queryset = self.get_queryset()
        # Get cluster IDs
        cluster_ids = list(queryset.values_list('id', flat=True))
        # Aggregate keyword stats across all clusters; each keyword's effective
        # volume is volume_override when set, else the seed keyword's volume.
        keyword_stats = (
            Keywords.objects
            .filter(cluster_id__in=cluster_ids)
            .aggregate(
                total_keywords=Count('id'),
                total_volume=Sum(
                    Case(
                        When(volume_override__isnull=False, then=F('volume_override')),
                        default=F('seed_keyword__volume'),
                        output_field=IntegerField()
                    )
                )
            )
        )
        return success_response(
            data={
                'total_clusters': len(cluster_ids),
                'total_keywords': keyword_stats['total_keywords'] or 0,
                'total_volume': keyword_stats['total_volume'] or 0,
            },
            request=request
        )

    @action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
    def bulk_delete(self, request):
        """Bulk delete clusters"""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        items_to_delete = queryset.filter(id__in=ids)
        deleted_count = items_to_delete.count()
        items_to_delete.delete()  # Soft delete via SoftDeletableModel
        return success_response(data={'deleted_count': deleted_count}, request=request)

    @action(detail=False, methods=['get'], url_path='filter_options', url_name='filter_options')
    def filter_options(self, request):
        """
        Get distinct filter values from current data with cascading filter support.
        Returns only statuses and difficulties that exist based on other active filters.
        """
        import logging
        from django.db.models import Q, Sum, Avg, Case, When, F, IntegerField
        logger = logging.getLogger(__name__)
        try:
            # Start with base queryset (already has volume/difficulty annotations from get_queryset)
            queryset = self.get_queryset()
            # Get filter parameters for cascading
            status_filter = request.query_params.get('status', '')
            difficulty_min = request.query_params.get('difficulty_min', '')
            difficulty_max = request.query_params.get('difficulty_max', '')
            volume_min = request.query_params.get('volume_min', '')
            volume_max = request.query_params.get('volume_max', '')
            search = request.query_params.get('search', '')
            # ===== GET STATUS OPTIONS =====
            # Apply OTHER filters (exclude status) to get valid status options
            status_qs = queryset
            if difficulty_min:
                try:
                    status_qs = status_qs.filter(_annotated_difficulty__gte=float(difficulty_min))
                except ValueError:
                    pass
            if difficulty_max:
                try:
                    status_qs = status_qs.filter(_annotated_difficulty__lte=float(difficulty_max))
                except ValueError:
                    pass
            if volume_min:
                try:
                    status_qs = status_qs.filter(_annotated_volume__gte=int(volume_min))
                except ValueError:
                    pass
            if volume_max:
                try:
                    status_qs = status_qs.filter(_annotated_volume__lte=int(volume_max))
                except ValueError:
                    pass
            if search:
                status_qs = status_qs.filter(
                    Q(name__icontains=search) | Q(description__icontains=search)
                )
            # Get distinct statuses
            statuses = list(set(status_qs.values_list('status', flat=True)))
            statuses = sorted([s for s in statuses if s])
            status_labels = {
                'new': 'New',
                'mapped': 'Mapped',
            }
            status_options = [
                {'value': '', 'label': 'All Status'},
            ] + [
                {'value': s, 'label': status_labels.get(s, s.title())}
                for s in statuses
            ]
            # ===== GET DIFFICULTY OPTIONS =====
            # Apply OTHER filters (exclude difficulty) to get valid difficulty options
            difficulty_qs = queryset
            if status_filter:
                difficulty_qs = difficulty_qs.filter(status=status_filter)
            if volume_min:
                try:
                    difficulty_qs = difficulty_qs.filter(_annotated_volume__gte=int(volume_min))
                except ValueError:
                    pass
            if volume_max:
                try:
                    difficulty_qs = difficulty_qs.filter(_annotated_volume__lte=int(volume_max))
                except ValueError:
                    pass
            if search:
                difficulty_qs = difficulty_qs.filter(
                    Q(name__icontains=search) | Q(description__icontains=search)
                )
            # Get raw difficulty values (0-100) from annotated field
            difficulty_values = difficulty_qs.exclude(_annotated_difficulty__isnull=True).values_list('_annotated_difficulty', flat=True)
            # Map raw difficulty (0-100) to 1-5 scale and find unique values
            difficulty_levels = set()
            for d in difficulty_values:
                if d is not None:
                    if d <= 10:
                        difficulty_levels.add(1)
                    elif d <= 30:
                        difficulty_levels.add(2)
                    elif d <= 50:
                        difficulty_levels.add(3)
                    elif d <= 70:
                        difficulty_levels.add(4)
                    else:
                        difficulty_levels.add(5)
            difficulty_labels = {
                1: '1 - Very Easy',
                2: '2 - Easy',
                3: '3 - Medium',
                4: '4 - Hard',
                5: '5 - Very Hard',
            }
            difficulty_options = [
                {'value': '', 'label': 'All Difficulty'},
            ] + [
                {'value': str(d), 'label': difficulty_labels[d]}
                for d in sorted(difficulty_levels)
            ]
            return success_response(
                data={
                    'statuses': status_options,
                    'difficulties': difficulty_options,
                },
                request=request
            )
        except Exception as e:
            logger.error(f"Error in filter_options: {str(e)}", exc_info=True)
            return error_response(
                error=f'Failed to fetch filter options: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )

    @action(detail=False, methods=['post'], url_path='auto_generate_ideas', url_name='auto_generate_ideas')
    def auto_generate_ideas(self, request):
        """Auto-generate ideas for clusters using IdeasService"""
        import logging
        logger = logging.getLogger(__name__)
        try:
            cluster_ids = request.data.get('ids', [])
            # Get account
            account = getattr(request, 'account', None)
            if not account:
                return error_response(
                    error='Account is required',
                    status_code=status.HTTP_400_BAD_REQUEST,
                    request=request
                )
            # Use service to generate ideas
            service = IdeasService()
            try:
                result = service.generate_ideas(cluster_ids, account)
                if result.get('success'):
                    if 'task_id' in result:
                        # Async task queued
                        return success_response(
                            data={'task_id': result['task_id']},
                            message=result.get('message', 'Idea generation started'),
                            request=request
                        )
                    else:
                        # Synchronous execution
                        return success_response(
                            data=result,
                            request=request
                        )
                else:
                    return error_response(
                        error=result.get('error', 'Idea generation failed'),
                        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                        request=request
                    )
            except InsufficientCreditsError as e:
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_402_PAYMENT_REQUIRED,
                    request=request
                )
            except Exception as e:
                logger.error(f"Error in auto_generate_ideas: {str(e)}", exc_info=True)
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    request=request
                )
        except Exception as e:
            logger.error(f"Unexpected error in auto_generate_ideas: {str(e)}", exc_info=True)
            return error_response(
                error=f'Unexpected error: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )

    def list(self, request, *args, **kwargs):
        """
        Override list to optimize keyword stats calculation using bulk aggregation
        """
        queryset = self.filter_queryset(self.get_queryset())
        # Handle pagination first
        page = self.paginate_queryset(queryset)
        if page is not None:
            # Optimize keyword stats for the paginated clusters
            cluster_list = list(page)
            ClusterSerializer.prefetch_keyword_stats(cluster_list)
            serializer = self.get_serializer(cluster_list, many=True)
            return self.get_paginated_response(serializer.data)
        # No pagination - optimize all clusters
        cluster_list = list(queryset)
        ClusterSerializer.prefetch_keyword_stats(cluster_list)
        serializer = self.get_serializer(cluster_list, many=True)
        return success_response(
            data=serializer.data,
            request=request
        )
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class ContentIdeasViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing content ideas with CRUD operations
    Unified API Standard v1.0 compliant
    """
    queryset = ContentIdeas.objects.all()
    serializer_class = ContentIdeasSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by idea_title
    search_fields = ['idea_title']
    # Ordering configuration
    ordering_fields = ['idea_title', 'created_at', 'estimated_word_count']
    ordering = ['-created_at']  # Default ordering (newest first)
    # Filter configuration - use custom filterset for date range filtering
    filterset_class = ContentIdeasFilter

    def perform_create(self, serializer):
        """Require explicit site_id and sector_id - no defaults."""
        user = getattr(self.request, 'user', None)
        # Safely access query_params (fall back to GET for non-DRF requests)
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
        sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            raise ValidationError("site_id is required. Please select a site.")
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            raise ValidationError(f"Site with id {site_id} does not exist")
        # Sector ID is REQUIRED
        if not sector_id:
            raise ValidationError("sector_id is required. Please select a sector.")
        try:
            sector = Sector.objects.get(id=sector_id)
            # BUGFIX: compare against site.id (int) instead of the raw site_id input,
            # which is a string when taken from query params and would never compare
            # equal to the integer FK, wrongly rejecting a matching sector.
            if sector.site_id != site.id:
                raise ValidationError("Sector does not belong to the selected site")
        except Sector.DoesNotExist:
            raise ValidationError(f"Sector with id {sector_id} does not exist")
        # site_id / sector_id are not model fields - strip before save
        serializer.validated_data.pop('site_id', None)
        serializer.validated_data.pop('sector_id', None)
        # Resolve account from request, then user, then the site itself
        account = getattr(self.request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        # Check monthly limit for content_ideas (enforces ALL entry points: manual, import, AI, automation)
        from igny8_core.business.billing.services.limit_service import LimitService, MonthlyLimitExceededError
        try:
            LimitService.check_monthly_limit(account, 'content_ideas', additional_count=1)
        except MonthlyLimitExceededError as e:
            raise ValidationError(str(e))
        serializer.save(account=account, site=site, sector=sector)

    @action(detail=False, methods=['get'], url_path='filter_options', url_name='filter_options')
    def filter_options(self, request):
        """
        Get distinct filter values from current data with cascading filter support.
        Returns only values that exist based on other active filters.
        """
        import logging
        from django.db.models import Q
        logger = logging.getLogger(__name__)
        try:
            queryset = self.get_queryset()
            # Get filter parameters for cascading
            status_filter = request.query_params.get('status', '')
            content_type_filter = request.query_params.get('content_type', '')
            content_structure_filter = request.query_params.get('content_structure', '')
            cluster_filter = request.query_params.get('cluster', '')
            search = request.query_params.get('search', '')
            # Apply search filter to all options if provided
            base_qs = queryset
            if search:
                base_qs = base_qs.filter(
                    Q(idea_title__icontains=search) | Q(description__icontains=search)
                )
            # Get statuses (filtered by type, structure, cluster)
            status_qs = base_qs
            if content_type_filter:
                status_qs = status_qs.filter(content_type=content_type_filter)
            if content_structure_filter:
                status_qs = status_qs.filter(content_structure=content_structure_filter)
            if cluster_filter:
                status_qs = status_qs.filter(keyword_cluster_id=cluster_filter)
            statuses = list(set(status_qs.values_list('status', flat=True)))
            statuses = sorted([s for s in statuses if s])
            status_labels = {
                'new': 'New',
                'queued': 'Queued',
                'completed': 'Completed',
            }
            status_options = [
                {'value': '', 'label': 'All Status'},
            ] + [
                {'value': s, 'label': status_labels.get(s, s.title())}
                for s in statuses
            ]
            # Get content_types (filtered by status, structure, cluster)
            type_qs = base_qs
            if status_filter:
                type_qs = type_qs.filter(status=status_filter)
            if content_structure_filter:
                type_qs = type_qs.filter(content_structure=content_structure_filter)
            if cluster_filter:
                type_qs = type_qs.filter(keyword_cluster_id=cluster_filter)
            content_types = list(set(type_qs.values_list('content_type', flat=True)))
            content_types = sorted([t for t in content_types if t])
            type_labels = {
                'post': 'Post',
                'page': 'Page',
                'product': 'Product',
                'taxonomy': 'Taxonomy',
            }
            content_type_options = [
                {'value': '', 'label': 'All Types'},
            ] + [
                {'value': t, 'label': type_labels.get(t, t.title())}
                for t in content_types
            ]
            # Get content_structures (filtered by status, type, cluster)
            structure_qs = base_qs
            if status_filter:
                structure_qs = structure_qs.filter(status=status_filter)
            if content_type_filter:
                structure_qs = structure_qs.filter(content_type=content_type_filter)
            if cluster_filter:
                structure_qs = structure_qs.filter(keyword_cluster_id=cluster_filter)
            structures = list(set(structure_qs.values_list('content_structure', flat=True)))
            structures = sorted([s for s in structures if s])
            structure_labels = {
                'article': 'Article', 'guide': 'Guide', 'comparison': 'Comparison',
                'review': 'Review', 'listicle': 'Listicle', 'landing_page': 'Landing Page',
                'business_page': 'Business Page', 'service_page': 'Service Page',
                'general': 'General', 'cluster_hub': 'Cluster Hub',
                'product_page': 'Product Page', 'category_archive': 'Category Archive',
                'tag_archive': 'Tag Archive', 'attribute_archive': 'Attribute Archive',
            }
            content_structure_options = [
                {'value': '', 'label': 'All Structures'},
            ] + [
                {'value': s, 'label': structure_labels.get(s, s.replace('_', ' ').title())}
                for s in structures
            ]
            # Get distinct clusters (filtered by status, type, structure)
            cluster_qs = base_qs
            if status_filter:
                cluster_qs = cluster_qs.filter(status=status_filter)
            if content_type_filter:
                cluster_qs = cluster_qs.filter(content_type=content_type_filter)
            if content_structure_filter:
                cluster_qs = cluster_qs.filter(content_structure=content_structure_filter)
            cluster_ids = list(set(
                cluster_qs.exclude(keyword_cluster_id__isnull=True)
                .values_list('keyword_cluster_id', flat=True)
            ))
            clusters = Clusters.objects.filter(id__in=cluster_ids).values('id', 'name').order_by('name')
            cluster_options = [
                {'value': '', 'label': 'All Clusters'},
            ] + [
                {'value': str(c['id']), 'label': c['name']}
                for c in clusters
            ]
            return success_response(
                data={
                    'statuses': status_options,
                    'content_types': content_type_options,
                    'content_structures': content_structure_options,
                    'clusters': cluster_options,
                },
                request=request
            )
        except Exception as e:
            logger.error(f"Error in filter_options: {str(e)}", exc_info=True)
            return error_response(
                error=f'Failed to fetch filter options: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )

    @action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
    def bulk_delete(self, request):
        """Bulk delete content ideas"""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        items_to_delete = queryset.filter(id__in=ids)
        deleted_count = items_to_delete.count()
        items_to_delete.delete()  # Soft delete via SoftDeletableModel
        return success_response(data={'deleted_count': deleted_count}, request=request)

    @action(detail=False, methods=['post'], url_path='bulk_queue_to_writer', url_name='bulk_queue_to_writer')
    def bulk_queue_to_writer(self, request):
        """Queue ideas to writer by creating Tasks"""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        # Get ALL requested ideas first (don't filter by status yet)
        all_ideas = queryset.filter(id__in=ids)
        # Check which ones can be queued (status='new')
        queueable_ideas = all_ideas.filter(status='new')
        from igny8_core.modules.writer.models import Tasks
        created_tasks = []
        errors = []
        skipped = []
        # Add skipped ideas (not 'new' status)
        for idea in all_ideas:
            if idea.status != 'new':
                skipped.append({
                    'idea_id': idea.id,
                    'title': idea.idea_title,
                    'reason': f'Already {idea.status}'
                })
        # Process queueable ideas
        for idea in queueable_ideas:
            try:
                # Validate required fields
                if not idea.keyword_cluster:
                    errors.append({
                        'idea_id': idea.id,
                        'title': idea.idea_title,
                        'error': 'Missing required cluster - assign idea to a cluster first'
                    })
                    continue
                # Build keywords string from idea's keyword objects
                keywords_str = ''
                if idea.keyword_objects.exists():
                    keywords_str = ', '.join([kw.keyword for kw in idea.keyword_objects.all()])
                elif idea.target_keywords:
                    keywords_str = idea.target_keywords
                # Direct copy - no mapping needed
                task = Tasks.objects.create(
                    title=idea.idea_title,
                    description=idea.description or '',
                    cluster=idea.keyword_cluster,
                    content_type=idea.content_type or 'post',
                    content_structure=idea.content_structure or 'article',
                    taxonomy_term=None,  # Can be set later if taxonomy is available
                    keywords=keywords_str,  # Comma-separated keywords string
                    word_count=idea.estimated_word_count,  # Copy word count from idea
                    status='queued',
                    account=idea.account,
                    site=idea.site,
                    sector=idea.sector,
                    idea=idea,  # Link back to the original idea
                )
                created_tasks.append(task.id)
                # Update idea status to queued
                idea.status = 'queued'
                idea.save()
            except Exception as e:
                errors.append({
                    'idea_id': idea.id,
                    'title': idea.idea_title,
                    'error': str(e)
                })
        # Return appropriate response based on results
        if len(created_tasks) == 0 and (errors or skipped):
            # Complete failure
            return error_response(
                error=f'Failed to queue any ideas: {len(errors)} errors, {len(skipped)} skipped',
                errors=errors if errors else skipped,
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        elif errors:
            # Partial success - some created, some failed
            return success_response(
                data={
                    'created_count': len(created_tasks),
                    'task_ids': created_tasks,
                    'errors': errors,
                    'skipped': skipped,
                },
                message=f'Queued {len(created_tasks)} ideas ({len(errors)} failed, {len(skipped)} skipped)',
                request=request
            )
        else:
            # Complete success
            return success_response(
                data={
                    'created_count': len(created_tasks),
                    'task_ids': created_tasks,
                    'skipped': skipped,
                },
                message=f'Successfully queued {len(created_tasks)} ideas to writer' + (f' ({len(skipped)} already scheduled)' if skipped else ''),
                request=request
            )

    @action(detail=False, methods=['get'], url_path='export', url_name='export')
    def export(self, request):
        """
        Export content ideas to CSV
        Query params: search, status, keyword_cluster_id, content_type, content_structure, ids (comma-separated)
        If 'ids' parameter is provided, ONLY those IDs will be exported (other filters are ignored).
        """
        # Get base queryset with site/sector/account filtering
        queryset = self.get_queryset()
        # Handle IDs filter for bulk export of selected records
        ids_param = request.query_params.get('ids', '')
        if ids_param:
            try:
                ids_list = [int(id_str.strip()) for id_str in ids_param.split(',') if id_str.strip()]
                if ids_list:
                    queryset = queryset.filter(id__in=ids_list)
            except (ValueError, TypeError):
                # Malformed ids - fall back to the normal filter pipeline
                queryset = self.filter_queryset(queryset)
        else:
            queryset = self.filter_queryset(queryset)
        ideas = queryset.all()
        # Generate CSV
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="ideas.csv"'
        writer = csv.writer(response)
        # Header row
        writer.writerow([
            'ID', 'Title', 'Description', 'Primary Focus Keywords', 'Target Keywords',
            'Cluster', 'Content Type', 'Content Structure', 'Status',
            'Estimated Word Count', 'Created At',
        ])
        # Data rows
        for idea in ideas:
            writer.writerow([
                idea.id,
                idea.idea_title,
                idea.description or '',
                idea.primary_focus_keywords or '',
                idea.target_keywords or '',
                idea.keyword_cluster.name if idea.keyword_cluster else '',
                idea.content_type or '',
                idea.content_structure or '',
                idea.status or '',
                idea.estimated_word_count or '',
                idea.created_at.isoformat() if idea.created_at else '',
            ])
        return response

    # REMOVED: generate_idea action - idea generation function removed