Files
igny8/backend/igny8_core/modules/planner/views.py
2025-12-03 05:13:53 +00:00

1127 lines
46 KiB
Python

from rest_framework import viewsets, filters, status
from rest_framework.decorators import action
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from django.db import transaction
from django.db.models import Max, Count, Sum, Q
from django.http import HttpResponse
import csv
import json
import time
from drf_spectacular.utils import extend_schema, extend_schema_view
from igny8_core.api.base import SiteSectorModelViewSet
from igny8_core.api.pagination import CustomPageNumberPagination
from igny8_core.api.response import success_response, error_response
from igny8_core.api.throttles import DebugScopedRateThrottle
from igny8_core.api.permissions import IsAuthenticatedAndActive, IsViewerOrAbove, IsEditorOrAbove
from .models import Keywords, Clusters, ContentIdeas
from .serializers import KeywordSerializer, ContentIdeasSerializer
from .cluster_serializers import ClusterSerializer
from igny8_core.business.planning.services.clustering_service import ClusteringService
from igny8_core.business.planning.services.ideas_service import IdeasService
from igny8_core.business.billing.exceptions import InsufficientCreditsError
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class KeywordViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing keywords with CRUD operations.

    Provides list, create, retrieve, update, and destroy actions, plus bulk
    actions (delete / status update / add-from-seed), CSV import/export and
    auto-clustering via ClusteringService.
    Unified API Standard v1.0 compliant.
    """
    queryset = Keywords.objects.all()
    serializer_class = KeywordSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by seed_keyword's keyword field
    search_fields = ['seed_keyword__keyword']
    # Ordering configuration - allow ordering by created_at, volume, difficulty (from seed_keyword)
    ordering_fields = ['created_at', 'seed_keyword__volume', 'seed_keyword__difficulty']
    ordering = ['-created_at']  # Default ordering (newest first)
    # Filter configuration - filter by status, cluster_id, and seed_keyword fields
    filterset_fields = ['status', 'cluster_id', 'seed_keyword__intent', 'seed_keyword_id']

    def get_queryset(self):
        """
        Override to support custom difficulty/volume range filtering.

        Uses parent's get_queryset() which properly handles developer role and
        site/sector filtering. Each range filter checks the *_override field
        first and falls back to the related seed_keyword value when the
        override is NULL. Non-numeric bounds are silently ignored so a bad
        query param never breaks the listing.
        """
        import logging
        logger = logging.getLogger(__name__)
        try:
            # Use parent's get_queryset() which handles developer role and site filtering correctly
            queryset = super().get_queryset()
            # Safely access query_params (DRF wraps request with Request class)
            try:
                query_params = getattr(self.request, 'query_params', None)
                if query_params is None:
                    # Fallback for non-DRF requests
                    query_params = getattr(self.request, 'GET', {})
            except AttributeError:
                query_params = {}
            # Custom difficulty range filtering (check override first, then seed_keyword)
            difficulty_min = query_params.get('difficulty_min')
            difficulty_max = query_params.get('difficulty_max')
            if difficulty_min is not None:
                try:
                    bound = int(difficulty_min)  # parse once, reuse in both Q branches
                    queryset = queryset.filter(
                        Q(difficulty_override__gte=bound) |
                        Q(difficulty_override__isnull=True, seed_keyword__difficulty__gte=bound)
                    )
                except (ValueError, TypeError):
                    pass
            if difficulty_max is not None:
                try:
                    bound = int(difficulty_max)
                    queryset = queryset.filter(
                        Q(difficulty_override__lte=bound) |
                        Q(difficulty_override__isnull=True, seed_keyword__difficulty__lte=bound)
                    )
                except (ValueError, TypeError):
                    pass
            # Custom volume range filtering (check override first, then seed_keyword)
            volume_min = query_params.get('volume_min')
            volume_max = query_params.get('volume_max')
            if volume_min is not None:
                try:
                    bound = int(volume_min)
                    queryset = queryset.filter(
                        Q(volume_override__gte=bound) |
                        Q(volume_override__isnull=True, seed_keyword__volume__gte=bound)
                    )
                except (ValueError, TypeError):
                    pass
            if volume_max is not None:
                try:
                    bound = int(volume_max)
                    queryset = queryset.filter(
                        Q(volume_override__lte=bound) |
                        Q(volume_override__isnull=True, seed_keyword__volume__lte=bound)
                    )
                except (ValueError, TypeError):
                    pass
            return queryset
        except Exception as e:
            logger.error(f"Error in KeywordViewSet.get_queryset(): {type(e).__name__}: {str(e)}", exc_info=True)
            # Return empty queryset instead of raising exception
            return Keywords.objects.none()

    def list(self, request, *args, **kwargs):
        """
        Override list method to add error handling.

        Any unexpected failure is logged and converted into a 500 envelope
        response instead of a bare exception.
        """
        import logging
        logger = logging.getLogger(__name__)
        try:
            queryset = self.filter_queryset(self.get_queryset())
            page = self.paginate_queryset(queryset)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)
            serializer = self.get_serializer(queryset, many=True)
            return success_response(
                data=serializer.data,
                request=request
            )
        except Exception as e:
            logger.error(f"Error in KeywordViewSet.list(): {type(e).__name__}: {str(e)}", exc_info=True)
            return error_response(
                error=f'Error loading keywords: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )

    def perform_create(self, serializer):
        """Require explicit site_id and sector_id - no defaults.

        Raises ValidationError if either is missing, unknown, or mismatched.
        """
        user = getattr(self.request, 'user', None)
        # Get site_id and sector_id from validated_data or query params
        # Safely access query_params
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
        sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
        # Import here to avoid circular imports
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            raise ValidationError("site_id is required. Please select a site.")
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            raise ValidationError(f"Site with id {site_id} does not exist")
        # Sector ID is REQUIRED
        if not sector_id:
            raise ValidationError("sector_id is required. Please select a sector.")
        try:
            sector = Sector.objects.get(id=sector_id)
            # Verify sector belongs to the site.
            # NOTE: compare against site.id (int) rather than the raw site_id
            # request value, which may be a string from query params — an
            # int != str comparison would always be True and reject valid pairs.
            if sector.site_id != site.id:
                raise ValidationError(f"Sector '{sector.name}' does not belong to the selected site")
        except Sector.DoesNotExist:
            raise ValidationError(f"Sector with id {sector_id} does not exist")
        # Remove site_id and sector_id from validated_data as they're not model fields
        serializer.validated_data.pop('site_id', None)
        serializer.validated_data.pop('sector_id', None)
        # Get account from site or user
        account = getattr(self.request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        # Save with all required fields explicitly
        serializer.save(account=account, site=site, sector=sector)

    @action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
    def bulk_delete(self, request):
        """Bulk delete keywords. Expects {"ids": [...]} in the request body."""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # get_queryset() scopes deletion to the caller's visible site/sector
        queryset = self.get_queryset()
        deleted_count, _ = queryset.filter(id__in=ids).delete()
        return success_response(data={'deleted_count': deleted_count}, request=request)

    @action(detail=False, methods=['post'], url_path='bulk_update', url_name='bulk_update')
    def bulk_update(self, request):
        """Bulk update keyword status. Expects {"ids": [...], "status": "..."}."""
        ids = request.data.get('ids', [])
        status_value = request.data.get('status')
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        if not status_value:
            return error_response(
                error='No status provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        updated_count = queryset.filter(id__in=ids).update(status=status_value)
        return success_response(data={'updated_count': updated_count}, request=request)

    @action(detail=False, methods=['post'], url_path='bulk_add_from_seed', url_name='bulk_add_from_seed')
    def bulk_add_from_seed(self, request):
        """Bulk add SeedKeywords to workflow (create Keywords records).

        Validates site/sector/account, then per seed keyword checks
        industry/sector compatibility before creating. Returns counts of
        created/skipped items plus up to 10 error messages.
        """
        from igny8_core.auth.models import SeedKeyword, Site, Sector
        seed_keyword_ids = request.data.get('seed_keyword_ids', [])
        site_id = request.data.get('site_id')
        sector_id = request.data.get('sector_id')
        if not seed_keyword_ids:
            return error_response(
                error='No seed keyword IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        if not site_id:
            return error_response(
                error='site_id is required',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        if not sector_id:
            return error_response(
                error='sector_id is required',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        try:
            site = Site.objects.get(id=site_id)
            sector = Sector.objects.get(id=sector_id)
        except (Site.DoesNotExist, Sector.DoesNotExist) as e:
            return error_response(
                error=f'Invalid site or sector: {str(e)}',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Validate sector belongs to site (object comparison, type-safe)
        if sector.site != site:
            return error_response(
                error='Sector does not belong to the specified site',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Get account from site
        account = site.account
        if not account:
            return error_response(
                error='Site has no account assigned',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Get SeedKeywords
        seed_keywords = SeedKeyword.objects.filter(id__in=seed_keyword_ids, is_active=True)
        if not seed_keywords.exists():
            return error_response(
                error='No valid seed keywords found',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        created_count = 0
        skipped_count = 0
        errors = []
        with transaction.atomic():
            for seed_keyword in seed_keywords:
                try:
                    # Validate industry/sector match
                    if site.industry != seed_keyword.industry:
                        errors.append(
                            f"Keyword '{seed_keyword.keyword}': industry mismatch "
                            f"(site={site.industry.name if site.industry else 'None'}, "
                            f"seed={seed_keyword.industry.name if seed_keyword.industry else 'None'})"
                        )
                        skipped_count += 1
                        continue
                    # Check if sector has industry_sector set
                    if not sector.industry_sector:
                        errors.append(
                            f"Keyword '{seed_keyword.keyword}': sector '{sector.name}' has no industry_sector set. "
                            f"Please update the sector to reference an industry sector."
                        )
                        skipped_count += 1
                        continue
                    if sector.industry_sector != seed_keyword.sector:
                        errors.append(
                            f"Keyword '{seed_keyword.keyword}': sector mismatch "
                            f"(sector={sector.industry_sector.name if sector.industry_sector else 'None'}, "
                            f"seed={seed_keyword.sector.name if seed_keyword.sector else 'None'})"
                        )
                        skipped_count += 1
                        continue
                    # Create Keyword if it doesn't exist
                    keyword, created = Keywords.objects.get_or_create(
                        seed_keyword=seed_keyword,
                        site=site,
                        sector=sector,
                        defaults={
                            'status': 'pending',
                            'account': account
                        }
                    )
                    if created:
                        created_count += 1
                    else:
                        skipped_count += 1
                except Exception as e:
                    errors.append(f"Error adding '{seed_keyword.keyword}': {str(e)}")
                    skipped_count += 1
        return success_response(
            data={
                'created': created_count,
                'skipped': skipped_count,
                'errors': errors[:10] if errors else []  # Limit errors to first 10
            },
            request=request
        )

    @action(detail=False, methods=['get'], url_path='export', url_name='export')
    def export(self, request):
        """
        Export keywords to CSV.

        Query params: search, status, cluster_id, ids (comma-separated).
        Note: Always exports as CSV. The 'format' parameter is ignored to avoid DRF format suffix conflicts.
        If 'ids' parameter is provided, ONLY those IDs will be exported (other filters are ignored).
        """
        import logging
        logger = logging.getLogger(__name__)
        # Get base queryset with site/sector/account filtering
        queryset = self.get_queryset()
        # Handle IDs filter for bulk export of selected records
        # If IDs are provided, ONLY export those IDs and ignore all other filters
        ids_param = request.query_params.get('ids', '')
        if ids_param:
            try:
                ids_list = [int(id_str.strip()) for id_str in ids_param.split(',') if id_str.strip()]
                if ids_list:
                    # Filter ONLY by IDs when IDs parameter is present
                    logger.debug("Export filtering by explicit ids: %s", ids_list)
                    queryset = queryset.filter(id__in=ids_list)
            except (ValueError, TypeError):
                # If IDs parsing fails, fall through to regular filtering
                queryset = self.filter_queryset(queryset)
        else:
            # Apply all filters from query params (search, status, cluster_id) when no IDs specified
            queryset = self.filter_queryset(queryset)
        # Export all matching records
        keywords = queryset.all()
        logger.debug("Export generating CSV with %d records", keywords.count())
        # Generate CSV
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="keywords.csv"'
        writer = csv.writer(response)
        # Header row
        writer.writerow(['ID', 'Keyword', 'Volume', 'Difficulty', 'Intent', 'Status', 'Cluster ID', 'Created At'])
        # Data rows
        for keyword in keywords:
            writer.writerow([
                keyword.id,
                keyword.keyword,
                keyword.volume,
                keyword.difficulty,
                keyword.intent,
                keyword.status,
                keyword.cluster_id or '',
                keyword.created_at.isoformat() if keyword.created_at else '',
            ])
        return response

    @action(detail=False, methods=['post'], url_path='import_keywords', url_name='import_keywords')
    def import_keywords(self, request):
        """
        Import keywords from CSV file.

        Automatically links keywords to current active site/sector.
        Expects a 'file' upload plus site_id/sector_id query params.
        Duplicate keywords (same site/sector/account) are skipped.
        """
        if 'file' not in request.FILES:
            return error_response(
                error='No file provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        file = request.FILES['file']
        if not file.name.endswith('.csv'):
            return error_response(
                error='File must be a CSV',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        user = getattr(request, 'user', None)
        # Get site_id and sector_id from query params or use active site
        try:
            query_params = getattr(request, 'query_params', None)
            if query_params is None:
                query_params = getattr(request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = query_params.get('site_id')
        sector_id = query_params.get('sector_id')
        # Import here to avoid circular imports
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            return error_response(
                error='site_id is required',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            return error_response(
                error=f'Site with id {site_id} does not exist',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Sector ID is REQUIRED
        if not sector_id:
            return error_response(
                error='sector_id is required',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        try:
            sector = Sector.objects.get(id=sector_id)
            # Compare against site.id (int) — the raw site_id query param is a
            # string, and int != str is always True, which would wrongly reject
            # every valid site/sector pair.
            if sector.site_id != site.id:
                return error_response(
                    error='Sector does not belong to the selected site',
                    status_code=status.HTTP_400_BAD_REQUEST,
                    request=request
                )
        except Sector.DoesNotExist:
            return error_response(
                error=f'Sector with id {sector_id} does not exist',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        # Get account
        account = getattr(request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        # Parse CSV
        try:
            decoded_file = file.read().decode('utf-8')
            csv_reader = csv.DictReader(decoded_file.splitlines())
            imported_count = 0
            skipped_count = 0
            errors = []
            with transaction.atomic():
                for row_num, row in enumerate(csv_reader, start=2):  # Start at 2 (header is row 1)
                    try:
                        keyword_text = row.get('keyword', '').strip()
                        if not keyword_text:
                            skipped_count += 1
                            continue
                        # Check if keyword already exists for this site/sector
                        existing = Keywords.objects.filter(
                            keyword=keyword_text,
                            site=site,
                            sector=sector,
                            account=account
                        ).first()
                        if existing:
                            skipped_count += 1
                            continue
                        # Create keyword
                        Keywords.objects.create(
                            keyword=keyword_text,
                            volume=int(row.get('volume', 0) or 0),
                            difficulty=int(row.get('difficulty', 0) or 0),
                            intent=row.get('intent', 'informational') or 'informational',
                            status=row.get('status', 'new') or 'new',
                            site=site,
                            sector=sector,
                            account=account
                        )
                        imported_count += 1
                    except Exception as e:
                        errors.append(f"Row {row_num}: {str(e)}")
                        continue
            return success_response(
                data={
                    'imported': imported_count,
                    'skipped': skipped_count,
                    'errors': errors[:10] if errors else []  # Limit errors to first 10
                },
                request=request
            )
        except Exception as e:
            return error_response(
                error=f'Failed to parse CSV: {str(e)}',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )

    @action(detail=False, methods=['post'], url_path='auto_cluster', url_name='auto_cluster')
    def auto_cluster(self, request):
        """Auto-cluster keywords using ClusteringService.

        Returns a task_id when the service queues async work, the direct
        result for synchronous execution, 402 on insufficient credits, and
        500 on service failure.
        """
        import logging
        logger = logging.getLogger(__name__)
        try:
            keyword_ids = request.data.get('ids', [])
            sector_id = request.data.get('sector_id')
            # Get account
            account = getattr(request, 'account', None)
            if not account:
                return error_response(
                    error='Account is required',
                    status_code=status.HTTP_400_BAD_REQUEST,
                    request=request
                )
            # Use service to cluster keywords
            service = ClusteringService()
            try:
                result = service.cluster_keywords(keyword_ids, account, sector_id)
                if result.get('success'):
                    if 'task_id' in result:
                        # Async task queued
                        return success_response(
                            data={'task_id': result['task_id']},
                            message=result.get('message', 'Clustering started'),
                            request=request
                        )
                    else:
                        # Synchronous execution
                        return success_response(
                            data=result,
                            request=request
                        )
                else:
                    return error_response(
                        error=result.get('error', 'Clustering failed'),
                        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                        request=request
                    )
            except InsufficientCreditsError as e:
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_402_PAYMENT_REQUIRED,
                    request=request
                )
            except Exception as e:
                logger.error(f"Error in auto_cluster: {str(e)}", exc_info=True)
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    request=request
                )
        except Exception as e:
            logger.error(f"Unexpected error in auto_cluster: {str(e)}", exc_info=True)
            return error_response(
                error=f'Unexpected error: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class ClusterViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing clusters with CRUD operations.

    Adds bulk delete, idea generation via IdeasService, and an optimized
    list() that prefetches keyword stats in bulk.
    Unified API Standard v1.0 compliant.
    """
    queryset = Clusters.objects.all()
    serializer_class = ClusterSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by name
    search_fields = ['name']
    # Ordering configuration
    ordering_fields = ['name', 'created_at', 'keywords_count', 'volume', 'difficulty']
    ordering = ['name']  # Default ordering
    # Filter configuration
    filterset_fields = ['status']

    def get_queryset(self):
        """
        Get all clusters - keywords_count, volume, and difficulty are calculated in the serializer
        since there's no ForeignKey relationship between Clusters and Keywords.

        Uses parent's get_queryset for filtering, then annotates aggregated
        volume (Sum) and difficulty (Avg) so range query params can filter on
        them. Override fields take precedence over seed_keyword values.
        """
        queryset = super().get_queryset()
        # Annotate queryset with aggregated volume and difficulty for filtering
        from django.db.models import Sum, Avg, Case, When, F, IntegerField
        # Since volume and difficulty are properties (not DB fields), we need to use
        # COALESCE to check volume_override/difficulty_override first, then fallback to seed_keyword
        # Volume: COALESCE(volume_override, seed_keyword__volume)
        # Difficulty: COALESCE(difficulty_override, seed_keyword__difficulty)
        queryset = queryset.annotate(
            _annotated_volume=Sum(
                Case(
                    When(keywords__volume_override__isnull=False, then=F('keywords__volume_override')),
                    default=F('keywords__seed_keyword__volume'),
                    output_field=IntegerField()
                )
            ),
            _annotated_difficulty=Avg(
                Case(
                    When(keywords__difficulty_override__isnull=False, then=F('keywords__difficulty_override')),
                    default=F('keywords__seed_keyword__difficulty'),
                    output_field=IntegerField()
                )
            )
        )
        # Apply volume range filtering (bad values are ignored, not fatal)
        query_params = getattr(self.request, 'query_params', {})
        volume_min = query_params.get('volume_min')
        volume_max = query_params.get('volume_max')
        if volume_min is not None:
            try:
                queryset = queryset.filter(_annotated_volume__gte=int(volume_min))
            except (ValueError, TypeError):
                pass
        if volume_max is not None:
            try:
                queryset = queryset.filter(_annotated_volume__lte=int(volume_max))
            except (ValueError, TypeError):
                pass
        # Apply difficulty range filtering
        difficulty_min = query_params.get('difficulty_min')
        difficulty_max = query_params.get('difficulty_max')
        if difficulty_min is not None:
            try:
                queryset = queryset.filter(_annotated_difficulty__gte=float(difficulty_min))
            except (ValueError, TypeError):
                pass
        if difficulty_max is not None:
            try:
                queryset = queryset.filter(_annotated_difficulty__lte=float(difficulty_max))
            except (ValueError, TypeError):
                pass
        return queryset

    def perform_create(self, serializer):
        """Require explicit site_id and sector_id - no defaults.

        Raises ValidationError if either is missing, unknown, or mismatched.
        """
        user = getattr(self.request, 'user', None)
        # Get site_id and sector_id from validated_data or query params
        # Safely access query_params
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                # Fallback for non-DRF requests
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
        sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
        # Import here to avoid circular imports
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            raise ValidationError("site_id is required. Please select a site.")
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            raise ValidationError(f"Site with id {site_id} does not exist")
        # Sector ID is REQUIRED
        if not sector_id:
            raise ValidationError("sector_id is required. Please select a sector.")
        try:
            sector = Sector.objects.get(id=sector_id)
            # Verify sector belongs to the site.
            # NOTE: compare against site.id (int), not the raw request value
            # which may be a string — int != str is always True and would
            # reject every valid site/sector pair.
            if sector.site_id != site.id:
                raise ValidationError(f"Sector '{sector.name}' does not belong to the selected site")
        except Sector.DoesNotExist:
            raise ValidationError(f"Sector with id {sector_id} does not exist")
        # Remove site_id and sector_id from validated_data as they're not model fields
        serializer.validated_data.pop('site_id', None)
        serializer.validated_data.pop('sector_id', None)
        # Get account from site or user
        account = getattr(self.request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        # Save with all required fields explicitly
        serializer.save(account=account, site=site, sector=sector)

    @action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
    def bulk_delete(self, request):
        """Bulk delete clusters. Expects {"ids": [...]} in the request body."""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        deleted_count, _ = queryset.filter(id__in=ids).delete()
        return success_response(data={'deleted_count': deleted_count}, request=request)

    @action(detail=False, methods=['post'], url_path='auto_generate_ideas', url_name='auto_generate_ideas')
    def auto_generate_ideas(self, request):
        """Auto-generate ideas for clusters using IdeasService.

        Returns a task_id when the service queues async work, the direct
        result for synchronous execution, 402 on insufficient credits, and
        500 on service failure.
        """
        import logging
        logger = logging.getLogger(__name__)
        try:
            cluster_ids = request.data.get('ids', [])
            # Get account
            account = getattr(request, 'account', None)
            if not account:
                return error_response(
                    error='Account is required',
                    status_code=status.HTTP_400_BAD_REQUEST,
                    request=request
                )
            # Use service to generate ideas
            service = IdeasService()
            try:
                result = service.generate_ideas(cluster_ids, account)
                if result.get('success'):
                    if 'task_id' in result:
                        # Async task queued
                        return success_response(
                            data={'task_id': result['task_id']},
                            message=result.get('message', 'Idea generation started'),
                            request=request
                        )
                    else:
                        # Synchronous execution
                        return success_response(
                            data=result,
                            request=request
                        )
                else:
                    return error_response(
                        error=result.get('error', 'Idea generation failed'),
                        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                        request=request
                    )
            except InsufficientCreditsError as e:
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_402_PAYMENT_REQUIRED,
                    request=request
                )
            except Exception as e:
                logger.error(f"Error in auto_generate_ideas: {str(e)}", exc_info=True)
                return error_response(
                    error=str(e),
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    request=request
                )
        except Exception as e:
            logger.error(f"Unexpected error in auto_generate_ideas: {str(e)}", exc_info=True)
            return error_response(
                error=f'Unexpected error: {str(e)}',
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request
            )

    def list(self, request, *args, **kwargs):
        """
        Override list to optimize keyword stats calculation using bulk aggregation.

        Prefetches keyword stats once per page (or once per full result set)
        instead of per-cluster in the serializer.
        """
        queryset = self.filter_queryset(self.get_queryset())
        # Handle pagination first
        page = self.paginate_queryset(queryset)
        if page is not None:
            # Optimize keyword stats for the paginated clusters
            cluster_list = list(page)
            ClusterSerializer.prefetch_keyword_stats(cluster_list)
            serializer = self.get_serializer(cluster_list, many=True)
            return self.get_paginated_response(serializer.data)
        # No pagination - optimize all clusters
        cluster_list = list(queryset)
        ClusterSerializer.prefetch_keyword_stats(cluster_list)
        serializer = self.get_serializer(cluster_list, many=True)
        return success_response(
            data=serializer.data,
            request=request
        )
@extend_schema_view(
    list=extend_schema(tags=['Planner']),
    create=extend_schema(tags=['Planner']),
    retrieve=extend_schema(tags=['Planner']),
    update=extend_schema(tags=['Planner']),
    partial_update=extend_schema(tags=['Planner']),
    destroy=extend_schema(tags=['Planner']),
)
class ContentIdeasViewSet(SiteSectorModelViewSet):
    """
    ViewSet for managing content ideas with CRUD operations.

    Adds bulk delete and bulk queueing of ideas to the writer module
    (creating writer Tasks). Unified API Standard v1.0 compliant.
    """
    queryset = ContentIdeas.objects.all()
    serializer_class = ContentIdeasSerializer
    permission_classes = [IsAuthenticatedAndActive, IsViewerOrAbove]
    pagination_class = CustomPageNumberPagination  # Explicitly use custom pagination
    throttle_scope = 'planner'
    throttle_classes = [DebugScopedRateThrottle]
    # DRF filtering configuration
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    # Search configuration - search by idea_title
    search_fields = ['idea_title']
    # Ordering configuration
    ordering_fields = ['idea_title', 'created_at', 'estimated_word_count']
    ordering = ['-created_at']  # Default ordering (newest first)
    # Filter configuration (updated for new structure)
    filterset_fields = ['status', 'keyword_cluster_id', 'content_type', 'content_structure']

    def perform_create(self, serializer):
        """Require explicit site_id and sector_id - no defaults.

        Raises ValidationError if either is missing, unknown, or mismatched.
        """
        user = getattr(self.request, 'user', None)
        # Safely access query_params (DRF wraps request with Request class)
        try:
            query_params = getattr(self.request, 'query_params', None)
            if query_params is None:
                query_params = getattr(self.request, 'GET', {})
        except AttributeError:
            query_params = {}
        site_id = serializer.validated_data.get('site_id') or query_params.get('site_id')
        sector_id = serializer.validated_data.get('sector_id') or query_params.get('sector_id')
        # Import here to avoid circular imports
        from igny8_core.auth.models import Site, Sector
        from rest_framework.exceptions import ValidationError
        # Site ID is REQUIRED
        if not site_id:
            raise ValidationError("site_id is required. Please select a site.")
        try:
            site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            raise ValidationError(f"Site with id {site_id} does not exist")
        # Sector ID is REQUIRED
        if not sector_id:
            raise ValidationError("sector_id is required. Please select a sector.")
        try:
            sector = Sector.objects.get(id=sector_id)
            # Compare against site.id (int), not the raw request value which
            # may be a string — int != str is always True and would wrongly
            # reject every valid site/sector pair.
            if sector.site_id != site.id:
                raise ValidationError("Sector does not belong to the selected site")
        except Sector.DoesNotExist:
            raise ValidationError(f"Sector with id {sector_id} does not exist")
        # site_id/sector_id are not model fields - strip before save
        serializer.validated_data.pop('site_id', None)
        serializer.validated_data.pop('sector_id', None)
        # Resolve account: request -> user -> site
        account = getattr(self.request, 'account', None)
        if not account and user and user.is_authenticated:
            account = getattr(user, 'account', None)
        if not account:
            account = getattr(site, 'account', None)
        serializer.save(account=account, site=site, sector=sector)

    @action(detail=False, methods=['POST'], url_path='bulk_delete', url_name='bulk_delete')
    def bulk_delete(self, request):
        """Bulk delete content ideas. Expects {"ids": [...]} in the request body."""
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        deleted_count, _ = queryset.filter(id__in=ids).delete()
        return success_response(data={'deleted_count': deleted_count}, request=request)

    @action(detail=False, methods=['post'], url_path='bulk_queue_to_writer', url_name='bulk_queue_to_writer')
    def bulk_queue_to_writer(self, request):
        """Queue ideas to writer by creating Tasks.

        Only ideas with status 'new' are queued; others are reported as
        skipped. Each queued idea gets a writer Task and its status flips to
        'queued'. The response distinguishes full success, partial success
        and complete failure.
        """
        ids = request.data.get('ids', [])
        if not ids:
            return error_response(
                error='No IDs provided',
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        queryset = self.get_queryset()
        # Get ALL requested ideas first (don't filter by status yet)
        all_ideas = queryset.filter(id__in=ids)
        # Check which ones can be queued (status='new')
        queueable_ideas = all_ideas.filter(status='new')
        from igny8_core.modules.writer.models import Tasks
        created_tasks = []
        errors = []
        skipped = []
        # Add skipped ideas (not 'new' status)
        for idea in all_ideas:
            if idea.status != 'new':
                skipped.append({
                    'idea_id': idea.id,
                    'title': idea.idea_title,
                    'reason': f'Already {idea.status}'
                })
        # Process queueable ideas
        for idea in queueable_ideas:
            try:
                # Validate required fields
                if not idea.keyword_cluster:
                    errors.append({
                        'idea_id': idea.id,
                        'title': idea.idea_title,
                        'error': 'Missing required cluster - assign idea to a cluster first'
                    })
                    continue
                # Build keywords string from idea's keyword objects
                keywords_str = ''
                if idea.keyword_objects.exists():
                    keywords_str = ', '.join([kw.keyword for kw in idea.keyword_objects.all()])
                elif idea.target_keywords:
                    keywords_str = idea.target_keywords
                # Direct copy - no mapping needed
                task = Tasks.objects.create(
                    title=idea.idea_title,
                    description=idea.description or '',
                    cluster=idea.keyword_cluster,
                    content_type=idea.content_type or 'post',
                    content_structure=idea.content_structure or 'article',
                    taxonomy_term=None,  # Can be set later if taxonomy is available
                    keywords=keywords_str,  # Comma-separated keywords string
                    status='queued',
                    account=idea.account,
                    site=idea.site,
                    sector=idea.sector,
                    idea=idea,  # Link back to the original idea
                )
                created_tasks.append(task.id)
                # Update idea status to queued
                idea.status = 'queued'
                idea.save()
            except Exception as e:
                errors.append({
                    'idea_id': idea.id,
                    'title': idea.idea_title,
                    'error': str(e)
                })
        # Return appropriate response based on results
        if len(created_tasks) == 0 and (errors or skipped):
            # Complete failure
            return error_response(
                error=f'Failed to queue any ideas: {len(errors)} errors, {len(skipped)} skipped',
                errors=errors if errors else skipped,
                status_code=status.HTTP_400_BAD_REQUEST,
                request=request
            )
        elif errors:
            # Partial success - some created, some failed
            return success_response(
                data={
                    'created_count': len(created_tasks),
                    'task_ids': created_tasks,
                    'errors': errors,
                    'skipped': skipped,
                },
                message=f'Queued {len(created_tasks)} ideas ({len(errors)} failed, {len(skipped)} skipped)',
                request=request
            )
        else:
            # Complete success
            return success_response(
                data={
                    'created_count': len(created_tasks),
                    'task_ids': created_tasks,
                    'skipped': skipped,
                },
                message=f'Successfully queued {len(created_tasks)} ideas to writer' + (f' ({len(skipped)} already scheduled)' if skipped else ''),
                request=request
            )

    # REMOVED: generate_idea action - idea generation function removed