Automation overview page: initial implementation complete

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-17 08:24:44 +00:00
parent 79398c908d
commit 6b1fa0c1ee
22 changed files with 3789 additions and 178 deletions

View File

@@ -8,11 +8,15 @@ from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.db.models import Count, Sum, Avg, F
from datetime import timedelta
from drf_spectacular.utils import extend_schema
from igny8_core.business.automation.models import AutomationConfig, AutomationRun
from igny8_core.business.automation.services import AutomationService
from igny8_core.auth.models import Account, Site
from igny8_core.business.planning.models import Keywords, Clusters, ContentIdeas
from igny8_core.business.content.models import Tasks, Content, Images
class AutomationViewSet(viewsets.ViewSet):
@@ -299,6 +303,293 @@ class AutomationViewSet(viewsets.ViewSet):
status=status.HTTP_404_NOT_FOUND
)
def _calculate_run_number(self, site, run):
    """Return the 1-based position of *run* among the site's runs, ordered by start time.

    Counts every run of the site that started at or before this run's
    start timestamp, so the earliest run is #1.
    """
    earlier_or_same = AutomationRun.objects.filter(
        site=site, started_at__lte=run.started_at
    )
    return earlier_or_same.count()
def _calculate_historical_averages(self, site, completed_runs):
"""Calculate historical averages from completed runs"""
if completed_runs.count() < 3:
# Not enough data, return defaults
return {
'period_days': 30,
'runs_analyzed': completed_runs.count(),
'avg_credits_stage_1': 0.2,
'avg_credits_stage_2': 2.0,
'avg_credits_stage_4': 5.0,
'avg_credits_stage_5': 2.0,
'avg_credits_stage_6': 2.0,
'avg_output_ratio_stage_1': 0.125,
'avg_output_ratio_stage_2': 8.7,
'avg_output_ratio_stage_5': 4.0,
'avg_output_ratio_stage_6': 1.0,
}
# Calculate per-stage averages
stage_1_credits = []
stage_2_credits = []
stage_4_credits = []
stage_5_credits = []
stage_6_credits = []
output_ratios_1 = []
output_ratios_2 = []
output_ratios_5 = []
output_ratios_6 = []
for run in completed_runs[:10]: # Last 10 runs
if run.stage_1_result:
processed = run.stage_1_result.get('keywords_processed', 0)
created = run.stage_1_result.get('clusters_created', 0)
credits = run.stage_1_result.get('credits_used', 0)
if processed > 0:
stage_1_credits.append(credits / processed)
if created > 0 and processed > 0:
output_ratios_1.append(created / processed)
if run.stage_2_result:
processed = run.stage_2_result.get('clusters_processed', 0)
created = run.stage_2_result.get('ideas_created', 0)
credits = run.stage_2_result.get('credits_used', 0)
if processed > 0:
stage_2_credits.append(credits / processed)
if created > 0 and processed > 0:
output_ratios_2.append(created / processed)
if run.stage_4_result:
processed = run.stage_4_result.get('tasks_processed', 0)
credits = run.stage_4_result.get('credits_used', 0)
if processed > 0:
stage_4_credits.append(credits / processed)
if run.stage_5_result:
processed = run.stage_5_result.get('content_processed', 0)
created = run.stage_5_result.get('prompts_created', 0)
credits = run.stage_5_result.get('credits_used', 0)
if processed > 0:
stage_5_credits.append(credits / processed)
if created > 0 and processed > 0:
output_ratios_5.append(created / processed)
if run.stage_6_result:
processed = run.stage_6_result.get('images_processed', 0)
created = run.stage_6_result.get('images_generated', 0)
credits = run.stage_6_result.get('credits_used', 0)
if processed > 0:
stage_6_credits.append(credits / processed)
if created > 0 and processed > 0:
output_ratios_6.append(created / processed)
def avg(lst):
return sum(lst) / len(lst) if lst else 0
return {
'period_days': 30,
'runs_analyzed': min(completed_runs.count(), 10),
'avg_credits_stage_1': round(avg(stage_1_credits), 2),
'avg_credits_stage_2': round(avg(stage_2_credits), 2),
'avg_credits_stage_4': round(avg(stage_4_credits), 2),
'avg_credits_stage_5': round(avg(stage_5_credits), 2),
'avg_credits_stage_6': round(avg(stage_6_credits), 2),
'avg_output_ratio_stage_1': round(avg(output_ratios_1), 3),
'avg_output_ratio_stage_2': round(avg(output_ratios_2), 1),
'avg_output_ratio_stage_5': round(avg(output_ratios_5), 1),
'avg_output_ratio_stage_6': round(avg(output_ratios_6), 1),
}
def _calculate_predictive_analysis(self, site, historical_averages):
    """Project credit cost and output volume for the next automation run.

    Counts the items waiting at each pipeline stage and scales them by the
    per-item credit/output averages in *historical_averages*.  Returns a
    per-stage breakdown plus credit totals, a 20% safety buffer checked
    against the account balance, and the expected output counts.
    """
    # How much work is queued at each stage right now.
    pending_keywords = Keywords.objects.filter(site=site, status='new', disabled=False).count()
    pending_clusters = Clusters.objects.filter(site=site, status='new', disabled=False).exclude(ideas__isnull=False).count()
    pending_ideas = ContentIdeas.objects.filter(site=site, status='new').count()
    pending_tasks = Tasks.objects.filter(site=site, status='queued').count()
    pending_content = Content.objects.filter(site=site, status='draft').annotate(images_count=Count('images')).filter(images_count=0).count()
    pending_images = Images.objects.filter(site=site, status='pending').count()
    pending_review = Content.objects.filter(site=site, status='review').count()

    def credits_for(pending, avg_key):
        # Truncate to whole credits, matching the original int() behavior.
        return int(pending * historical_averages[avg_key])

    def output_for(pending, ratio_key):
        ratio = historical_averages[ratio_key]
        return int(pending * ratio) if ratio > 0 else 0

    stage_1_credits = credits_for(pending_keywords, 'avg_credits_stage_1')
    stage_2_credits = credits_for(pending_clusters, 'avg_credits_stage_2')
    stage_4_credits = credits_for(pending_tasks, 'avg_credits_stage_4')
    stage_5_credits = credits_for(pending_content, 'avg_credits_stage_5')
    stage_6_credits = credits_for(pending_images, 'avg_credits_stage_6')
    total_estimated = stage_1_credits + stage_2_credits + stage_4_credits + stage_5_credits + stage_6_credits
    recommended_buffer = int(total_estimated * 1.2)  # 20% head-room

    expected_clusters = output_for(pending_keywords, 'avg_output_ratio_stage_1')
    expected_ideas = output_for(pending_clusters, 'avg_output_ratio_stage_2')
    expected_prompts = output_for(pending_content, 'avg_output_ratio_stage_5')
    expected_images = output_for(pending_images, 'avg_output_ratio_stage_6')

    # (stage, name, pending, avg credits/item, est. credits, ratio, est. output, output type)
    # Stages 3 and 7 are free bookkeeping stages with a fixed 1:1 ratio.
    stage_rows = [
        (1, 'Keywords → Clusters', pending_keywords,
         historical_averages['avg_credits_stage_1'], stage_1_credits,
         historical_averages['avg_output_ratio_stage_1'], expected_clusters, 'clusters'),
        (2, 'Clusters → Ideas', pending_clusters,
         historical_averages['avg_credits_stage_2'], stage_2_credits,
         historical_averages['avg_output_ratio_stage_2'], expected_ideas, 'ideas'),
        (3, 'Ideas → Tasks', pending_ideas, 0, 0, 1.0, pending_ideas, 'tasks'),
        (4, 'Tasks → Content', pending_tasks,
         historical_averages['avg_credits_stage_4'], stage_4_credits,
         1.0, pending_tasks, 'content'),
        (5, 'Content → Image Prompts', pending_content,
         historical_averages['avg_credits_stage_5'], stage_5_credits,
         historical_averages['avg_output_ratio_stage_5'], expected_prompts, 'prompts'),
        (6, 'Image Prompts → Images', pending_images,
         historical_averages['avg_credits_stage_6'], stage_6_credits,
         historical_averages['avg_output_ratio_stage_6'], expected_images, 'images'),
        (7, 'Review → Approved', pending_review, 0, 0, 1.0, pending_review, 'approved'),
    ]
    stages = [
        {
            'stage': number,
            'name': name,
            'pending_items': pending,
            'avg_credits_per_item': per_item,
            'estimated_credits': est_credits,
            'avg_output_ratio': ratio,
            'estimated_output': est_output,
            'output_type': output_type,
        }
        for number, name, pending, per_item, est_credits, ratio, est_output, output_type in stage_rows
    ]

    return {
        'stages': stages,
        'total_estimated_credits': total_estimated,
        'recommended_buffer': recommended_buffer,
        'current_balance': site.account.credits,
        'is_sufficient': site.account.credits >= recommended_buffer,
        'expected_outputs': {
            'clusters': expected_clusters,
            'ideas': expected_ideas,
            'content': pending_tasks,  # stage 4 maps tasks 1:1 to content
            'images': expected_images,
        }
    }
def _get_attention_items(self, site):
    """Summarize pipeline items that need manual attention (skipped or failed)."""
    counts = {
        'skipped_ideas': ContentIdeas.objects.filter(site=site, status='skipped').count(),
        'failed_content': Content.objects.filter(site=site, status='failed').count(),
        'failed_images': Images.objects.filter(site=site, status='failed').count(),
    }
    counts['total_attention_needed'] = (
        counts['skipped_ideas'] + counts['failed_content'] + counts['failed_images']
    )
    return counts
@extend_schema(tags=['Automation'])
@action(detail=False, methods=['get'])
def overview_stats(self, request):
    """
    GET /api/v1/automation/overview_stats/?site_id=123
    Get comprehensive automation statistics for overview page
    """
    site, error_response = self._get_site(request)
    if error_response:
        return error_response

    # Rolling windows used for statistics and week-over-week trends.
    thirty_days_ago = timezone.now() - timedelta(days=30)
    seven_days_ago = timezone.now() - timedelta(days=7)
    fourteen_days_ago = timezone.now() - timedelta(days=14)

    site_runs = AutomationRun.objects.filter(site=site)
    runs_30d = site_runs.filter(started_at__gte=thirty_days_ago)
    runs_7d = site_runs.filter(started_at__gte=seven_days_ago)
    runs_prev_week = site_runs.filter(started_at__gte=fourteen_days_ago, started_at__lt=seven_days_ago)
    completed_30d = runs_30d.filter(status='completed')
    failed_30d = runs_30d.filter(status='failed')

    # Mean duration / credits across completed runs in the 30-day window.
    mean_duration = completed_30d.annotate(
        duration=F('completed_at') - F('started_at')
    ).aggregate(avg=Avg('duration'))['avg']
    mean_credits = completed_30d.aggregate(avg=Avg('total_credits_used'))['avg'] or 0

    # Per-stage historical averages feed the predictive cost analysis.
    historical_averages = self._calculate_historical_averages(site, completed_30d)
    predictive_analysis = self._calculate_predictive_analysis(site, historical_averages)
    attention_items = self._get_attention_items(site)

    # Week-over-week trend in average credits per completed run (percent).
    prev_week_avg = runs_prev_week.filter(status='completed').aggregate(avg=Avg('total_credits_used'))['avg'] or 0
    credits_trend = 0
    if prev_week_avg > 0:
        this_week_avg = runs_7d.filter(status='completed').aggregate(avg=Avg('total_credits_used'))['avg'] or 0
        credits_trend = round(((this_week_avg - prev_week_avg) / prev_week_avg) * 100, 1)

    completed_count = completed_30d.count()
    recent_count = runs_30d.count()
    run_statistics = {
        'total_runs': site_runs.count(),
        'completed_runs': completed_count,
        'failed_runs': failed_30d.count(),
        'success_rate': round(completed_count / recent_count * 100, 1) if recent_count > 0 else 0,
        'avg_duration_seconds': int(mean_duration.total_seconds()) if mean_duration else 0,
        'avg_credits_per_run': round(mean_credits, 1),
        'runs_this_week': runs_7d.count(),
        'runs_last_week': runs_prev_week.count(),
        'credits_trend': credits_trend,
    }
    return Response({
        'run_statistics': run_statistics,
        'predictive_analysis': predictive_analysis,
        'attention_items': attention_items,
        'historical_averages': historical_averages,
    })
@extend_schema(tags=['Automation'])
@action(detail=False, methods=['get'])
def history(self, request):
    """
    GET /api/v1/automation/history/?site_id=123&page=1&page_size=20
    Get paginated run history for a site (newest first) with per-run
    stage statuses and summary counts.
    """
    site, error_response = self._get_site(request)
    if error_response:
        return error_response

    # Pagination params: fall back to defaults on malformed input and clamp
    # to sane bounds so a bad query string cannot raise ValueError (HTTP 500)
    # or request an unbounded page size.
    try:
        page = max(1, int(request.query_params.get('page', 1)))
    except (TypeError, ValueError):
        page = 1
    try:
        page_size = min(100, max(1, int(request.query_params.get('page_size', 20))))
    except (TypeError, ValueError):
        page_size = 20

    runs_qs = AutomationRun.objects.filter(site=site).order_by('-started_at')
    total_count = runs_qs.count()
    # Paginate
    start = (page - 1) * page_size
    runs = runs_qs[start:start + page_size]

    # Build response with enhanced data
    runs_data = []
    for run in runs:
        run_number = self._calculate_run_number(site, run)
        duration_seconds = 0
        if run.completed_at and run.started_at:
            duration_seconds = int((run.completed_at - run.started_at).total_seconds())

        # Count completed and failed stages.
        stages_completed = 0
        stages_failed = 0
        stage_statuses = []
        for stage_num in range(1, 8):
            result = getattr(run, f'stage_{stage_num}_result', None)
            if result:
                # NOTE(review): credits_used >= 0 is true for any non-negative
                # value, so a stage only counts as failed when credits_used is
                # negative — confirm that is the runner's failure convention.
                if result.get('credits_used', 0) >= 0:  # Stage ran
                    stages_completed += 1
                    stage_statuses.append('completed')
                else:
                    stages_failed += 1
                    stage_statuses.append('failed')
            else:
                if run.status == 'completed' and stage_num <= run.current_stage:
                    stage_statuses.append('skipped')
                else:
                    stage_statuses.append('pending')

        # Calculate summary stats from stage results
        items_processed = run.initial_snapshot.get('total_initial_items', 0) if run.initial_snapshot else 0
        items_created = 0
        content_created = 0
        images_generated = 0
        if run.stage_1_result:
            items_created += run.stage_1_result.get('clusters_created', 0)
        if run.stage_2_result:
            items_created += run.stage_2_result.get('ideas_created', 0)
        if run.stage_4_result:
            content_created = run.stage_4_result.get('content_created', 0)
            items_created += content_created
        if run.stage_6_result:
            images_generated = run.stage_6_result.get('images_generated', 0)
            items_created += images_generated

        runs_data.append({
            'run_id': run.run_id,
            'run_number': run_number,
            'run_title': f"{site.domain} #{run_number}",
            'status': run.status,
            'trigger_type': run.trigger_type,
            'started_at': run.started_at,
            'completed_at': run.completed_at,
            'duration_seconds': duration_seconds,
            'total_credits_used': run.total_credits_used,
            'current_stage': run.current_stage,
            'stages_completed': stages_completed,
            'stages_failed': stages_failed,
            'initial_snapshot': run.initial_snapshot or {},
            'summary': {
                'items_processed': items_processed,
                'items_created': items_created,
                'content_created': content_created,
                'images_generated': images_generated,
            },
            'stage_statuses': stage_statuses,
        })

    return Response({
        'runs': runs_data,
        'pagination': {
            'page': page,
            'page_size': page_size,
            'total_count': total_count,
            'total_pages': (total_count + page_size - 1) // page_size,
        }
    })
@extend_schema(tags=['Automation'])
@action(detail=False, methods=['get'])
def run_detail(self, request):
    """
    GET /api/v1/automation/run_detail/?run_id=abc123
    Get detailed information about a specific automation run
    """
    site, error_response = self._get_site(request)
    if error_response:
        return error_response

    run_id = request.query_params.get('run_id')
    if not run_id:
        return Response(
            {'error': 'run_id parameter is required'},
            status=status.HTTP_400_BAD_REQUEST
        )
    try:
        run = AutomationRun.objects.get(run_id=run_id, site=site)
    except AutomationRun.DoesNotExist:
        return Response(
            {'error': 'Automation run not found'},
            status=status.HTTP_404_NOT_FOUND
        )

    # Basic run info
    run_number = self._calculate_run_number(site, run)
    duration_seconds = 0
    if run.completed_at and run.started_at:
        duration_seconds = int((run.completed_at - run.started_at).total_seconds())

    # Historical averages for comparison. NOTE(review):
    # _calculate_historical_averages does NOT emit 'stages',
    # 'avg_total_credits', 'avg_duration_seconds' or 'avg_credits_per_item',
    # so those keys are read defensively with .get() defaults — the previous
    # direct indexing raised KeyError (HTTP 500) on every request.
    completed_runs = AutomationRun.objects.filter(
        site=site,
        status='completed'
    ).order_by('-completed_at')[:10]
    historical_averages = self._calculate_historical_averages(site, completed_runs)
    historical_stages = historical_averages.get('stages') or []

    # Build detailed stage analysis
    stages = []
    total_credits = 0
    total_items_processed = 0
    total_items_created = 0
    stage_names = [
        'Keyword Clustering',
        'Idea Generation',
        'Task Creation',
        'Content Writing',
        'Content SEO Optimization',
        'Image Generation',
        'Image SEO Optimization'
    ]
    for stage_num in range(1, 8):
        result = getattr(run, f'stage_{stage_num}_result', None) or {}
        credits_used = result.get('credits_used', 0)
        items_processed = result.get('items_processed', 0)
        items_created = result.get('items_created', 0)
        # Fall back to the stage-specific counter names used by the runner.
        if items_created == 0:
            items_created = result.get('clusters_created', 0)
            items_created += result.get('ideas_created', 0)
            items_created += result.get('tasks_created', 0)
            items_created += result.get('content_created', 0)
            items_created += result.get('images_generated', 0)

        stage_status = 'pending'
        if result:
            if credits_used > 0 or items_created > 0:
                stage_status = 'completed'
            elif result.get('error'):
                stage_status = 'failed'
        elif run.status == 'completed' and stage_num <= run.current_stage:
            stage_status = 'skipped'

        # Compare this stage to the site's historical per-stage averages
        # (zero when no per-stage history is available).
        historical_credits = 0
        historical_items = 0
        for hist_stage in historical_stages:
            if hist_stage.get('stage_number') == stage_num:
                historical_credits = hist_stage.get('avg_credits', 0)
                historical_items = hist_stage.get('avg_items_created', 0)
                break
        credit_variance = 0
        items_variance = 0
        if historical_credits > 0:
            credit_variance = ((credits_used - historical_credits) / historical_credits) * 100
        if historical_items > 0:
            items_variance = ((items_created - historical_items) / historical_items) * 100

        stages.append({
            'stage_number': stage_num,
            'stage_name': stage_names[stage_num - 1],
            'status': stage_status,
            'credits_used': credits_used,
            'items_processed': items_processed,
            'items_created': items_created,
            'duration_seconds': result.get('duration', 0),
            'error': result.get('error', ''),
            'comparison': {
                'historical_avg_credits': historical_credits,
                'historical_avg_items': historical_items,
                'credit_variance_pct': round(credit_variance, 1),
                'items_variance_pct': round(items_variance, 1),
            }
        })
        total_credits += credits_used
        total_items_processed += items_processed
        total_items_created += items_created

    # Calculate efficiency metrics
    efficiency = {
        'credits_per_item': round(total_credits / total_items_created, 2) if total_items_created > 0 else 0,
        'items_per_minute': round(total_items_created / (duration_seconds / 60), 2) if duration_seconds > 0 else 0,
        'credits_per_minute': round(total_credits / (duration_seconds / 60), 2) if duration_seconds > 0 else 0,
    }

    # Generate insights: large credit variances, failed stages, efficiency deltas.
    insights = []
    for stage in stages:
        comp = stage['comparison']
        if abs(comp['credit_variance_pct']) > 20:
            direction = 'higher' if comp['credit_variance_pct'] > 0 else 'lower'
            insights.append({
                'type': 'variance',
                'severity': 'warning' if abs(comp['credit_variance_pct']) > 50 else 'info',
                'message': f"{stage['stage_name']} used {abs(comp['credit_variance_pct']):.0f}% {direction} credits than average"
            })
    for stage in stages:
        if stage['status'] == 'failed':
            insights.append({
                'type': 'error',
                'severity': 'error',
                'message': f"{stage['stage_name']} failed: {stage['error']}"
            })
    avg_credits_per_item = historical_averages.get('avg_credits_per_item', 0)
    if avg_credits_per_item > 0:
        efficiency_diff = ((efficiency['credits_per_item'] - avg_credits_per_item)
                           / avg_credits_per_item) * 100
        if efficiency_diff < -10:
            insights.append({
                'type': 'success',
                'severity': 'info',
                'message': f"This run was {abs(efficiency_diff):.0f}% more credit-efficient than average"
            })
        elif efficiency_diff > 10:
            insights.append({
                'type': 'warning',
                'severity': 'warning',
                'message': f"This run used {efficiency_diff:.0f}% more credits per item than average"
            })

    return Response({
        'run': {
            'run_id': run.run_id,
            'run_number': run_number,
            'run_title': f"{site.domain} #{run_number}",
            'status': run.status,
            'trigger_type': run.trigger_type,
            'started_at': run.started_at,
            'completed_at': run.completed_at,
            'duration_seconds': duration_seconds,
            'current_stage': run.current_stage,
            'total_credits_used': total_credits,
            'initial_snapshot': run.initial_snapshot or {},
        },
        'stages': stages,
        'efficiency': efficiency,
        'insights': insights,
        'historical_comparison': {
            'avg_credits': historical_averages.get('avg_total_credits', 0),
            'avg_duration_seconds': historical_averages.get('avg_duration_seconds', 0),
            'avg_credits_per_item': avg_credits_per_item,
        }
    })
@extend_schema(tags=['Automation'])