Automation final fixes

This commit is contained in:
IGNY8 VPS (Salman)
2025-12-28 20:37:46 +00:00
parent 7f82ef4551
commit 748de099dd
8 changed files with 292 additions and 144 deletions

View File

@@ -197,6 +197,7 @@ class Igny8AdminSite(UnfoldAdminSite):
('writer', 'Tasks'),
('writer', 'Content'),
('writer', 'Images'),
('writer', 'ImagePrompts'),
],
},
'Taxonomy & Organization': {

View File

@@ -228,6 +228,18 @@ class AutomationService:
keyword_ids = list(pending_keywords.values_list('id', flat=True))
# INITIAL SAVE: Set keywords_total immediately so frontend shows accurate counts from start
self.run.stage_1_result = {
'keywords_processed': 0,
'keywords_total': len(keyword_ids),
'clusters_created': 0,
'batches_run': 0,
'credits_used': 0,
'time_elapsed': '0m 0s',
'in_progress': True
}
self.run.save(update_fields=['stage_1_result'])
for i in range(0, len(keyword_ids), actual_batch_size):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
@@ -262,18 +274,20 @@ class AutomationService:
stage_number, f"Processing batch {batch_num}/{total_batches} ({len(batch)} keywords)"
)
# Call AI function via AIEngine
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=AutoClusterFunction(),
payload={'ids': batch}
)
# Monitor task
task_id = result.get('task_id')
if task_id:
# FIXED: Pass continue_on_error=True to keep processing other batches on failure
self._wait_for_task(task_id, stage_number, f"Batch {batch_num}", continue_on_error=True)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Clustering failed for batch {batch_num}: {error_msg}")
# Continue to next batch
keywords_processed += len(batch)
batches_run += 1
@@ -451,7 +465,20 @@ class AutomationService:
clusters_processed = 0
credits_before = self._get_credits_used()
for cluster in pending_clusters:
# INITIAL SAVE: Set clusters_total immediately so frontend shows accurate counts from start
cluster_list = list(pending_clusters)
total_clusters = len(cluster_list)
self.run.stage_2_result = {
'clusters_processed': 0,
'clusters_total': total_clusters,
'ideas_created': 0,
'credits_used': 0,
'time_elapsed': '0m 0s',
'in_progress': True
}
self.run.save(update_fields=['stage_2_result'])
for cluster in cluster_list:
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
if should_stop:
@@ -485,18 +512,20 @@ class AutomationService:
stage_number, f"Generating ideas for cluster: {cluster.name}"
)
# Call AI function via AIEngine
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=GenerateIdeasFunction(),
payload={'ids': [cluster.id]}
)
# Monitor task
task_id = result.get('task_id')
if task_id:
# FIXED: Pass continue_on_error=True to keep processing other clusters on failure
self._wait_for_task(task_id, stage_number, f"Cluster '{cluster.name}'", continue_on_error=True)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Idea generation failed for cluster '{cluster.name}': {error_msg}")
# Continue to next cluster
clusters_processed += 1
@@ -797,6 +826,17 @@ class AutomationService:
task_list = list(pending_tasks)
total_tasks = len(task_list)
# INITIAL SAVE: Set tasks_total immediately so frontend shows accurate counts from start
self.run.stage_4_result = {
'tasks_processed': 0,
'tasks_total': total_tasks,
'content_created': 0,
'credits_used': 0,
'time_elapsed': '0m 0s',
'in_progress': True
}
self.run.save(update_fields=['stage_4_result'])
for idx, task in enumerate(task_list, 1):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
@@ -831,18 +871,21 @@ class AutomationService:
stage_number, f"Generating content for task {idx}/{total_tasks}: {task.title}"
)
# Call AI function via AIEngine
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=GenerateContentFunction(),
payload={'ids': [task.id]}
)
# Monitor task
task_id = result.get('task_id')
if task_id:
# FIXED: Pass continue_on_error=True to keep processing other tasks on failure
self._wait_for_task(task_id, stage_number, f"Task '{task.title}'", continue_on_error=True)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# The result contains 'task_id' which is the DB Task model ID, NOT a Celery task ID
# So we do NOT call _wait_for_task here
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Content generation failed for task '{task.title}': {error_msg}")
# Continue to next task
tasks_processed += 1
@@ -1029,6 +1072,17 @@ class AutomationService:
content_list = list(content_without_images)
total_content = len(content_list)
# INITIAL SAVE: Set content_total immediately so frontend shows accurate counts from start
self.run.stage_5_result = {
'content_processed': 0,
'content_total': total_content,
'prompts_created': 0,
'credits_used': 0,
'time_elapsed': '0m 0s',
'in_progress': True
}
self.run.save(update_fields=['stage_5_result'])
for idx, content in enumerate(content_list, 1):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()
@@ -1063,18 +1117,20 @@ class AutomationService:
stage_number, f"Extracting prompts {idx}/{total_content}: {content.title}"
)
# Call AI function via AIEngine
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
engine = AIEngine(account=self.account)
result = engine.execute(
fn=GenerateImagePromptsFunction(),
payload={'ids': [content.id]}
)
# Monitor task
task_id = result.get('task_id')
if task_id:
# FIXED: Pass continue_on_error=True to keep processing other content on failure
self._wait_for_task(task_id, stage_number, f"Content '{content.title}'", continue_on_error=True)
# NOTE: AIEngine.execute() runs synchronously and returns immediately
# No Celery task polling needed
if not result.get('success'):
error_msg = result.get('error', 'Unknown error')
logger.warning(f"[AutomationService] Image prompt generation failed for content '{content.title}': {error_msg}")
# Continue to next content
content_processed += 1
@@ -1203,6 +1259,18 @@ class AutomationService:
image_list = list(pending_images)
total_images = len(image_list)
# INITIAL SAVE: Set images_total immediately so frontend shows accurate counts from start
self.run.stage_6_result = {
'images_processed': 0,
'images_total': total_images,
'images_generated': 0,
'content_moved_to_review': 0,
'credits_used': 0,
'time_elapsed': '0m 0s',
'in_progress': True
}
self.run.save(update_fields=['stage_6_result'])
for idx, image in enumerate(image_list, 1):
# Check if automation should stop (paused or cancelled)
should_stop, reason = self._check_should_stop()

View File

@@ -675,3 +675,14 @@ class ContentAttribute(SiteSectorBaseModel):
# Backward compatibility alias
ContentAttributeMap = ContentAttribute
class ImagePrompts(Images):
    """Proxy over :class:`Images` exposing a prompt-focused admin view.

    Registering this proxy gives the admin sidebar a dedicated
    "Image Prompts" entry without creating a second database table —
    it reads and writes the same rows as ``Images``.
    """

    class Meta:
        proxy = True
        app_label = 'writer'
        verbose_name = 'Image Prompt'
        verbose_name_plural = 'Image Prompts'

View File

@@ -8,7 +8,7 @@ from unfold.contrib.filters.admin import (
ChoicesDropdownFilter,
)
from igny8_core.admin.base import SiteSectorAdminMixin, Igny8ModelAdmin
from .models import Tasks, Images, Content
from .models import Tasks, Images, Content, ImagePrompts
from igny8_core.business.content.models import ContentTaxonomy, ContentAttribute, ContentTaxonomyRelation, ContentClusterMap
from import_export.admin import ExportMixin, ImportExportMixin
from import_export import resources
@@ -294,6 +294,145 @@ class ImagesAdmin(ImportExportMixin, SiteSectorAdminMixin, Igny8ModelAdmin):
bulk_soft_delete.short_description = 'Soft delete selected images'
# ============================================================================
# Image Prompts Admin (Using Proxy Model from models.py)
# ============================================================================
class ImagePromptsResource(resources.ModelResource):
    """django-import-export resource describing the Image Prompts export.

    Columns follow related-field lookup syntax (``content__title`` etc.)
    and are exported in the order listed.
    """

    class Meta:
        model = ImagePrompts
        fields = (
            'id', 'content__title', 'site__name', 'sector__name',
            'image_type', 'prompt', 'caption', 'status', 'created_at',
        )
        # Export columns in the same order they are declared above.
        export_order = fields
@admin.register(ImagePrompts)
class ImagePromptsAdmin(ExportMixin, SiteSectorAdminMixin, Igny8ModelAdmin):
    """
    Specialized admin for viewing and managing image prompts.
    This provides a focused view of the prompt field from Images model.

    Read-only by design: add/change/delete permissions are all disabled
    below; rows are produced by the content-generation workflow, and any
    editing happens through the regular Images admin.

    NOTE(review): ``RelatedDropdownFilter``, ``RangeDateFilter`` and
    ``messages`` are referenced here but their imports are outside this
    chunk — presumably at the top of the file; verify.
    """
    resource_class = ImagePromptsResource
    # Computed columns (get_content_title / get_prompt_preview) are the
    # methods defined further down.
    list_display = ['get_content_title', 'site', 'sector', 'image_type', 'get_prompt_preview', 'status', 'created_at']
    list_filter = [
        ('image_type', ChoicesDropdownFilter),
        ('status', ChoicesDropdownFilter),
        ('site', RelatedDropdownFilter),
        ('sector', RelatedDropdownFilter),
        ('created_at', RangeDateFilter),
    ]
    search_fields = ['content__title', 'prompt', 'caption']
    ordering = ['-created_at']
    # Every displayed field is read-only; combined with the permission
    # overrides below this makes the whole admin a viewer, not an editor.
    readonly_fields = ['get_content_title', 'site', 'sector', 'image_type', 'prompt', 'caption',
                       'status', 'position', 'image_url', 'image_path', 'created_at', 'updated_at']
    actions = [
        'bulk_export_prompts',
        'bulk_copy_prompts_to_clipboard',
    ]
    fieldsets = (
        ('Content Information', {
            'fields': ('get_content_title', 'site', 'sector', 'image_type', 'position')
        }),
        ('Prompt Details', {
            'fields': ('prompt', 'caption'),
            'description': 'AI-generated prompts used for image creation'
        }),
        ('Image Information', {
            'fields': ('status', 'image_url', 'image_path'),
            'classes': ('collapse',)
        }),
        ('Timestamps', {
            'fields': ('created_at', 'updated_at'),
            'classes': ('collapse',)
        }),
    )

    def get_prompt_preview(self, obj) -> str:
        """Return the prompt truncated to 100 chars (with ellipsis), or '-' if empty."""
        if obj.prompt:
            return obj.prompt[:100] + '...' if len(obj.prompt) > 100 else obj.prompt
        return '-'
    get_prompt_preview.short_description = 'Prompt Preview'

    def get_content_title(self, obj) -> str:
        """Get content title, fallback to task title if no content.

        Tries, in order: content.title, content.meta_title, a synthetic
        "Content #<id>" label; then the same for the related task; finally '-'.
        """
        if obj.content:
            return obj.content.title or obj.content.meta_title or f"Content #{obj.content.id}"
        elif obj.task:
            return obj.task.title or f"Task #{obj.task.id}"
        return '-'
    get_content_title.short_description = 'Content'

    def get_queryset(self, request):
        """Filter to only show images that have prompts (non-null AND non-empty)."""
        qs = super().get_queryset(request)
        return qs.filter(prompt__isnull=False).exclude(prompt='')

    def has_add_permission(self, request) -> bool:
        """Image prompts are created through content generation workflow."""
        return False

    def has_change_permission(self, request, obj=None) -> bool:
        """Image prompts are read-only."""
        return False

    def has_delete_permission(self, request, obj=None) -> bool:
        """Prevent deletion from this view (use Images admin instead)."""
        return False

    def bulk_export_prompts(self, request, queryset):
        """Export selected image prompts to CSV.

        Streams a timestamped CSV attachment built with the stdlib ``csv``
        module (separate from the ExportMixin export machinery).
        """
        import csv
        from django.http import HttpResponse
        from datetime import datetime
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = f'attachment; filename="image_prompts_{datetime.now().strftime("%Y%m%d_%H%M%S")}.csv"'
        writer = csv.writer(response)
        writer.writerow(['Content Title', 'Site', 'Image Type', 'Prompt', 'Caption', 'Status', 'Created'])
        for obj in queryset:
            content_title = self.get_content_title(obj)
            site_name = obj.site.name if obj.site else '-'
            writer.writerow([
                content_title,
                site_name,
                obj.image_type,
                obj.prompt or '',
                obj.caption or '',
                obj.status,
                obj.created_at.strftime('%Y-%m-%d %H:%M:%S')
            ])
        # NOTE(review): because the response is a file download, this
        # message is queued in the session and only appears on the next
        # rendered admin page, not alongside the download itself.
        self.message_user(request, f'{queryset.count()} image prompt(s) exported to CSV.', messages.SUCCESS)
        return response
    bulk_export_prompts.short_description = 'Export prompts to CSV'

    def bulk_copy_prompts_to_clipboard(self, request, queryset):
        """Generate a text summary of prompts for copying.

        Builds a plain-text digest of the selected prompts and stashes it
        in the session under 'prompts_export'.

        NOTE(review): nothing in this chunk renders the session value, and
        the success message tells the user to "copy from the message below"
        — presumably a template elsewhere reads 'prompts_export'; confirm,
        otherwise the text is never shown.
        """
        prompts_text = []
        for obj in queryset:
            content_title = self.get_content_title(obj)
            prompts_text.append(f"--- {content_title} ({obj.image_type}) ---")
            prompts_text.append(f"Prompt: {obj.prompt or 'N/A'}")
            if obj.caption:
                prompts_text.append(f"Caption: {obj.caption}")
            prompts_text.append("")
        # Store in session for display
        request.session['prompts_export'] = '\n'.join(prompts_text)
        self.message_user(
            request,
            f'Generated text for {queryset.count()} prompt(s). Copy from the message below.',
            messages.INFO
        )
    bulk_copy_prompts_to_clipboard.short_description = 'Copy prompts as text'
class ContentResource(resources.ModelResource):
"""Resource class for importing/exporting Content"""
class Meta:

View File

@@ -1,4 +1,4 @@
# Backward compatibility aliases - models moved to business/content/
from igny8_core.business.content.models import Tasks, Content, Images
from igny8_core.business.content.models import Tasks, Content, Images, ImagePrompts
__all__ = ['Tasks', 'Content', 'Images']
__all__ = ['Tasks', 'Content', 'Images', 'ImagePrompts']

View File

@@ -1,9 +1,9 @@
/**
* Current Processing Card V2 - Simplified
* Shows real-time automation progress with animated progress bar
* Shows real-time automation progress
* Clean UI without cluttered "Currently Processing" and "Up Next" sections
*/
import React, { useEffect, useState, useRef } from 'react';
import React, { useEffect, useState } from 'react';
import { automationService, ProcessingState, AutomationRun, PipelineStage } from '../../services/automationService';
import { useToast } from '../ui/toast/ToastContainer';
import Button from '../ui/button/Button';
@@ -101,11 +101,6 @@ const CurrentProcessingCard: React.FC<CurrentProcessingCardProps> = ({
const [isResuming, setIsResuming] = useState(false);
const [isCancelling, setIsCancelling] = useState(false);
// Animated progress state - moves 1% per second until 80%, then waits for actual data
const [animatedPercent, setAnimatedPercent] = useState(0);
const lastRealPercent = useRef(0);
const animationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
const toast = useToast();
// Fetch processing state
@@ -147,44 +142,9 @@ const CurrentProcessingCard: React.FC<CurrentProcessingCardProps> = ({
const realTotal = processingState?.total_items ?? getTotalFromResult(stageResult, currentRun.current_stage) ?? (stageOverview?.pending ?? 0) + realProcessed;
const realPercent = realTotal > 0 ? Math.round((realProcessed / realTotal) * 100) : 0;
// Animated progress: moves 1% per second up to 80%, then follows real progress
useEffect(() => {
// Track when real percent changes
if (realPercent > lastRealPercent.current) {
lastRealPercent.current = realPercent;
setAnimatedPercent(realPercent);
}
// Clear existing timer
if (animationTimer.current) {
clearInterval(animationTimer.current);
}
// Only animate if running and not paused
if (currentRun.status !== 'running') {
return;
}
// Animate 1% per second up to 80% of current ceiling
const ceiling = Math.min(80, realPercent + 20); // Don't go more than 20% ahead
animationTimer.current = setInterval(() => {
setAnimatedPercent(prev => {
if (prev >= ceiling || prev >= realPercent + 10) {
return prev; // Stop animation
}
return Math.min(prev + 1, ceiling);
});
}, 1000);
return () => {
if (animationTimer.current) {
clearInterval(animationTimer.current);
}
};
}, [currentRun.status, realPercent]);
// Use the higher of animated or real percent
const displayPercent = Math.max(animatedPercent, realPercent);
// REMOVED: Animated progress that was causing confusion
// Now using real percentage directly from backend
const displayPercent = Math.min(realPercent, 100);
const isPaused = currentRun.status === 'paused';
const stageName = STAGE_NAMES[currentRun.current_stage] || `Stage ${currentRun.current_stage}`;

View File

@@ -58,11 +58,6 @@ const GlobalProgressBar: React.FC<GlobalProgressBarProps> = ({
stages,
initialSnapshot,
}) => {
// Animated progress state - moves 1% every 10 seconds
const [animatedPercent, setAnimatedPercent] = React.useState(0);
const lastRealPercent = React.useRef(0);
const animationTimer = React.useRef<ReturnType<typeof setInterval> | null>(null);
// Don't render if no run or run is completed with 100%
if (!currentRun) {
return null;
@@ -106,41 +101,9 @@ const GlobalProgressBar: React.FC<GlobalProgressBarProps> = ({
const { percentage: realPercent, completed, total } = calculateGlobalProgress();
// Animated progress: moves 1% per 10 seconds, within current stage bounds
React.useEffect(() => {
if (realPercent > lastRealPercent.current) {
lastRealPercent.current = realPercent;
setAnimatedPercent(realPercent);
}
if (animationTimer.current) {
clearInterval(animationTimer.current);
}
if (currentRun.status !== 'running') {
return;
}
// Calculate ceiling based on current stage (each stage is ~14% of total)
const stageCeiling = Math.min(currentRun.current_stage * 14, realPercent + 10);
animationTimer.current = setInterval(() => {
setAnimatedPercent(prev => {
if (prev >= stageCeiling || prev >= realPercent + 5) {
return prev;
}
return Math.min(prev + 1, stageCeiling);
});
}, 10000); // 1% every 10 seconds
return () => {
if (animationTimer.current) {
clearInterval(animationTimer.current);
}
};
}, [currentRun.status, realPercent, currentRun.current_stage]);
const percentage = Math.max(animatedPercent, realPercent);
// REMOVED: Animated progress that was causing confusion
// Now using real percentage directly from backend
const percentage = realPercent;
// Hide if completed and at 100%
if (currentRun.status === 'completed' && percentage >= 100) {

View File

@@ -754,14 +754,15 @@ const AutomationPage: React.FC = () => {
};
const resultTotal = result?.[totalKeyMap[stage.number]] ?? 0;
// pending = items still waiting to be processed (real-time from DB)
const pending = stage.pending ?? 0;
// For total: prioritize result total (set at stage start), then fallback to DB pending + processed
const dbPending = stage.pending ?? 0;
const total = resultTotal > 0 ? resultTotal : (isActive || isComplete ? dbPending + processed : dbPending);
// For active/completed stages: use result total if available, else pending + processed
// For pending stages: just show current pending count
const total = isActive || isComplete
? (resultTotal > 0 ? resultTotal : pending + processed)
: pending;
// FIXED: For active stages, "Pending" = items remaining = total - processed
// For inactive stages, "Pending" = items ready in queue (from DB)
const pending = isActive || isComplete
? Math.max(0, total - processed)
: dbPending;
const progressPercent = total > 0 ? Math.min(Math.round((processed / total) * 100), 100) : 0;
@@ -866,10 +867,15 @@ const AutomationPage: React.FC = () => {
6: 'images_total',
};
const resultTotal = result?.[totalKeyMap[stage.number]] ?? 0;
const pending = stage.pending ?? 0;
const total = isActive || isComplete
? (resultTotal > 0 ? resultTotal : pending + processed)
: pending;
// For total: prioritize result total (set at stage start), then fallback to DB pending + processed
const dbPending = stage.pending ?? 0;
const total = resultTotal > 0 ? resultTotal : (isActive || isComplete ? dbPending + processed : dbPending);
// FIXED: For active stages, "Pending" = items remaining = total - processed
const pending = isActive || isComplete
? Math.max(0, total - processed)
: dbPending;
const progressPercent = total > 0 ? Math.min(Math.round((processed / total) * 100), 100) : 0;