Automation final fixes
This commit is contained in:
@@ -225,9 +225,21 @@ class AutomationService:
|
||||
clusters_created = 0
|
||||
batches_run = 0
|
||||
credits_before = self._get_credits_used()
|
||||
|
||||
|
||||
keyword_ids = list(pending_keywords.values_list('id', flat=True))
|
||||
|
||||
|
||||
# INITIAL SAVE: Set keywords_total immediately so frontend shows accurate counts from start
|
||||
self.run.stage_1_result = {
|
||||
'keywords_processed': 0,
|
||||
'keywords_total': len(keyword_ids),
|
||||
'clusters_created': 0,
|
||||
'batches_run': 0,
|
||||
'credits_used': 0,
|
||||
'time_elapsed': '0m 0s',
|
||||
'in_progress': True
|
||||
}
|
||||
self.run.save(update_fields=['stage_1_result'])
|
||||
|
||||
for i in range(0, len(keyword_ids), actual_batch_size):
|
||||
# Check if automation should stop (paused or cancelled)
|
||||
should_stop, reason = self._check_should_stop()
|
||||
@@ -262,19 +274,21 @@ class AutomationService:
|
||||
stage_number, f"Processing batch {batch_num}/{total_batches} ({len(batch)} keywords)"
|
||||
)
|
||||
|
||||
# Call AI function via AIEngine
|
||||
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
|
||||
engine = AIEngine(account=self.account)
|
||||
result = engine.execute(
|
||||
fn=AutoClusterFunction(),
|
||||
payload={'ids': batch}
|
||||
)
|
||||
|
||||
# Monitor task
|
||||
task_id = result.get('task_id')
|
||||
if task_id:
|
||||
# FIXED: Pass continue_on_error=True to keep processing other batches on failure
|
||||
self._wait_for_task(task_id, stage_number, f"Batch {batch_num}", continue_on_error=True)
|
||||
|
||||
|
||||
# NOTE: AIEngine.execute() runs synchronously and returns immediately
|
||||
# No Celery task polling needed
|
||||
|
||||
if not result.get('success'):
|
||||
error_msg = result.get('error', 'Unknown error')
|
||||
logger.warning(f"[AutomationService] Clustering failed for batch {batch_num}: {error_msg}")
|
||||
# Continue to next batch
|
||||
|
||||
keywords_processed += len(batch)
|
||||
batches_run += 1
|
||||
|
||||
@@ -450,8 +464,21 @@ class AutomationService:
|
||||
# Process one at a time
|
||||
clusters_processed = 0
|
||||
credits_before = self._get_credits_used()
|
||||
|
||||
for cluster in pending_clusters:
|
||||
|
||||
# INITIAL SAVE: Set clusters_total immediately so frontend shows accurate counts from start
|
||||
cluster_list = list(pending_clusters)
|
||||
total_clusters = len(cluster_list)
|
||||
self.run.stage_2_result = {
|
||||
'clusters_processed': 0,
|
||||
'clusters_total': total_clusters,
|
||||
'ideas_created': 0,
|
||||
'credits_used': 0,
|
||||
'time_elapsed': '0m 0s',
|
||||
'in_progress': True
|
||||
}
|
||||
self.run.save(update_fields=['stage_2_result'])
|
||||
|
||||
for cluster in cluster_list:
|
||||
# Check if automation should stop (paused or cancelled)
|
||||
should_stop, reason = self._check_should_stop()
|
||||
if should_stop:
|
||||
@@ -485,19 +512,21 @@ class AutomationService:
|
||||
stage_number, f"Generating ideas for cluster: {cluster.name}"
|
||||
)
|
||||
|
||||
# Call AI function via AIEngine
|
||||
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
|
||||
engine = AIEngine(account=self.account)
|
||||
result = engine.execute(
|
||||
fn=GenerateIdeasFunction(),
|
||||
payload={'ids': [cluster.id]}
|
||||
)
|
||||
|
||||
# Monitor task
|
||||
task_id = result.get('task_id')
|
||||
if task_id:
|
||||
# FIXED: Pass continue_on_error=True to keep processing other clusters on failure
|
||||
self._wait_for_task(task_id, stage_number, f"Cluster '{cluster.name}'", continue_on_error=True)
|
||||
|
||||
|
||||
# NOTE: AIEngine.execute() runs synchronously and returns immediately
|
||||
# No Celery task polling needed
|
||||
|
||||
if not result.get('success'):
|
||||
error_msg = result.get('error', 'Unknown error')
|
||||
logger.warning(f"[AutomationService] Idea generation failed for cluster '{cluster.name}': {error_msg}")
|
||||
# Continue to next cluster
|
||||
|
||||
clusters_processed += 1
|
||||
|
||||
self.logger.log_stage_progress(
|
||||
@@ -792,11 +821,22 @@ class AutomationService:
|
||||
# Process one at a time
|
||||
tasks_processed = 0
|
||||
credits_before = self._get_credits_used()
|
||||
|
||||
|
||||
# FIXED: Ensure ALL tasks are processed by iterating over queryset list
|
||||
task_list = list(pending_tasks)
|
||||
total_tasks = len(task_list)
|
||||
|
||||
|
||||
# INITIAL SAVE: Set tasks_total immediately so frontend shows accurate counts from start
|
||||
self.run.stage_4_result = {
|
||||
'tasks_processed': 0,
|
||||
'tasks_total': total_tasks,
|
||||
'content_created': 0,
|
||||
'credits_used': 0,
|
||||
'time_elapsed': '0m 0s',
|
||||
'in_progress': True
|
||||
}
|
||||
self.run.save(update_fields=['stage_4_result'])
|
||||
|
||||
for idx, task in enumerate(task_list, 1):
|
||||
# Check if automation should stop (paused or cancelled)
|
||||
should_stop, reason = self._check_should_stop()
|
||||
@@ -831,19 +871,22 @@ class AutomationService:
|
||||
stage_number, f"Generating content for task {idx}/{total_tasks}: {task.title}"
|
||||
)
|
||||
|
||||
# Call AI function via AIEngine
|
||||
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
|
||||
engine = AIEngine(account=self.account)
|
||||
result = engine.execute(
|
||||
fn=GenerateContentFunction(),
|
||||
payload={'ids': [task.id]}
|
||||
)
|
||||
|
||||
# Monitor task
|
||||
task_id = result.get('task_id')
|
||||
if task_id:
|
||||
# FIXED: Pass continue_on_error=True to keep processing other tasks on failure
|
||||
self._wait_for_task(task_id, stage_number, f"Task '{task.title}'", continue_on_error=True)
|
||||
|
||||
|
||||
# NOTE: AIEngine.execute() runs synchronously and returns immediately
|
||||
# The result contains 'task_id' which is the DB Task model ID, NOT a Celery task ID
|
||||
# So we do NOT call _wait_for_task here
|
||||
|
||||
if not result.get('success'):
|
||||
error_msg = result.get('error', 'Unknown error')
|
||||
logger.warning(f"[AutomationService] Content generation failed for task '{task.title}': {error_msg}")
|
||||
# Continue to next task
|
||||
|
||||
tasks_processed += 1
|
||||
|
||||
# Log progress
|
||||
@@ -1025,10 +1068,21 @@ class AutomationService:
|
||||
# Process one at a time
|
||||
content_processed = 0
|
||||
credits_before = self._get_credits_used()
|
||||
|
||||
|
||||
content_list = list(content_without_images)
|
||||
total_content = len(content_list)
|
||||
|
||||
|
||||
# INITIAL SAVE: Set content_total immediately so frontend shows accurate counts from start
|
||||
self.run.stage_5_result = {
|
||||
'content_processed': 0,
|
||||
'content_total': total_content,
|
||||
'prompts_created': 0,
|
||||
'credits_used': 0,
|
||||
'time_elapsed': '0m 0s',
|
||||
'in_progress': True
|
||||
}
|
||||
self.run.save(update_fields=['stage_5_result'])
|
||||
|
||||
for idx, content in enumerate(content_list, 1):
|
||||
# Check if automation should stop (paused or cancelled)
|
||||
should_stop, reason = self._check_should_stop()
|
||||
@@ -1063,19 +1117,21 @@ class AutomationService:
|
||||
stage_number, f"Extracting prompts {idx}/{total_content}: {content.title}"
|
||||
)
|
||||
|
||||
# Call AI function via AIEngine
|
||||
# Call AI function via AIEngine (runs synchronously - no Celery subtask)
|
||||
engine = AIEngine(account=self.account)
|
||||
result = engine.execute(
|
||||
fn=GenerateImagePromptsFunction(),
|
||||
payload={'ids': [content.id]}
|
||||
)
|
||||
|
||||
# Monitor task
|
||||
task_id = result.get('task_id')
|
||||
if task_id:
|
||||
# FIXED: Pass continue_on_error=True to keep processing other content on failure
|
||||
self._wait_for_task(task_id, stage_number, f"Content '{content.title}'", continue_on_error=True)
|
||||
|
||||
|
||||
# NOTE: AIEngine.execute() runs synchronously and returns immediately
|
||||
# No Celery task polling needed
|
||||
|
||||
if not result.get('success'):
|
||||
error_msg = result.get('error', 'Unknown error')
|
||||
logger.warning(f"[AutomationService] Image prompt generation failed for content '{content.title}': {error_msg}")
|
||||
# Continue to next content
|
||||
|
||||
content_processed += 1
|
||||
|
||||
self.logger.log_stage_progress(
|
||||
@@ -1199,10 +1255,22 @@ class AutomationService:
|
||||
# Process one at a time
|
||||
images_processed = 0
|
||||
credits_before = self._get_credits_used()
|
||||
|
||||
|
||||
image_list = list(pending_images)
|
||||
total_images = len(image_list)
|
||||
|
||||
|
||||
# INITIAL SAVE: Set images_total immediately so frontend shows accurate counts from start
|
||||
self.run.stage_6_result = {
|
||||
'images_processed': 0,
|
||||
'images_total': total_images,
|
||||
'images_generated': 0,
|
||||
'content_moved_to_review': 0,
|
||||
'credits_used': 0,
|
||||
'time_elapsed': '0m 0s',
|
||||
'in_progress': True
|
||||
}
|
||||
self.run.save(update_fields=['stage_6_result'])
|
||||
|
||||
for idx, image in enumerate(image_list, 1):
|
||||
# Check if automation should stop (paused or cancelled)
|
||||
should_stop, reason = self._check_should_stop()
|
||||
|
||||
@@ -675,3 +675,14 @@ class ContentAttribute(SiteSectorBaseModel):
|
||||
|
||||
# Backward compatibility alias: ContentAttribute was previously named
# ContentAttributeMap; keep the old name importable so existing callers
# (and migrations referencing the old symbol) continue to work.
ContentAttributeMap = ContentAttribute
|
||||
|
||||
class ImagePrompts(Images):
    """
    Proxy model for Images to provide a separate admin interface focused on prompts.

    This allows a dedicated "Image Prompts" view in the admin sidebar.

    Because ``proxy = True``, no new database table is created: this model
    reads and writes the same rows as ``Images`` and differs only in
    Python-level metadata (admin labels below).
    """

    class Meta:
        proxy = True
        # Human-readable labels shown in the Django admin sidebar.
        verbose_name = 'Image Prompt'
        verbose_name_plural = 'Image Prompts'
        # Registered under the 'writer' app rather than the app that
        # defines the concrete Images model.
        app_label = 'writer'
|
||||
Reference in New Issue
Block a user