Add Image Generation from Prompts: Implement new functionality to generate images from prompts, including backend processing, API integration, and frontend handling with progress modal. Update settings and registry for new AI function.

This commit is contained in:
IGNY8 VPS (Salman)
2025-11-11 20:49:11 +00:00
parent 5f11da03e4
commit 5638ea78df
11 changed files with 1511 additions and 129 deletions

View File

@@ -81,6 +81,12 @@ class AIEngine:
total_images = 1 + max_images
return f"Mapping Content for {total_images} Image Prompts"
return f"Mapping Content for Image Prompts"
elif function_name == 'generate_images_from_prompts':
# Extract image count from data
if isinstance(data, dict) and 'images' in data:
total_images = len(data.get('images', []))
return f"Preparing to generate {total_images} image{'s' if total_images != 1 else ''}"
return f"Preparing image generation queue"
return f"Preparing {count} item{'s' if count != 1 else ''}"
def _get_ai_call_message(self, function_name: str, count: int) -> str:
@@ -93,6 +99,8 @@ class AIEngine:
return f"Writing article{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images':
return f"Creating image{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images_from_prompts':
return f"Generating images with AI"
return f"Processing with AI"
def _get_parse_message(self, function_name: str) -> str:
@@ -123,6 +131,8 @@ class AIEngine:
if in_article_count > 0:
return f"Writing {in_article_count} Inarticle Image Prompts"
return "Writing Inarticle Image Prompts"
elif function_name == 'generate_images_from_prompts':
return f"{count} image{'s' if count != 1 else ''} generated"
return f"{count} item{'s' if count != 1 else ''} processed"
def _get_save_message(self, function_name: str, count: int) -> str:
@@ -138,6 +148,8 @@ class AIEngine:
elif function_name == 'generate_image_prompts':
# Count is total prompts created
return f"Assigning {count} Prompts to Dedicated Slots"
elif function_name == 'generate_images_from_prompts':
return f"Saving {count} image{'s' if count != 1 else ''}"
return f"Saving {count} item{'s' if count != 1 else ''}"
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
@@ -196,131 +208,144 @@ class AIEngine:
prep_message = self._get_prep_message(function_name, data_count, data)
self.console_tracker.prep(prep_message)
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
# For image generation, build_prompt returns placeholder
# Actual processing happens in save_output
if function_name == 'generate_images_from_prompts':
prompt = "Image generation queue prepared"
else:
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
self.step_tracker.add_request_step("PREP", "success", prep_message)
self.tracker.update("PREP", 25, prep_message, meta=self.step_tracker.get_meta())
# Phase 3: AI_CALL - Provider API Call (25-70%)
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
# For image generation, AI calls happen in save_output, so skip this phase
if function_name == 'generate_images_from_prompts':
# Skip AI_CALL phase - processing happens in save_output
raw_response = {'content': 'Image generation queue ready'}
parsed = {'processed': True}
else:
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
# Phase 5: SAVE - Database Operations (85-98%)
# Pass step_tracker to save_output so it can add validation steps

View File

@@ -6,6 +6,7 @@ from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction
from igny8_core.ai.functions.generate_content import GenerateContentFunction
from igny8_core.ai.functions.generate_images import GenerateImagesFunction, generate_images_core
from igny8_core.ai.functions.generate_image_prompts import GenerateImagePromptsFunction
from igny8_core.ai.functions.generate_images_from_prompts import GenerateImagesFromPromptsFunction
__all__ = [
'AutoClusterFunction',
@@ -14,4 +15,5 @@ __all__ = [
'GenerateImagesFunction',
'generate_images_core',
'GenerateImagePromptsFunction',
'GenerateImagesFromPromptsFunction',
]

View File

@@ -0,0 +1,311 @@
"""
Generate Images from Prompts AI Function
Generates actual images from existing image prompts using AI
"""
import logging
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.writer.models import Images, Content
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import validate_ids
from igny8_core.ai.prompts import PromptRegistry
logger = logging.getLogger(__name__)
class GenerateImagesFromPromptsFunction(BaseAIFunction):
    """Generate actual images from prompts stored on Images records.

    Unlike the text-generation functions, the provider calls happen inside
    save_output(): images are generated one at a time (one AI call per
    image) and each Images row is updated with its URL and status as soon
    as its result is known. build_prompt()/parse_response() are therefore
    placeholders consumed by the engine's standard phase flow.
    """

    def get_name(self) -> str:
        return 'generate_images_from_prompts'

    def get_metadata(self) -> Dict:
        return {
            'display_name': 'Generate Images from Prompts',
            'description': 'Generate actual images from existing image prompts',
            'phases': {
                'INIT': 'Validating image prompts...',
                'PREP': 'Preparing image generation queue...',
                'AI_CALL': 'Generating images with AI...',
                'PARSE': 'Processing image URLs...',
                'SAVE': 'Saving image URLs...',
                'DONE': 'Images generated!'
            }
        }

    def get_max_items(self) -> int:
        # Upper bound on images per batch (enforced via validate_ids).
        return 100  # Max images per batch

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate that the image IDs exist, have prompts, and are pending.

        Returns:
            {'valid': True} on success, or {'valid': False, 'error': str}.
        """
        result = validate_ids(payload, max_items=self.get_max_items())
        if not result['valid']:
            return result

        image_ids = payload.get('ids', [])
        if image_ids:
            queryset = Images.objects.filter(id__in=image_ids)
            if account:
                # Scope to the caller's account so foreign IDs fail validation.
                queryset = queryset.filter(account=account)
            images = list(queryset.select_related('content', 'task'))

            if not images:
                return {
                    'valid': False,
                    'error': 'No images found with provided IDs'
                }

            # Every image must already carry a prompt (produced earlier by
            # generate_image_prompts) before we can generate from it.
            images_without_prompts = [img.id for img in images if not img.prompt or not img.prompt.strip()]
            if images_without_prompts:
                return {
                    'valid': False,
                    'error': f'Images {images_without_prompts} do not have prompts'
                }

            # Only 'pending' images are eligible for generation.
            images_not_pending = [img.id for img in images if img.status != 'pending']
            if images_not_pending:
                return {
                    'valid': False,
                    'error': f'Images {images_not_pending} are not in pending status'
                }

        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load pending images plus provider/model settings and prompt templates.

        Raises:
            ValueError: if no pending images match the provided IDs.
        """
        image_ids = payload.get('ids', [])

        queryset = Images.objects.filter(id__in=image_ids, status='pending')
        if account:
            queryset = queryset.filter(account=account)
        images = list(queryset.select_related('content', 'task', 'account', 'site', 'sector'))

        if not images:
            raise ValueError("No pending images found with prompts")

        # Read image-generation settings from IntegrationSettings.
        # Use filter().first() — consistent with AIEngine — so duplicate
        # active rows do not raise MultipleObjectsReturned.
        image_settings = {}
        if account:
            try:
                from igny8_core.modules.system.models import IntegrationSettings
                integration = IntegrationSettings.objects.filter(
                    account=account,
                    integration_type='image_generation',
                    is_active=True
                ).first()
                if integration:
                    image_settings = integration.config or {}
            except Exception as e:
                # Best effort: fall back to the defaults below.
                logger.warning(f"Failed to load image generation settings: {e}")

        # Extract settings with defaults; 'service' is a legacy key alias.
        provider = image_settings.get('provider') or image_settings.get('service', 'openai')

        if provider == 'runware':
            model = image_settings.get('model') or image_settings.get('runwareModel', 'runware:97@1')
        else:
            model = image_settings.get('model', 'dall-e-3')

        # Prompt templates come from the central registry.
        image_prompt_template = PromptRegistry.get_image_prompt_template(account)
        negative_prompt = PromptRegistry.get_negative_prompt(account)

        return {
            'images': images,
            'account': account,
            'provider': provider,
            'model': model,
            'image_type': image_settings.get('image_type', 'realistic'),
            'image_format': image_settings.get('image_format', 'webp'),
            'image_prompt_template': image_prompt_template,
            'negative_prompt': negative_prompt,
        }

    def build_prompt(self, data: Dict, account=None) -> str:
        """Return a placeholder; real per-image prompts are built in save_output()."""
        return "Image generation queue prepared"

    def parse_response(self, response: str, step_tracker=None) -> Dict:
        """Return a placeholder; image results are handled per-image in save_output()."""
        return {'processed': True}

    def _mark_failed(self, image) -> None:
        """Persist a 'failed' status on a single Images record."""
        with transaction.atomic():
            image.status = 'failed'
            image.save(update_fields=['status', 'updated_at'])

    def _report_parse_error(self, progress_tracker, step_tracker, index, total_images, message) -> None:
        """Surface a per-image failure in the PARSE phase of the progress modal."""
        if progress_tracker and step_tracker:
            step_tracker.add_response_step("PARSE", "error", message)
            progress_pct = 70 + int((index - 1) / total_images * 15)  # 70-85% for PARSE
            progress_tracker.update("PARSE", progress_pct, message, meta=step_tracker.get_meta())

    def save_output(
        self,
        parsed: Dict,
        original_data: Dict,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Generate all queued images sequentially and persist their results.

        Each image gets its own PREP/AI_CALL/PARSE/SAVE progress updates.
        A failure on one image marks that row 'failed' and continues with
        the rest of the batch.

        Returns:
            Summary dict with 'count', 'images_generated', 'images_failed',
            'total_images', and 'errors' (None when everything succeeded).

        Raises:
            ValueError: if original_data contains no images.
        """
        images = original_data.get('images', [])
        if not images:
            raise ValueError("No images to process")

        provider = original_data.get('provider', 'openai')
        model = original_data.get('model', 'dall-e-3')
        image_type = original_data.get('image_type', 'realistic')
        image_prompt_template = original_data.get('image_prompt_template', '')
        negative_prompt = original_data.get('negative_prompt', '')

        ai_core = AICore(account=account or original_data.get('account'))

        total_images = len(images)
        images_generated = 0
        images_failed = 0
        errors = []

        # Process sequentially: one provider call per image keeps us under
        # rate limits and makes per-image progress reporting accurate.
        for index, image in enumerate(images, 1):
            try:
                # Resolve a title for the template: prefer linked content,
                # fall back to the task, then a generic label.
                content = image.content
                if not content:
                    if image.task:
                        content_title = image.task.title
                    else:
                        content_title = "Content"
                else:
                    content_title = content.title or content.meta_title or "Content"

                # Format the prompt through the account's template if set.
                if image_prompt_template:
                    try:
                        formatted_prompt = image_prompt_template.format(
                            post_title=content_title,
                            image_prompt=image.prompt,
                            image_type=image_type
                        )
                    except KeyError as e:
                        # Template referenced an unknown placeholder.
                        logger.warning(f"Template formatting error: {e}, using simple format")
                        formatted_prompt = f"Create a high-quality {image_type} image: {image.prompt}"
                else:
                    # Fallback template
                    formatted_prompt = f"Create a high-quality {image_type} image: {image.prompt}"

                # Progress: PREP phase for this image (10-25%).
                if progress_tracker and step_tracker:
                    prep_msg = f"Generating image {index} of {total_images}: {image.image_type}"
                    step_tracker.add_request_step("PREP", "success", prep_msg)
                    progress_pct = 10 + int((index - 1) / total_images * 15)
                    progress_tracker.update("PREP", progress_pct, prep_msg, meta=step_tracker.get_meta())

                # Progress: AI_CALL phase for this image (25-70%).
                if progress_tracker and step_tracker:
                    ai_msg = f"Generating {image.image_type} image {index} of {total_images} with AI"
                    step_tracker.add_response_step("AI_CALL", "success", ai_msg)
                    progress_pct = 25 + int((index - 1) / total_images * 45)
                    progress_tracker.update("AI_CALL", progress_pct, ai_msg, meta=step_tracker.get_meta())

                result = ai_core.generate_image(
                    prompt=formatted_prompt,
                    provider=provider,
                    model=model,
                    size='1024x1024',
                    # Only Runware supports negative prompts; OpenAI does not.
                    negative_prompt=negative_prompt if provider == 'runware' else None,
                    function_name='generate_images_from_prompts'
                )

                if result.get('error'):
                    self._mark_failed(image)
                    error_msg = f"Image {index} failed: {result['error']}"
                    errors.append(error_msg)
                    images_failed += 1
                    logger.error(f"Image generation failed for image {image.id}: {result['error']}")
                    self._report_parse_error(progress_tracker, step_tracker, index, total_images, error_msg)
                    continue

                image_url = result.get('url')
                if not image_url:
                    self._mark_failed(image)
                    error_msg = f"Image {index} failed: No URL returned"
                    errors.append(error_msg)
                    images_failed += 1
                    logger.error(f"No image URL returned for image {image.id}")
                    self._report_parse_error(progress_tracker, step_tracker, index, total_images, error_msg)
                    continue

                # Progress: PARSE phase (70-85%).
                if progress_tracker and step_tracker:
                    parse_msg = f"Image {index} of {total_images} generated successfully"
                    step_tracker.add_response_step("PARSE", "success", parse_msg)
                    progress_pct = 70 + int((index - 1) / total_images * 15)
                    progress_tracker.update("PARSE", progress_pct, parse_msg, meta=step_tracker.get_meta())

                # Persist the URL and mark the record generated.
                with transaction.atomic():
                    image.image_url = image_url
                    image.status = 'generated'
                    image.save(update_fields=['image_url', 'status', 'updated_at'])

                images_generated += 1
                logger.info(f"Image {image.id} ({image.image_type}) generated successfully: {image_url}")

                # Progress: SAVE phase (85-98%).
                if progress_tracker and step_tracker:
                    save_msg = f"Saved image {index} of {total_images}"
                    step_tracker.add_request_step("SAVE", "success", save_msg)
                    progress_pct = 85 + int((index - 1) / total_images * 13)
                    progress_tracker.update("SAVE", progress_pct, save_msg, meta=step_tracker.get_meta())

            except Exception as e:
                # Unexpected per-image error: mark failed and keep going so
                # one bad image does not abort the whole batch.
                self._mark_failed(image)
                error_msg = f"Image {index} failed: {str(e)}"
                errors.append(error_msg)
                images_failed += 1
                logger.error(f"Exception generating image {image.id}: {str(e)}", exc_info=True)
                continue

        # Final progress update
        if progress_tracker and step_tracker:
            final_msg = f"Generated {images_generated} of {total_images} images"
            step_tracker.add_request_step("SAVE", "success", final_msg)
            progress_tracker.update("SAVE", 98, final_msg, meta=step_tracker.get_meta())

        return {
            'count': images_generated,
            'images_generated': images_generated,
            'images_failed': images_failed,
            'total_images': total_images,
            'errors': errors if errors else None
        }

View File

@@ -94,9 +94,15 @@ def _load_generate_image_prompts():
from igny8_core.ai.functions.generate_image_prompts import GenerateImagePromptsFunction
return GenerateImagePromptsFunction
def _load_generate_images_from_prompts():
"""Lazy loader for generate_images_from_prompts function"""
from igny8_core.ai.functions.generate_images_from_prompts import GenerateImagesFromPromptsFunction
return GenerateImagesFromPromptsFunction
register_lazy_function('auto_cluster', _load_auto_cluster)
register_lazy_function('generate_ideas', _load_generate_ideas)
register_lazy_function('generate_content', _load_generate_content)
register_lazy_function('generate_images', _load_generate_images)
register_lazy_function('generate_image_prompts', _load_generate_image_prompts)
register_lazy_function('generate_images_from_prompts', _load_generate_images_from_prompts)

View File

@@ -40,6 +40,12 @@ MODEL_CONFIG = {
"temperature": 0.7,
"response_format": {"type": "json_object"},
},
"generate_images_from_prompts": {
"model": "dall-e-3", # Default, overridden by IntegrationSettings
"max_tokens": None, # Not used for images
"temperature": None, # Not used for images
"response_format": None, # Not used for images
},
}
# Function name aliases (for backward compatibility)

View File

@@ -513,6 +513,60 @@ class ImagesViewSet(SiteSectorModelViewSet):
'count': len(grouped_data),
'results': grouped_data
}, status=status.HTTP_200_OK)
@action(detail=False, methods=['post'], url_path='generate_images', url_name='generate_images')
def generate_images(self, request):
"""Generate images from prompts for image records"""
from igny8_core.ai.tasks import run_ai_task
account = getattr(request, 'account', None)
ids = request.data.get('ids', [])
if not ids:
return Response({
'error': 'No IDs provided',
'type': 'ValidationError'
}, status=status.HTTP_400_BAD_REQUEST)
account_id = account.id if account else None
# Queue Celery task
try:
if hasattr(run_ai_task, 'delay'):
task = run_ai_task.delay(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Image generation started'
}, status=status.HTTP_200_OK)
else:
# Fallback to synchronous execution
result = run_ai_task(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
'images_generated': result.get('count', 0),
'images_failed': result.get('images_failed', 0),
'message': 'Images generated successfully'
}, status=status.HTTP_200_OK)
else:
return Response({
'error': result.get('error', 'Image generation failed'),
'type': 'TaskExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
return Response({
'error': str(e),
'type': 'ExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentViewSet(SiteSectorModelViewSet):

View File

@@ -0,0 +1,841 @@
# Image Generation Implementation Plan
## Complete Plan for Generating Images from Prompts
**Date:** 2025-01-XX
**Scope:** Implement image generation AI function following existing AI framework patterns
---
## Table of Contents
1. [System Understanding](#1-system-understanding)
2. [Architecture Overview](#2-architecture-overview)
3. [Implementation Plan](#3-implementation-plan)
4. [Technical Details](#4-technical-details)
5. [Frontend Integration](#5-frontend-integration)
6. [Testing Strategy](#6-testing-strategy)
---
## 1. System Understanding
### 1.1 Current AI Framework Architecture
The system uses a unified AI framework with the following components:
**Core Flow:**
```
Frontend API Call
    ↓
views.py (@action endpoint)
    ↓
run_ai_task (ai/tasks.py) - Unified Celery task entrypoint
    ↓
AIEngine (ai/engine.py) - Orchestrator (6 phases: INIT, PREP, AI_CALL, PARSE, SAVE, DONE)
    ↓
BaseAIFunction implementation
    ↓
AICore (ai/ai_core.py) - Centralized AI request handler
    ↓
AI Provider (OpenAI/Runware)
**Existing AI Functions:**
1. **AutoClusterFunction** (`auto_cluster.py`) - Groups keywords into clusters
2. **GenerateIdeasFunction** (`generate_ideas.py`) - Generates content ideas from clusters
3. **GenerateContentFunction** (`generate_content.py`) - Generates article content from ideas
4. **GenerateImagePromptsFunction** (`generate_image_prompts.py`) - Extracts image prompts from content
**Key Components:**
- **BaseAIFunction** - Abstract base class with methods: `get_name()`, `validate()`, `prepare()`, `build_prompt()`, `parse_response()`, `save_output()`
- **AIEngine** - Manages lifecycle, progress tracking, cost tracking, error handling
- **PromptRegistry** - Centralized prompt management with hierarchy (task → DB → default)
- **AICore** - Handles API calls to OpenAI/Runware for both text and image generation
- **IntegrationSettings** - Stores account-specific configurations (models, API keys, image settings)
### 1.2 Image Generation System (WordPress Plugin Reference)
**Key Learnings from WP Plugin:**
1. **Queue-Based Processing:**
- Images are processed sequentially in a queue
- Each image has its own progress bar (0-50% in 7s, 50-75% in 5s, 75-95% incrementally)
- Progress modal shows all images being processed with individual status
2. **Image Types:**
- Featured image (1 per content)
- In-article images (configurable: 1-5 per content)
- Desktop images (if enabled)
- Mobile images (if enabled)
3. **Settings from IntegrationSettings:**
- `provider`: 'openai' or 'runware'
- `model`: Model name (e.g., 'dall-e-3', 'runware:97@1')
- `image_type`: 'realistic', 'artistic', 'cartoon'
- `max_in_article_images`: 1-5
- `image_format`: 'webp', 'jpg', 'png'
- `desktop_enabled`: boolean
- `mobile_enabled`: boolean
4. **Prompt Templates:**
- `image_prompt_template`: Template for formatting prompts (uses {post_title}, {image_prompt}, {image_type})
- `negative_prompt`: Negative prompt for Runware (OpenAI doesn't support)
5. **Progress Tracking:**
- Real-time progress updates via Celery
- Individual image status tracking
- Success/failure per image
### 1.3 Current Image Generation Function
**Existing:** `GenerateImagesFunction` (`generate_images.py`)
- **Status:** Partially implemented, uses old pattern
- **Issues:**
- Still references `Tasks` instead of `Content`
- Doesn't follow the new unified framework pattern
- Uses legacy `generate_images_core()` wrapper
- Doesn't properly queue multiple images
**What We Need:**
- New function: `GenerateImagesFromPromptsFunction`
- Should work with `Images` model (which now has `content` relationship)
- Should process images in queue (one at a time)
- Should use progress modal similar to other AI functions
- Should use prompt templates and negative prompts from Thinker/Prompts
---
## 2. Architecture Overview
### 2.1 New Function: `GenerateImagesFromPromptsFunction`
**Purpose:** Generate actual images from existing image prompts stored in `Images` model
**Input:**
- `ids`: List of Image IDs (or Content IDs) to generate images for
- Images must have `prompt` field populated (from `GenerateImagePromptsFunction`)
**Output:**
- Updates `Images` records with:
- `image_url`: Generated image URL
- `status`: 'generated' (or 'failed' on error)
**Flow:**
1. **INIT (0-10%)**: Validate image IDs, check prompts exist
2. **PREP (10-25%)**: Load images, get settings, prepare queue
3. **AI_CALL (25-70%)**: Generate images sequentially (one per AI_CALL phase)
4. **PARSE (70-85%)**: Parse image URLs from responses
5. **SAVE (85-98%)**: Update Images records with URLs
6. **DONE (98-100%)**: Complete
### 2.2 Key Differences from Other Functions
**Unlike text generation functions:**
- **Multiple AI calls**: One AI call per image (not one call for all)
- **Sequential processing**: Images must be generated one at a time (rate limits)
- **Progress per image**: Need to track progress for each individual image
- **Different API**: Uses `AICore.generate_image()` instead of `AICore.run_ai_request()`
**Similarities:**
- Uses same `BaseAIFunction` pattern
- Uses same `AIEngine` orchestrator
- Uses same progress tracking system
- Uses same error handling
---
## 3. Implementation Plan
### Phase 1: Backend AI Function
#### 3.1 Create `GenerateImagesFromPromptsFunction`
**File:** `backend/igny8_core/ai/functions/generate_images_from_prompts.py`
**Class Structure:**
```python
class GenerateImagesFromPromptsFunction(BaseAIFunction):
def get_name(self) -> str:
return 'generate_images_from_prompts'
def get_metadata(self) -> Dict:
return {
'display_name': 'Generate Images from Prompts',
'description': 'Generate actual images from image prompts',
'phases': {
'INIT': 'Validating image prompts...',
'PREP': 'Preparing image generation queue...',
'AI_CALL': 'Generating images with AI...',
'PARSE': 'Processing image URLs...',
'SAVE': 'Saving image URLs...',
'DONE': 'Images generated!'
}
}
def validate(self, payload: dict, account=None) -> Dict:
"""Validate image IDs and check prompts exist"""
# Check for 'ids' array
# Check images exist and have prompts
# Check images have status='pending'
# Check account matches
def prepare(self, payload: dict, account=None) -> Dict:
"""Load images and settings"""
# Load Images records by IDs
# Get IntegrationSettings for image_generation
# Extract: provider, model, image_type, image_format, etc.
# Get prompt templates from PromptRegistry
# Return: {
# 'images': [Image objects],
# 'settings': {...},
# 'image_prompt_template': str,
# 'negative_prompt': str
# }
def build_prompt(self, data: Dict, account=None) -> Dict:
"""Format prompt using template"""
# For each image in queue:
# - Get content title (from image.content)
# - Format prompt using image_prompt_template
# - Return formatted prompt + image_type
# Note: This is called once per image (AIEngine handles iteration)
def parse_response(self, response: Dict, step_tracker=None) -> Dict:
"""Parse image URL from response"""
# Response from AICore.generate_image() has:
# - 'url': Image URL
# - 'revised_prompt': (optional)
# - 'cost': (optional)
# Return: {'url': str, 'revised_prompt': str, 'cost': float}
def save_output(self, parsed: Dict, original_data: Dict, account=None, ...) -> Dict:
"""Update Images record with URL"""
# Get image from original_data
# Update Images record:
# - image_url = parsed['url']
# - status = 'generated'
# - updated_at = now()
# Return: {'count': 1, 'images_generated': 1}
```
**Key Implementation Details:**
1. **Multiple AI Calls Handling:**
- `AIEngine` will call `build_prompt()` → `AI_CALL` → `parse_response()` → `SAVE` for each image
- Need to track which image is being processed
- Use `step_tracker` to log progress per image
2. **Prompt Formatting:**
```python
# Get template from PromptRegistry
template = PromptRegistry.get_image_prompt_template(account)
# Format with content title and prompt
formatted = template.format(
post_title=image.content.title or image.content.meta_title,
image_prompt=image.prompt,
image_type=settings['image_type']
)
```
3. **Image Generation:**
```python
# Use AICore.generate_image()
result = ai_core.generate_image(
prompt=formatted_prompt,
provider=settings['provider'],
model=settings['model'],
size='1024x1024', # Default or from settings
negative_prompt=negative_prompt if provider == 'runware' else None,
function_name='generate_images_from_prompts'
)
```
4. **Progress Tracking:**
- Track total images: `len(images)`
- Track completed: Increment after each SAVE
- Update progress: `(completed / total) * 100`
#### 3.2 Update AIEngine for Multiple AI Calls
**File:** `backend/igny8_core/ai/engine.py`
**Changes Needed:**
- Detect if function needs multiple AI calls (check function name or metadata)
- For `generate_images_from_prompts`:
- Loop through images in PREP data
- For each image:
- Call `build_prompt()` with single image
- Call `AI_CALL` phase (generate image)
- Call `parse_response()`
- Call `SAVE` phase
- Update progress: `(current_image / total_images) * 100`
- After all images: Call DONE phase
**Alternative Approach (Simpler):**
- Process all images in `save_output()` method
- Make AI calls directly in `save_output()` (not through AIEngine phases)
- Update progress manually via `progress_tracker.update()`
- This is simpler but less consistent with framework
**Recommended Approach:**
- Use AIEngine's phase system
- Add metadata flag: `requires_multiple_ai_calls: True`
- AIEngine detects this and loops through items
#### 3.3 Register Function
**File:** `backend/igny8_core/ai/registry.py`
```python
def _load_generate_images_from_prompts():
from igny8_core.ai.functions.generate_images_from_prompts import GenerateImagesFromPromptsFunction
return GenerateImagesFromPromptsFunction
register_lazy_function('generate_images_from_prompts', _load_generate_images_from_prompts)
```
**File:** `backend/igny8_core/ai/functions/__init__.py`
```python
from .generate_images_from_prompts import GenerateImagesFromPromptsFunction
__all__ = [
...
'GenerateImagesFromPromptsFunction',
]
```
#### 3.4 Add Model Configuration
**File:** `backend/igny8_core/ai/settings.py`
```python
MODEL_CONFIG = {
...
'generate_images_from_prompts': {
'model': 'dall-e-3', # Default, overridden by IntegrationSettings
'max_tokens': None, # Not used for images
'temperature': None, # Not used for images
'response_format': None, # Not used for images
},
}
FUNCTION_TO_PROMPT_TYPE = {
...
'generate_images_from_prompts': None, # Uses image_prompt_template, not text prompt
}
```
#### 3.5 Update Progress Messages
**File:** `backend/igny8_core/ai/engine.py`
```python
def _get_prep_message(self, function_name: str, count: int, data: Any) -> str:
...
elif function_name == 'generate_images_from_prompts':
total_images = len(data.get('images', []))
return f"Preparing to generate {total_images} image{'s' if total_images != 1 else ''}"
def _get_ai_call_message(self, function_name: str, count: int) -> str:
...
elif function_name == 'generate_images_from_prompts':
return f"Generating images with AI"  # note: `total` is not in scope here; per-image counts come from the PREP data
def _get_parse_message_with_count(self, function_name: str, count: int) -> str:
...
elif function_name == 'generate_images_from_prompts':
return f"{count} image{'s' if count != 1 else ''} generated"
def _get_save_message(self, function_name: str, count: int) -> str:
...
elif function_name == 'generate_images_from_prompts':
return f"Saving {count} image{'s' if count != 1 else ''}"
```
### Phase 2: API Endpoint
#### 3.6 Add API Endpoint
**File:** `backend/igny8_core/modules/writer/views.py`
**Add to `ImagesViewSet`:**
```python
@action(detail=False, methods=['post'], url_path='generate_images', url_name='generate_images')
def generate_images(self, request):
"""Generate images from prompts for image records"""
from igny8_core.ai.tasks import run_ai_task
account = getattr(request, 'account', None)
ids = request.data.get('ids', [])
if not ids:
return Response({
'error': 'No IDs provided',
'type': 'ValidationError'
}, status=status.HTTP_400_BAD_REQUEST)
account_id = account.id if account else None
# Queue Celery task
try:
if hasattr(run_ai_task, 'delay'):
task = run_ai_task.delay(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Image generation started'
}, status=status.HTTP_200_OK)
else:
# Fallback to synchronous execution
result = run_ai_task(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
'images_generated': result.get('count', 0),
'message': 'Images generated successfully'
}, status=status.HTTP_200_OK)
else:
return Response({
'error': result.get('error', 'Image generation failed'),
'type': 'TaskExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
return Response({
'error': str(e),
'type': 'ExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
```
### Phase 3: Frontend Integration
#### 3.7 Add API Function
**File:** `frontend/src/services/api.ts`
```typescript
export async function generateImages(imageIds: number[]): Promise<any> {
return fetchAPI('/v1/writer/images/generate_images/', {
method: 'POST',
body: JSON.stringify({ ids: imageIds }),
});
}
```
#### 3.8 Add Generate Images Button
**File:** `frontend/src/config/pages/images.config.tsx`
**Add to row actions or status column:**
- Add "Generate Images" button in status column
- Only show if status is 'pending' and prompt exists
- Button should trigger generation for all images for that content
**File:** `frontend/src/pages/Writer/Images.tsx`
**Add handler:**
```typescript
const handleGenerateImages = useCallback(async (contentId: number) => {
try {
// Get all pending images for this content
const contentImages = images.find(g => g.content_id === contentId);
if (!contentImages) return;
// Collect all image IDs with prompts
const imageIds: number[] = [];
if (contentImages.featured_image?.id && contentImages.featured_image.status === 'pending') {
imageIds.push(contentImages.featured_image.id);
}
contentImages.in_article_images.forEach(img => {
if (img.id && img.status === 'pending' && img.prompt) {
imageIds.push(img.id);
}
});
if (imageIds.length === 0) {
toast.info('No pending images with prompts found');
return;
}
const result = await generateImages(imageIds);
if (result.success) {
if (result.task_id) {
// Open progress modal
progressModal.openModal(
result.task_id,
'Generate Images',
'ai-generate-images-from-prompts-01-desktop'
);
} else {
toast.success(`Images generated: ${result.images_generated || 0} image${(result.images_generated || 0) === 1 ? '' : 's'} created`);
loadImages();
}
} else {
toast.error(result.error || 'Failed to generate images');
}
} catch (error: any) {
toast.error(`Failed to generate images: ${error.message}`);
}
}, [toast, progressModal, loadImages, images]);
```
#### 3.9 Update Progress Modal
**File:** `frontend/src/components/common/ProgressModal.tsx`
**Add support for image generation:**
- Update step labels for `generate_images_from_prompts`
- Show progress per image
- Display generated images in modal (optional, like WP plugin)
**Step Labels:**
```typescript
// Note: the frontend function id is hyphenated ('ai-generate-images-from-prompts-...'), so match on
// 'from'; this branch must be checked BEFORE the 'prompt' branch, since the id also contains 'prompt'.
if (funcName.includes('image') && funcName.includes('from')) {
return [
{ phase: 'INIT', label: 'Validating image prompts' },
{ phase: 'PREP', label: 'Preparing image generation queue' },
{ phase: 'AI_CALL', label: 'Generating images with AI' },
{ phase: 'PARSE', label: 'Processing image URLs' },
{ phase: 'SAVE', label: 'Saving image URLs' },
];
}
```
**Success Message:**
```typescript
// Match on 'from' (hyphenated function id), and check before the image-prompt branch
if (funcName.includes('image') && funcName.includes('from')) {
const imageCount = extractCount(/(\d+)\s+image/i, stepLogs || []);
if (imageCount) {
return `${imageCount} image${imageCount !== '1' ? 's' : ''} generated successfully`;
}
return 'Images generated successfully';
}
```
---
## 4. Technical Details
### 4.1 Image Generation API
**AICore.generate_image()** already exists and handles:
- OpenAI DALL-E (dall-e-2, dall-e-3)
- Runware API
- Negative prompts (Runware only)
- Cost tracking
- Error handling
**Usage:**
```python
result = ai_core.generate_image(
prompt=formatted_prompt,
provider='openai', # or 'runware'
model='dall-e-3', # or 'runware:97@1'
size='1024x1024',
negative_prompt=negative_prompt, # Only for Runware
function_name='generate_images_from_prompts'
)
```
**Response:**
```python
{
'url': 'https://...', # Image URL
'revised_prompt': '...', # OpenAI may revise prompt
'cost': 0.04, # Cost in USD
'error': None # Error message if failed
}
```
### 4.2 Settings Retrieval
**From IntegrationSettings:**
```python
integration = IntegrationSettings.objects.get(
account=account,
integration_type='image_generation',
is_active=True
)
config = integration.config
provider = config.get('provider') or config.get('service', 'openai')
if provider == 'runware':
model = config.get('model') or config.get('runwareModel', 'runware:97@1')
else:
model = config.get('model', 'dall-e-3')
image_type = config.get('image_type', 'realistic')
image_format = config.get('image_format', 'webp')
```
### 4.3 Prompt Templates
**From PromptRegistry:**
```python
image_prompt_template = PromptRegistry.get_image_prompt_template(account)
negative_prompt = PromptRegistry.get_negative_prompt(account)
```
**Formatting:**
```python
formatted = image_prompt_template.format(
post_title=content.title or content.meta_title,
image_prompt=image.prompt,
image_type=image_type # 'realistic', 'artistic', 'cartoon'
)
```
### 4.4 Error Handling
**Per-Image Errors:**
- If one image fails, continue with others
- Mark failed image: `status='failed'`
- Log error in `Images` record or separate error field
- Return success with partial count: `{'success': True, 'images_generated': 3, 'images_failed': 1}`
**Validation Errors:**
- No prompts: Skip image, log warning
- No settings: Return error, don't start generation
- Invalid provider/model: Return error
---
## 5. Frontend Integration
### 5.1 Images Page Updates
**File:** `frontend/src/pages/Writer/Images.tsx`
**Changes:**
1. Add "Generate Images" button in status column (or row actions)
2. Button only enabled if:
- Status is 'pending'
- Prompt exists
- Content has at least one pending image
3. On click: Collect all pending image IDs for that content
4. Call API: `generateImages(imageIds)`
5. Open progress modal if async
6. Reload images on completion
### 5.2 Progress Modal Updates
**File:** `frontend/src/components/common/ProgressModal.tsx`
**Changes:**
1. Add step definitions for `generate_images_from_prompts`
2. Update progress messages
3. Show image count in messages
4. Optional: Display generated images in modal (like WP plugin)
### 5.3 Table Actions Config
**File:** `frontend/src/config/pages/table-actions.config.tsx`
**Add row action (optional):**
```typescript
'/writer/images': {
rowActions: [
{
key: 'generate_images',
label: 'Generate Images',
icon: <BoltIcon className="w-5 h-5" />,
variant: 'primary',
},
],
}
```
---
## 6. Testing Strategy
### 6.1 Unit Tests
**Test Function Methods:**
- `validate()`: Test with valid/invalid IDs, missing prompts, wrong status
- `prepare()`: Test settings retrieval, prompt template loading
- `build_prompt()`: Test prompt formatting
- `parse_response()`: Test URL extraction
- `save_output()`: Test Images record update
### 6.2 Integration Tests
**Test Full Flow:**
1. Create Images records with prompts
2. Call API endpoint
3. Verify Celery task created
4. Verify progress updates
5. Verify Images records updated with URLs
6. Verify status changed to 'generated'
### 6.3 Error Scenarios
**Test:**
- Missing IntegrationSettings
- Invalid provider/model
- API errors (rate limits, invalid API key)
- Partial failures (some images succeed, some fail)
- Missing prompts
- Invalid image IDs
---
## 7. Implementation Checklist
### Backend
- [ ] Create `GenerateImagesFromPromptsFunction` class
- [ ] Implement `validate()` method
- [ ] Implement `prepare()` method
- [ ] Implement `build_prompt()` method
- [ ] Implement `parse_response()` method
- [ ] Implement `save_output()` method
- [ ] Register function in `registry.py`
- [ ] Add to `__init__.py` exports
- [ ] Add model config in `settings.py`
- [ ] Update `AIEngine` progress messages
- [ ] Add API endpoint in `ImagesViewSet`
- [ ] Test with OpenAI provider
- [ ] Test with Runware provider
- [ ] Test error handling
### Frontend
- [ ] Add `generateImages()` API function
- [ ] Add "Generate Images" button to Images page
- [ ] Add click handler
- [ ] Integrate progress modal
- [ ] Update progress modal step labels
- [ ] Update success messages
- [ ] Test UI flow
- [ ] Test error handling
### Documentation
- [ ] Update AI_MASTER_ARCHITECTURE.md
- [ ] Add function to AI_FUNCTIONS_AUDIT_REPORT.md
- [ ] Document API endpoint
- [ ] Document settings requirements
---
## 8. Key Considerations
### 8.1 Rate Limiting
**Issue:** Image generation APIs have rate limits
**Solution:** Process images sequentially (one at a time)
**Implementation:** AIEngine loops through images, waits for each to complete
### 8.2 Cost Tracking
**Issue:** Need to track costs per image
**Solution:** AICore already tracks costs, store in AITaskLog
**Implementation:** Cost is returned from `generate_image()`, log in step_tracker
### 8.3 Progress Updates
**Issue:** Need granular progress (per image)
**Solution:** Update progress after each image: `(completed / total) * 100`
**Implementation:** Track in `save_output()`, update via `progress_tracker.update()`
### 8.4 Error Recovery
**Issue:** If one image fails, should continue with others
**Solution:** Catch errors per image, mark as failed, continue
**Implementation:** Try-catch in `save_output()` per image
### 8.5 Image Display
**Issue:** Should show generated images in progress modal?
**Solution:** Optional enhancement, can add later
**Implementation:** Store image URLs in step logs, display in modal
---
## 9. Alternative Approaches Considered
### 9.1 Process All in save_output()
**Pros:**
- Simpler implementation
- Direct control over loop
**Cons:**
- Doesn't use AIEngine phases properly
- Harder to track progress per image
- Less consistent with framework
**Decision:** Use AIEngine phases with loop detection
### 9.2 Separate Function Per Image
**Pros:**
- Each image is independent task
- Better error isolation
**Cons:**
- Too many Celery tasks
- Harder to track overall progress
- More complex frontend
**Decision:** Single function processes all images sequentially
---
## 10. Success Criteria
✅ Function follows `BaseAIFunction` pattern
✅ Uses `AIEngine` orchestrator
✅ Integrates with progress modal
✅ Uses prompt templates from Thinker/Prompts
✅ Uses settings from IntegrationSettings
✅ Handles errors gracefully
✅ Tracks progress per image
✅ Updates Images records correctly
✅ Works with both OpenAI and Runware
✅ Frontend button triggers generation
✅ Progress modal shows correct steps
✅ Success message shows image count
---
## 11. Next Steps
1. **Start with Backend Function**
- Create `GenerateImagesFromPromptsFunction`
- Implement all methods
- Test with single image
2. **Add API Endpoint**
- Add to `ImagesViewSet`
- Test endpoint
3. **Frontend Integration**
- Add button
- Add handler
- Test flow
4. **Progress Modal**
- Update step labels
- Test progress updates
5. **Error Handling**
- Test error scenarios
- Verify graceful failures
6. **Documentation**
- Update architecture docs
- Add API docs
---
**End of Plan**

View File

@@ -73,7 +73,8 @@ const getSuccessMessage = (functionId?: string, title?: string, stepLogs?: any[]
}
return 'Article drafted successfully.';
}
if (funcName.includes('image')) {
if (funcName.includes('image') && (funcName.includes('prompt') || funcName.includes('extract'))) {
// Image prompt generation
// Try to extract from SAVE step message first (most reliable)
const saveStepLog = stepLogs?.find(log => log.stepName === 'SAVE');
if (saveStepLog?.message) {
@@ -119,6 +120,13 @@ const getSuccessMessage = (functionId?: string, title?: string, stepLogs?: any[]
// Default message
return 'Featured Image and X Inarticle Image Prompts ready for image generation';
} else if (funcName.includes('image') && funcName.includes('from')) {
// Image generation from prompts
const imageCount = extractCount(/(\d+)\s+image/i, stepLogs || []);
if (imageCount) {
return `${imageCount} image${imageCount !== '1' ? 's' : ''} generated successfully`;
}
return 'Images generated successfully';
}
return 'Task completed successfully.';
};
@@ -157,7 +165,8 @@ const getStepsForFunction = (functionId?: string, title?: string): Array<{phase:
];
}
if (funcName.includes('image')) {
if (funcName.includes('image') && (funcName.includes('prompt') || funcName.includes('extract'))) {
// Image prompt generation
return [
{ phase: 'INIT', label: 'Checking content and image slots' },
{ phase: 'PREP', label: 'Mapping Content for X Image Prompts' },
@@ -165,6 +174,15 @@ const getStepsForFunction = (functionId?: string, title?: string): Array<{phase:
{ phase: 'PARSE', label: 'Writing X Inarticle Image Prompts' },
{ phase: 'SAVE', label: 'Assigning Prompts to Dedicated Slots' },
];
} else if (funcName.includes('image') && funcName.includes('from')) {
// Image generation from prompts
return [
{ phase: 'INIT', label: 'Validating image prompts' },
{ phase: 'PREP', label: 'Preparing image generation queue' },
{ phase: 'AI_CALL', label: 'Generating images with AI' },
{ phase: 'PARSE', label: 'Processing image URLs' },
{ phase: 'SAVE', label: 'Saving image URLs' },
];
}
// Default fallback

View File

@@ -49,6 +49,7 @@ export const createImagesPageConfig = (
setStatusFilter: (value: string) => void;
setCurrentPage: (page: number) => void;
maxInArticleImages?: number; // Optional: max in-article images to display
onGenerateImages?: (contentId: number) => void; // Handler for generate images button
}
): ImagesPageConfig => {
const maxImages = handlers.maxInArticleImages || 5; // Default to 5 in-article images
@@ -99,13 +100,13 @@ export const createImagesPageConfig = (
});
}
// Add overall status column
// Add overall status column with Generate Images button
columns.push({
key: 'overall_status',
label: 'Status',
sortable: false,
width: '120px',
render: (value: string) => {
width: '180px',
render: (value: string, row: ContentImagesGroup) => {
const statusColors: Record<string, 'success' | 'warning' | 'error' | 'info'> = {
'complete': 'success',
'partial': 'info',
@@ -118,13 +119,34 @@ export const createImagesPageConfig = (
'pending': 'Pending',
'failed': 'Failed',
};
// Check if there are any pending images with prompts
const hasPendingImages =
(row.featured_image?.status === 'pending' && row.featured_image?.prompt) ||
row.in_article_images.some(img => img.status === 'pending' && img.prompt);
return (
<Badge
color={statusColors[value] || 'warning'}
size="sm"
>
{labels[value] || value}
</Badge>
<div className="flex items-center gap-2">
<Badge
color={statusColors[value] || 'warning'}
size="sm"
>
{labels[value] || value}
</Badge>
{hasPendingImages && handlers.onGenerateImages && (
<button
onClick={(e) => {
e.stopPropagation();
handlers.onGenerateImages!(row.content_id);
}}
className="inline-flex items-center gap-1 px-2 py-1 text-xs font-medium text-white bg-brand-500 hover:bg-brand-600 rounded transition-colors"
title="Generate Images"
>
<BoltIcon className="w-3 h-3" />
Generate
</button>
)}
</div>
);
},
});

View File

@@ -3,19 +3,26 @@
* Shows content images grouped by content - one row per content
*/
import { useState, useEffect, useMemo, useCallback } from 'react';
import { useState, useEffect, useMemo, useCallback, useRef } from 'react';
import TablePageTemplate from '../../templates/TablePageTemplate';
import {
fetchContentImages,
ContentImagesGroup,
ContentImagesResponse,
generateImages,
} from '../../services/api';
import { useToast } from '../../components/ui/toast/ToastContainer';
import { FileIcon, DownloadIcon } from '../../icons';
import { FileIcon, DownloadIcon, BoltIcon } from '../../icons';
import { createImagesPageConfig } from '../../config/pages/images.config';
import ProgressModal from '../../components/common/ProgressModal';
import { useProgressModal } from '../../hooks/useProgressModal';
export default function Images() {
const toast = useToast();
// Progress modal for AI functions
const progressModal = useProgressModal();
const hasReloadedRef = useRef(false);
// Data state
const [images, setImages] = useState<ContentImagesGroup[]>([]);
@@ -136,6 +143,62 @@ export default function Images() {
toast.info(`Bulk action "${action}" for ${ids.length} items`);
}, [toast]);
// Generate images handler
const handleGenerateImages = useCallback(async (contentId: number) => {
try {
// Get all pending images for this content
const contentImages = images.find(g => g.content_id === contentId);
if (!contentImages) {
toast.error('Content not found');
return;
}
// Collect all image IDs with prompts and pending status
const imageIds: number[] = [];
if (contentImages.featured_image?.id &&
contentImages.featured_image.status === 'pending' &&
contentImages.featured_image.prompt) {
imageIds.push(contentImages.featured_image.id);
}
contentImages.in_article_images.forEach(img => {
if (img.id && img.status === 'pending' && img.prompt) {
imageIds.push(img.id);
}
});
if (imageIds.length === 0) {
toast.info('No pending images with prompts found for this content');
return;
}
const result = await generateImages(imageIds);
if (result.success) {
if (result.task_id) {
// Open progress modal for async task
progressModal.openModal(
result.task_id,
'Generate Images',
'ai-generate-images-from-prompts-01-desktop'
);
} else {
// Synchronous completion
const generated = result.images_generated || 0;
const failed = result.images_failed || 0;
if (generated > 0) {
toast.success(`Images generated: ${generated} image${generated !== 1 ? 's' : ''} created${failed > 0 ? `, ${failed} failed` : ''}`);
} else {
toast.error(`Image generation failed: ${failed} image${failed !== 1 ? 's' : ''} failed`);
}
loadImages(); // Reload to show new images
}
} else {
toast.error(result.error || 'Failed to generate images');
}
} catch (error: any) {
toast.error(`Failed to generate images: ${error.message}`);
}
}, [toast, progressModal, loadImages, images]);
// Get max in-article images from the data (to determine column count)
const maxInArticleImages = useMemo(() => {
if (images.length === 0) return 5; // Default
@@ -152,8 +215,9 @@ export default function Images() {
setStatusFilter,
setCurrentPage,
maxInArticleImages,
onGenerateImages: handleGenerateImages,
});
}, [searchTerm, statusFilter, maxInArticleImages]);
}, [searchTerm, statusFilter, maxInArticleImages, handleGenerateImages]);
// Calculate header metrics
const headerMetrics = useMemo(() => {
@@ -166,6 +230,7 @@ export default function Images() {
}, [pageConfig?.headerMetrics, images, totalCount]);
return (
<>
<TablePageTemplate
title="Content Images"
titleIcon={<FileIcon className="text-purple-500 size-5" />}
@@ -218,5 +283,30 @@ export default function Images() {
setCurrentPage(1);
}}
/>
{/* Progress Modal for AI Functions */}
<ProgressModal
isOpen={progressModal.isOpen}
title={progressModal.title}
percentage={progressModal.progress.percentage}
status={progressModal.progress.status}
message={progressModal.progress.message}
details={progressModal.progress.details}
taskId={progressModal.taskId || undefined}
functionId={progressModal.functionId}
onClose={() => {
const wasCompleted = progressModal.progress.status === 'completed';
progressModal.closeModal();
// Reload data after modal closes (if completed)
if (wasCompleted && !hasReloadedRef.current) {
hasReloadedRef.current = true;
loadImages();
setTimeout(() => {
hasReloadedRef.current = false;
}, 1000);
}
}}
/>
</>
);
}

View File

@@ -1032,6 +1032,13 @@ export async function fetchContentImages(): Promise<ContentImagesResponse> {
return fetchAPI('/v1/writer/images/content_images/');
}
export async function generateImages(imageIds: number[]): Promise<any> {
return fetchAPI('/v1/writer/images/generate_images/', {
method: 'POST',
body: JSON.stringify({ ids: imageIds }),
});
}
export async function deleteTaskImage(id: number): Promise<void> {
return fetchAPI(`/v1/writer/images/${id}/`, {
method: 'DELETE',