Add Image Generation from Prompts: Implement new functionality to generate images from prompts, including backend processing, API integration, and frontend handling with progress modal. Update settings and registry for new AI function.

This commit is contained in:
IGNY8 VPS (Salman)
2025-11-11 20:49:11 +00:00
parent 5f11da03e4
commit 5638ea78df
11 changed files with 1511 additions and 129 deletions

View File

@@ -81,6 +81,12 @@ class AIEngine:
total_images = 1 + max_images
return f"Mapping Content for {total_images} Image Prompts"
return f"Mapping Content for Image Prompts"
elif function_name == 'generate_images_from_prompts':
# Extract image count from data
if isinstance(data, dict) and 'images' in data:
total_images = len(data.get('images', []))
return f"Preparing to generate {total_images} image{'s' if total_images != 1 else ''}"
return f"Preparing image generation queue"
return f"Preparing {count} item{'s' if count != 1 else ''}"
def _get_ai_call_message(self, function_name: str, count: int) -> str:
@@ -93,6 +99,8 @@ class AIEngine:
return f"Writing article{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images':
return f"Creating image{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images_from_prompts':
return f"Generating images with AI"
return f"Processing with AI"
def _get_parse_message(self, function_name: str) -> str:
@@ -123,6 +131,8 @@ class AIEngine:
if in_article_count > 0:
return f"Writing {in_article_count} Inarticle Image Prompts"
return "Writing Inarticle Image Prompts"
elif function_name == 'generate_images_from_prompts':
return f"{count} image{'s' if count != 1 else ''} generated"
return f"{count} item{'s' if count != 1 else ''} processed"
def _get_save_message(self, function_name: str, count: int) -> str:
@@ -138,6 +148,8 @@ class AIEngine:
elif function_name == 'generate_image_prompts':
# Count is total prompts created
return f"Assigning {count} Prompts to Dedicated Slots"
elif function_name == 'generate_images_from_prompts':
return f"Saving {count} image{'s' if count != 1 else ''}"
return f"Saving {count} item{'s' if count != 1 else ''}"
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
@@ -196,131 +208,144 @@ class AIEngine:
prep_message = self._get_prep_message(function_name, data_count, data)
self.console_tracker.prep(prep_message)
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
# For image generation, build_prompt returns placeholder
# Actual processing happens in save_output
if function_name == 'generate_images_from_prompts':
prompt = "Image generation queue prepared"
else:
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
self.step_tracker.add_request_step("PREP", "success", prep_message)
self.tracker.update("PREP", 25, prep_message, meta=self.step_tracker.get_meta())
# Phase 3: AI_CALL - Provider API Call (25-70%)
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
# For image generation, AI calls happen in save_output, so skip this phase
if function_name == 'generate_images_from_prompts':
# Skip AI_CALL phase - processing happens in save_output
raw_response = {'content': 'Image generation queue ready'}
parsed = {'processed': True}
else:
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
# Phase 5: SAVE - Database Operations (85-98%)
# Pass step_tracker to save_output so it can add validation steps

View File

@@ -6,6 +6,7 @@ from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction
from igny8_core.ai.functions.generate_content import GenerateContentFunction
from igny8_core.ai.functions.generate_images import GenerateImagesFunction, generate_images_core
from igny8_core.ai.functions.generate_image_prompts import GenerateImagePromptsFunction
from igny8_core.ai.functions.generate_images_from_prompts import GenerateImagesFromPromptsFunction
__all__ = [
'AutoClusterFunction',
@@ -14,4 +15,5 @@ __all__ = [
'GenerateImagesFunction',
'generate_images_core',
'GenerateImagePromptsFunction',
'GenerateImagesFromPromptsFunction',
]

View File

@@ -0,0 +1,311 @@
"""
Generate Images from Prompts AI Function
Generates actual images from existing image prompts using AI
"""
import logging
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.writer.models import Images, Content
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import validate_ids
from igny8_core.ai.prompts import PromptRegistry
logger = logging.getLogger(__name__)
class GenerateImagesFromPromptsFunction(BaseAIFunction):
"""Generate actual images from image prompts using AI"""
def get_name(self) -> str:
return 'generate_images_from_prompts'
def get_metadata(self) -> Dict:
return {
'display_name': 'Generate Images from Prompts',
'description': 'Generate actual images from existing image prompts',
'phases': {
'INIT': 'Validating image prompts...',
'PREP': 'Preparing image generation queue...',
'AI_CALL': 'Generating images with AI...',
'PARSE': 'Processing image URLs...',
'SAVE': 'Saving image URLs...',
'DONE': 'Images generated!'
}
}
def get_max_items(self) -> int:
return 100 # Max images per batch
def validate(self, payload: dict, account=None) -> Dict:
"""Validate image IDs exist and have prompts"""
result = validate_ids(payload, max_items=self.get_max_items())
if not result['valid']:
return result
# Check images exist and have prompts
image_ids = payload.get('ids', [])
if image_ids:
queryset = Images.objects.filter(id__in=image_ids)
if account:
queryset = queryset.filter(account=account)
images = list(queryset.select_related('content', 'task'))
if not images:
return {
'valid': False,
'error': 'No images found with provided IDs'
}
# Check all images have prompts
images_without_prompts = [img.id for img in images if not img.prompt or not img.prompt.strip()]
if images_without_prompts:
return {
'valid': False,
'error': f'Images {images_without_prompts} do not have prompts'
}
# Check all images are pending
images_not_pending = [img.id for img in images if img.status != 'pending']
if images_not_pending:
return {
'valid': False,
'error': f'Images {images_not_pending} are not in pending status'
}
return {'valid': True}
    def prepare(self, payload: dict, account=None) -> Dict:
        """Load the pending images plus the account's image-generation settings.

        Args:
            payload: Request payload; ``ids`` holds the Images primary keys.
            account: Optional account used to scope the query and settings lookup.

        Returns:
            Dict consumed by save_output: the Image rows, provider/model
            selection, image type/format, and prompt templates.

        Raises:
            ValueError: If none of the requested images are still pending.
        """
        image_ids = payload.get('ids', [])
        # Only pending rows are processed; already generated/failed images are skipped.
        queryset = Images.objects.filter(id__in=image_ids, status='pending')
        if account:
            queryset = queryset.filter(account=account)
        images = list(queryset.select_related('content', 'task', 'account', 'site', 'sector'))
        if not images:
            raise ValueError("No pending images found with prompts")
        # Get image generation settings
        image_settings = {}
        if account:
            try:
                from igny8_core.modules.system.models import IntegrationSettings
                integration = IntegrationSettings.objects.get(
                    account=account,
                    integration_type='image_generation',
                    is_active=True
                )
                image_settings = integration.config or {}
            except Exception as e:
                # Best-effort: missing/inactive settings fall back to the defaults below.
                logger.warning(f"Failed to load image generation settings: {e}")
        # Extract settings with defaults.
        # Older configs may use 'service' instead of 'provider'.
        provider = image_settings.get('provider') or image_settings.get('service', 'openai')
        if provider == 'runware':
            # Runware configs may store the model under either 'model' or 'runwareModel'.
            model = image_settings.get('model') or image_settings.get('runwareModel', 'runware:97@1')
        else:
            model = image_settings.get('model', 'dall-e-3')
        # Get prompt templates from the account-scoped registry.
        image_prompt_template = PromptRegistry.get_image_prompt_template(account)
        negative_prompt = PromptRegistry.get_negative_prompt(account)
        return {
            'images': images,
            'account': account,
            'provider': provider,
            'model': model,
            'image_type': image_settings.get('image_type', 'realistic'),
            'image_format': image_settings.get('image_format', 'webp'),
            'image_prompt_template': image_prompt_template,
            'negative_prompt': negative_prompt,
        }
def build_prompt(self, data: Dict, account=None) -> str:
"""
Build prompt for AI_CALL phase.
For image generation, we return a placeholder since we process images in save_output.
"""
# Return placeholder - actual processing happens in save_output
return "Image generation queue prepared"
def parse_response(self, response: str, step_tracker=None) -> Dict:
"""
Parse response from AI_CALL.
For image generation, we process images directly in save_output, so this is a placeholder.
"""
return {'processed': True}
    def save_output(
        self,
        parsed: Dict,
        original_data: Dict,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """
        Process all images sequentially and generate them.
        This method handles the loop and makes AI calls directly.

        Args:
            parsed: Placeholder dict from parse_response (ignored).
            original_data: Output of prepare(): image rows, provider, model,
                image type, prompt templates, and negative prompt.
            account: Optional account; falls back to original_data['account'].
            progress_tracker: Optional tracker receiving phase/percent updates.
            step_tracker: Optional tracker accumulating per-step metadata.

        Returns:
            Dict with 'count', 'images_generated', 'images_failed',
            'total_images', and 'errors' (list or None).

        Raises:
            ValueError: If original_data contains no images.
        """
        images = original_data.get('images', [])
        if not images:
            raise ValueError("No images to process")
        provider = original_data.get('provider', 'openai')
        model = original_data.get('model', 'dall-e-3')
        image_type = original_data.get('image_type', 'realistic')
        image_prompt_template = original_data.get('image_prompt_template', '')
        negative_prompt = original_data.get('negative_prompt', '')
        # NOTE(review): original_data['image_format'] set in prepare() is not
        # consumed here — confirm whether the provider call should receive it.
        ai_core = AICore(account=account or original_data.get('account'))
        total_images = len(images)
        images_generated = 0
        images_failed = 0
        errors = []
        # Process each image sequentially
        for index, image in enumerate(images, 1):
            try:
                # Get content title (used to fill the {post_title} template slot)
                content = image.content
                if not content:
                    # Fallback to task if no content
                    if image.task:
                        content_title = image.task.title
                    else:
                        content_title = "Content"
                else:
                    content_title = content.title or content.meta_title or "Content"
                # Format prompt using template
                if image_prompt_template:
                    try:
                        formatted_prompt = image_prompt_template.format(
                            post_title=content_title,
                            image_prompt=image.prompt,
                            image_type=image_type
                        )
                    except KeyError as e:
                        # Template references an unknown placeholder — degrade
                        # gracefully to the simple built-in format.
                        logger.warning(f"Template formatting error: {e}, using simple format")
                        formatted_prompt = f"Create a high-quality {image_type} image: {image.prompt}"
                else:
                    # Fallback template
                    formatted_prompt = f"Create a high-quality {image_type} image: {image.prompt}"
                # Update progress: PREP phase for this image
                if progress_tracker and step_tracker:
                    prep_msg = f"Generating image {index} of {total_images}: {image.image_type}"
                    step_tracker.add_request_step("PREP", "success", prep_msg)
                    progress_pct = 10 + int((index - 1) / total_images * 15)  # 10-25% for PREP
                    progress_tracker.update("PREP", progress_pct, prep_msg, meta=step_tracker.get_meta())
                # Generate image
                if progress_tracker and step_tracker:
                    ai_msg = f"Generating {image.image_type} image {index} of {total_images} with AI"
                    step_tracker.add_response_step("AI_CALL", "success", ai_msg)
                    progress_pct = 25 + int((index - 1) / total_images * 45)  # 25-70% for AI_CALL
                    progress_tracker.update("AI_CALL", progress_pct, ai_msg, meta=step_tracker.get_meta())
                # Size is fixed; the negative prompt is only passed for Runware.
                result = ai_core.generate_image(
                    prompt=formatted_prompt,
                    provider=provider,
                    model=model,
                    size='1024x1024',
                    negative_prompt=negative_prompt if provider == 'runware' else None,
                    function_name='generate_images_from_prompts'
                )
                # generate_image is expected to return a dict; only the 'error'
                # and 'url' keys are read here.
                if result.get('error'):
                    # Mark as failed
                    with transaction.atomic():
                        image.status = 'failed'
                        image.save(update_fields=['status', 'updated_at'])
                    error_msg = f"Image {index} failed: {result['error']}"
                    errors.append(error_msg)
                    images_failed += 1
                    logger.error(f"Image generation failed for image {image.id}: {result['error']}")
                    if progress_tracker and step_tracker:
                        parse_msg = f"Image {index} failed: {result['error']}"
                        step_tracker.add_response_step("PARSE", "error", parse_msg)
                        progress_pct = 70 + int((index - 1) / total_images * 15)  # 70-85% for PARSE
                        progress_tracker.update("PARSE", progress_pct, parse_msg, meta=step_tracker.get_meta())
                    continue
                image_url = result.get('url')
                if not image_url:
                    # Provider responded without an error but also without a URL —
                    # treat it the same as a failure.
                    with transaction.atomic():
                        image.status = 'failed'
                        image.save(update_fields=['status', 'updated_at'])
                    error_msg = f"Image {index} failed: No URL returned"
                    errors.append(error_msg)
                    images_failed += 1
                    logger.error(f"No image URL returned for image {image.id}")
                    if progress_tracker and step_tracker:
                        parse_msg = f"Image {index} failed: No URL returned"
                        step_tracker.add_response_step("PARSE", "error", parse_msg)
                        progress_pct = 70 + int((index - 1) / total_images * 15)
                        progress_tracker.update("PARSE", progress_pct, parse_msg, meta=step_tracker.get_meta())
                    continue
                # Update progress: PARSE phase
                if progress_tracker and step_tracker:
                    parse_msg = f"Image {index} of {total_images} generated successfully"
                    step_tracker.add_response_step("PARSE", "success", parse_msg)
                    progress_pct = 70 + int((index - 1) / total_images * 15)  # 70-85% for PARSE
                    progress_tracker.update("PARSE", progress_pct, parse_msg, meta=step_tracker.get_meta())
                # Update image record: store the URL and flip status to generated.
                with transaction.atomic():
                    image.image_url = image_url
                    image.status = 'generated'
                    image.save(update_fields=['image_url', 'status', 'updated_at'])
                images_generated += 1
                logger.info(f"Image {image.id} ({image.image_type}) generated successfully: {image_url}")
                # Update progress: SAVE phase
                if progress_tracker and step_tracker:
                    save_msg = f"Saved image {index} of {total_images}"
                    step_tracker.add_request_step("SAVE", "success", save_msg)
                    progress_pct = 85 + int((index - 1) / total_images * 13)  # 85-98% for SAVE
                    progress_tracker.update("SAVE", progress_pct, save_msg, meta=step_tracker.get_meta())
            except Exception as e:
                # Any unexpected exception fails only this image; the loop continues.
                with transaction.atomic():
                    image.status = 'failed'
                    image.save(update_fields=['status', 'updated_at'])
                error_msg = f"Image {index} failed: {str(e)}"
                errors.append(error_msg)
                images_failed += 1
                logger.error(f"Exception generating image {image.id}: {str(e)}", exc_info=True)
                continue
        # Final progress update
        if progress_tracker and step_tracker:
            final_msg = f"Generated {images_generated} of {total_images} images"
            step_tracker.add_request_step("SAVE", "success", final_msg)
            progress_tracker.update("SAVE", 98, final_msg, meta=step_tracker.get_meta())
        return {
            'count': images_generated,
            'images_generated': images_generated,
            'images_failed': images_failed,
            'total_images': total_images,
            'errors': errors if errors else None
        }

View File

@@ -94,9 +94,15 @@ def _load_generate_image_prompts():
from igny8_core.ai.functions.generate_image_prompts import GenerateImagePromptsFunction
return GenerateImagePromptsFunction
def _load_generate_images_from_prompts():
    """Lazy loader: import and return the GenerateImagesFromPromptsFunction class."""
    from igny8_core.ai.functions.generate_images_from_prompts import (
        GenerateImagesFromPromptsFunction as _function_cls,
    )
    return _function_cls
# Register every AI function lazily so importing this module stays cheap;
# the heavy function modules are only imported on first use. Order matches
# the original one-call-per-line registrations.
_LAZY_FUNCTION_LOADERS = (
    ('auto_cluster', _load_auto_cluster),
    ('generate_ideas', _load_generate_ideas),
    ('generate_content', _load_generate_content),
    ('generate_images', _load_generate_images),
    ('generate_image_prompts', _load_generate_image_prompts),
    ('generate_images_from_prompts', _load_generate_images_from_prompts),
)
for _fn_name, _fn_loader in _LAZY_FUNCTION_LOADERS:
    register_lazy_function(_fn_name, _fn_loader)

View File

@@ -40,6 +40,12 @@ MODEL_CONFIG = {
"temperature": 0.7,
"response_format": {"type": "json_object"},
},
"generate_images_from_prompts": {
"model": "dall-e-3", # Default, overridden by IntegrationSettings
"max_tokens": None, # Not used for images
"temperature": None, # Not used for images
"response_format": None, # Not used for images
},
}
# Function name aliases (for backward compatibility)

View File

@@ -513,6 +513,60 @@ class ImagesViewSet(SiteSectorModelViewSet):
'count': len(grouped_data),
'results': grouped_data
}, status=status.HTTP_200_OK)
@action(detail=False, methods=['post'], url_path='generate_images', url_name='generate_images')
def generate_images(self, request):
"""Generate images from prompts for image records"""
from igny8_core.ai.tasks import run_ai_task
account = getattr(request, 'account', None)
ids = request.data.get('ids', [])
if not ids:
return Response({
'error': 'No IDs provided',
'type': 'ValidationError'
}, status=status.HTTP_400_BAD_REQUEST)
account_id = account.id if account else None
# Queue Celery task
try:
if hasattr(run_ai_task, 'delay'):
task = run_ai_task.delay(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Image generation started'
}, status=status.HTTP_200_OK)
else:
# Fallback to synchronous execution
result = run_ai_task(
function_name='generate_images_from_prompts',
payload={'ids': ids},
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
'images_generated': result.get('count', 0),
'images_failed': result.get('images_failed', 0),
'message': 'Images generated successfully'
}, status=status.HTTP_200_OK)
else:
return Response({
'error': result.get('error', 'Image generation failed'),
'type': 'TaskExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
return Response({
'error': str(e),
'type': 'ExecutionError'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentViewSet(SiteSectorModelViewSet):