Add Image Generation from Prompts: Implement new functionality to generate images from prompts, including backend processing, API integration, and frontend handling with a progress modal. Update settings and registry for the new AI function.

This commit is contained in:
IGNY8 VPS (Salman)
2025-11-11 20:49:11 +00:00
parent 5f11da03e4
commit 5638ea78df
11 changed files with 1511 additions and 129 deletions

View File

@@ -81,6 +81,12 @@ class AIEngine:
total_images = 1 + max_images
return f"Mapping Content for {total_images} Image Prompts"
return f"Mapping Content for Image Prompts"
elif function_name == 'generate_images_from_prompts':
# Extract image count from data
if isinstance(data, dict) and 'images' in data:
total_images = len(data.get('images', []))
return f"Preparing to generate {total_images} image{'s' if total_images != 1 else ''}"
return f"Preparing image generation queue"
return f"Preparing {count} item{'s' if count != 1 else ''}"
def _get_ai_call_message(self, function_name: str, count: int) -> str:
@@ -93,6 +99,8 @@ class AIEngine:
return f"Writing article{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images':
return f"Creating image{'s' if count != 1 else ''} with AI"
elif function_name == 'generate_images_from_prompts':
return f"Generating images with AI"
return f"Processing with AI"
def _get_parse_message(self, function_name: str) -> str:
@@ -123,6 +131,8 @@ class AIEngine:
if in_article_count > 0:
return f"Writing {in_article_count} Inarticle Image Prompts"
return "Writing Inarticle Image Prompts"
elif function_name == 'generate_images_from_prompts':
return f"{count} image{'s' if count != 1 else ''} generated"
return f"{count} item{'s' if count != 1 else ''} processed"
def _get_save_message(self, function_name: str, count: int) -> str:
@@ -138,6 +148,8 @@ class AIEngine:
elif function_name == 'generate_image_prompts':
# Count is total prompts created
return f"Assigning {count} Prompts to Dedicated Slots"
elif function_name == 'generate_images_from_prompts':
return f"Saving {count} image{'s' if count != 1 else ''}"
return f"Saving {count} item{'s' if count != 1 else ''}"
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
@@ -196,131 +208,144 @@ class AIEngine:
prep_message = self._get_prep_message(function_name, data_count, data)
self.console_tracker.prep(prep_message)
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
# For image generation, build_prompt returns placeholder
# Actual processing happens in save_output
if function_name == 'generate_images_from_prompts':
prompt = "Image generation queue prepared"
else:
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
self.step_tracker.add_request_step("PREP", "success", prep_message)
self.tracker.update("PREP", 25, prep_message, meta=self.step_tracker.get_meta())
# Phase 3: AI_CALL - Provider API Call (25-70%)
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
# For image generation, AI calls happen in save_output, so skip this phase
if function_name == 'generate_images_from_prompts':
# Skip AI_CALL phase - processing happens in save_output
raw_response = {'content': 'Image generation queue ready'}
parsed = {'processed': True}
else:
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Generate function_id for tracking (ai-{function_name}-01)
# Normalize underscores to hyphens to match frontend tracking IDs
function_id_base = function_name.replace('_', '-')
function_id = f"ai-{function_id_base}-01-desktop"
# Get model config from settings (Stage 4 requirement)
# Pass account to read model from IntegrationSettings
model_config = get_model_config(function_name, account=self.account)
model = model_config.get('model')
# Read model straight from IntegrationSettings for visibility
model_from_integration = None
if self.account:
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
from igny8_core.modules.system.models import IntegrationSettings
openai_settings = IntegrationSettings.objects.filter(
integration_type='openai',
account=self.account,
is_active=True
).first()
if openai_settings and openai_settings.config:
model_from_integration = openai_settings.config.get('model')
except Exception as integration_error:
logger.warning(
"[AIEngine] Unable to read model from IntegrationSettings: %s",
integration_error,
exc_info=True,
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
)
# Debug logging: Show model configuration (console only, not in step tracker)
logger.info(f"[AIEngine] Model Configuration for {function_name}:")
logger.info(f" - Model from get_model_config: {model}")
logger.info(f" - Full model_config: {model_config}")
self.console_tracker.ai_call(f"Model from settings: {model_from_integration or 'Not set'}")
self.console_tracker.ai_call(f"Model selected for request: {model or 'default'}")
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
self.console_tracker.ai_call(f"Function ID: {function_id}")
# Track AI call start with user-friendly message
ai_call_message = self._get_ai_call_message(function_name, data_count)
self.step_tracker.add_response_step("AI_CALL", "success", ai_call_message)
self.tracker.update("AI_CALL", 50, ai_call_message, meta=self.step_tracker.get_meta())
try:
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
function_id=function_id, # Pass function_id for tracking
tracker=self.console_tracker # Pass console tracker for logging
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
return self._handle_error(error_msg, fn)
if raw_response.get('error'):
error_msg = raw_response.get('error', 'Unknown AI error')
logger.error(f"AI call returned error: {error_msg}")
return self._handle_error(error_msg, fn)
if not raw_response.get('content'):
error_msg = "AI call returned no content"
logger.error(error_msg)
return self._handle_error(error_msg, fn)
# Track cost
self.cost_tracker.record(
function_name=function_name,
cost=raw_response.get('cost', 0),
tokens=raw_response.get('total_tokens', 0),
model=raw_response.get('model')
)
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
# Update AI_CALL step with results
self.step_tracker.response_steps[-1] = {
**self.step_tracker.response_steps[-1],
'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
'duration': raw_response.get('duration')
}
self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
# Phase 4: PARSE - Response Parsing (70-85%)
try:
parse_message = self._get_parse_message(function_name)
self.console_tracker.parse(parse_message)
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
parsed_count = 1
# Update parse message with count for better UX
parse_message = self._get_parse_message_with_count(function_name, parsed_count)
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", parse_message)
self.tracker.update("PARSE", 85, parse_message, meta=self.step_tracker.get_meta())
except Exception as parse_error:
error_msg = f"Failed to parse AI response: {str(parse_error)}"
logger.error(f"AIEngine: {error_msg}", exc_info=True)
logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
return self._handle_error(error_msg, fn)
# Phase 5: SAVE - Database Operations (85-98%)
# Pass step_tracker to save_output so it can add validation steps