Add SEO fields to Tasks model, improve content generation response handling, and enhance progress bar animation

- Added primary_keyword, secondary_keywords, tags, and categories fields to Tasks model
- Updated generate_content function to handle full JSON response with all SEO fields
- Improved progress bar animation: smooth 1% increments every 300ms
- Enhanced step detection for content generation vs clustering vs ideas
- Fixed progress modal to show correct messages for each function type
- Added comprehensive logging to Keywords and Tasks pages for AI functions
- Fixed error handling to show meaningful error messages instead of generic failures
This commit is contained in:
Gitea Deploy
2025-11-09 21:22:34 +00:00
parent 09d22ab0e2
commit 961362e088
17340 changed files with 10636 additions and 2248776 deletions

View File

@@ -6,6 +6,33 @@ Unified framework for all AI functions with consistent lifecycle, progress track
from igny8_core.ai.registry import register_function, get_function, list_functions
from igny8_core.ai.engine import AIEngine
from igny8_core.ai.base import BaseAIFunction
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import (
validate_ids,
validate_keywords_exist,
validate_cluster_limits,
validate_cluster_exists,
validate_tasks_exist,
validate_api_key,
validate_model,
validate_image_size,
)
from igny8_core.ai.constants import (
MODEL_RATES,
IMAGE_MODEL_RATES,
VALID_OPENAI_IMAGE_MODELS,
VALID_SIZES_BY_MODEL,
DEFAULT_AI_MODEL,
JSON_MODE_MODELS,
)
from igny8_core.ai.prompts import PromptRegistry, get_prompt
from igny8_core.ai.settings import (
MODEL_CONFIG,
get_model_config,
get_model,
get_max_tokens,
get_temperature,
)
# Don't auto-import functions here - let apps.py handle it lazily
# This prevents circular import issues during Django startup
@@ -13,8 +40,34 @@ from igny8_core.ai.base import BaseAIFunction
# Explicit public surface of the `igny8_core.ai` package: everything importable
# via `from igny8_core.ai import *` (or referenced by external modules) is listed here.
__all__ = [
    'AIEngine',
    'BaseAIFunction',
    'AICore',
    'register_function',
    'get_function',
    'list_functions',
    # Validators
    'validate_ids',
    'validate_keywords_exist',
    'validate_cluster_limits',
    'validate_cluster_exists',
    'validate_tasks_exist',
    'validate_api_key',
    'validate_model',
    'validate_image_size',
    # Constants
    'MODEL_RATES',
    'IMAGE_MODEL_RATES',
    'VALID_OPENAI_IMAGE_MODELS',
    'VALID_SIZES_BY_MODEL',
    'DEFAULT_AI_MODEL',
    'JSON_MODE_MODELS',
    # Prompts
    'PromptRegistry',
    'get_prompt',
    # Settings
    'MODEL_CONFIG',
    'get_model_config',
    'get_model',
    'get_max_tokens',
    'get_temperature',
]

View File

@@ -0,0 +1,695 @@
"""
AI Core - Centralized execution and logging layer for all AI requests
Handles API calls, model selection, response parsing, and console logging
"""
import logging
import json
import re
import requests
import time
from typing import Dict, Any, Optional, List
from django.conf import settings
from .constants import (
DEFAULT_AI_MODEL,
JSON_MODE_MODELS,
MODEL_RATES,
IMAGE_MODEL_RATES,
VALID_OPENAI_IMAGE_MODELS,
VALID_SIZES_BY_MODEL,
DEBUG_MODE,
)
from .tracker import ConsoleStepTracker
logger = logging.getLogger(__name__)
class AICore:
    """
    Centralized AI operations handler with console logging.

    All AI text generation requests go through run_ai_request() and all image
    generation through generate_image(). Provider failures are never raised to
    callers: every method returns a plain result dict whose 'error' key is
    None on success, so callers branch on 'error' instead of catching
    provider-specific exceptions.
    """

    # Request timeouts in seconds. Text completions get 60s; image
    # generation is slower, so it gets 150s.
    TEXT_TIMEOUT = 60
    IMAGE_TIMEOUT = 150

    def __init__(self, account=None):
        """
        Initialize AICore with account context.

        Args:
            account: Optional account object for API key/model loading
        """
        self.account = account
        self._openai_api_key = None
        self._runware_api_key = None
        self._default_model = None
        self._load_account_settings()

    def _load_account_settings(self):
        """Load API keys and model from IntegrationSettings or Django settings."""
        if self.account:
            try:
                from igny8_core.modules.system.models import IntegrationSettings
                # Load OpenAI settings for this account (active integrations only).
                openai_settings = IntegrationSettings.objects.filter(
                    integration_type='openai',
                    account=self.account,
                    is_active=True
                ).first()
                if openai_settings and openai_settings.config:
                    self._openai_api_key = openai_settings.config.get('apiKey')
                    model = openai_settings.config.get('model')
                    # Only accept models we have pricing for; anything else
                    # falls through to the Django-settings default below.
                    if model and model in MODEL_RATES:
                        self._default_model = model
                # Load Runware settings
                runware_settings = IntegrationSettings.objects.filter(
                    integration_type='runware',
                    account=self.account,
                    is_active=True
                ).first()
                if runware_settings and runware_settings.config:
                    self._runware_api_key = runware_settings.config.get('apiKey')
            except Exception as e:
                # Best-effort: a broken IntegrationSettings row must not block
                # the fallback to Django settings.
                logger.warning(f"Could not load account settings: {e}", exc_info=True)
        # Fallback to Django settings for anything the account did not provide.
        if not self._openai_api_key:
            self._openai_api_key = getattr(settings, 'OPENAI_API_KEY', None)
        if not self._runware_api_key:
            self._runware_api_key = getattr(settings, 'RUNWARE_API_KEY', None)
        if not self._default_model:
            self._default_model = getattr(settings, 'DEFAULT_AI_MODEL', DEFAULT_AI_MODEL)

    def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
        """Get API key for integration type ('openai' or 'runware')."""
        if integration_type == 'openai':
            return self._openai_api_key
        elif integration_type == 'runware':
            return self._runware_api_key
        return None

    def get_model(self, integration_type: str = 'openai') -> str:
        """Get model for integration type."""
        if integration_type == 'openai':
            return self._default_model
        return DEFAULT_AI_MODEL

    def _text_error(self, error_msg: str, model: Optional[str],
                    api_id: Optional[str] = None) -> Dict[str, Any]:
        """Build the standard zero-usage error payload for a failed text request."""
        return {
            'content': None,
            'error': error_msg,
            'input_tokens': 0,
            'output_tokens': 0,
            'total_tokens': 0,
            'model': model,
            'cost': 0.0,
            'api_id': api_id,
        }

    def _image_error(self, provider: str, error_msg: str,
                     include_revised_prompt: bool = True) -> Dict[str, Any]:
        """
        Build the standard error payload for a failed image request.

        OpenAI results carry a 'revised_prompt' key; Runware results do not,
        so the key is optional to keep both shapes unchanged.
        """
        result = {
            'url': None,
            'provider': provider,
            'cost': 0.0,
            'error': error_msg,
        }
        if include_revised_prompt:
            result['revised_prompt'] = None
        return result

    def run_ai_request(
        self,
        prompt: str,
        model: Optional[str] = None,
        max_tokens: int = 4000,
        temperature: float = 0.7,
        response_format: Optional[Dict] = None,
        api_key: Optional[str] = None,
        function_name: str = 'ai_request',
        tracker: Optional[ConsoleStepTracker] = None
    ) -> Dict[str, Any]:
        """
        Centralized AI request handler with console logging.
        All AI text generation requests go through this method.

        Args:
            prompt: Prompt text
            model: Model name (defaults to account's default)
            max_tokens: Maximum tokens
            temperature: Temperature (0-1)
            response_format: Optional response format dict (for JSON mode)
            api_key: Optional API key override
            function_name: Function name for logging (e.g., 'cluster_keywords')
            tracker: Optional ConsoleStepTracker instance for logging

        Returns:
            Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
            'model', 'cost', 'error', 'api_id'. On failure 'content' is None and
            'error' describes the problem; no exception escapes this method.
        """
        # Use provided tracker or create a new one
        if tracker is None:
            tracker = ConsoleStepTracker(function_name)
        tracker.ai_call("Preparing request...")

        # Step 1: Validate API key
        api_key = api_key or self._openai_api_key
        if not api_key:
            error_msg = 'OpenAI API key not configured'
            tracker.error('ConfigurationError', error_msg)
            return self._text_error(error_msg, model or self._default_model)

        # Step 2: Determine model
        active_model = model or self._default_model
        tracker.ai_call(f"Using model: {active_model}")

        # Step 3: Auto-enable JSON mode for supported models
        if response_format is None and active_model in JSON_MODE_MODELS:
            response_format = {'type': 'json_object'}
            tracker.ai_call(f"Auto-enabled JSON mode for {active_model}")
        elif response_format:
            tracker.ai_call(f"Using custom response format: {response_format}")
        else:
            tracker.ai_call("Using text response format")

        # Step 4: Log prompt length (useful when debugging token-limit errors)
        tracker.ai_call(f"Prompt length: {len(prompt)} characters")

        # Step 5: Build request payload
        url = 'https://api.openai.com/v1/chat/completions'
        headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json',
        }
        body_data = {
            'model': active_model,
            'messages': [{'role': 'user', 'content': prompt}],
            'temperature': temperature,
        }
        if max_tokens:
            body_data['max_tokens'] = max_tokens
        if response_format:
            body_data['response_format'] = response_format
        tracker.ai_call(f"Request payload prepared (model={active_model}, max_tokens={max_tokens}, temp={temperature})")

        # Step 6: Send request
        tracker.ai_call("Sending request to OpenAI API...")
        request_start = time.time()
        try:
            response = requests.post(url, headers=headers, json=body_data, timeout=self.TEXT_TIMEOUT)
            request_duration = time.time() - request_start
            tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")

            # Step 7: Validate HTTP response
            if response.status_code != 200:
                error_data = response.json() if response.headers.get('content-type', '').startswith('application/json') else {}
                error_message = f"HTTP {response.status_code} error"
                if isinstance(error_data, dict) and 'error' in error_data:
                    if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                        error_message += f": {error_data['error']['message']}"
                # 429 gets special tracker treatment so callers can surface the
                # retry-after hint; everything else is a plain HTTP error.
                if response.status_code == 429:
                    retry_after = response.headers.get('retry-after', '60')
                    tracker.rate_limit(retry_after)
                    error_message += f" (Rate limit - retry after {retry_after}s)"
                else:
                    tracker.error('HTTPError', error_message)
                logger.error(f"OpenAI API HTTP error {response.status_code}: {error_message}")
                return self._text_error(error_message, active_model)

            # Step 8: Parse response JSON
            try:
                data = response.json()
            except json.JSONDecodeError as e:
                error_msg = f'Failed to parse JSON response: {str(e)}'
                tracker.malformed_json(str(e))
                logger.error(error_msg)
                return self._text_error(error_msg, active_model)

            api_id = data.get('id')

            # Step 9: Extract content
            if 'choices' in data and len(data['choices']) > 0:
                content = data['choices'][0]['message']['content']
                usage = data.get('usage', {})
                input_tokens = usage.get('prompt_tokens', 0)
                output_tokens = usage.get('completion_tokens', 0)
                total_tokens = usage.get('total_tokens', 0)
                tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
                tracker.parse(f"Content length: {len(content)} characters")

                # Step 10: Calculate cost (single source of truth in calculate_cost)
                cost = self.calculate_cost(active_model, input_tokens, output_tokens)
                tracker.parse(f"Cost calculated: ${cost:.6f}")
                tracker.done("Request completed successfully")
                return {
                    'content': content,
                    'input_tokens': input_tokens,
                    'output_tokens': output_tokens,
                    'total_tokens': total_tokens,
                    'model': active_model,
                    'cost': cost,
                    'error': None,
                    'api_id': api_id,
                }
            else:
                error_msg = 'No content in OpenAI response'
                tracker.error('EmptyResponse', error_msg)
                logger.error(error_msg)
                return self._text_error(error_msg, active_model, api_id=api_id)

        except requests.exceptions.Timeout:
            error_msg = f'Request timeout ({self.TEXT_TIMEOUT}s exceeded)'
            tracker.timeout(self.TEXT_TIMEOUT)
            logger.error(error_msg)
            return self._text_error(error_msg, active_model)
        except requests.exceptions.RequestException as e:
            error_msg = f'Request exception: {str(e)}'
            tracker.error('RequestException', error_msg, e)
            logger.error(f"OpenAI API error: {error_msg}", exc_info=True)
            return self._text_error(error_msg, active_model)
        except Exception as e:
            # Fix: route through the tracker like every other failure path
            # (previously this branch bypassed the tracker with a bare print).
            error_msg = f'Unexpected error: {str(e)}'
            tracker.error('UnexpectedError', error_msg, e)
            logger.error(error_msg, exc_info=True)
            return self._text_error(error_msg, active_model)

    def extract_json(self, response_text: str) -> Optional[Dict]:
        """
        Extract JSON from response text.
        Handles markdown code blocks, multiline JSON, etc.

        Args:
            response_text: Raw response text from AI

        Returns:
            Parsed JSON dict/list or None when nothing parseable is found
        """
        if not response_text or not response_text.strip():
            return None
        # Try direct JSON parse first
        try:
            return json.loads(response_text.strip())
        except json.JSONDecodeError:
            pass
        # Try to extract JSON from markdown code blocks (```json ... ```)
        json_block_pattern = r'```(?:json)?\s*(\{.*?\}|\[.*?\])\s*```'
        matches = re.findall(json_block_pattern, response_text, re.DOTALL)
        if matches:
            try:
                return json.loads(matches[0])
            except json.JSONDecodeError:
                pass
        # Last resort: greedily grab anything brace/bracket-delimited in the text
        json_pattern = r'(\{.*\}|\[.*\])'
        matches = re.findall(json_pattern, response_text, re.DOTALL)
        for match in matches:
            try:
                return json.loads(match)
            except json.JSONDecodeError:
                continue
        return None

    def generate_image(
        self,
        prompt: str,
        provider: str = 'openai',
        model: Optional[str] = None,
        size: str = '1024x1024',
        n: int = 1,
        api_key: Optional[str] = None,
        negative_prompt: Optional[str] = None,
        function_name: str = 'generate_image'
    ) -> Dict[str, Any]:
        """
        Generate image using AI with console logging.

        Args:
            prompt: Image prompt
            provider: 'openai' or 'runware'
            model: Model name
            size: Image size (WIDTHxHEIGHT)
            n: Number of images
            api_key: Optional API key override
            negative_prompt: Optional negative prompt (Runware only)
            function_name: Function name for logging

        Returns:
            Dict with 'url', 'cost', 'error' (plus 'revised_prompt' for OpenAI)
        """
        print(f"[AI][{function_name}] Step 1: Preparing image generation request...")
        if provider == 'openai':
            return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name)
        elif provider == 'runware':
            return self._generate_image_runware(prompt, model, size, n, api_key, negative_prompt, function_name)
        else:
            error_msg = f'Unknown provider: {provider}'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error(provider, error_msg)

    def _generate_image_openai(
        self,
        prompt: str,
        model: Optional[str],
        size: str,
        n: int,
        api_key: Optional[str],
        negative_prompt: Optional[str],
        function_name: str
    ) -> Dict[str, Any]:
        """Generate image using OpenAI DALL-E. Returns the image result dict."""
        print(f"[AI][{function_name}] Provider: OpenAI")
        api_key = api_key or self._openai_api_key
        if not api_key:
            error_msg = 'OpenAI API key not configured'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('openai', error_msg)

        model = model or 'dall-e-3'
        print(f"[AI][{function_name}] Step 2: Using model: {model}, size: {size}")
        # Validate model against the set the /v1/images/generations endpoint accepts
        if model not in VALID_OPENAI_IMAGE_MODELS:
            error_msg = f"Model '{model}' is not valid for OpenAI image generation. Only {', '.join(VALID_OPENAI_IMAGE_MODELS)} are supported."
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('openai', error_msg)
        # Validate size (each model supports a different fixed set of sizes)
        valid_sizes = VALID_SIZES_BY_MODEL.get(model, [])
        if size not in valid_sizes:
            error_msg = f"Image size '{size}' is not valid for model '{model}'. Valid sizes: {', '.join(valid_sizes)}"
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('openai', error_msg)

        url = 'https://api.openai.com/v1/images/generations'
        print(f"[AI][{function_name}] Step 3: Sending request to OpenAI Images API...")
        headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json',
        }
        data = {
            'model': model,
            'prompt': prompt,
            'n': n,
            'size': size
        }
        if negative_prompt:
            # Note: OpenAI DALL-E doesn't support negative_prompt in API, but we log it
            print(f"[AI][{function_name}] Note: Negative prompt provided but OpenAI DALL-E doesn't support it")

        request_start = time.time()
        try:
            response = requests.post(url, headers=headers, json=data, timeout=self.IMAGE_TIMEOUT)
            request_duration = time.time() - request_start
            print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
            if response.status_code != 200:
                error_data = response.json() if response.headers.get('content-type', '').startswith('application/json') else {}
                error_message = f"HTTP {response.status_code} error"
                if isinstance(error_data, dict) and 'error' in error_data:
                    if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                        error_message += f": {error_data['error']['message']}"
                print(f"[AI][{function_name}][Error] {error_message}")
                return self._image_error('openai', error_message)

            body = response.json()
            if 'data' in body and len(body['data']) > 0:
                image_data = body['data'][0]
                image_url = image_data.get('url')
                revised_prompt = image_data.get('revised_prompt')
                # Per-image flat rate times image count
                cost = IMAGE_MODEL_RATES.get(model, 0.040) * n
                print(f"[AI][{function_name}] Step 5: Image generated successfully")
                print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
                print(f"[AI][{function_name}][Success] Image generation completed")
                return {
                    'url': image_url,
                    'revised_prompt': revised_prompt,
                    'provider': 'openai',
                    'cost': cost,
                    'error': None,
                }
            else:
                error_msg = 'No image data in response'
                print(f"[AI][{function_name}][Error] {error_msg}")
                return self._image_error('openai', error_msg)
        except requests.exceptions.Timeout:
            error_msg = f'Request timeout ({self.IMAGE_TIMEOUT}s exceeded)'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('openai', error_msg)
        except Exception as e:
            error_msg = f'Unexpected error: {str(e)}'
            print(f"[AI][{function_name}][Error] {error_msg}")
            logger.error(error_msg, exc_info=True)
            return self._image_error('openai', error_msg)

    def _generate_image_runware(
        self,
        prompt: str,
        model: Optional[str],
        size: str,
        n: int,
        api_key: Optional[str],
        negative_prompt: Optional[str],
        function_name: str
    ) -> Dict[str, Any]:
        """Generate image using Runware. Returns the image result dict (no 'revised_prompt')."""
        print(f"[AI][{function_name}] Provider: Runware")
        api_key = api_key or self._runware_api_key
        if not api_key:
            error_msg = 'Runware API key not configured'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('runware', error_msg, include_revised_prompt=False)

        runware_model = model or 'runware:97@1'
        print(f"[AI][{function_name}] Step 2: Using model: {runware_model}, size: {size}")
        # Parse "WIDTHxHEIGHT" into integers for the Runware payload
        try:
            width, height = map(int, size.split('x'))
        except ValueError:
            error_msg = f"Invalid size format: {size}. Expected format: WIDTHxHEIGHT"
            print(f"[AI][{function_name}][Error] {error_msg}")
            return self._image_error('runware', error_msg, include_revised_prompt=False)

        url = 'https://api.runware.ai/v1'
        print(f"[AI][{function_name}] Step 3: Sending request to Runware API...")
        # Runware uses array payload
        payload = [{
            'taskType': 'imageInference',
            'model': runware_model,
            'prompt': prompt,
            'width': width,
            'height': height,
            'apiKey': api_key
        }]
        if negative_prompt:
            payload[0]['negativePrompt'] = negative_prompt

        request_start = time.time()
        try:
            response = requests.post(url, json=payload, timeout=self.IMAGE_TIMEOUT)
            request_duration = time.time() - request_start
            print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
            if response.status_code != 200:
                error_msg = f"HTTP {response.status_code} error"
                print(f"[AI][{function_name}][Error] {error_msg}")
                return self._image_error('runware', error_msg, include_revised_prompt=False)

            body = response.json()
            # Runware returns array with image data
            if isinstance(body, list) and len(body) > 0:
                image_data = body[0]
                image_url = image_data.get('imageURL') or image_data.get('url')
                cost = 0.036 * n  # Runware pricing
                print(f"[AI][{function_name}] Step 5: Image generated successfully")
                print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
                print(f"[AI][{function_name}][Success] Image generation completed")
                return {
                    'url': image_url,
                    'provider': 'runware',
                    'cost': cost,
                    'error': None,
                }
            else:
                error_msg = 'No image data in Runware response'
                print(f"[AI][{function_name}][Error] {error_msg}")
                return self._image_error('runware', error_msg, include_revised_prompt=False)
        except Exception as e:
            error_msg = f'Unexpected error: {str(e)}'
            print(f"[AI][{function_name}][Error] {error_msg}")
            logger.error(error_msg, exc_info=True)
            return self._image_error('runware', error_msg, include_revised_prompt=False)

    def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
        """
        Calculate cost in USD for an API call.

        Text models are priced per 1M input/output tokens; image models are a
        flat per-image rate. Unknown models fall back to default rates.
        """
        if model_type == 'text':
            rates = MODEL_RATES.get(model, {'input': 2.00, 'output': 8.00})
            input_cost = (input_tokens / 1_000_000) * rates['input']
            output_cost = (output_tokens / 1_000_000) * rates['output']
            return input_cost + output_cost
        elif model_type == 'image':
            return IMAGE_MODEL_RATES.get(model, 0.040)
        return 0.0

    # Legacy method names for backward compatibility
    def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 4000,
                    temperature: float = 0.7, response_format: Optional[Dict] = None,
                    api_key: Optional[str] = None) -> Dict[str, Any]:
        """Legacy method - redirects to run_ai_request()"""
        return self.run_ai_request(
            prompt=prompt,
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            response_format=response_format,
            api_key=api_key,
            function_name='call_openai'
        )

View File

@@ -0,0 +1,41 @@
"""
AI Constants - Model pricing, valid models, and configuration constants
"""
# Model pricing (per 1M tokens) - EXACT from reference plugin model-rates-config.php
MODEL_RATES = {
'gpt-4.1': {'input': 2.00, 'output': 8.00},
'gpt-4o-mini': {'input': 0.15, 'output': 0.60},
'gpt-4o': {'input': 2.50, 'output': 10.00},
}
# Image model pricing (per image) - EXACT from reference plugin
IMAGE_MODEL_RATES = {
'dall-e-3': 0.040,
'dall-e-2': 0.020,
'gpt-image-1': 0.042,
'gpt-image-1-mini': 0.011,
}
# Valid OpenAI image generation models (only these work with /v1/images/generations endpoint)
VALID_OPENAI_IMAGE_MODELS = {
'dall-e-3',
'dall-e-2',
# Note: gpt-image-1 and gpt-image-1-mini are NOT valid for OpenAI's /v1/images/generations endpoint
}
# Valid image sizes per model (from OpenAI official documentation)
VALID_SIZES_BY_MODEL = {
'dall-e-3': ['1024x1024', '1024x1792', '1792x1024'],
'dall-e-2': ['256x256', '512x512', '1024x1024'],
}
# Default model
DEFAULT_AI_MODEL = 'gpt-4.1'
# JSON mode supported models
JSON_MODE_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
# Debug mode - controls console logging
# Set to False in production to disable verbose logging
DEBUG_MODE = True

View File

@@ -4,8 +4,9 @@ AI Engine - Central orchestrator for all AI functions
import logging
from typing import Dict, Any, Optional
from igny8_core.ai.base import BaseAIFunction
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker
from igny8_core.ai.processor import AIProcessor
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker, ConsoleStepTracker
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.settings import get_model_config
logger = logging.getLogger(__name__)
@@ -20,7 +21,8 @@ class AIEngine:
self.task = celery_task
self.account = account
self.tracker = ProgressTracker(celery_task)
self.step_tracker = StepTracker('ai_engine')
self.step_tracker = StepTracker('ai_engine') # For Celery progress callbacks
self.console_tracker = None # Will be initialized per function
self.cost_tracker = CostTracker()
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
@@ -38,46 +40,69 @@ class AIEngine:
function_name = fn.get_name()
self.step_tracker.function_name = function_name
# Initialize console tracker for logging (Stage 3 requirement)
self.console_tracker = ConsoleStepTracker(function_name)
self.console_tracker.init(f"Starting {function_name} execution")
try:
# Phase 1: INIT - Validation & Setup (0-10%)
self.console_tracker.prep("Validating input payload")
validated = fn.validate(payload, self.account)
if not validated['valid']:
self.console_tracker.error('ValidationError', validated['error'])
return self._handle_error(validated['error'], fn)
self.console_tracker.prep("Validation complete")
self.step_tracker.add_request_step("INIT", "success", "Validation complete")
self.tracker.update("INIT", 10, "Validation complete", meta=self.step_tracker.get_meta())
# Phase 2: PREP - Data Loading & Prompt Building (10-25%)
self.console_tracker.prep("Loading data from database")
data = fn.prepare(payload, self.account)
if isinstance(data, (list, tuple)):
data_count = len(data)
elif isinstance(data, dict):
data_count = len(data.get('keywords', [])) if 'keywords' in data else data.get('count', 1)
# Check for cluster_data (for generate_ideas) or keywords (for auto_cluster)
if 'cluster_data' in data:
data_count = len(data['cluster_data'])
elif 'keywords' in data:
data_count = len(data['keywords'])
else:
data_count = data.get('count', 1)
else:
data_count = 1
self.console_tracker.prep(f"Building prompt from {data_count} items")
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
self.step_tracker.add_request_step("PREP", "success", f"Loaded {data_count} items, built prompt ({len(prompt)} chars)")
self.tracker.update("PREP", 25, f"Data prepared: {data_count} items", meta=self.step_tracker.get_meta())
# Phase 3: AI_CALL - Provider API Call (25-70%)
processor = AIProcessor(account=self.account)
model = fn.get_model(self.account)
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Get model config from settings (Stage 4 requirement)
model_config = get_model_config(function_name)
model = model_config.get('model')
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
# Track AI call start
self.step_tracker.add_response_step("AI_CALL", "success", f"Calling {model or 'default'} model...")
self.tracker.update("AI_CALL", 30, f"Sending to {model or 'default'}...", meta=self.step_tracker.get_meta())
try:
raw_response = processor.call(
prompt,
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
# Don't pass response_steps - the processor ignores it anyway
# Step tracking is handled by the engine
progress_callback=lambda state, meta: self.tracker.update_ai_progress(state, {
**meta,
**self.step_tracker.get_meta()
})
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
function_name=function_name,
tracker=self.console_tracker # Pass console tracker for logging
)
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
@@ -112,16 +137,22 @@ class AIEngine:
# Phase 4: PARSE - Response Parsing (70-85%)
try:
self.console_tracker.parse("Parsing AI response")
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
if isinstance(parsed, (list, tuple)):
parsed_count = len(parsed)
elif isinstance(parsed, dict):
parsed_count = parsed.get('count', 1)
# Check if it's a content dict (has 'content' field) or a result dict (has 'count')
if 'content' in parsed:
parsed_count = 1 # Single content item
else:
parsed_count = parsed.get('count', 1)
else:
parsed_count = 1
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", f"Parsed {parsed_count} items from AI response")
self.tracker.update("PARSE", 85, f"Parsed {parsed_count} items", meta=self.step_tracker.get_meta())
except Exception as parse_error:
@@ -131,12 +162,27 @@ class AIEngine:
return self._handle_error(error_msg, fn)
# Phase 5: SAVE - Database Operations (85-98%)
self.console_tracker.save("Saving results to database")
# Pass step_tracker to save_output so it can add validation steps
save_result = fn.save_output(parsed, data, self.account, self.tracker, step_tracker=self.step_tracker)
clusters_created = save_result.get('clusters_created', 0)
keywords_updated = save_result.get('keywords_updated', 0)
self.step_tracker.add_request_step("SAVE", "success", f"Created {clusters_created} clusters, updated {keywords_updated} keywords")
self.tracker.update("SAVE", 98, f"Saved: {clusters_created} clusters, {keywords_updated} keywords", meta=self.step_tracker.get_meta())
count = save_result.get('count', 0)
# Build success message based on function type
if clusters_created:
save_msg = f"Created {clusters_created} clusters, updated {keywords_updated} keywords"
elif count:
save_msg = f"Saved {count} items"
else:
save_msg = "Results saved successfully"
self.console_tracker.save(save_msg)
self.step_tracker.add_request_step("SAVE", "success", save_msg)
self.tracker.update("SAVE", 98, save_msg, meta=self.step_tracker.get_meta())
# Store save_msg for use in DONE phase
final_save_msg = save_msg
# Track credit usage after successful save
if self.account and raw_response:
@@ -171,6 +217,8 @@ class AIEngine:
logger.warning(f"Failed to log credit usage: {e}", exc_info=True)
# Phase 6: DONE - Finalization (98-100%)
success_msg = f"Task completed: {final_save_msg}" if 'final_save_msg' in locals() else "Task completed successfully"
self.console_tracker.done(success_msg)
self.step_tracker.add_request_step("DONE", "success", "Task completed successfully")
self.tracker.update("DONE", 100, "Task complete!", meta=self.step_tracker.get_meta())
@@ -193,6 +241,12 @@ class AIEngine:
def _handle_error(self, error: str, fn: BaseAIFunction = None, exc_info=False):
"""Centralized error handling"""
function_name = fn.get_name() if fn else 'unknown'
# Log to console tracker if available (Stage 3 requirement)
if self.console_tracker:
error_type = type(error).__name__ if isinstance(error, Exception) else 'Error'
self.console_tracker.error(error_type, str(error), exception=error if isinstance(error, Exception) else None)
self.step_tracker.add_request_step("Error", "error", error, error=error)
error_meta = {

View File

@@ -1,4 +1,17 @@
"""
AI Function implementations
"""
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction, generate_ideas_core
from igny8_core.ai.functions.generate_content import GenerateContentFunction, generate_content_core
from igny8_core.ai.functions.generate_images import GenerateImagesFunction, generate_images_core
# Explicit public surface of the `igny8_core.ai.functions` package:
# the function classes plus their core entry points.
__all__ = [
    'AutoClusterFunction',
    'GenerateIdeasFunction',
    'generate_ideas_core',
    'GenerateContentFunction',
    'generate_content_core',
    'GenerateImagesFunction',
    'generate_images_core',
]

View File

@@ -6,7 +6,9 @@ from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.planner.models import Keywords, Clusters
from igny8_core.modules.system.utils import get_prompt_value
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.prompts import PromptRegistry
from igny8_core.ai.settings import get_model_config
logger = logging.getLogger(__name__)
@@ -36,49 +38,23 @@ class AutoClusterFunction(BaseAIFunction):
def validate(self, payload: dict, account=None) -> Dict:
"""Custom validation for clustering with plan limit checks"""
result = super().validate(payload, account)
from igny8_core.ai.validators import validate_ids, validate_keywords_exist, validate_cluster_limits
# Base validation
result = validate_ids(payload, max_items=self.get_max_items())
if not result['valid']:
return result
# Additional validation: check keywords exist
# Check keywords exist
ids = payload.get('ids', [])
queryset = Keywords.objects.filter(id__in=ids)
if account:
queryset = queryset.filter(account=account)
keywords_result = validate_keywords_exist(ids, account)
if not keywords_result['valid']:
return keywords_result
if queryset.count() == 0:
return {'valid': False, 'error': 'No keywords found'}
# Plan limit validation
if account:
plan = getattr(account, 'plan', None)
if plan:
from django.utils import timezone
from igny8_core.modules.planner.models import Clusters
# Check daily cluster limit
now = timezone.now()
start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
clusters_today = Clusters.objects.filter(
account=account,
created_at__gte=start_of_day
).count()
if plan.daily_cluster_limit and clusters_today >= plan.daily_cluster_limit:
return {
'valid': False,
'error': f'Daily cluster limit reached ({plan.daily_cluster_limit} clusters per day). Please try again tomorrow.'
}
# Check max clusters limit
total_clusters = Clusters.objects.filter(account=account).count()
if plan.max_clusters and total_clusters >= plan.max_clusters:
return {
'valid': False,
'error': f'Maximum cluster limit reached ({plan.max_clusters} clusters). Please upgrade your plan or delete existing clusters.'
}
else:
return {'valid': False, 'error': 'Account does not have an active plan'}
# Check plan limits
limit_result = validate_cluster_limits(account, operation_type='cluster')
if not limit_result['valid']:
return limit_result
return {'valid': True}
@@ -115,20 +91,18 @@ class AutoClusterFunction(BaseAIFunction):
}
def build_prompt(self, data: Dict, account=None) -> str:
"""Build clustering prompt"""
"""Build clustering prompt using registry"""
keyword_data = data['keyword_data']
sector_id = data.get('sector_id')
# Get prompt template
prompt_template = get_prompt_value(account, 'clustering')
# Format keywords
keywords_text = '\n'.join([
f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']}, Intent: {kw['intent']})"
for kw in keyword_data
])
prompt = prompt_template.replace('[IGNY8_KEYWORDS]', keywords_text)
# Build context
context = {'KEYWORDS': keywords_text}
# Add sector context if available
if sector_id:
@@ -136,14 +110,26 @@ class AutoClusterFunction(BaseAIFunction):
from igny8_core.auth.models import Sector
sector = Sector.objects.get(id=sector_id)
if sector:
prompt += f"\n\nNote: These keywords are for the '{sector.name}' sector."
context['SECTOR'] = sector.name
except Exception:
pass
# Get prompt from registry
prompt = PromptRegistry.get_prompt(
function_name='auto_cluster',
account=account,
context=context
)
# Verify placeholder replacement
if '[IGNY8_KEYWORDS]' in prompt:
logger.error(f"[IGNY8_KEYWORDS] placeholder NOT replaced! Prompt length: {len(prompt)}")
else:
logger.info(f"Prompt placeholder replaced successfully. Prompt length: {len(prompt)}, Keywords text length: {len(keywords_text)}")
# IMPORTANT: When using JSON mode, OpenAI requires explicit JSON instruction
# The prompt template already includes "Format the output as a JSON object"
# but we need to ensure it's explicit for JSON mode compliance
# Check if prompt already explicitly requests JSON (case-insensitive)
prompt_lower = prompt.lower()
has_json_request = (
'json' in prompt_lower and
@@ -158,7 +144,7 @@ class AutoClusterFunction(BaseAIFunction):
def parse_response(self, response: str, step_tracker=None) -> List[Dict]:
"""Parse AI response into cluster data"""
import json
from igny8_core.ai.processor import AIProcessor
from igny8_core.ai.ai_core import AICore
if not response or not response.strip():
error_msg = "Empty response from AI"
@@ -172,8 +158,8 @@ class AutoClusterFunction(BaseAIFunction):
except json.JSONDecodeError as e:
logger.warning(f"parse_response: Direct JSON parse failed: {e}, trying extract_json method")
# Fall back to extract_json method which handles markdown code blocks
processor = AIProcessor()
json_data = processor.extract_json(response)
ai_core = AICore(account=getattr(self, 'account', None))
json_data = ai_core.extract_json(response)
if not json_data:
error_msg = f"Failed to parse clustering response. Response: {response[:200]}..."

View File

@@ -0,0 +1,337 @@
"""
Generate Content AI Function
Extracted from modules/writer/tasks.py
"""
import logging
import re
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.writer.models import Tasks
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import validate_tasks_exist
from igny8_core.ai.prompts import PromptRegistry
from igny8_core.ai.settings import get_model_config
logger = logging.getLogger(__name__)
class GenerateContentFunction(BaseAIFunction):
    """Generate article content for writer Tasks using AI.

    Framework lifecycle: validate -> prepare -> build_prompt ->
    parse_response -> save_output. Prompts are resolved through
    PromptRegistry; the AI request itself is issued by the engine (or by
    ``generate_content_core`` below for the legacy entry point).
    """

    def get_name(self) -> str:
        # Registry key under which this function is registered.
        return 'generate_content'

    def get_metadata(self) -> Dict:
        """Return display metadata plus per-phase progress messages."""
        return {
            'display_name': 'Generate Content',
            'description': 'Generate article content from task ideas',
            'phases': {
                'INIT': 'Initializing content generation...',
                'PREP': 'Loading tasks and building prompts...',
                'AI_CALL': 'Generating content with AI...',
                'PARSE': 'Processing content...',
                'SAVE': 'Saving content...',
                'DONE': 'Content generated!'
            }
        }

    def get_max_items(self) -> int:
        return 50  # Max tasks per batch

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate task IDs.

        Returns:
            Dict with 'valid' (bool) and, on failure, an 'error' message.
        """
        result = super().validate(payload, account)
        if not result['valid']:
            return result
        # Check tasks exist (scoped to the account when one is given)
        task_ids = payload.get('ids', [])
        if task_ids:
            task_result = validate_tasks_exist(task_ids, account)
            if not task_result['valid']:
                return task_result
        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> List:
        """Load tasks with all relationships.

        Raises:
            ValueError: if no matching tasks exist.
        """
        task_ids = payload.get('ids', [])
        queryset = Tasks.objects.filter(id__in=task_ids)
        if account:
            queryset = queryset.filter(account=account)
        # Preload all relationships to avoid N+1 queries
        tasks = list(queryset.select_related(
            'account', 'site', 'sector', 'cluster', 'idea'
        ))
        if not tasks:
            raise ValueError("No tasks found")
        return tasks

    def build_prompt(self, data: Any, account=None) -> str:
        """Build content generation prompt for a single task using registry.

        Accepts either a single task or a list (only the first element is
        used — callers invoke this once per task).
        """
        if isinstance(data, list):
            # For now, handle single task (will be called per task)
            if not data:
                raise ValueError("No tasks provided")
            task = data[0]
        else:
            task = data
        account = account or task.account
        # Build idea data string
        idea_data = f"Title: {task.title or 'Untitled'}\n"
        if task.description:
            idea_data += f"Description: {task.description}\n"
        # Handle idea description (might be JSON or plain text)
        if task.idea and task.idea.description:
            description = task.idea.description
            try:
                import json
                parsed_desc = json.loads(description)
                if isinstance(parsed_desc, dict):
                    # JSON outline: flatten H2/H3 structure into markdown-ish text
                    formatted_desc = "Content Outline:\n\n"
                    if 'H2' in parsed_desc:
                        for h2_section in parsed_desc['H2']:
                            formatted_desc += f"## {h2_section.get('heading', '')}\n"
                            if 'subsections' in h2_section:
                                for h3_section in h2_section['subsections']:
                                    formatted_desc += f"### {h3_section.get('subheading', '')}\n"
                                    formatted_desc += f"Content Type: {h3_section.get('content_type', '')}\n"
                                    formatted_desc += f"Details: {h3_section.get('details', '')}\n\n"
                    description = formatted_desc
            except (json.JSONDecodeError, TypeError):
                pass  # Use as plain text
            idea_data += f"Outline: {description}\n"
        if task.idea:
            # Task-level values act as fallbacks for missing idea fields
            idea_data += f"Structure: {task.idea.content_structure or task.content_structure or 'blog_post'}\n"
            idea_data += f"Type: {task.idea.content_type or task.content_type or 'blog_post'}\n"
            if task.idea.estimated_word_count:
                idea_data += f"Estimated Word Count: {task.idea.estimated_word_count}\n"
        # Build cluster data string
        cluster_data = ''
        if task.cluster:
            cluster_data = f"Cluster Name: {task.cluster.name or ''}\n"
            if task.cluster.description:
                cluster_data += f"Description: {task.cluster.description}\n"
            cluster_data += f"Status: {task.cluster.status or 'active'}\n"
        # Build keywords string (task keywords win over idea target keywords)
        keywords_data = task.keywords or ''
        if not keywords_data and task.idea:
            keywords_data = task.idea.target_keywords or ''
        # Get prompt from registry with context
        prompt = PromptRegistry.get_prompt(
            function_name='generate_content',
            account=account,
            task=task,
            context={
                'IDEA': idea_data,
                'CLUSTER': cluster_data,
                'KEYWORDS': keywords_data,
            }
        )
        return prompt

    def parse_response(self, response: str, step_tracker=None) -> Dict:
        """Parse content response - can be JSON or plain text.

        Returns:
            Dict: either the full structured JSON object from the model, or
            ``{'content': <text>}`` when the response is plain text.
        """
        import json
        # Try to parse as JSON first
        try:
            parsed_json = json.loads(response.strip())
            if isinstance(parsed_json, dict):
                # It's a JSON object with structured data
                return parsed_json
            # Non-dict JSON (e.g. a bare list/string) falls through and is
            # treated as plain content below.
        except (json.JSONDecodeError, ValueError):
            pass
        # If not JSON, treat as plain content and normalize
        try:
            from igny8_core.utils.content_normalizer import normalize_content
            normalized = normalize_content(response)
            content_text = normalized['normalized_content']
            # Return as dict with content field for consistency
            return {'content': content_text}
        except Exception as e:
            logger.warning(f"Content normalization failed: {e}, using original")
            return {'content': response}

    def save_output(
        self,
        parsed: Any,
        original_data: Any,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Save content to task - handles both JSON and plain text responses.

        A dict ``parsed`` is expected to carry the structured fields
        (content, title, meta_title, meta_description, word_count,
        primary_keyword, secondary_keywords, tags, categories); anything
        else is coerced to a plain-text content string.
        """
        if isinstance(original_data, list):
            task = original_data[0] if original_data else None
        else:
            task = original_data
        if not task:
            raise ValueError("No task provided for saving")
        # Handle parsed response - can be dict (JSON) or string (plain text)
        if isinstance(parsed, dict):
            # JSON response with structured fields
            content = parsed.get('content', '')
            title = parsed.get('title', task.title)
            meta_title = parsed.get('meta_title', title or task.title)
            meta_description = parsed.get('meta_description', '')
            word_count = parsed.get('word_count', 0)
            primary_keyword = parsed.get('primary_keyword', '')
            secondary_keywords = parsed.get('secondary_keywords', [])
            tags = parsed.get('tags', [])
            categories = parsed.get('categories', [])
        else:
            # Plain text response (legacy)
            content = str(parsed)
            title = task.title
            meta_title = task.title
            meta_description = (task.description or '')[:160] if task.description else ''
            word_count = 0
            primary_keyword = ''
            secondary_keywords = []
            tags = []
            categories = []
        # Calculate word count if not provided
        if not word_count and content:
            # Strip HTML tags before counting words
            text_for_counting = re.sub(r'<[^>]+>', '', content)
            word_count = len(text_for_counting.split())
        # Update task with all fields (empty values never clobber existing data)
        if content:
            task.content = content
        if title and title != task.title:
            task.title = title
        task.word_count = word_count
        # SEO fields
        if meta_title:
            task.meta_title = meta_title
        elif not task.meta_title:
            task.meta_title = task.title  # Fallback to title
        if meta_description:
            task.meta_description = meta_description
        elif not task.meta_description and task.description:
            task.meta_description = (task.description or '')[:160]  # Fallback to description
        if primary_keyword:
            task.primary_keyword = primary_keyword
        # List-valued fields are only accepted as lists; other truthy types
        # are replaced with an empty list.
        if secondary_keywords:
            task.secondary_keywords = secondary_keywords if isinstance(secondary_keywords, list) else []
        if tags:
            task.tags = tags if isinstance(tags, list) else []
        if categories:
            task.categories = categories if isinstance(categories, list) else []
        task.status = 'draft'
        task.save()
        return {
            'count': 1,
            'tasks_updated': 1,
            'word_count': word_count
        }
def generate_content_core(task_ids: List[int], account_id: int = None, progress_callback=None):
    """Generate content for a batch of tasks (legacy, Celery-optional entry point).

    Args:
        task_ids: List of task IDs to generate content for.
        account_id: Optional account ID for account isolation.
        progress_callback: Optional function to call for progress updates.

    Returns:
        Dict with 'success' plus either 'tasks_updated'/'message' or 'error'.
    """
    try:
        from igny8_core.auth.models import Account

        account = Account.objects.get(id=account_id) if account_id else None

        # Delegate the heavy lifting to the function class
        fn = GenerateContentFunction()
        fn.account = account

        payload = {'ids': task_ids}

        validation = fn.validate(payload, account)
        if not validation['valid']:
            return {'success': False, 'error': validation['error']}

        def _process(task):
            """Generate, parse, and persist content for one task; returns tasks updated."""
            prompt = fn.build_prompt([task], account)
            cfg = get_model_config('generate_content')
            response = AICore(account=account).run_ai_request(
                prompt=prompt,
                model=cfg.get('model'),
                max_tokens=cfg.get('max_tokens'),
                temperature=cfg.get('temperature'),
                response_format=cfg.get('response_format'),
                function_name='generate_content'
            )
            if response.get('error'):
                logger.error(f"AI error for task {task.id}: {response['error']}")
                return 0
            parsed = fn.parse_response(response['content'])
            if not parsed:
                logger.warning(f"No content generated for task {task.id}")
                return 0
            return fn.save_output(parsed, [task], account).get('tasks_updated', 0)

        updated_total = 0
        for task in fn.prepare(payload, account):
            updated_total += _process(task)

        return {
            'success': True,
            'tasks_updated': updated_total,
            'message': f'Content generation complete: {updated_total} articles generated'
        }
    except Exception as e:
        logger.error(f"Error in generate_content_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}

View File

@@ -0,0 +1,330 @@
"""
Generate Ideas AI Function
Extracted from modules/planner/tasks.py
"""
import logging
import json
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.planner.models import Clusters, ContentIdeas
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import validate_cluster_exists, validate_cluster_limits
from igny8_core.ai.tracker import ConsoleStepTracker
from igny8_core.ai.prompts import PromptRegistry
from igny8_core.ai.settings import get_model_config
logger = logging.getLogger(__name__)
class GenerateIdeasFunction(BaseAIFunction):
    """Generate SEO-optimized content ideas from keyword clusters using AI.

    Framework lifecycle: validate -> prepare -> build_prompt ->
    parse_response -> save_output. One ContentIdeas row is created per
    parsed idea, matched back to its source cluster.
    """

    def get_name(self) -> str:
        # Registry key under which this function is registered.
        return 'generate_ideas'

    def get_metadata(self) -> Dict:
        """Return display metadata plus per-phase progress messages."""
        return {
            'display_name': 'Generate Ideas',
            'description': 'Generate SEO-optimized content ideas from keyword clusters',
            'phases': {
                'INIT': 'Initializing idea generation...',
                'PREP': 'Loading clusters...',
                'AI_CALL': 'Generating ideas with AI...',
                'PARSE': 'Parsing idea data...',
                'SAVE': 'Saving ideas...',
                'DONE': 'Ideas generated!'
            }
        }

    def get_max_items(self) -> int:
        return 10  # Max clusters per idea generation

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate cluster IDs and plan limits.

        Returns:
            Dict with 'valid' (bool) and, on failure, an 'error' message.
        """
        result = super().validate(payload, account)
        if not result['valid']:
            return result
        # Check cluster exists
        cluster_ids = payload.get('ids', [])
        if cluster_ids:
            # NOTE: only the first ID is existence-checked here (single
            # cluster idea generation); prepare() still loads all IDs.
            cluster_id = cluster_ids[0]  # For single cluster idea generation
            cluster_result = validate_cluster_exists(cluster_id, account)
            if not cluster_result['valid']:
                return cluster_result
        # Check plan limits
        limit_result = validate_cluster_limits(account, operation_type='idea')
        if not limit_result['valid']:
            return limit_result
        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load clusters with keywords.

        Returns:
            Dict with 'clusters' (model objects), 'cluster_data' (plain
            dicts formatted for the prompt), and the resolved 'account'.

        Raises:
            ValueError: if no IDs were given or no clusters were found.
        """
        cluster_ids = payload.get('ids', [])
        if not cluster_ids:
            raise ValueError("No cluster IDs provided")
        # Support multiple clusters (up to get_max_items())
        queryset = Clusters.objects.filter(id__in=cluster_ids)
        if account:
            queryset = queryset.filter(account=account)
        clusters = list(queryset.select_related('sector', 'account', 'site', 'sector__site').prefetch_related('keywords'))
        if not clusters:
            raise ValueError("No clusters found")
        # Get keywords for each cluster
        from igny8_core.modules.planner.models import Keywords
        cluster_data = []
        for cluster in clusters:
            # Get keywords and extract the keyword text from seed_keyword relationship
            keyword_objects = Keywords.objects.filter(cluster=cluster).select_related('seed_keyword')
            keywords = [kw.seed_keyword.keyword for kw in keyword_objects if kw.seed_keyword]
            cluster_data.append({
                'id': cluster.id,
                'name': cluster.name,
                'description': cluster.description or '',
                'keywords': keywords,
            })
        # Get account from first cluster if not provided
        account = account or (clusters[0].account if clusters else None)
        return {
            'clusters': clusters,  # List of cluster objects
            'cluster_data': cluster_data,  # Formatted data for AI
            'account': account
        }

    def build_prompt(self, data: Dict, account=None) -> str:
        """Build ideas generation prompt using registry."""
        cluster_data = data['cluster_data']
        account = account or data.get('account')
        # Format clusters text
        clusters_text = '\n'.join([
            f"Cluster ID: {c.get('id', '')} | Name: {c.get('name', '')} | Description: {c.get('description', '')}"
            for c in cluster_data
        ])
        # Format cluster keywords
        cluster_keywords_text = '\n'.join([
            f"Cluster ID: {c.get('id', '')} | Name: {c.get('name', '')} | Keywords: {', '.join(c.get('keywords', []))}"
            for c in cluster_data
        ])
        # Get prompt from registry with context
        prompt = PromptRegistry.get_prompt(
            function_name='generate_ideas',
            account=account,
            context={
                'CLUSTERS': clusters_text,
                'CLUSTER_KEYWORDS': cluster_keywords_text,
            }
        )
        return prompt

    def parse_response(self, response: str, step_tracker=None) -> List[Dict]:
        """Parse AI response into idea data.

        Raises:
            ValueError: when no JSON with an 'ideas' key can be extracted.
        """
        # self.account is set by callers (e.g. generate_ideas_core) before use
        ai_core = AICore(account=self.account if hasattr(self, 'account') else None)
        json_data = ai_core.extract_json(response)
        if not json_data or 'ideas' not in json_data:
            error_msg = f"Failed to parse ideas response: {response[:200]}..."
            logger.error(error_msg)
            raise ValueError(error_msg)
        return json_data.get('ideas', [])

    def save_output(
        self,
        parsed: List[Dict],
        original_data: Dict,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Save ideas to database.

        Each idea is matched to a cluster by ID, then by name, then by
        position as a last resort. Ideas with no resolvable cluster or site
        are skipped with a log message. All inserts share one transaction.

        Raises:
            ValueError: when no account can be resolved.
        """
        clusters = original_data['clusters']  # List of cluster objects
        cluster_data = original_data['cluster_data']  # Formatted data for matching
        account = account or original_data.get('account')
        if not account:
            raise ValueError("Account is required for idea creation")
        ideas_created = 0
        with transaction.atomic():
            for idx, idea_data in enumerate(parsed):
                # Find matching cluster by ID or name
                cluster = None
                cluster_id_from_ai = idea_data.get('cluster_id')
                cluster_name = idea_data.get('cluster_name', '')
                # Try to match by ID first
                if cluster_id_from_ai:
                    for c in clusters:
                        if c.id == cluster_id_from_ai:
                            cluster = c
                            break
                # Fallback to name matching
                if not cluster and cluster_name:
                    for c in clusters:
                        if c.name == cluster_name:
                            cluster = c
                            break
                # If still no match, use position-based matching (first idea -> first cluster, etc.)
                if not cluster and len(clusters) > 0:
                    cluster_index = idx % len(clusters)
                    cluster = clusters[cluster_index]
                    logger.warning(f"Cluster not found by ID/name for idea '{idea_data.get('title', 'Untitled')}', using cluster at index {cluster_index}")
                if not cluster:
                    logger.warning(f"Cluster not found for idea '{idea_data.get('title', 'Untitled')}', skipping")
                    continue
                # Ensure site is available (fall back to the sector's site)
                site = cluster.site
                if not site and cluster.sector:
                    site = cluster.sector.site
                if not site:
                    logger.error(f"Site not found for cluster {cluster.id}, cannot create ContentIdeas")
                    continue
                # Handle description - might be dict or string
                description = idea_data.get('description', '')
                if isinstance(description, dict):
                    description = json.dumps(description)
                elif not isinstance(description, str):
                    description = str(description)
                # Handle target_keywords (prefer the 'covered_keywords' key)
                target_keywords = idea_data.get('covered_keywords', '') or idea_data.get('target_keywords', '')
                # Create ContentIdeas record
                ContentIdeas.objects.create(
                    idea_title=idea_data.get('title', 'Untitled Idea'),
                    description=description,
                    content_type=idea_data.get('content_type', 'blog_post'),
                    content_structure=idea_data.get('content_structure', 'supporting_page'),
                    target_keywords=target_keywords,
                    keyword_cluster=cluster,
                    estimated_word_count=idea_data.get('estimated_word_count', 1500),
                    status='new',
                    account=account,
                    site=site,
                    sector=cluster.sector,
                )
                ideas_created += 1
        return {
            'count': ideas_created,
            'ideas_created': ideas_created
        }
def generate_ideas_core(cluster_id: int, account_id: int = None, progress_callback=None):
    """
    Core logic for generating ideas (legacy function signature for backward compatibility).
    Can be called with or without Celery.

    Args:
        cluster_id: Cluster ID to generate idea for
        account_id: Account ID for account isolation
        progress_callback: Optional function to call for progress updates

    Returns:
        Dict with 'success', 'idea_created', 'message', etc.
    """
    tracker = ConsoleStepTracker('generate_ideas')
    tracker.init("Task started")
    try:
        from igny8_core.auth.models import Account
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)
        tracker.prep("Loading account and cluster data...")
        # Use the new function class
        fn = GenerateIdeasFunction()
        # Store account for use in methods (parse_response reads fn.account)
        fn.account = account
        # Prepare payload
        payload = {'ids': [cluster_id]}
        # Validate
        tracker.prep("Validating input...")
        validated = fn.validate(payload, account)
        if not validated['valid']:
            tracker.error('ValidationError', validated['error'])
            return {'success': False, 'error': validated['error']}
        # Prepare data
        tracker.prep("Loading cluster with keywords...")
        data = fn.prepare(payload, account)
        # Build prompt
        tracker.prep("Building prompt...")
        prompt = fn.build_prompt(data, account)
        # Get model config from settings
        model_config = get_model_config('generate_ideas')
        # Call AI using centralized request handler
        ai_core = AICore(account=account)
        result = ai_core.run_ai_request(
            prompt=prompt,
            model=model_config.get('model'),
            max_tokens=model_config.get('max_tokens'),
            temperature=model_config.get('temperature'),
            response_format=model_config.get('response_format'),
            function_name='generate_ideas',
            tracker=tracker
        )
        if result.get('error'):
            # Report via the tracker for consistency with the other failure
            # paths (validation/parse) so console tracking doesn't end
            # silently mid-run.
            tracker.error('AIError', result['error'])
            return {'success': False, 'error': result['error']}
        # Parse response
        tracker.parse("Parsing AI response...")
        ideas_data = fn.parse_response(result['content'])
        if not ideas_data:
            tracker.error('ParseError', 'No ideas generated by AI')
            return {'success': False, 'error': 'No ideas generated by AI'}
        tracker.parse(f"Parsed {len(ideas_data)} idea(s)")
        # Take first idea (used for the result message only; save_output
        # persists ALL parsed ideas)
        idea_data = ideas_data[0]
        # Save output
        tracker.save("Saving idea to database...")
        save_result = fn.save_output(ideas_data, data, account)
        tracker.save(f"Saved {save_result['ideas_created']} idea(s)")
        tracker.done(f"Idea '{idea_data.get('title', 'Untitled')}' created successfully")
        return {
            'success': True,
            'idea_created': save_result['ideas_created'],
            'message': f"Idea '{idea_data.get('title', 'Untitled')}' created"
        }
    except Exception as e:
        tracker.error('Exception', str(e), e)
        logger.error(f"Error in generate_ideas_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}

View File

@@ -0,0 +1,277 @@
"""
Generate Images AI Function
Extracted from modules/writer/tasks.py
"""
import logging
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.writer.models import Tasks, Images
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.validators import validate_tasks_exist
from igny8_core.ai.prompts import PromptRegistry
from igny8_core.ai.settings import get_model_config
logger = logging.getLogger(__name__)
class GenerateImagesFunction(BaseAIFunction):
    """Generate featured and in-article images for writer Tasks using AI.

    Unlike the text functions, build_prompt() here performs its own AI call
    to extract image prompts from the task content; the actual image
    generation happens in the caller (see ``generate_images_core``).
    """

    def get_name(self) -> str:
        # Registry key under which this function is registered.
        return 'generate_images'

    def get_metadata(self) -> Dict:
        """Return display metadata plus per-phase progress messages."""
        return {
            'display_name': 'Generate Images',
            'description': 'Generate featured and in-article images for tasks',
            'phases': {
                'INIT': 'Initializing image generation...',
                'PREP': 'Extracting image prompts...',
                'AI_CALL': 'Generating images with AI...',
                'PARSE': 'Processing image URLs...',
                'SAVE': 'Saving images...',
                'DONE': 'Images generated!'
            }
        }

    def get_max_items(self) -> int:
        return 20  # Max tasks per batch

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate task IDs.

        Returns:
            Dict with 'valid' (bool) and, on failure, an 'error' message.
        """
        result = super().validate(payload, account)
        if not result['valid']:
            return result
        # Check tasks exist (scoped to the account when one is given)
        task_ids = payload.get('ids', [])
        if task_ids:
            task_result = validate_tasks_exist(task_ids, account)
            if not task_result['valid']:
                return task_result
        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load tasks and image generation settings.

        Settings come from the account's active 'image_generation'
        IntegrationSettings record; missing/failed lookups fall back to
        OpenAI defaults.

        Raises:
            ValueError: if no matching tasks exist.
        """
        task_ids = payload.get('ids', [])
        queryset = Tasks.objects.filter(id__in=task_ids)
        if account:
            queryset = queryset.filter(account=account)
        tasks = list(queryset.select_related('account', 'sector', 'site'))
        if not tasks:
            raise ValueError("No tasks found")
        # Get image generation settings
        image_settings = {}
        if account:
            try:
                from igny8_core.modules.system.models import IntegrationSettings
                integration = IntegrationSettings.objects.get(
                    account=account,
                    integration_type='image_generation',
                    is_active=True
                )
                image_settings = integration.config or {}
            except Exception:
                # Best effort: no active integration -> use defaults below
                pass
        # Extract settings with defaults ('service'/'runwareModel' are legacy keys)
        provider = image_settings.get('provider') or image_settings.get('service', 'openai')
        if provider == 'runware':
            model = image_settings.get('model') or image_settings.get('runwareModel', 'runware:97@1')
        else:
            model = image_settings.get('model', 'dall-e-3')
        return {
            'tasks': tasks,
            'account': account,
            'provider': provider,
            'model': model,
            'image_type': image_settings.get('image_type', 'realistic'),
            'max_in_article_images': int(image_settings.get('max_in_article_images', 2)),
            'desktop_enabled': image_settings.get('desktop_enabled', True),
            'mobile_enabled': image_settings.get('mobile_enabled', True),
        }

    def build_prompt(self, data: Dict, account=None) -> Dict:
        """Extract image prompts from task content.

        Makes an 'extract_image_prompts' AI call over the task content.

        Returns:
            Dict with 'featured_prompt' (str) and 'in_article_prompts' (list).

        Raises:
            ValueError: when the task has no content, the AI call errors,
            or the response cannot be parsed as JSON.
        """
        task = data.get('task')
        max_images = data.get('max_in_article_images', 2)
        if not task or not task.content:
            raise ValueError("Task has no content")
        # Use AI to extract image prompts
        ai_core = AICore(account=account or data.get('account'))
        account_obj = account or data.get('account')
        # Get prompt from registry
        prompt = PromptRegistry.get_prompt(
            function_name='extract_image_prompts',
            account=account_obj,
            context={
                'title': task.title,
                'content': task.content[:5000],  # Limit content length
                'max_images': max_images
            }
        )
        # Get model config
        model_config = get_model_config('extract_image_prompts')
        # Call AI to extract prompts using centralized request handler
        result = ai_core.run_ai_request(
            prompt=prompt,
            model=model_config.get('model'),
            max_tokens=model_config.get('max_tokens'),
            temperature=model_config.get('temperature'),
            response_format=model_config.get('response_format'),
            function_name='extract_image_prompts'
        )
        if result.get('error'):
            raise ValueError(f"Failed to extract image prompts: {result['error']}")
        # Parse JSON response
        json_data = ai_core.extract_json(result['content'])
        if not json_data:
            raise ValueError("Failed to parse image prompts response")
        return {
            'featured_prompt': json_data.get('featured_prompt', ''),
            'in_article_prompts': json_data.get('in_article_prompts', [])
        }

    def parse_response(self, response: Dict, step_tracker=None) -> Dict:
        """Parse image generation response (already parsed, just return)"""
        return response

    def save_output(
        self,
        parsed: Dict,
        original_data: Dict,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Save images to database.

        Expects ``parsed`` to carry 'url' and 'image_type' and
        ``original_data`` to carry 'task'.

        Raises:
            ValueError: when the task or image URL is missing.
        """
        task = original_data.get('task')
        image_url = parsed.get('url')
        image_type = parsed.get('image_type')  # 'featured', 'desktop', 'mobile'
        if not task or not image_url:
            raise ValueError("Missing task or image URL")
        # Create Images record
        image = Images.objects.create(
            task=task,
            image_url=image_url,
            image_type=image_type,
            account=account or task.account,
            site=task.site,
            sector=task.sector,
        )
        return {
            'count': 1,
            'images_created': 1,
            'image_id': image.id
        }
def generate_images_core(task_ids: List[int], account_id: int = None, progress_callback=None):
    """
    Core logic for generating images (legacy function signature for backward compatibility).
    Can be called with or without Celery.

    Args:
        task_ids: List of task IDs
        account_id: Account ID for account isolation
        progress_callback: Optional function to call for progress updates

    Returns:
        Dict with 'success', 'images_created', 'message', etc.
    """
    try:
        from igny8_core.auth.models import Account
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)
        # Use the new function class
        fn = GenerateImagesFunction()
        fn.account = account
        # Prepare payload
        payload = {'ids': task_ids}
        # Validate
        validated = fn.validate(payload, account)
        if not validated['valid']:
            return {'success': False, 'error': validated['error']}
        # Prepare data
        data = fn.prepare(payload, account)
        tasks = data['tasks']
        # Get prompts from registry
        image_prompt_template = PromptRegistry.get_image_prompt_template(account)
        negative_prompt = PromptRegistry.get_negative_prompt(account)
        ai_core = AICore(account=account)
        images_created = 0
        # Process each task; failures are isolated per task so one bad task
        # doesn't abort the whole batch (mirrors generate_content_core).
        for task in tasks:
            if not task.content:
                continue
            try:
                # Extract image prompts (build_prompt raises ValueError when
                # extraction or parsing fails)
                prompts_data = fn.build_prompt({'task': task, **data}, account)
                featured_prompt = prompts_data['featured_prompt']
                # Reserved for the in-article generation step below
                in_article_prompts = prompts_data['in_article_prompts']
                # Format featured prompt
                formatted_featured = image_prompt_template.format(
                    image_type=data['image_type'],
                    post_title=task.title,
                    image_prompt=featured_prompt
                )
                # Generate featured image using centralized handler
                featured_result = ai_core.generate_image(
                    prompt=formatted_featured,
                    provider=data['provider'],
                    model=data['model'],
                    negative_prompt=negative_prompt,
                    function_name='generate_images'
                )
                if not featured_result.get('error') and featured_result.get('url'):
                    fn.save_output(
                        {'url': featured_result['url'], 'image_type': 'featured'},
                        {'task': task, **data},
                        account
                    )
                    images_created += 1
                # Generate in-article images (desktop/mobile if enabled)
                # ... (simplified for now, full logic in tasks.py)
            except Exception as e:
                logger.error(f"Image generation failed for task {task.id}: {e}", exc_info=True)
                continue
        return {
            'success': True,
            'images_created': images_created,
            'message': f'Image generation complete: {images_created} images created'
        }
    except Exception as e:
        logger.error(f"Error in generate_images_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}

View File

@@ -1,20 +1,26 @@
"""
AI Processor wrapper for the framework
Reuses existing AIProcessor but provides framework-compatible interface
DEPRECATED: Use AICore.run_ai_request() instead for all new code.
This file is kept for backward compatibility only.
"""
from typing import Dict, Any, Optional, List
from igny8_core.utils.ai_processor import AIProcessor as BaseAIProcessor
from igny8_core.ai.ai_core import AICore
class AIProcessor:
"""
Framework-compatible wrapper around existing AIProcessor.
Provides consistent interface for all AI functions.
DEPRECATED: Use AICore.run_ai_request() instead.
This class redirects to AICore for consistency.
"""
def __init__(self, account=None):
self.processor = BaseAIProcessor(account=account)
# Use AICore internally for all requests
self.ai_core = AICore(account=account)
self.account = account
# Keep old processor for backward compatibility only
self.processor = BaseAIProcessor(account=account)
def call(
self,
@@ -28,35 +34,25 @@ class AIProcessor:
) -> Dict[str, Any]:
"""
Call AI provider with prompt.
DEPRECATED: Use AICore.run_ai_request() instead.
Returns:
Dict with 'content', 'error', 'input_tokens', 'output_tokens',
'total_tokens', 'model', 'cost', 'api_id'
"""
# Use specified model or account's default
active_model = model or self.processor.default_model
# Check if model supports JSON mode
json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
if response_format is None and active_model in json_models:
response_format = {'type': 'json_object'}
# Call OpenAI - don't pass response_steps to old processor
# The new framework handles all step tracking at the engine level
result = self.processor._call_openai(
prompt,
model=active_model,
# Redirect to AICore for centralized execution
return self.ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=max_tokens,
temperature=temperature,
response_format=response_format,
response_steps=None # Disable old processor's step tracking
function_name='AIProcessor.call'
)
return result
def extract_json(self, response_text: str) -> Optional[Dict]:
"""Extract JSON from response text"""
return self.processor._extract_json_from_response(response_text)
return self.ai_core.extract_json(response_text)
def generate_image(
self,
@@ -67,11 +63,13 @@ class AIProcessor:
account=None
) -> Dict[str, Any]:
"""Generate image using AI"""
return self.processor.generate_image(
return self.ai_core.generate_image(
prompt=prompt,
provider='openai',
model=model,
size=size,
n=n,
account=account or self.account
account=account or self.account,
function_name='AIProcessor.generate_image'
)

View File

@@ -0,0 +1,278 @@
"""
Prompt Registry - Centralized prompt management with override hierarchy
Supports: task-level overrides → DB prompts → default fallbacks
"""
import logging
from typing import Dict, Any, Optional
from django.db import models
logger = logging.getLogger(__name__)
class PromptRegistry:
    """
    Centralized prompt registry with hierarchical resolution:
    1. Task-level prompt_override (if exists)
    2. DB prompt for (account, function)
    3. Default fallback from registry
    """
    # Default prompts stored in registry
    # These are raw templates; [IGNY8_*] tokens and {var} placeholders are
    # substituted later by _render_prompt().
    # NOTE(review): "Include 310 keywords per cluster" below looks like a
    # garbled "3-10" (lost dash) — confirm against the original prompt copy.
    DEFAULT_PROMPTS = {
        'clustering': """Analyze the following keywords and group them into topic clusters.
Each cluster should include:
- "name": A clear, descriptive topic name
- "description": A brief explanation of what the cluster covers
- "keywords": A list of related keywords that belong to this cluster
Format the output as a JSON object with a "clusters" array.
IMPORTANT: In the "keywords" array, you MUST use the EXACT keyword strings from the input list below. Do not modify, paraphrase, or create variations of the keywords. Only use the exact keywords as they appear in the input list.
Clustering rules:
- Group keywords based on strong semantic or topical relationships (intent, use-case, function, audience, etc.)
- Clusters should reflect how people actually search — problem ➝ solution, general ➝ specific, product ➝ benefit, etc.
- Avoid grouping keywords just because they share similar words — focus on meaning
- Include 310 keywords per cluster where appropriate
- Skip unrelated or outlier keywords that don't fit a clear theme
- CRITICAL: Only return keywords that exactly match the input keywords (case-insensitive matching is acceptable)
Keywords to process:
[IGNY8_KEYWORDS]""",
        'ideas': """Generate SEO-optimized, high-quality content ideas and detailed outlines for each of the following keyword clusters.
Clusters to analyze:
[IGNY8_CLUSTERS]
Keywords in each cluster:
[IGNY8_CLUSTER_KEYWORDS]
Return your response as JSON with an "ideas" array.
For each cluster, generate 1-3 content ideas.
Each idea must include:
- "title": compelling blog/article title that naturally includes a primary keyword
- "description": detailed content outline with H2/H3 structure (as plain text or structured JSON)
- "content_type": the type of content (blog_post, article, guide, tutorial)
- "content_structure": the editorial structure (cluster_hub, landing_page, pillar_page, supporting_page)
- "estimated_word_count": estimated total word count (1500-2200 words)
- "target_keywords": comma-separated list of keywords that will be covered (or "covered_keywords")
- "cluster_name": name of the cluster this idea belongs to (REQUIRED)
- "cluster_id": ID of the cluster this idea belongs to (REQUIRED - use the exact cluster ID from the input)
IMPORTANT: You MUST include the exact "cluster_id" from the cluster data provided. Match the cluster name to find the correct cluster_id.
Return only valid JSON with an "ideas" array.""",
        'content_generation': """You are an editorial content strategist. Generate a complete blog post/article based on the provided content idea.
CONTENT IDEA DETAILS:
[IGNY8_IDEA]
KEYWORD CLUSTER:
[IGNY8_CLUSTER]
ASSOCIATED KEYWORDS:
[IGNY8_KEYWORDS]
Generate well-structured, SEO-optimized content with:
- Engaging introduction
- 5-8 H2 sections with H3 subsections
- Natural keyword integration
- 1500-2000 words total
- Proper HTML formatting (h2, h3, p, ul, ol, table tags)
Return the content as plain text with HTML tags.""",
        'image_prompt_extraction': """Extract image prompts from the following article content.
ARTICLE TITLE: {title}
ARTICLE CONTENT:
{content}
Extract image prompts for:
1. Featured Image: One main image that represents the article topic
2. In-Article Images: Up to {max_images} images that would be useful within the article content
Return a JSON object with this structure:
{{
"featured_prompt": "Detailed description of the featured image",
"in_article_prompts": [
"Description of first in-article image",
"Description of second in-article image",
...
]
}}
Make sure each prompt is detailed enough for image generation, describing the visual elements, style, mood, and composition.""",
        'image_prompt_template': 'Create a high-quality {image_type} image to use as a featured photo for a blog post titled "{post_title}". The image should visually represent the theme, mood, and subject implied by the image prompt: {image_prompt}. Focus on a realistic, well-composed scene that naturally communicates the topic without text or logos. Use balanced lighting, pleasing composition, and photographic detail suitable for lifestyle or editorial web content. Avoid adding any visible or readable text, brand names, or illustrative effects. **And make sure image is not blurry.**',
        'negative_prompt': 'text, watermark, logo, overlay, title, caption, writing on walls, writing on objects, UI, infographic elements, post title',
    }
    # Mapping from function names to prompt types
    # Unmapped function names fall through unchanged (see get_prompt Step 2).
    FUNCTION_TO_PROMPT_TYPE = {
        'auto_cluster': 'clustering',
        'generate_ideas': 'ideas',
        'generate_content': 'content_generation',
        'generate_images': 'image_prompt_extraction',
        'extract_image_prompts': 'image_prompt_extraction',
    }
    @classmethod
    def get_prompt(
        cls,
        function_name: str,
        account: Optional[Any] = None,
        task: Optional[Any] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Get prompt for a function with hierarchical resolution.
        Priority:
        1. task.prompt_override (if task provided and has override)
        2. DB prompt for (account, function)
        3. Default fallback from registry
        Args:
            function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')
            account: Account object (optional)
            task: Task object with optional prompt_override (optional)
            context: Additional context for prompt rendering (optional)
        Returns:
            Prompt string ready for formatting
        """
        # Step 1: Check task-level override
        if task and hasattr(task, 'prompt_override') and task.prompt_override:
            logger.info(f"Using task-level prompt override for {function_name}")
            prompt = task.prompt_override
            return cls._render_prompt(prompt, context or {})
        # Step 2: Get prompt type
        prompt_type = cls.FUNCTION_TO_PROMPT_TYPE.get(function_name, function_name)
        # Step 3: Try DB prompt
        if account:
            try:
                from igny8_core.modules.system.models import AIPrompt
                db_prompt = AIPrompt.objects.get(
                    account=account,
                    prompt_type=prompt_type,
                    is_active=True
                )
                logger.info(f"Using DB prompt for {function_name} (account {account.id})")
                prompt = db_prompt.prompt_value
                return cls._render_prompt(prompt, context or {})
            except Exception as e:
                # Broad catch on purpose: a missing/inactive DB prompt (or any
                # lookup failure) silently falls through to the default below.
                logger.debug(f"No DB prompt found for {function_name}: {e}")
        # Step 4: Use default fallback
        prompt = cls.DEFAULT_PROMPTS.get(prompt_type, '')
        if not prompt:
            logger.warning(f"No default prompt found for {prompt_type}, using empty string")
        return cls._render_prompt(prompt, context or {})
    @classmethod
    def _render_prompt(cls, prompt_template: str, context: Dict[str, Any]) -> str:
        """
        Render prompt template with context variables.
        Supports both .format() style ({variable}) and placeholder replacement ([IGNY8_*]).
        Args:
            prompt_template: Prompt template string
            context: Context variables for rendering
        Returns:
            Rendered prompt string
        """
        if not context:
            return prompt_template
        rendered = prompt_template
        # Step 1: Replace [IGNY8_*] placeholders first (always do this)
        # Substituted values may themselves contain '{'/'}' — the try/except
        # around .format() below keeps that from breaking rendering.
        for key, value in context.items():
            placeholder = f'[IGNY8_{key.upper()}]'
            if placeholder in rendered:
                rendered = rendered.replace(placeholder, str(value))
                logger.debug(f"Replaced placeholder {placeholder} with {len(str(value))} characters")
        # Step 2: Try .format() style for {variable} placeholders (if any remain)
        # Normalize context keys - convert UPPER to lowercase for .format()
        normalized_context = {}
        for key, value in context.items():
            # Try both original key and lowercase version
            normalized_context[key] = value
            normalized_context[key.lower()] = value
        # Only try .format() if there are {variable} placeholders
        if '{' in rendered and '}' in rendered:
            try:
                rendered = rendered.format(**normalized_context)
            except (KeyError, ValueError) as e:
                # If .format() fails, log warning but keep the [IGNY8_*] replacements
                logger.warning(f"Failed to format prompt with .format(): {e}. Using [IGNY8_*] replacements only.")
        return rendered
    @classmethod
    def get_image_prompt_template(cls, account: Optional[Any] = None) -> str:
        """
        Get image prompt template.
        Returns template string (not rendered) - caller should format with .format()
        """
        prompt_type = 'image_prompt_template'
        # Try DB prompt
        if account:
            try:
                from igny8_core.modules.system.models import AIPrompt
                db_prompt = AIPrompt.objects.get(
                    account=account,
                    prompt_type=prompt_type,
                    is_active=True
                )
                return db_prompt.prompt_value
            except Exception:
                # Broad catch: fall back to the registry default on any lookup error.
                pass
        # Use default
        return cls.DEFAULT_PROMPTS.get(prompt_type, '')
    @classmethod
    def get_negative_prompt(cls, account: Optional[Any] = None) -> str:
        """
        Get negative prompt.
        Returns template string (not rendered).
        """
        prompt_type = 'negative_prompt'
        # Try DB prompt
        if account:
            try:
                from igny8_core.modules.system.models import AIPrompt
                db_prompt = AIPrompt.objects.get(
                    account=account,
                    prompt_type=prompt_type,
                    is_active=True
                )
                return db_prompt.prompt_value
            except Exception:
                # Broad catch: fall back to the registry default on any lookup error.
                pass
        # Use default
        return cls.DEFAULT_PROMPTS.get(prompt_type, '')
# Convenience function for backward compatibility
def get_prompt(function_name: str, account=None, task=None, context=None) -> str:
    """Module-level convenience wrapper around PromptRegistry.get_prompt()."""
    return PromptRegistry.get_prompt(
        function_name,
        account=account,
        task=task,
        context=context,
    )

View File

@@ -66,5 +66,23 @@ def _load_auto_cluster():
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
return AutoClusterFunction
register_lazy_function('auto_cluster', _load_auto_cluster)
def _load_generate_ideas():
    """Lazy loader for generate_ideas function"""
    # Deferred import: keeps Django startup fast and avoids circular imports.
    from igny8_core.ai.functions.generate_ideas import GenerateIdeasFunction
    return GenerateIdeasFunction
def _load_generate_content():
    """Lazy loader for generate_content function"""
    # Deferred import: keeps Django startup fast and avoids circular imports.
    from igny8_core.ai.functions.generate_content import GenerateContentFunction
    return GenerateContentFunction
def _load_generate_images():
    """Lazy loader for generate_images function"""
    # Deferred import: keeps Django startup fast and avoids circular imports.
    from igny8_core.ai.functions.generate_images import GenerateImagesFunction
    return GenerateImagesFunction
# Register the lazy loaders with the AI function registry.
# NOTE: 'auto_cluster' is already registered with its loader earlier in this
# module; re-registering it here was redundant, so only the three newly added
# functions are registered below.
register_lazy_function('generate_ideas', _load_generate_ideas)
register_lazy_function('generate_content', _load_generate_content)
register_lazy_function('generate_images', _load_generate_images)

View File

@@ -0,0 +1,92 @@
"""
AI Settings - Centralized model configurations and limits
"""
from typing import Dict, Any
# Model configurations for each AI function
# Keys missing from an entry fall back to the defaults merged in
# get_model_config() below.
MODEL_CONFIG = {
    "auto_cluster": {
        "model": "gpt-4o-mini",
        "max_tokens": 3000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},  # Auto-enabled for JSON mode models
    },
    "generate_ideas": {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},
    },
    "generate_content": {
        "model": "gpt-4.1",
        "max_tokens": 8000,
        "temperature": 0.7,
        "response_format": None,  # Text output
    },
    # Image generation uses a different parameter set (no token/temperature).
    "generate_images": {
        "model": "dall-e-3",
        "size": "1024x1024",
        "provider": "openai",
    },
    "extract_image_prompts": {
        "model": "gpt-4o-mini",
        "max_tokens": 1000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},
    },
}
# Function name aliases (for backward compatibility)
# Old call sites keep working; get_model_config() resolves these first.
FUNCTION_ALIASES = {
    "cluster_keywords": "auto_cluster",
    "auto_cluster_keywords": "auto_cluster",
    "auto_generate_ideas": "generate_ideas",
    "auto_generate_content": "generate_content",
    "auto_generate_images": "generate_images",
}
def get_model_config(function_name: str) -> Dict[str, Any]:
    """
    Get model configuration for an AI function.

    Resolves aliases via FUNCTION_ALIASES, then overlays the function's
    MODEL_CONFIG entry on top of a baseline default configuration.

    Args:
        function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')

    Returns:
        Dict with model, max_tokens, temperature, etc.
    """
    # Resolve backward-compatibility aliases to the canonical function name.
    canonical = FUNCTION_ALIASES.get(function_name, function_name)
    # Start from the baseline defaults, then overlay any per-function values.
    merged = {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": None,
    }
    merged.update(MODEL_CONFIG.get(canonical, {}))
    return merged
def get_model(function_name: str) -> str:
    """Return the configured model name for *function_name*."""
    return get_model_config(function_name).get("model", "gpt-4.1")
def get_max_tokens(function_name: str) -> int:
    """Return the configured max token budget for *function_name*."""
    return get_model_config(function_name).get("max_tokens", 4000)
def get_temperature(function_name: str) -> float:
    """Return the configured sampling temperature for *function_name*."""
    return get_model_config(function_name).get("temperature", 0.7)

View File

@@ -59,29 +59,37 @@ def run_ai_task(self, function_name: str, payload: dict, account_id: int = None)
logger.error(f" - Error: {result.get('error')}")
logger.info("=" * 80)
# If execution failed, raise exception so Celery marks it as FAILURE
# If execution failed, update state and return error (don't raise to avoid serialization issues)
if not result.get('success'):
error_msg = result.get('error', 'Task execution failed')
error_type = result.get('error_type', 'ExecutionError')
# Update task state before raising
# Update task state with error details
error_meta = {
'error': error_msg,
'error_type': error_type,
'function_name': function_name,
'phase': result.get('phase', 'ERROR'),
'percentage': 0,
'message': f'Error: {error_msg}',
'request_steps': result.get('request_steps', []),
'response_steps': result.get('response_steps', [])
}
try:
self.update_state(
state='FAILURE',
meta={
'error': error_msg,
'error_type': error_type,
'function_name': function_name,
'phase': result.get('phase', 'ERROR'),
'percentage': 0,
'message': f'Error: {error_msg}',
'request_steps': result.get('request_steps', []),
'response_steps': result.get('response_steps', [])
}
meta=error_meta
)
except Exception:
pass
# Raise exception so Celery properly tracks failure
raise Exception(f"{error_type}: {error_msg}")
except Exception as update_err:
logger.warning(f"Failed to update task state: {update_err}")
# Return error result - Celery will mark as FAILURE based on state
# Don't raise exception to avoid serialization issues
return {
'success': False,
'error': error_msg,
'error_type': error_type,
**error_meta
}
return result
@@ -94,26 +102,29 @@ def run_ai_task(self, function_name: str, payload: dict, account_id: int = None)
logger.error(f" - Error: {error_type}: {error_msg}")
logger.error("=" * 80, exc_info=True)
# Update task state with error details
# Update task state with error details (don't raise to avoid serialization issues)
error_meta = {
'error': error_msg,
'error_type': error_type,
'function_name': function_name,
'phase': 'ERROR',
'percentage': 0,
'message': f'Error: {error_msg}'
}
try:
self.update_state(
state='FAILURE',
meta={
'error': error_msg,
'error_type': error_type,
'function_name': function_name,
'phase': 'ERROR',
'percentage': 0,
'message': f'Error: {error_msg}'
}
meta=error_meta
)
except Exception:
pass # Don't fail if state update fails
except Exception as update_err:
logger.warning(f"Failed to update task state: {update_err}")
# Return error result - don't raise to avoid Celery serialization issues
return {
'success': False,
'error': error_msg,
'error_type': error_type,
'function_name': function_name
'function_name': function_name,
**error_meta
}

View File

@@ -0,0 +1,134 @@
"""
Test script for AI functions
Run this to verify all AI functions work with console logging
"""
import os
import sys
import django
# Setup Django
# Make the project root importable, point Django at the settings module,
# then initialize the app registry BEFORE any model-dependent imports below.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../../../'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8.settings')
django.setup()
from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
from igny8_core.ai.functions.generate_content import generate_content_core
from igny8_core.ai.functions.generate_images import generate_images_core
from igny8_core.ai.ai_core import AICore
def test_ai_core():
    """Smoke-test AICore.run_ai_request() with a trivial JSON prompt."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 1: AICore.run_ai_request() - Direct API Call")
    print(banner)
    core = AICore()
    response = core.run_ai_request(
        prompt="Say 'Hello, World!' in JSON format: {\"message\": \"your message\"}",
        max_tokens=100,
        function_name='test_ai_core'
    )
    error = response.get('error')
    if error:
        print(f"❌ Error: {error}")
        return
    print(f"✅ Success! Content: {response.get('content', '')[:100]}")
    print(f" Tokens: {response.get('total_tokens')}, Cost: ${response.get('cost', 0):.6f}")
def test_auto_cluster():
    """Placeholder for the auto-cluster test (needs DB fixtures to run)."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 2: Auto Cluster Function")
    print(banner)
    print("Note: This requires actual keyword IDs in the database")
    print("Skipping - requires database setup")
    # Uncomment to test with real data:
    # fn = AutoClusterFunction()
    # result = fn.validate({'ids': [1, 2, 3]})
    # print(f"Validation result: {result}")
def test_generate_ideas():
    """Placeholder for the generate-ideas test (needs DB fixtures to run)."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 3: Generate Ideas Function")
    print(banner)
    print("Note: This requires actual cluster ID in the database")
    print("Skipping - requires database setup")
    # Uncomment to test with real data:
    # result = generate_ideas_core(cluster_id=1, account_id=1)
    # print(f"Result: {result}")
def test_generate_content():
    """Placeholder for the generate-content test (needs DB fixtures to run)."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 4: Generate Content Function")
    print(banner)
    print("Note: This requires actual task IDs in the database")
    print("Skipping - requires database setup")
    # Uncomment to test with real data:
    # result = generate_content_core(task_ids=[1], account_id=1)
    # print(f"Result: {result}")
def test_generate_images():
    """Placeholder for the generate-images test (needs DB fixtures to run)."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 5: Generate Images Function")
    print(banner)
    print("Note: This requires actual task IDs in the database")
    print("Skipping - requires database setup")
    # Uncomment to test with real data:
    # result = generate_images_core(task_ids=[1], account_id=1)
    # print(f"Result: {result}")
def test_json_extraction():
    """Exercise AICore.extract_json() against three input shapes."""
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 6: JSON Extraction")
    print(banner)
    core = AICore()
    # (label, input text, predicate the parse result must satisfy)
    checks = (
        ("Direct JSON",
         '{"clusters": [{"name": "Test", "keywords": ["test"]}]}',
         lambda parsed: parsed is not None),
        ("JSON in markdown",
         '```json\n{"clusters": [{"name": "Test"}]}\n```',
         lambda parsed: parsed is not None),
        ("Invalid JSON handled",
         "This is not JSON",
         lambda parsed: parsed is None),
    )
    for label, text, ok in checks:
        parsed = core.extract_json(text)
        print(f"✅ {label}: {ok(parsed)}")
if __name__ == '__main__':
    banner = "=" * 80
    print("\n" + banner)
    print("AI FUNCTIONS TEST SUITE")
    print(banner)
    print("Testing all AI functions with console logging enabled")
    print(banner)
    # Run every test in order; placeholders self-skip with a printed note.
    for test_fn in (
        test_ai_core,
        test_json_extraction,
        test_auto_cluster,
        test_generate_ideas,
        test_generate_content,
        test_generate_images,
    ):
        test_fn()
    print("\n" + banner)
    print("TEST SUITE COMPLETE")
    print(banner)
    print("\nAll console logging should be visible above.")
    print("Check for [AI][function_name] Step X: messages")
View File

@@ -4,7 +4,9 @@ Progress and Step Tracking utilities for AI framework
import time
import logging
from typing import List, Dict, Any, Optional, Callable
from datetime import datetime
from igny8_core.ai.types import StepLog, ProgressState
from igny8_core.ai.constants import DEBUG_MODE
logger = logging.getLogger(__name__)
@@ -221,3 +223,100 @@ class CostTracker:
"""Get all operations"""
return self.operations
class ConsoleStepTracker:
    """
    Lightweight console-based step tracker for AI functions.
    Logs each step to console with timestamps and clear labels.
    Only logs if DEBUG_MODE is True.
    """
    def __init__(self, function_name: str):
        # Label prepended to every console line, e.g. "[auto_cluster]".
        self.function_name = function_name
        # Wall-clock start time; used by done() to report total duration.
        self.start_time = time.time()
        # In-memory step history; only populated when DEBUG_MODE is on,
        # because _log() returns early before appending otherwise.
        self.steps = []
        # Last phase logged; error() falls back to this when raised mid-phase.
        self.current_phase = None
    def _log(self, phase: str, message: str, status: str = 'info'):
        """Internal logging method that checks DEBUG_MODE"""
        if not DEBUG_MODE:
            return
        timestamp = datetime.now().strftime('%H:%M:%S')
        phase_label = phase.upper()
        # Status selects the line decoration: [ERROR] marker, ✅, or plain.
        if status == 'error':
            print(f"[{timestamp}] [{self.function_name}] [{phase_label}] [ERROR] {message}")
        elif status == 'success':
            print(f"[{timestamp}] [{self.function_name}] [{phase_label}] ✅ {message}")
        else:
            print(f"[{timestamp}] [{self.function_name}] [{phase_label}] {message}")
        self.steps.append({
            'timestamp': timestamp,
            'phase': phase,
            'message': message,
            'status': status
        })
        self.current_phase = phase
    def init(self, message: str = "Task started"):
        """Log initialization phase"""
        self._log('INIT', message)
    def prep(self, message: str):
        """Log preparation phase"""
        self._log('PREP', message)
    def ai_call(self, message: str):
        """Log AI call phase"""
        self._log('AI_CALL', message)
    def parse(self, message: str):
        """Log parsing phase"""
        self._log('PARSE', message)
    def save(self, message: str):
        """Log save phase"""
        self._log('SAVE', message)
    def done(self, message: str = "Execution completed"):
        """Log completion"""
        duration = time.time() - self.start_time
        self._log('DONE', f"{message} (Duration: {duration:.2f}s)", status='success')
        if DEBUG_MODE:
            print(f"[{self.function_name}] === AI Task Complete ===")
    def error(self, error_type: str, message: str, exception: Exception = None):
        """Log error with standardized format"""
        # NOTE(review): error_type and message are joined with a space only
        # (e.g. "Timeout Request timeout after 30s") — confirm a ':' separator
        # is not expected by whoever reads these logs.
        error_msg = f"{error_type} {message}"
        if exception:
            error_msg += f" ({type(exception).__name__})"
        # Attribute the error to the phase that was running, if any.
        self._log(self.current_phase or 'ERROR', error_msg, status='error')
        if DEBUG_MODE and exception:
            import traceback
            print(f"[{self.function_name}] [ERROR] Stack trace:")
            traceback.print_exc()
    def retry(self, attempt: int, max_attempts: int, reason: str = ""):
        """Log retry attempt"""
        msg = f"Retry attempt {attempt}/{max_attempts}"
        if reason:
            msg += f" {reason}"
        self._log('AI_CALL', msg, status='info')
    def timeout(self, timeout_seconds: int):
        """Log timeout"""
        self.error('Timeout', f"Request timeout after {timeout_seconds}s")
    def rate_limit(self, retry_after: str):
        """Log rate limit"""
        self.error('RateLimit', f"OpenAI rate limit hit, retry in {retry_after}s")
    def malformed_json(self, details: str = ""):
        """Log JSON parsing error"""
        msg = "Failed to parse model response: Unexpected JSON"
        if details:
            msg += f" {details}"
        self.error('MalformedJSON', msg)

View File

@@ -0,0 +1,217 @@
"""
AI Validators - Consolidated validation logic for all AI functions
"""
import logging
from typing import Dict, Any, Optional
from django.utils import timezone
logger = logging.getLogger(__name__)
def validate_ids(payload: dict, max_items: Optional[int] = None) -> Dict[str, Any]:
    """
    Base validation: checks for 'ids' array and max_items limit.

    Args:
        payload: Request payload containing 'ids' array
        max_items: Maximum number of items allowed (None = no limit)

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    ids = payload.get('ids', [])
    if not ids:
        return {'valid': False, 'error': 'No IDs provided'}
    # Explicit None check: with the previous truthiness test (`if max_items`),
    # max_items=0 silently disabled the limit instead of rejecting all items.
    if max_items is not None and len(ids) > max_items:
        return {'valid': False, 'error': f'Maximum {max_items} items allowed'}
    return {'valid': True}
def validate_keywords_exist(ids: list, account=None) -> Dict[str, Any]:
    """
    Validate that keywords exist in database.

    Args:
        ids: List of keyword IDs
        account: Optional account for filtering

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    from igny8_core.modules.planner.models import Keywords
    queryset = Keywords.objects.filter(id__in=ids)
    if account:
        queryset = queryset.filter(account=account)
    # .exists() is cheaper than .count() == 0: the DB can stop at the first
    # matching row instead of counting all of them (matches validate_cluster_exists).
    if not queryset.exists():
        return {'valid': False, 'error': 'No keywords found'}
    return {'valid': True}
def validate_cluster_limits(account, operation_type: str = 'cluster') -> Dict[str, Any]:
    """
    Validate plan limits for cluster operations.

    Args:
        account: Account object
        operation_type: Type of operation ('cluster', 'idea', etc.)

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    if not account:
        return {'valid': False, 'error': 'Account is required'}
    plan = getattr(account, 'plan', None)
    if not plan:
        return {'valid': False, 'error': 'Account does not have an active plan'}
    # Only cluster operations have plan limits to enforce here.
    if operation_type != 'cluster':
        return {'valid': True}
    from igny8_core.modules.planner.models import Clusters
    # Daily limit: count clusters created since local midnight.
    midnight = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
    created_today = Clusters.objects.filter(
        account=account,
        created_at__gte=midnight
    ).count()
    if plan.daily_cluster_limit and created_today >= plan.daily_cluster_limit:
        return {
            'valid': False,
            'error': f'Daily cluster limit reached ({plan.daily_cluster_limit} clusters per day). Please try again tomorrow.'
        }
    # Lifetime cap on clusters for this account.
    cluster_count = Clusters.objects.filter(account=account).count()
    if plan.max_clusters and cluster_count >= plan.max_clusters:
        return {
            'valid': False,
            'error': f'Maximum cluster limit reached ({plan.max_clusters} clusters). Please upgrade your plan or delete existing clusters.'
        }
    return {'valid': True}
def validate_cluster_exists(cluster_id: int, account=None) -> Dict[str, Any]:
    """
    Validate that a cluster exists.

    Args:
        cluster_id: Cluster ID
        account: Optional account for filtering

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    from igny8_core.modules.planner.models import Clusters
    # Build the filter in one pass; the account constraint is optional.
    lookup = {'id': cluster_id}
    if account:
        lookup['account'] = account
    if Clusters.objects.filter(**lookup).exists():
        return {'valid': True}
    return {'valid': False, 'error': 'Cluster not found'}
def validate_tasks_exist(task_ids: list, account=None) -> Dict[str, Any]:
    """
    Validate that tasks exist in database.

    Args:
        task_ids: List of task IDs
        account: Optional account for filtering

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    from igny8_core.modules.writer.models import Tasks
    queryset = Tasks.objects.filter(id__in=task_ids)
    if account:
        queryset = queryset.filter(account=account)
    # .exists() avoids a full COUNT(*) just to test emptiness (consistent
    # with validate_cluster_exists).
    if not queryset.exists():
        return {'valid': False, 'error': 'No tasks found'}
    return {'valid': True}
def validate_api_key(api_key: Optional[str], integration_type: str = 'openai') -> Dict[str, Any]:
    """
    Validate that API key is configured.

    Args:
        api_key: API key to validate
        integration_type: Type of integration ('openai', 'runware')

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    # Any non-empty key passes; None and '' both count as "not configured".
    if api_key:
        return {'valid': True}
    return {
        'valid': False,
        'error': f'{integration_type.upper()} API key not configured'
    }
def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
    """
    Validate that model is in supported list.

    Args:
        model: Model name to validate
        model_type: Type of model ('text' or 'image')

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    from .constants import MODEL_RATES, VALID_OPENAI_IMAGE_MODELS
    # model_type is a single value, so these two checks are mutually exclusive.
    if model_type == 'text' and model not in MODEL_RATES:
        return {
            'valid': False,
            'error': f'Model "{model}" is not in supported models list'
        }
    if model_type == 'image' and model not in VALID_OPENAI_IMAGE_MODELS:
        return {
            'valid': False,
            'error': f'Model "{model}" is not valid for OpenAI image generation. Only {", ".join(VALID_OPENAI_IMAGE_MODELS)} are supported.'
        }
    return {'valid': True}
def validate_image_size(size: str, model: str) -> Dict[str, Any]:
    """
    Validate that image size is valid for the selected model.

    Args:
        size: Image size (e.g., '1024x1024')
        model: Model name

    Returns:
        Dict with 'valid' (bool) and optional 'error' (str)
    """
    from .constants import VALID_SIZES_BY_MODEL
    # Unknown models yield an empty allow-list, so every size is rejected.
    allowed = VALID_SIZES_BY_MODEL.get(model, [])
    if size in allowed:
        return {'valid': True}
    return {
        'valid': False,
        'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(allowed)}'
    }

View File

@@ -7,6 +7,8 @@ from typing import List
from django.db import transaction
from igny8_core.modules.planner.models import Keywords, Clusters, ContentIdeas
from igny8_core.utils.ai_processor import ai_processor
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
from igny8_core.ai.tracker import ConsoleStepTracker
logger = logging.getLogger(__name__)
@@ -23,9 +25,19 @@ except ImportError:
return decorator
# ============================================================================
# DEPRECATED: This function is deprecated. Use the new AI framework instead.
# New path: views.py -> run_ai_task -> AIEngine -> AutoClusterFunction
# This function is kept for backward compatibility but should not be used.
# ============================================================================
def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, account_id: int = None, progress_callback=None):
"""
Core logic for clustering keywords. Can be called with or without Celery.
[DEPRECATED] Core logic for clustering keywords. Can be called with or without Celery.
⚠️ WARNING: This function is deprecated. Use the new AI framework instead:
- New path: views.py -> run_ai_task -> AIEngine -> AutoClusterFunction
- This function uses the old AIProcessor and does not use PromptRegistry
- Console logging may not work correctly in this path
Args:
keyword_ids: List of keyword IDs to cluster
@@ -33,7 +45,11 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
account_id: Account ID for account isolation
progress_callback: Optional function to call for progress updates (for Celery tasks)
"""
# Track request and response steps
# Initialize console step tracker for logging
tracker = ConsoleStepTracker('auto_cluster')
tracker.init(f"Starting keyword clustering for {len(keyword_ids)} keywords")
# Track request and response steps (for Celery progress callbacks)
request_steps = []
response_steps = []
@@ -56,6 +72,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
# Step 4: Keyword Loading & Validation
tracker.prep(f"Loading {len(keyword_ids)} keywords from database")
step_start = time.time()
keywords_queryset = Keywords.objects.filter(id__in=keyword_ids)
if account_id:
@@ -66,7 +83,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
keywords = list(keywords_queryset.select_related('account', 'site', 'site__account', 'sector', 'sector__site'))
if not keywords:
logger.warning(f"No keywords found for clustering: {keyword_ids}")
error_msg = f"No keywords found for clustering: {keyword_ids}"
logger.warning(error_msg)
tracker.error('Validation', error_msg)
request_steps.append({
'stepNumber': 4,
'stepName': 'Keyword Loading & Validation',
@@ -83,6 +102,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
return {'success': False, 'error': 'No keywords found', 'request_steps': request_steps, 'response_steps': response_steps}
tracker.prep(f"Loaded {len(keywords)} keywords successfully")
request_steps.append({
'stepNumber': 4,
'stepName': 'Keyword Loading & Validation',
@@ -329,10 +349,20 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
return {'success': False, 'error': f'Error preparing AI call: {str(e)}', 'request_steps': request_steps, 'response_steps': response_steps}
# Call AI with step tracking
result = processor.cluster_keywords(keyword_data, sector_name=sector_name, account=account, response_steps=response_steps, progress_callback=progress_callback)
tracker.ai_call(f"Sending {len(keyword_data)} keywords to AI for clustering")
result = processor.cluster_keywords(
keyword_data,
sector_name=sector_name,
account=account,
response_steps=response_steps,
progress_callback=progress_callback,
tracker=tracker # Pass tracker for console logging
)
if result.get('error'):
logger.error(f"AI clustering error: {result['error']}")
error_msg = f"AI clustering error: {result['error']}"
logger.error(error_msg)
tracker.error('AI_CALL', error_msg)
if progress_callback:
progress_callback(
state='FAILURE',
@@ -345,6 +375,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
return {'success': False, 'error': result['error'], 'request_steps': request_steps, 'response_steps': response_steps}
# Parse response
tracker.parse("Parsing AI response into cluster data")
# Update response_steps from result if available
if result.get('response_steps'):
response_steps.extend(result.get('response_steps', []))
@@ -369,6 +402,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
keywords_updated = 0
# Step 13: Database Transaction Start
tracker.save(f"Creating {len(clusters_data)} clusters in database")
step_start = time.time()
# Create/update clusters and assign keywords
# Note: account and sector are already extracted above to avoid database queries inside transaction
@@ -566,6 +600,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
# Final progress update
final_message = f"Clustering complete: {clusters_created} clusters created, {keywords_updated} keywords updated"
logger.info(final_message)
tracker.done(final_message)
if progress_callback:
progress_callback(
@@ -587,7 +622,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
}
except Exception as e:
logger.error(f"Error in auto_cluster_keywords_core: {str(e)}", exc_info=True)
error_msg = f"Error in auto_cluster_keywords_core: {str(e)}"
logger.error(error_msg, exc_info=True)
tracker.error('Exception', error_msg, exception=e)
if progress_callback:
progress_callback(
state='FAILURE',
@@ -607,10 +644,18 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
@shared_task(bind=True, max_retries=3)
# ============================================================================
# DEPRECATED: This Celery task is deprecated. Use run_ai_task instead.
# New path: views.py -> run_ai_task -> AIEngine -> AutoClusterFunction
# ============================================================================
def auto_cluster_keywords_task(self, keyword_ids: List[int], sector_id: int = None, account_id: int = None):
"""
Celery task wrapper for clustering keywords using AI.
Calls the core function with progress callback.
[DEPRECATED] Celery task wrapper for clustering keywords using AI.
⚠️ WARNING: This task is deprecated. Use the new AI framework instead:
- New path: views.py -> run_ai_task -> AIEngine -> AutoClusterFunction
- This task uses the old _auto_cluster_keywords_core function
- Console logging may not work correctly in this path
Args:
keyword_ids: List of keyword IDs to cluster

View File

@@ -716,96 +716,99 @@ class ClusterViewSet(SiteSectorModelViewSet):
@action(detail=False, methods=['post'], url_path='auto_generate_ideas', url_name='auto_generate_ideas')
def auto_generate_ideas(self, request):
"""Generate content ideas for clusters using AI"""
ids = request.data.get('ids', [])
"""Generate content ideas for clusters using AI - New unified framework"""
import logging
from igny8_core.ai.tasks import run_ai_task
from kombu.exceptions import OperationalError as KombuOperationalError
if not ids:
return Response({'error': 'No cluster IDs provided'}, status=status.HTTP_400_BAD_REQUEST)
logger = logging.getLogger(__name__)
if len(ids) > 5:
return Response({'error': 'Maximum 5 clusters allowed for idea generation'}, status=status.HTTP_400_BAD_REQUEST)
# Get account - handle RelatedObjectDoesNotExist
account = None
account_id = None
try:
# Get account
account = getattr(request, 'account', None)
if account:
# Access pk directly instead of id to avoid potential relationship access
account_id = getattr(account, 'pk', None) or getattr(account, 'id', None)
except Exception as e:
import logging
logger = logging.getLogger(__name__)
logger.error(f"Error getting account: {type(e).__name__}: {e}", exc_info=True)
account_id = None
# Try to queue Celery task, fall back to synchronous if Celery not available
try:
import logging
logger = logging.getLogger(__name__)
logger.info(f"auto_generate_ideas called with ids={ids}, account_id={account_id}")
account_id = account.id if account else None
from .tasks import auto_generate_ideas_task
from kombu.exceptions import OperationalError as KombuOperationalError
# Prepare payload
payload = {
'ids': request.data.get('ids', [])
}
if hasattr(auto_generate_ideas_task, 'delay'):
try:
# Celery is available - queue async task
logger.info("Queuing Celery task...")
task = auto_generate_ideas_task.delay(ids, account_id=account_id)
logger.info(f"Task queued successfully: {task.id}")
logger.info(f"auto_generate_ideas called with ids={payload['ids']}, account_id={account_id}")
# Validate basic input
if not payload['ids']:
return Response({
'success': False,
'error': 'No cluster IDs provided'
}, status=status.HTTP_400_BAD_REQUEST)
if len(payload['ids']) > 10:
return Response({
'success': False,
'error': 'Maximum 10 clusters allowed for idea generation'
}, status=status.HTTP_400_BAD_REQUEST)
# Try to queue Celery task
try:
if hasattr(run_ai_task, 'delay'):
task = run_ai_task.delay(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
logger.info(f"Task queued: {task.id}")
return Response({
'success': True,
'task_id': str(task.id),
'message': 'Idea generation started'
}, status=status.HTTP_200_OK)
except (KombuOperationalError, ConnectionError) as e:
# Celery connection failed - execute synchronously
logger.warning(f"Celery connection failed, executing synchronously: {e}")
result = auto_generate_ideas_task(ids, account_id=account_id)
else:
# Celery not available - execute synchronously
logger.warning("Celery not available, executing synchronously")
result = run_ai_task(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
'ideas_created': result.get('ideas_created', 0),
'message': 'Ideas generated successfully'
**result
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
# Celery not available - execute synchronously
logger.info("Celery not available, executing synchronously")
result = auto_generate_ideas_task(ids, account_id=account_id)
except (KombuOperationalError, ConnectionError) as e:
# Broker connection failed - fall back to synchronous execution
logger.warning(f"Celery broker unavailable, falling back to synchronous execution: {str(e)}")
result = run_ai_task(
function_name='generate_ideas',
payload=payload,
account_id=account_id
)
if result.get('success'):
return Response({
'success': True,
'ideas_created': result.get('ideas_created', 0),
'message': 'Ideas generated successfully'
**result
}, status=status.HTTP_200_OK)
else:
return Response({
'success': False,
'error': result.get('error', 'Idea generation failed')
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except ImportError as e:
import logging
logger = logging.getLogger(__name__)
logger.error(f"ImportError in auto_generate_ideas: {e}", exc_info=True)
return Response({
'success': False,
'error': 'AI tasks module not available'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"Error in auto_generate_ideas: {str(e)}", exc_info=True)
return Response({
'success': False,
'error': str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
import logging
logger = logging.getLogger(__name__)
error_type = type(e).__name__
error_msg = str(e)
logger.error(f"Error in auto_generate_ideas: {error_type}: {error_msg}", exc_info=True)
logger.error(f"Unexpected error in auto_generate_ideas: {str(e)}", exc_info=True)
return Response({
'success': False,
'error': f'Unexpected error: {error_msg}'
'error': f'Unexpected error: {str(e)}'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def list(self, request, *args, **kwargs):
@@ -977,8 +980,8 @@ class ContentIdeasViewSet(SiteSectorModelViewSet):
except (KombuOperationalError, ConnectionError) as e:
# Celery connection failed - execute synchronously
logger.warning(f"Celery connection failed, executing synchronously: {e}")
from .tasks import _generate_single_idea_core
result = _generate_single_idea_core(cluster_id, account_id=account_id, progress_callback=None)
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
result = generate_ideas_core(cluster_id, account_id=account_id, progress_callback=None)
if result.get('success'):
return Response({
'success': True,

View File

@@ -777,25 +777,72 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
try:
task_state = task.state
except (ValueError, KeyError) as state_exc:
# Task has malformed exception info - try to get error from result
# Task has malformed exception info - try to get error from multiple sources
logger.warning(f"Error accessing task.state (malformed exception info): {str(state_exc)}")
error_msg = 'Task failed - exception details unavailable'
error_type = 'UnknownError'
request_steps = []
response_steps = []
# First, try to get from backend's stored meta (most reliable for our update_state calls)
try:
# Try to get error from task.result
if hasattr(task, 'result'):
result = task.result
if isinstance(result, dict) and 'error' in result:
error_msg = result['error']
elif isinstance(result, str):
error_msg = result
except Exception:
pass
backend = task.backend
if hasattr(backend, 'get_task_meta'):
stored_meta = backend.get_task_meta(task_id)
if stored_meta and isinstance(stored_meta, dict):
meta = stored_meta.get('meta', {})
if isinstance(meta, dict):
if 'error' in meta:
error_msg = meta.get('error')
if 'error_type' in meta:
error_type = meta.get('error_type', error_type)
if 'request_steps' in meta:
request_steps = meta.get('request_steps', [])
if 'response_steps' in meta:
response_steps = meta.get('response_steps', [])
except Exception as e:
logger.debug(f"Error getting from backend meta: {str(e)}")
# Try to get error from task.result
if error_msg == 'Task failed - exception details unavailable':
try:
if hasattr(task, 'result'):
result = task.result
if isinstance(result, dict):
error_msg = result.get('error', error_msg)
error_type = result.get('error_type', error_type)
request_steps = result.get('request_steps', request_steps)
response_steps = result.get('response_steps', response_steps)
elif isinstance(result, str):
error_msg = result
except Exception as e:
logger.debug(f"Error extracting error from task.result: {str(e)}")
# Also try to get error from task.info
if error_msg == 'Task failed - exception details unavailable':
try:
if hasattr(task, 'info') and task.info:
if isinstance(task.info, dict):
if 'error' in task.info:
error_msg = task.info['error']
if 'error_type' in task.info:
error_type = task.info['error_type']
if 'request_steps' in task.info:
request_steps = task.info.get('request_steps', request_steps)
if 'response_steps' in task.info:
response_steps = task.info.get('response_steps', response_steps)
except Exception as e:
logger.debug(f"Error extracting error from task.info: {str(e)}")
return Response({
'state': 'FAILURE',
'meta': {
'error': error_msg,
'error_type': error_type,
'percentage': 0,
'message': f'Error: {error_msg}',
'request_steps': request_steps,
'response_steps': response_steps,
}
})
except (KombuOperationalError, RedisConnectionError, ConnectionError) as conn_exc:
@@ -834,15 +881,29 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
})
# Safely get task info/result
# Try to get error from task.result first (before it gets malformed)
# Try to get error from multiple sources
task_result = None
task_info = None
error_message = None
error_type = None
# First, try to get from backend's stored meta (most reliable for our update_state calls)
try:
backend = task.backend
if hasattr(backend, 'get_task_meta'):
stored_meta = backend.get_task_meta(task_id)
if stored_meta and isinstance(stored_meta, dict):
meta = stored_meta.get('meta', {})
if isinstance(meta, dict):
if 'error' in meta:
error_message = meta.get('error')
error_type = meta.get('error_type', 'UnknownError')
except Exception as backend_err:
logger.debug(f"Could not get from backend meta: {backend_err}")
try:
# Try to get result first - this often has the actual error
if hasattr(task, 'result'):
if not error_message and hasattr(task, 'result'):
try:
task_result = task.result
# If result is a dict with error, extract it
@@ -850,6 +911,9 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
if 'error' in task_result:
error_message = task_result.get('error')
error_type = task_result.get('error_type', 'UnknownError')
elif 'success' in task_result and not task_result.get('success'):
error_message = task_result.get('error', 'Task failed')
error_type = task_result.get('error_type', 'UnknownError')
except Exception:
pass # Will try task.info next
except Exception:
@@ -971,6 +1035,25 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
else:
error_message = str(error_info) if error_info else 'Task failed'
# If still no error message, try to get from task backend directly
if not error_message:
try:
# Try to get from backend's stored result
backend = task.backend
if hasattr(backend, 'get'):
stored = backend.get(task_id)
if stored and isinstance(stored, dict):
if 'error' in stored:
error_message = stored['error']
elif isinstance(stored.get('result'), dict) and 'error' in stored['result']:
error_message = stored['result']['error']
except Exception as backend_err:
logger.warning(f"Error getting from backend: {backend_err}")
# Final fallback
if not error_message:
error_message = 'Task failed - check backend logs for details'
response_meta = {
'error': error_message,
'percentage': 0,
@@ -992,6 +1075,13 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
# Also include error_type if available in meta
if 'error_type' in meta and not error_type:
response_meta['error_type'] = meta['error_type']
# Also check for error in meta directly
if 'error' in meta and not error_message:
error_message = meta['error']
response_meta['error'] = error_message
if 'error_type' in meta and not error_type:
error_type = meta['error_type']
response_meta['error_type'] = error_type
return Response({
'state': task_state,

View File

@@ -0,0 +1,35 @@
# Generated migration for adding SEO fields to Tasks model
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('writer', '0003_alter_content_options_alter_images_options_and_more'),
('igny8_core_auth', '0008_passwordresettoken_alter_industry_options_and_more'),
]
operations = [
migrations.AddField(
model_name='tasks',
name='primary_keyword',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='tasks',
name='secondary_keywords',
field=models.JSONField(blank=True, default=list, help_text='List of secondary keywords'),
),
migrations.AddField(
model_name='tasks',
name='tags',
field=models.JSONField(blank=True, default=list, help_text='List of tags'),
),
migrations.AddField(
model_name='tasks',
name='categories',
field=models.JSONField(blank=True, default=list, help_text='List of categories'),
),
]

View File

@@ -65,6 +65,10 @@ class Tasks(SiteSectorBaseModel):
# SEO fields
meta_title = models.CharField(max_length=255, blank=True, null=True)
meta_description = models.TextField(blank=True, null=True)
primary_keyword = models.CharField(max_length=255, blank=True, null=True)
secondary_keywords = models.JSONField(default=list, blank=True, help_text="List of secondary keywords")
tags = models.JSONField(default=list, blank=True, help_text="List of tags")
categories = models.JSONField(default=list, blank=True, help_text="List of categories")
# WordPress integration
assigned_post_id = models.IntegerField(null=True, blank=True) # WordPress post ID if published

View File

@@ -8,6 +8,8 @@ from django.db import transaction
from igny8_core.modules.writer.models import Tasks, Images, Content
from igny8_core.utils.ai_processor import ai_processor
from igny8_core.modules.system.utils import get_prompt_value, get_default_prompt
from igny8_core.ai.functions.generate_content import generate_content_core
from igny8_core.ai.functions.generate_images import generate_images_core
logger = logging.getLogger(__name__)

View File

@@ -1049,10 +1049,17 @@ Make sure each prompt is detailed enough for image generation, describing the vi
account=None,
response_steps=None,
progress_callback=None,
tracker=None, # Optional ConsoleStepTracker for logging
**kwargs
) -> Dict[str, Any]:
"""
Cluster keywords using AI-based semantic similarity.
[DEPRECATED] Cluster keywords using AI-based semantic similarity.
⚠️ WARNING: This method is deprecated. Use the new AI framework instead:
- New path: views.py -> run_ai_task -> AIEngine -> AutoClusterFunction
- This method uses the old prompt system and does not use PromptRegistry
- Console logging may not work correctly in this path
Based on reference plugin's clustering prompt.
Args:
@@ -1063,6 +1070,7 @@ Make sure each prompt is detailed enough for image generation, describing the vi
Returns:
Dict with 'clusters' (list of cluster dicts with name, description, keywords)
"""
logger.warning("AIProcessor.cluster_keywords is deprecated. Use the new AI framework (AutoClusterFunction) instead.")
if not keywords:
return {
'clusters': [],
@@ -1075,20 +1083,41 @@ Make sure each prompt is detailed enough for image generation, describing the vi
for kw in keywords
])
if tracker:
tracker.prep(f"Formatted {len(keywords)} keywords for prompt")
account_obj = account or self.account
# Get prompt template from database or default
# NOTE: This is legacy code. New code should use PromptRegistry.get_prompt()
# Keeping this for backward compatibility with old tasks
prompt_template = self.get_prompt('clustering', account=account_obj)
# Replace placeholders in prompt template
if '[IGNY8_KEYWORDS]' not in prompt_template:
error_msg = "Prompt template missing [IGNY8_KEYWORDS] placeholder"
logger.error(error_msg)
if tracker:
tracker.error('Prompt', error_msg)
return {
'clusters': [],
'error': error_msg,
}
prompt = prompt_template.replace('[IGNY8_KEYWORDS]', keywords_text)
if tracker:
tracker.prep(f"Prompt prepared: {len(prompt)} characters (keywords: {len(keywords_text)} chars)")
if sector_name:
prompt += f"\n\nNote: These keywords are for the '{sector_name}' sector."
logger.info(f"Clustering {len(keywords)} keywords using AI")
logger.info(f"AIProcessor.cluster_keywords: About to call OpenAI API with {len(keywords)} keywords")
if tracker:
tracker.ai_call(f"Calling OpenAI API with model: {self.default_model}")
# Initialize response_steps if not provided
if response_steps is None:
response_steps = []
@@ -1109,6 +1138,12 @@ Make sure each prompt is detailed enough for image generation, describing the vi
response_format=response_format,
response_steps=response_steps
)
if tracker:
if result.get('error'):
tracker.error('AI_CALL', f"OpenAI API error: {result['error']}")
else:
tracker.ai_call(f"Received response: {result.get('total_tokens', 0)} tokens")
logger.info(f"AIProcessor.cluster_keywords: OpenAI API call completed. Error: {result.get('error')}, Has content: {bool(result.get('content'))}")
except Exception as e:
logger.error(f"AIProcessor.cluster_keywords: Exception calling OpenAI API: {type(e).__name__}: {str(e)}", exc_info=True)
@@ -1141,11 +1176,16 @@ Make sure each prompt is detailed enough for image generation, describing the vi
}
# Step 11: JSON Extraction & Parsing
if tracker:
tracker.parse("Extracting JSON from AI response")
step_start = time.time()
json_data = self._extract_json_from_response(result['content'])
if not json_data:
logger.error(f"Failed to parse clustering response: {result.get('content', '')[:200]}")
error_msg = f"Failed to parse clustering response: {result.get('content', '')[:200]}"
logger.error(error_msg)
if tracker:
tracker.error('Parse', error_msg)
if response_steps is not None:
response_steps.append({
'stepNumber': 11,
@@ -1194,7 +1234,10 @@ Make sure each prompt is detailed enough for image generation, describing the vi
clusters = json_data
if not clusters:
logger.error(f"No clusters found in response: {json_data}")
error_msg = f"No clusters found in response: {json_data}"
logger.error(error_msg)
if tracker:
tracker.error('Parse', error_msg)
if response_steps is not None:
response_steps.append({
'stepNumber': 12,
@@ -1216,6 +1259,10 @@ Make sure each prompt is detailed enough for image generation, describing the vi
'response_steps': response_steps
}
logger.info(f"Successfully parsed {len(clusters)} clusters from AI response")
if tracker:
tracker.parse(f"Successfully extracted {len(clusters)} clusters from response")
if response_steps is not None:
response_steps.append({
'stepNumber': 12,