This commit is contained in:
Desktop
2025-11-10 22:05:35 +05:00
parent c21ce01cd2
commit 46f5bb4d62
10 changed files with 2193 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
"""
AI Templates
Template files for reference when creating new AI functions.
"""

View File

@@ -0,0 +1,281 @@
"""
AI Functions Template
Template/Reference file showing the common pattern used by auto_cluster, generate_ideas, and generate_content.
This is a reference template - do not modify existing functions, use this as a guide for new functions.
"""
import logging
from typing import Any, Callable, Dict, List, Optional

from igny8_core.ai.helpers.ai_core import AICore
from igny8_core.ai.helpers.base import BaseAIFunction
from igny8_core.ai.helpers.settings import get_model_config
from igny8_core.ai.helpers.tracker import ConsoleStepTracker
from igny8_core.auth.models import Account
logger = logging.getLogger(__name__)
def ai_function_core_template(
    function_class: BaseAIFunction,
    function_name: str,
    payload: Dict[str, Any],
    account_id: Optional[int] = None,
    progress_callback: Optional[Callable] = None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Template for AI function core logic (legacy function signature pattern).

    This template shows the common pattern used by:
    - generate_ideas_core
    - generate_content_core
    - auto_cluster (via engine, but similar pattern)

    Usage Example:
        def my_function_core(item_id: int, account_id: int = None, progress_callback=None):
            fn = MyFunctionClass()
            payload = {'ids': [item_id]}
            return ai_function_core_template(
                function_class=fn,
                function_name='my_function',
                payload=payload,
                account_id=account_id,
                progress_callback=progress_callback
            )

    Args:
        function_class: Instance of the AI function class (e.g., GenerateIdeasFunction())
        function_name: Function name for config/tracking (e.g., 'generate_ideas')
        payload: Payload dict with 'ids' and other function-specific data
        account_id: Optional account ID for account isolation
        progress_callback: Optional progress callback for Celery tasks.
            NOTE(review): accepted but never invoked in this template —
            confirm whether per-step progress reporting should be wired in.
        **kwargs: Additional function-specific parameters (unused here;
            reserved for function-specific extensions)

    Returns:
        Dict with 'success', function-specific result fields, 'message', etc.
        On failure: {'success': False, 'error': <message>}.
    """
    # Step tracker logs each pipeline phase (init/prep/parse/save/done/error).
    tracker = ConsoleStepTracker(function_name)
    tracker.init("Task started")
    try:
        # Load account for account isolation (stays None when no account_id given).
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)
            tracker.prep("Loading account data...")
        # Store account on the function instance so its hooks can access it.
        function_class.account = account
        # Validate the payload before doing any work.
        tracker.prep("Validating input...")
        validated = function_class.validate(payload, account)
        if not validated['valid']:
            tracker.error('ValidationError', validated['error'])
            return {'success': False, 'error': validated['error']}
        # Prepare data, then build the prompt from it.
        tracker.prep("Preparing data...")
        data = function_class.prepare(payload, account)
        tracker.prep("Building prompt...")
        prompt = function_class.build_prompt(data, account)
        # Model config (model, max_tokens, temperature, response_format)
        # comes from centralized settings, keyed by function name.
        model_config = get_model_config(function_name)
        # Generate function_id for tracking (ai_ prefix with function name).
        function_id = f"ai_{function_name}"
        # Call AI using the centralized request handler.
        ai_core = AICore(account=account)
        result = ai_core.run_ai_request(
            prompt=prompt,
            model=model_config.get('model'),
            max_tokens=model_config.get('max_tokens'),
            temperature=model_config.get('temperature'),
            response_format=model_config.get('response_format'),
            function_name=function_name,
            function_id=function_id,
            tracker=tracker,
        )
        if result.get('error'):
            return {'success': False, 'error': result['error']}
        # Parse the raw AI response into structured data.
        tracker.parse("Parsing AI response...")
        parsed = function_class.parse_response(result['content'], tracker)
        if not parsed:
            tracker.error('ParseError', 'No data parsed from AI response')
            return {'success': False, 'error': 'No data parsed from AI response'}
        # Report how much was parsed (list responses vs. a single object).
        if isinstance(parsed, list):
            tracker.parse(f"Parsed {len(parsed)} item(s)")
        else:
            tracker.parse("Parsed response")
        # Persist the parsed output.
        tracker.save("Saving to database...")
        save_result = function_class.save_output(parsed, data, account, step_tracker=tracker)
        tracker.save(f"Saved {save_result.get('count', 0)} item(s)")
        # Build the user-facing success message from the first item when possible.
        # (Previously the same string was duplicated for tracker.done and message.)
        if isinstance(parsed, list) and len(parsed) > 0:
            first_item = parsed[0]
            item_name = first_item.get('title') or first_item.get('name') or 'item'
            message = f"Successfully created {item_name}"
        else:
            message = "Task completed successfully"
        tracker.done(message)
        return {
            'success': True,
            **save_result,
            'message': message,
        }
    except Exception as e:
        # Top-level boundary: log with traceback and return a failure dict
        # instead of propagating, so callers always receive a result dict.
        tracker.error('Exception', str(e), e)
        logger.error(f"Error in {function_name}_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}
def ai_function_batch_template(
    function_class: BaseAIFunction,
    function_name: str,
    payload: Dict[str, Any],
    account_id: Optional[int] = None,
    progress_callback: Optional[Callable] = None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Template for AI function batch processing (like generate_content_core).

    This template shows the pattern for functions that process multiple items
    in a loop. A failure on one item is logged and skipped so the remaining
    items still get processed.

    Usage Example:
        def my_batch_function_core(item_ids: List[int], account_id: int = None, progress_callback=None):
            fn = MyFunctionClass()
            payload = {'ids': item_ids}
            return ai_function_batch_template(
                function_class=fn,
                function_name='my_function',
                payload=payload,
                account_id=account_id,
                progress_callback=progress_callback
            )

    Args:
        function_class: Instance of the AI function class
        function_name: Function name for config/tracking
        payload: Payload dict with 'ids' list
        account_id: Optional account ID for account isolation
        progress_callback: Optional progress callback for Celery tasks.
            NOTE(review): accepted but never invoked in this template —
            confirm whether per-item progress reporting should be wired in.
        **kwargs: Additional function-specific parameters (unused here)

    Returns:
        Dict with 'success', 'count', 'tasks_updated', 'message', etc.
        On failure: {'success': False, 'error': <message>}.
    """
    tracker = ConsoleStepTracker(function_name)
    tracker.init("Task started")
    try:
        # Load account for account isolation (stays None when no account_id given).
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)
            tracker.prep("Loading account data...")
        # Store account on the function instance so its hooks can access it.
        function_class.account = account
        # Validate the payload before doing any work.
        tracker.prep("Validating input...")
        validated = function_class.validate(payload, account)
        if not validated['valid']:
            tracker.error('ValidationError', validated['error'])
            return {'success': False, 'error': validated['error']}
        # Prepare data; normalize to a list so a single item is handled uniformly.
        tracker.prep("Preparing data...")
        items = function_class.prepare(payload, account)
        if not isinstance(items, list):
            items = [items]
        total_items = len(items)
        processed_count = 0
        tracker.prep(f"Processing {total_items} item(s)...")
        # Model config, function_id, and the AI client are shared across items.
        model_config = get_model_config(function_name)
        function_id = f"ai_{function_name}"
        ai_core = AICore(account=account)
        # Process each item independently.
        for idx, item in enumerate(items):
            try:
                # build_prompt/save_output receive the item wrapped in a
                # single-element list, matching the list shape prepare() returns.
                # (Bug fix: the original `item if not isinstance(items, list)
                # else [item]` was dead code — `items` is always a list here,
                # so the expression always evaluated to [item].)
                prompt = function_class.build_prompt([item], account)
                # Call AI for this item.
                result = ai_core.run_ai_request(
                    prompt=prompt,
                    model=model_config.get('model'),
                    max_tokens=model_config.get('max_tokens'),
                    temperature=model_config.get('temperature'),
                    response_format=model_config.get('response_format'),
                    function_name=function_name,
                    function_id=function_id,
                    tracker=tracker,
                )
                if result.get('error'):
                    logger.error(f"AI error for item {idx + 1}/{total_items}: {result['error']}")
                    continue
                # Parse the raw AI response into structured data.
                parsed = function_class.parse_response(result['content'], tracker)
                if not parsed:
                    logger.warning(f"No data parsed for item {idx + 1}/{total_items}")
                    continue
                # Persist this item's output.
                save_result = function_class.save_output(
                    parsed,
                    [item],
                    account,
                    step_tracker=tracker,
                )
                # Count whichever result key the function reports.
                processed_count += save_result.get('count', 0) or save_result.get('tasks_updated', 0) or 0
            except Exception as e:
                # Per-item boundary: log and move on to the next item.
                logger.error(f"Error processing item {idx + 1}/{total_items}: {str(e)}", exc_info=True)
                continue
        tracker.done(f"Processed {processed_count} item(s) successfully")
        return {
            'success': True,
            'count': processed_count,
            'tasks_updated': processed_count,
            'message': f'Task completed: {processed_count} item(s) processed',
        }
    except Exception as e:
        # Top-level boundary: log with traceback and return a failure dict.
        tracker.error('Exception', str(e), e)
        logger.error(f"Error in {function_name}_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}

View File

@@ -0,0 +1,80 @@
"""
Modal Configuration Templates for AI Functions
Each function uses the same AIProgressModal component with different configs.
"""
# Modal configuration templates for each AI function.
# Each entry configures the shared AIProgressModal component:
#   title                     — modal title shown while the task runs
#   function_id               — tracking id ("ai_" + function name, matching the backend)
#   success_title/message     — shown on success; message templates use str.format
#                               placeholders filled from the function's result dict
#   error_title/message       — shown on failure
MODAL_CONFIGS = {
    'auto_cluster': {
        'title': 'Auto Cluster Keywords',
        'function_id': 'ai_auto_cluster',
        'success_title': 'Clustering Complete!',
        'success_message_template': 'Successfully created {clusters_created} clusters and updated {keywords_updated} keywords.',
        'error_title': 'Clustering Failed',
        'error_message_template': 'An error occurred while clustering keywords. Please try again.',
    },
    'generate_ideas': {
        'title': 'Generating Ideas',
        'function_id': 'ai_generate_ideas',
        'success_title': 'Ideas Generated!',
        'success_message_template': 'Successfully generated {ideas_created} content idea(s).',
        'error_title': 'Idea Generation Failed',
        'error_message_template': 'An error occurred while generating ideas. Please try again.',
    },
    'generate_content': {
        'title': 'Generating Content',
        'function_id': 'ai_generate_content',
        'success_title': 'Content Generated!',
        'success_message_template': 'Successfully generated content for {tasks_updated} task(s).',
        'error_title': 'Content Generation Failed',
        'error_message_template': 'An error occurred while generating content. Please try again.',
    },
}
# Legacy function IDs (for backward compatibility).
# Maps function name -> the function_id used by legacy code paths; consumed by
# get_modal_config(is_legacy=True). These currently match the ids in
# MODAL_CONFIGS — kept separate so legacy ids can diverge if ever needed.
LEGACY_FUNCTION_IDS = {
    'generate_ideas': 'ai_generate_ideas',
    'generate_content': 'ai_generate_content',
}
def get_modal_config(function_name: str, is_legacy: bool = False) -> dict:
    """
    Get modal configuration for an AI function.

    Args:
        function_name: Function name (e.g., 'auto_cluster', 'generate_ideas', 'generate_content')
        is_legacy: Whether this is a legacy function path

    Returns:
        Dict with modal configuration (empty dict for unknown function names)
    """
    # Copy so callers can mutate the result without touching MODAL_CONFIGS.
    config = dict(MODAL_CONFIGS.get(function_name, {}))
    # Legacy paths get their function_id overridden when a legacy id exists.
    if is_legacy:
        legacy_id = LEGACY_FUNCTION_IDS.get(function_name)
        if legacy_id is not None:
            config['function_id'] = legacy_id
    return config
def format_success_message(function_name: str, result: dict) -> str:
    """
    Format success message based on function result.

    Args:
        function_name: Function name (key into MODAL_CONFIGS)
        result: Result dict from function execution; supplies the values for
            the template's str.format placeholders (e.g. 'tasks_updated')

    Returns:
        Formatted success message, or a generic success message when the
        function is unknown or the result dict lacks a template variable.
    """
    config = MODAL_CONFIGS.get(function_name, {})
    default_message = 'Task completed successfully.'
    template = config.get('success_message_template', default_message)
    try:
        return template.format(**result)
    except (KeyError, IndexError):
        # Bug fix: the previous fallback returned the raw template, leaking
        # unfilled '{placeholder}' text to the user when result keys didn't
        # match. Fall back to the generic message instead.
        return default_message