Initial commit: igny8 project

This commit is contained in:
igny8
2025-11-09 10:27:02 +00:00
commit 60b8188111
27265 changed files with 4360521 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
"""
IGNY8 AI Framework
Unified framework for all AI functions with consistent lifecycle, progress tracking, and logging.
"""
from igny8_core.ai.registry import register_function, get_function, list_functions
from igny8_core.ai.engine import AIEngine
from igny8_core.ai.base import BaseAIFunction
# Don't auto-import functions here - let apps.py handle it lazily
# This prevents circular import issues during Django startup
# Public, re-exported API of the AI framework package.
__all__ = ['AIEngine', 'BaseAIFunction', 'register_function', 'get_function', 'list_functions']

View File

@@ -0,0 +1,59 @@
"""
Admin configuration for AI models
"""
from django.contrib import admin
from igny8_core.ai.models import AITaskLog
@admin.register(AITaskLog)
class AITaskLogAdmin(admin.ModelAdmin):
    """Read-only admin view over AI task execution logs."""

    # Columns shown on the changelist page.
    list_display = ['function_name', 'account', 'status', 'phase', 'cost', 'tokens', 'duration', 'created_at']

    # Sidebar filters.
    list_filter = ['function_name', 'status', 'phase', 'created_at']

    # Free-text search targets.
    search_fields = ['function_name', 'task_id', 'message', 'error']

    # Every field is read-only: rows are written by the engine, never edited by hand.
    readonly_fields = [
        'task_id', 'function_name', 'account', 'phase', 'message', 'status',
        'duration', 'cost', 'tokens', 'request_steps', 'response_steps',
        'error', 'payload', 'result', 'created_at', 'updated_at',
    ]

    def has_add_permission(self, request):
        """Disallow manual creation; logs are produced automatically."""
        return False

    def has_change_permission(self, request, obj=None):
        """Disallow edits; logs are immutable once written."""
        return False

View File

@@ -0,0 +1,20 @@
"""
Django app configuration for AI framework
"""
from django.apps import AppConfig
class AIConfig(AppConfig):
    """Django app configuration for the AI framework."""

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'igny8_core.ai'
    verbose_name = 'AI Framework'

    def ready(self):
        """Register models with the admin site once the app registry is ready."""
        # AI functions are intentionally NOT registered here: the registry
        # lazy-loads them on first use, which avoids circular imports during
        # Django startup.
        try:
            import igny8_core.ai.admin  # noqa
        except ImportError:
            pass

View File

@@ -0,0 +1,96 @@
"""
Base class for all AI functions
"""
from abc import ABC, abstractmethod
from typing import Dict, List, Any, Optional
class BaseAIFunction(ABC):
    """
    Abstract contract shared by every AI function.

    Subclasses implement only their function-specific steps; the engine
    drives the lifecycle (validate -> prepare -> build_prompt -> AI call ->
    parse_response -> save_output).
    """

    @abstractmethod
    def get_name(self) -> str:
        """Return the registry name of this function (e.g. 'auto_cluster')."""
        pass

    def get_metadata(self) -> Dict:
        """Return display metadata: human-readable name, description, phase labels."""
        return {
            'display_name': self.get_name().replace('_', ' ').title(),
            'description': f'{self.get_name()} AI function',
            'phases': {
                'INIT': 'Initializing...',
                'PREP': 'Preparing data...',
                'AI_CALL': 'Processing with AI...',
                'PARSE': 'Parsing response...',
                'SAVE': 'Saving results...',
                'DONE': 'Complete!'
            }
        }

    def validate(self, payload: dict, account=None) -> Dict[str, Any]:
        """
        Validate the incoming payload.

        Default checks: a non-empty 'ids' list, plus an optional size cap from
        get_max_items(). Override for function-specific rules.
        """
        ids = payload.get('ids', [])
        if not ids:
            return {'valid': False, 'error': 'No IDs provided'}
        limit = self.get_max_items()
        if limit and len(ids) > limit:
            return {'valid': False, 'error': f'Maximum {limit} items allowed'}
        return {'valid': True}

    def get_max_items(self) -> Optional[int]:
        """Maximum number of ids accepted per call; None means unlimited."""
        return None

    @abstractmethod
    def prepare(self, payload: dict, account=None) -> Any:
        """Load and shape the data the prompt will be built from."""
        pass

    @abstractmethod
    def build_prompt(self, data: Any, account=None) -> str:
        """Render the AI prompt string from the prepared data."""
        pass

    def get_model(self, account=None) -> Optional[str]:
        """Model override; None means the account default chosen by AIProcessor."""
        return None

    @abstractmethod
    def parse_response(self, response: str, step_tracker=None) -> Any:
        """Turn the raw AI response text into structured data."""
        pass

    @abstractmethod
    def save_output(
        self,
        parsed: Any,
        original_data: Any,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Persist parsed results; return a summary dict (e.g. counts)."""
        pass

View File

@@ -0,0 +1,271 @@
"""
AI Engine - Central orchestrator for all AI functions
"""
import logging
from typing import Dict, Any, Optional
from igny8_core.ai.base import BaseAIFunction
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker
from igny8_core.ai.processor import AIProcessor
logger = logging.getLogger(__name__)
class AIEngine:
    """
    Central orchestrator for all AI functions.
    Manages lifecycle, progress, logging, retries, cost tracking.

    One engine instance drives a single task execution, owning:
      - ProgressTracker: pushes phase/percentage updates to the Celery task
      - StepTracker: accumulates the per-step audit trail
      - CostTracker: accumulates token and dollar totals
    """

    def __init__(self, celery_task=None, account=None):
        # celery_task: the bound Celery task (None when run synchronously);
        # used for progress updates and to record the task id when logging.
        # account: the owning account; required for DB logging and billing.
        self.task = celery_task
        self.account = account
        self.tracker = ProgressTracker(celery_task)
        # Placeholder label; replaced with the concrete function name in execute().
        self.step_tracker = StepTracker('ai_engine')
        self.cost_tracker = CostTracker()

    def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
        """
        Unified execution pipeline for all AI functions.
        Phases with improved percentage mapping:
        - INIT (0-10%): Validation & preparation
        - PREP (10-25%): Data loading & prompt building
        - AI_CALL (25-70%): API call to provider (longest phase)
        - PARSE (70-85%): Response parsing
        - SAVE (85-98%): Database operations
        - DONE (98-100%): Finalization

        Returns:
            dict: on success {'success': True, **save_output summary,
            'request_steps', 'response_steps', 'cost', 'tokens'};
            on failure the standard payload built by _handle_error.
        """
        function_name = fn.get_name()
        self.step_tracker.function_name = function_name
        try:
            # Phase 1: INIT - Validation & Setup (0-10%)
            validated = fn.validate(payload, self.account)
            if not validated['valid']:
                return self._handle_error(validated['error'], fn)
            self.step_tracker.add_request_step("INIT", "success", "Validation complete")
            self.tracker.update("INIT", 10, "Validation complete", meta=self.step_tracker.get_meta())
            # Phase 2: PREP - Data Loading & Prompt Building (10-25%)
            data = fn.prepare(payload, self.account)
            # Best-effort item count, used only for progress messages.
            # NOTE(review): the 'keywords' special-case couples this generic
            # engine to the clustering function's data shape.
            if isinstance(data, (list, tuple)):
                data_count = len(data)
            elif isinstance(data, dict):
                data_count = len(data.get('keywords', [])) if 'keywords' in data else data.get('count', 1)
            else:
                data_count = 1
            prompt = fn.build_prompt(data, self.account)
            self.step_tracker.add_request_step("PREP", "success", f"Loaded {data_count} items, built prompt ({len(prompt)} chars)")
            self.tracker.update("PREP", 25, f"Data prepared: {data_count} items", meta=self.step_tracker.get_meta())
            # Phase 3: AI_CALL - Provider API Call (25-70%)
            processor = AIProcessor(account=self.account)
            model = fn.get_model(self.account)
            # Track AI call start
            self.step_tracker.add_response_step("AI_CALL", "success", f"Calling {model or 'default'} model...")
            self.tracker.update("AI_CALL", 30, f"Sending to {model or 'default'}...", meta=self.step_tracker.get_meta())
            try:
                raw_response = processor.call(
                    prompt,
                    model=model,
                    # Don't pass response_steps - the processor ignores it anyway
                    # Step tracking is handled by the engine
                    progress_callback=lambda state, meta: self.tracker.update_ai_progress(state, {
                        **meta,
                        **self.step_tracker.get_meta()
                    })
                )
            except Exception as e:
                error_msg = f"AI call failed: {str(e)}"
                logger.error(f"Exception during AI call: {error_msg}", exc_info=True)
                return self._handle_error(error_msg, fn)
            # The processor reports failures as an 'error' key or as missing
            # 'content'; treat either as a hard failure.
            if raw_response.get('error'):
                error_msg = raw_response.get('error', 'Unknown AI error')
                logger.error(f"AI call returned error: {error_msg}")
                return self._handle_error(error_msg, fn)
            if not raw_response.get('content'):
                error_msg = "AI call returned no content"
                logger.error(error_msg)
                return self._handle_error(error_msg, fn)
            # Track cost
            self.cost_tracker.record(
                function_name=function_name,
                cost=raw_response.get('cost', 0),
                tokens=raw_response.get('total_tokens', 0),
                model=raw_response.get('model')
            )
            # Update AI_CALL step with results (rewrites the step added above
            # with actual token/cost figures).
            self.step_tracker.response_steps[-1] = {
                **self.step_tracker.response_steps[-1],
                'message': f"Received {raw_response.get('total_tokens', 0)} tokens, Cost: ${raw_response.get('cost', 0):.6f}",
                'duration': raw_response.get('duration')
            }
            self.tracker.update("AI_CALL", 70, f"AI response received ({raw_response.get('total_tokens', 0)} tokens)", meta=self.step_tracker.get_meta())
            # Phase 4: PARSE - Response Parsing (70-85%)
            try:
                response_content = raw_response.get('content', '')
                parsed = fn.parse_response(response_content, self.step_tracker)
                # Best-effort count for the progress message.
                if isinstance(parsed, (list, tuple)):
                    parsed_count = len(parsed)
                elif isinstance(parsed, dict):
                    parsed_count = parsed.get('count', 1)
                else:
                    parsed_count = 1
                self.step_tracker.add_response_step("PARSE", "success", f"Parsed {parsed_count} items from AI response")
                self.tracker.update("PARSE", 85, f"Parsed {parsed_count} items", meta=self.step_tracker.get_meta())
            except Exception as parse_error:
                error_msg = f"Failed to parse AI response: {str(parse_error)}"
                logger.error(f"AIEngine: {error_msg}", exc_info=True)
                logger.error(f"AIEngine: Response content was: {response_content[:500] if response_content else 'None'}...")
                return self._handle_error(error_msg, fn)
            # Phase 5: SAVE - Database Operations (85-98%)
            # Pass step_tracker to save_output so it can add validation steps
            save_result = fn.save_output(parsed, data, self.account, self.tracker, step_tracker=self.step_tracker)
            # NOTE(review): these keys are clustering-specific; other functions
            # report 0/0 here unless they return the same keys.
            clusters_created = save_result.get('clusters_created', 0)
            keywords_updated = save_result.get('keywords_updated', 0)
            self.step_tracker.add_request_step("SAVE", "success", f"Created {clusters_created} clusters, updated {keywords_updated} keywords")
            self.tracker.update("SAVE", 98, f"Saved: {clusters_created} clusters, {keywords_updated} keywords", meta=self.step_tracker.get_meta())
            # Track credit usage after successful save
            if self.account and raw_response:
                try:
                    from igny8_core.modules.billing.services import CreditService
                    from igny8_core.modules.billing.models import CreditUsageLog
                    # Calculate credits used (based on tokens or fixed cost)
                    credits_used = self._calculate_credits_for_clustering(
                        keyword_count=len(data.get('keywords', [])) if isinstance(data, dict) else len(data) if isinstance(data, list) else 1,
                        tokens=raw_response.get('total_tokens', 0),
                        cost=raw_response.get('cost', 0)
                    )
                    # Log credit usage (don't deduct from account.credits, just log)
                    CreditUsageLog.objects.create(
                        account=self.account,
                        operation_type='clustering',
                        credits_used=credits_used,
                        cost_usd=raw_response.get('cost'),
                        model_used=raw_response.get('model', ''),
                        tokens_input=raw_response.get('tokens_input', 0),
                        tokens_output=raw_response.get('tokens_output', 0),
                        related_object_type='cluster',
                        metadata={
                            'clusters_created': clusters_created,
                            'keywords_updated': keywords_updated,
                            'function_name': function_name
                        }
                    )
                except Exception as e:
                    # Billing is best-effort: never fail the task over a log row.
                    logger.warning(f"Failed to log credit usage: {e}", exc_info=True)
            # Phase 6: DONE - Finalization (98-100%)
            self.step_tracker.add_request_step("DONE", "success", "Task completed successfully")
            self.tracker.update("DONE", 100, "Task complete!", meta=self.step_tracker.get_meta())
            # Log to database
            self._log_to_database(fn, payload, parsed, save_result)
            return {
                'success': True,
                **save_result,
                'request_steps': self.step_tracker.request_steps,
                'response_steps': self.step_tracker.response_steps,
                'cost': self.cost_tracker.get_total(),
                'tokens': self.cost_tracker.get_total_tokens()
            }
        except Exception as e:
            logger.error(f"Error in AIEngine.execute for {function_name}: {str(e)}", exc_info=True)
            return self._handle_error(str(e), fn, exc_info=True)

    def _handle_error(self, error: str, fn: BaseAIFunction = None, exc_info=False):
        """Centralized error handling.

        Records an error step, pushes an error progress state, logs, writes a
        failed AITaskLog row, and returns the standard failure payload.
        NOTE(review): all current callers pass `error` as a string, so the
        isinstance(error, Exception) branches below always yield 'Error'.
        """
        function_name = fn.get_name() if fn else 'unknown'
        self.step_tracker.add_request_step("Error", "error", error, error=error)
        error_meta = {
            'error': error,
            'error_type': type(error).__name__ if isinstance(error, Exception) else 'Error',
            **self.step_tracker.get_meta()
        }
        self.tracker.error(error, meta=error_meta)
        if exc_info:
            logger.error(f"Error in {function_name}: {error}", exc_info=True)
        else:
            logger.error(f"Error in {function_name}: {error}")
        self._log_to_database(fn, None, None, None, error=error)
        return {
            'success': False,
            'error': error,
            'error_type': type(error).__name__ if isinstance(error, Exception) else 'Error',
            'request_steps': self.step_tracker.request_steps,
            'response_steps': self.step_tracker.response_steps
        }

    def _log_to_database(
        self,
        fn: BaseAIFunction = None,
        payload: dict = None,
        parsed: Any = None,
        save_result: dict = None,
        error: str = None
    ):
        """Log to unified ai_task_logs table.

        Best-effort: skipped when there is no account, and any exception is
        swallowed so logging can never fail the task itself.
        """
        try:
            from igny8_core.ai.models import AITaskLog
            # Only log if account exists (AITaskLog requires account)
            if not self.account:
                logger.warning("Cannot log AI task - no account available")
                return
            AITaskLog.objects.create(
                task_id=self.task.request.id if self.task else None,
                function_name=fn.get_name() if fn else None,
                account=self.account,
                phase=self.tracker.current_phase,
                message=self.tracker.current_message,
                status='error' if error else 'success',
                duration=self.tracker.get_duration(),
                cost=self.cost_tracker.get_total(),
                tokens=self.cost_tracker.get_total_tokens(),
                request_steps=self.step_tracker.request_steps,
                response_steps=self.step_tracker.response_steps,
                error=error,
                payload=payload,
                result=save_result
            )
        except Exception as e:
            # Don't fail the task if logging fails
            logger.warning(f"Failed to log to database: {e}")

    def _calculate_credits_for_clustering(self, keyword_count, tokens, cost):
        """Calculate credits used for clustering operation.

        Precedence: the plan's configured per-request price for 'cluster',
        otherwise 1 credit per 30 keywords with a minimum of 1.
        NOTE(review): `tokens` and `cost` are currently unused despite the
        comment below; kept for interface stability.
        """
        # Use plan's cost per request if available, otherwise calculate from tokens
        if self.account and hasattr(self.account, 'plan') and self.account.plan:
            plan = self.account.plan
            # Check if plan has ai_cost_per_request config
            if hasattr(plan, 'ai_cost_per_request') and plan.ai_cost_per_request:
                cluster_cost = plan.ai_cost_per_request.get('cluster', 0)
                if cluster_cost:
                    return int(cluster_cost)
        # Fallback: 1 credit per 30 keywords (minimum 1)
        credits = max(1, int(keyword_count / 30))
        return credits

View File

@@ -0,0 +1,4 @@
"""
AI Function implementations
"""

View File

@@ -0,0 +1,345 @@
"""
Auto Cluster Keywords AI Function
"""
import logging
from typing import Dict, List, Any
from django.db import transaction
from igny8_core.ai.base import BaseAIFunction
from igny8_core.modules.planner.models import Keywords, Clusters
from igny8_core.modules.system.utils import get_prompt_value
logger = logging.getLogger(__name__)
class AutoClusterFunction(BaseAIFunction):
    """Auto-cluster keywords using AI.

    Lifecycle (driven by AIEngine): validate ids and plan limits -> load
    keywords -> build a clustering prompt -> parse the JSON cluster list ->
    create Clusters rows, re-assign matched Keywords, and recalculate
    per-cluster metrics.
    """

    def get_name(self) -> str:
        """Registry name used by the engine and task entrypoint."""
        return 'auto_cluster'

    def get_metadata(self) -> Dict:
        """Display metadata and per-phase progress labels for the UI."""
        return {
            'display_name': 'Auto Cluster Keywords',
            'description': 'Group related keywords into semantic clusters using AI',
            'phases': {
                'INIT': 'Initializing clustering...',
                'PREP': 'Loading keywords...',
                'AI_CALL': 'Analyzing keyword relationships...',
                'PARSE': 'Parsing cluster data...',
                'SAVE': 'Creating clusters...',
                'DONE': 'Clustering complete!'
            }
        }

    def get_max_items(self) -> int:
        # Hard cap on keywords per clustering request (enforced by the base
        # class validate()).
        return 20

    def validate(self, payload: dict, account=None) -> Dict:
        """Custom validation for clustering with plan limit checks.

        Checks, in order: base validation (non-empty ids, max 20), that the
        ids resolve to keywords owned by the account, then the plan's daily
        and total cluster limits. Returns {'valid': bool, 'error': str?}.
        """
        result = super().validate(payload, account)
        if not result['valid']:
            return result
        # Additional validation: check keywords exist
        ids = payload.get('ids', [])
        queryset = Keywords.objects.filter(id__in=ids)
        if account:
            queryset = queryset.filter(account=account)
        if queryset.count() == 0:
            return {'valid': False, 'error': 'No keywords found'}
        # Plan limit validation
        if account:
            plan = getattr(account, 'plan', None)
            if plan:
                from django.utils import timezone
                from igny8_core.modules.planner.models import Clusters
                # Check daily cluster limit (clusters created since local midnight)
                now = timezone.now()
                start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
                clusters_today = Clusters.objects.filter(
                    account=account,
                    created_at__gte=start_of_day
                ).count()
                if plan.daily_cluster_limit and clusters_today >= plan.daily_cluster_limit:
                    return {
                        'valid': False,
                        'error': f'Daily cluster limit reached ({plan.daily_cluster_limit} clusters per day). Please try again tomorrow.'
                    }
                # Check max clusters limit (lifetime total for the account)
                total_clusters = Clusters.objects.filter(account=account).count()
                if plan.max_clusters and total_clusters >= plan.max_clusters:
                    return {
                        'valid': False,
                        'error': f'Maximum cluster limit reached ({plan.max_clusters} clusters). Please upgrade your plan or delete existing clusters.'
                    }
            else:
                return {'valid': False, 'error': 'Account does not have an active plan'}
        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load keywords with relationships.

        Returns a dict with 'keywords' (model instances, kept for save_output),
        'keyword_data' (plain dicts used by build_prompt) and 'sector_id'.
        Raises ValueError when no keywords match the filters.
        """
        ids = payload.get('ids', [])
        sector_id = payload.get('sector_id')
        queryset = Keywords.objects.filter(id__in=ids)
        if account:
            queryset = queryset.filter(account=account)
        if sector_id:
            queryset = queryset.filter(sector_id=sector_id)
        # select_related avoids per-keyword queries when save_output reads
        # account/site/sector off the instances later.
        keywords = list(queryset.select_related('account', 'site', 'site__account', 'sector', 'sector__site'))
        if not keywords:
            raise ValueError("No keywords found")
        # Store original keyword objects for later use
        return {
            'keywords': keywords,
            'keyword_data': [
                {
                    'id': kw.id,
                    'keyword': kw.keyword,
                    'volume': kw.volume,
                    'difficulty': kw.difficulty,
                    'intent': kw.intent,
                }
                for kw in keywords
            ],
            'sector_id': sector_id
        }

    def build_prompt(self, data: Dict, account=None) -> str:
        """Build clustering prompt.

        Fills the account's 'clustering' prompt template with the keyword
        list, appends sector context when available, and guarantees an
        explicit JSON instruction (required by OpenAI JSON mode).
        """
        keyword_data = data['keyword_data']
        sector_id = data.get('sector_id')
        # Get prompt template
        prompt_template = get_prompt_value(account, 'clustering')
        # Format keywords
        keywords_text = '\n'.join([
            f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']}, Intent: {kw['intent']})"
            for kw in keyword_data
        ])
        prompt = prompt_template.replace('[IGNY8_KEYWORDS]', keywords_text)
        # Add sector context if available
        if sector_id:
            try:
                from igny8_core.auth.models import Sector
                sector = Sector.objects.get(id=sector_id)
                if sector:
                    prompt += f"\n\nNote: These keywords are for the '{sector.name}' sector."
            except Exception:
                # Sector context is optional; ignore lookup failures.
                pass
        # IMPORTANT: When using JSON mode, OpenAI requires explicit JSON instruction
        # The prompt template already includes "Format the output as a JSON object"
        # but we need to ensure it's explicit for JSON mode compliance
        # Check if prompt already explicitly requests JSON (case-insensitive)
        prompt_lower = prompt.lower()
        has_json_request = (
            'json' in prompt_lower and
            ('format' in prompt_lower or 'respond' in prompt_lower or 'return' in prompt_lower or 'output' in prompt_lower)
        )
        if not has_json_request:
            prompt += "\n\nIMPORTANT: You must respond with valid JSON only. The response must be a JSON object with a 'clusters' array."
        return prompt

    def parse_response(self, response: str, step_tracker=None) -> List[Dict]:
        """Parse AI response into cluster data.

        Accepts either a bare JSON array of clusters, or an object with a
        'clusters' key (or any key holding a plausible cluster list). Raises
        ValueError on empty, unparseable, or cluster-free responses.
        """
        import json
        from igny8_core.ai.processor import AIProcessor
        if not response or not response.strip():
            error_msg = "Empty response from AI"
            logger.error(f"parse_response: {error_msg}")
            raise ValueError(error_msg)
        # Try direct JSON parse first (most common case with JSON mode)
        json_data = None
        try:
            json_data = json.loads(response.strip())
        except json.JSONDecodeError as e:
            logger.warning(f"parse_response: Direct JSON parse failed: {e}, trying extract_json method")
            # Fall back to extract_json method which handles markdown code blocks
            processor = AIProcessor()
            json_data = processor.extract_json(response)
        if not json_data:
            error_msg = f"Failed to parse clustering response. Response: {response[:200]}..."
            logger.error(f"parse_response: {error_msg}")
            raise ValueError(error_msg)
        # Extract clusters array
        clusters = []
        if isinstance(json_data, dict):
            if 'clusters' in json_data:
                clusters = json_data.get('clusters', [])
            else:
                # Try to find clusters in any key: accept the first list of
                # dicts that looks like cluster objects (has 'name'/'keywords').
                for key, value in json_data.items():
                    if isinstance(value, list) and len(value) > 0:
                        if isinstance(value[0], dict) and ('name' in value[0] or 'keywords' in value[0]):
                            clusters = value
                            break
        elif isinstance(json_data, list):
            clusters = json_data
        if not clusters:
            error_msg = f"No clusters found in AI response. JSON data: {json_data}"
            logger.error(f"parse_response: {error_msg}")
            raise ValueError(error_msg)
        # Step tracking is handled by the engine - don't add steps here
        return clusters

    def save_output(
        self,
        parsed: List[Dict],
        original_data: Dict,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Save clusters to database.

        Creates (or reuses) a Clusters row per parsed cluster, assigns matched
        keywords to it (case-insensitive name match), then recalculates
        keyword counts and volume for every cluster in scope. All writes run
        in one transaction. Returns counts of clusters created and keywords
        updated.
        """
        keywords = original_data['keywords']
        sector_id = original_data.get('sector_id')
        if not keywords:
            raise ValueError("No keywords available for saving")
        # Get context from first keyword (account/site/sector already validated at page level)
        first_keyword = keywords[0]
        account = account or first_keyword.account
        site = first_keyword.site
        # Get sector if needed
        from igny8_core.auth.models import Sector
        sector = first_keyword.sector
        if not sector and sector_id:
            try:
                sector = Sector.objects.get(id=sector_id)
            except Sector.DoesNotExist:
                sector = None
        if not account:
            raise ValueError("Account is required for cluster creation")
        clusters_created = 0
        keywords_updated = 0
        with transaction.atomic():
            for idx, cluster_data in enumerate(parsed):
                if progress_tracker:
                    # NOTE(review): progress here spans 80-95% while the engine
                    # maps SAVE to 85-98% - confirm which band is intended.
                    progress = 80 + int((idx / len(parsed)) * 15)
                    progress_tracker.update(
                        "SAVE",
                        progress,
                        f"Creating cluster {idx + 1}/{len(parsed)}...",
                        current=idx + 1,
                        total=len(parsed),
                        current_item=cluster_data.get('name', '')
                    )
                cluster_name = cluster_data.get('name', '')
                cluster_keywords = cluster_data.get('keywords', [])
                # Skip malformed cluster entries rather than failing the batch.
                if not cluster_name or not cluster_keywords:
                    continue
                # Get or create cluster
                if sector:
                    cluster, created = Clusters.objects.get_or_create(
                        name=cluster_name,
                        account=account,
                        site=site,
                        sector=sector,
                        defaults={
                            'description': cluster_data.get('description', ''),
                            'status': 'active',
                        }
                    )
                else:
                    # Unsectored clusters: look up with sector__isnull so an
                    # existing null-sector cluster of the same name is reused.
                    cluster, created = Clusters.objects.get_or_create(
                        name=cluster_name,
                        account=account,
                        site=site,
                        sector__isnull=True,
                        defaults={
                            'description': cluster_data.get('description', ''),
                            'status': 'active',
                            'sector': None,
                        }
                    )
                if created:
                    clusters_created += 1
                # Match and assign keywords (case-insensitive)
                cluster_keywords_normalized = {kw.strip().lower(): kw.strip() for kw in cluster_keywords}
                available_keywords_normalized = {
                    kw_obj.keyword.strip().lower(): kw_obj
                    for kw_obj in keywords
                }
                matched_keyword_objects = []
                for cluster_kw_normalized, cluster_kw_original in cluster_keywords_normalized.items():
                    if cluster_kw_normalized in available_keywords_normalized:
                        matched_keyword_objects.append(available_keywords_normalized[cluster_kw_normalized])
                # Update matched keywords
                if matched_keyword_objects:
                    matched_ids = [kw.id for kw in matched_keyword_objects]
                    # Re-filter by account/sector so the bulk update cannot
                    # touch keywords outside the validated scope.
                    keyword_filter = Keywords.objects.filter(
                        id__in=matched_ids,
                        account=account
                    )
                    if sector:
                        keyword_filter = keyword_filter.filter(sector=sector)
                    else:
                        keyword_filter = keyword_filter.filter(sector__isnull=True)
                    updated_count = keyword_filter.update(
                        cluster=cluster,
                        status='mapped'
                    )
                    keywords_updated += updated_count
            # Recalculate cluster metrics for every cluster in this
            # account/sector scope (not only the ones touched above).
            from django.db.models import Sum, Case, When, F, IntegerField
            cluster_filter = Clusters.objects.filter(account=account)
            if sector:
                cluster_filter = cluster_filter.filter(sector=sector)
            else:
                cluster_filter = cluster_filter.filter(sector__isnull=True)
            for cluster in cluster_filter:
                cluster.keywords_count = Keywords.objects.filter(cluster=cluster).count()
                # Volume calculation: use volume_override if available, otherwise seed_keyword__volume
                volume_sum = Keywords.objects.filter(cluster=cluster).aggregate(
                    total=Sum(
                        Case(
                            When(volume_override__isnull=False, then=F('volume_override')),
                            default=F('seed_keyword__volume'),
                            output_field=IntegerField()
                        )
                    )
                )['total']
                cluster.volume = volume_sum or 0
                cluster.save()
        return {
            'count': clusters_created,
            'clusters_created': clusters_created,
            'keywords_updated': keywords_updated
        }

View File

@@ -0,0 +1,2 @@
# AI Framework migrations

View File

@@ -0,0 +1,52 @@
"""
AI Framework Models
"""
from django.db import models
from igny8_core.auth.models import AccountBaseModel
class AITaskLog(AccountBaseModel):
    """
    Unified logging table for all AI tasks.
    Stores request/response steps, costs, tokens, and results.

    Rows are written by AIEngine (one per execution) and exposed read-only in
    the admin. NOTE(review): `account`, `created_at` and `updated_at` are
    assumed to come from AccountBaseModel - confirm against auth.models.
    """
    # Celery task id when run asynchronously; null for synchronous runs.
    task_id = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    # Registry name of the AI function that ran (e.g. 'auto_cluster').
    function_name = models.CharField(max_length=100, db_index=True)
    # Last pipeline phase reached (INIT/PREP/AI_CALL/PARSE/SAVE/DONE).
    phase = models.CharField(max_length=50, default='INIT')
    # Last human-readable progress message.
    message = models.TextField(blank=True)
    status = models.CharField(max_length=20, choices=[
        ('success', 'Success'),
        ('error', 'Error'),
        ('pending', 'Pending'),
    ], default='pending')
    # Timing
    duration = models.IntegerField(null=True, blank=True, help_text="Duration in milliseconds")
    # Cost tracking
    cost = models.DecimalField(max_digits=10, decimal_places=6, default=0.0)
    tokens = models.IntegerField(default=0)
    # Step tracking - lists of step dicts produced by StepTracker.
    request_steps = models.JSONField(default=list, blank=True)
    response_steps = models.JSONField(default=list, blank=True)
    # Error tracking
    error = models.TextField(null=True, blank=True)
    # Data - the original task payload and the save_output summary.
    payload = models.JSONField(null=True, blank=True)
    result = models.JSONField(null=True, blank=True)

    class Meta:
        db_table = 'igny8_ai_task_logs'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['task_id']),
            models.Index(fields=['function_name', 'account']),
            models.Index(fields=['status', 'created_at']),
        ]

    def __str__(self):
        return f"{self.function_name} - {self.status} - {self.created_at}"

View File

@@ -0,0 +1,77 @@
"""
AI Processor wrapper for the framework
Reuses existing AIProcessor but provides framework-compatible interface
"""
from typing import Dict, Any, Optional, List
from igny8_core.utils.ai_processor import AIProcessor as BaseAIProcessor
class AIProcessor:
    """
    Framework-facing facade over the legacy AIProcessor.

    Presents one consistent call surface to all AI functions while delegating
    the actual provider work to igny8_core.utils.ai_processor.
    """

    def __init__(self, account=None):
        self.account = account
        self.processor = BaseAIProcessor(account=account)

    def call(
        self,
        prompt: str,
        model: Optional[str] = None,
        max_tokens: int = 4000,
        temperature: float = 0.7,
        response_format: Optional[Dict] = None,
        response_steps: Optional[List] = None,
        progress_callback=None
    ) -> Dict[str, Any]:
        """
        Send a prompt to the AI provider.

        Returns:
            Dict with 'content', 'error', 'input_tokens', 'output_tokens',
            'total_tokens', 'model', 'cost', 'api_id'
        """
        # Fall back to the account's default model when none is supplied.
        chosen_model = model if model else self.processor.default_model
        # Automatically opt in to JSON mode for models that support it,
        # unless the caller already pinned a response format.
        if response_format is None and chosen_model in ('gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview'):
            response_format = {'type': 'json_object'}
        # Step tracking lives in the engine, so the legacy processor's own
        # response_steps mechanism is deliberately disabled here.
        return self.processor._call_openai(
            prompt,
            model=chosen_model,
            max_tokens=max_tokens,
            temperature=temperature,
            response_format=response_format,
            response_steps=None
        )

    def extract_json(self, response_text: str) -> Optional[Dict]:
        """Best-effort JSON extraction (handles markdown code fences)."""
        return self.processor._extract_json_from_response(response_text)

    def generate_image(
        self,
        prompt: str,
        model: str = 'dall-e-3',
        size: str = '1024x1024',
        n: int = 1,
        account=None
    ) -> Dict[str, Any]:
        """Generate image(s); uses the wrapper's account unless one is given."""
        target_account = account or self.account
        return self.processor.generate_image(
            prompt=prompt,
            model=model,
            size=size,
            n=n,
            account=target_account
        )

View File

@@ -0,0 +1,70 @@
"""
Function registry for dynamic AI function discovery
Lazy loading - functions are only imported when actually needed
"""
import logging
from typing import Dict, Type, Optional
from igny8_core.ai.base import BaseAIFunction
logger = logging.getLogger(__name__)
# Loaded function classes, keyed by registry name.
_FUNCTION_REGISTRY: Dict[str, Type[BaseAIFunction]] = {}
# Deferred loader callables, keyed by name; invoked by get_function on first use.
_FUNCTION_LOADERS: Dict[str, callable] = {}
def register_function(name: str, function_class: Type[BaseAIFunction]):
    """Register a concrete AI function class under *name*.

    Raises:
        ValueError: when the class does not derive from BaseAIFunction.
    """
    if issubclass(function_class, BaseAIFunction):
        _FUNCTION_REGISTRY[name] = function_class
        logger.info(f"Registered AI function: {name}")
        return
    raise ValueError(f"{function_class} must inherit from BaseAIFunction")
def register_lazy_function(name: str, loader_func: callable):
    """Defer registration: *loader_func* imports and returns the class on first lookup."""
    _FUNCTION_LOADERS[name] = loader_func
def get_function(name: str) -> Optional[Type[BaseAIFunction]]:
    """Resolve a function class by name, importing it lazily on first access.

    Returns None when the name is unknown or its lazy loader fails.
    """
    # Fast path: already imported.
    if name in _FUNCTION_REGISTRY:
        return _FUNCTION_REGISTRY[name]
    loader = _FUNCTION_LOADERS.get(name)
    if loader is None:
        return None
    try:
        fn_class = loader()
        _FUNCTION_REGISTRY[name] = fn_class
        logger.info(f"Lazy loaded AI function: {name}")
        return fn_class
    except Exception as e:
        logger.error(f"Failed to lazy load function {name}: {e}", exc_info=True)
        return None
def list_functions() -> list:
    """List the names of all registered functions.

    Includes lazily registered functions that have not been imported yet.
    Previously only already-loaded classes were listed, so a lazy function
    was invisible until its first execution.

    Returns:
        list: registry names (loaded names first, then pending lazy names).
    """
    names = list(_FUNCTION_REGISTRY.keys())
    names.extend(n for n in _FUNCTION_LOADERS if n not in _FUNCTION_REGISTRY)
    return names
def get_function_instance(name: str) -> Optional[BaseAIFunction]:
    """Instantiate the function registered under *name*; None when unknown."""
    fn_class = get_function(name)
    return fn_class() if fn_class else None
# Register lazy loaders - functions are only imported when actually called
def _load_auto_cluster():
    """Lazy loader for auto_cluster function"""
    # Imported here (not at module top) to avoid circular imports at Django startup.
    from igny8_core.ai.functions.auto_cluster import AutoClusterFunction
    return AutoClusterFunction

register_lazy_function('auto_cluster', _load_auto_cluster)

View File

@@ -0,0 +1,119 @@
"""
Unified Celery task entrypoint for all AI functions
"""
import logging
from celery import shared_task
from igny8_core.ai.engine import AIEngine
from igny8_core.ai.registry import get_function_instance
logger = logging.getLogger(__name__)
@shared_task(bind=True, max_retries=3)
def run_ai_task(self, function_name: str, payload: dict, account_id: int = None):
    """
    Single Celery entrypoint for all AI functions.
    Dynamically loads and executes the requested function.
    Args:
        function_name: Name of the AI function (e.g., 'auto_cluster')
        payload: Function-specific payload
        account_id: Account ID for account isolation

    Returns the engine result dict on success. A failed engine result is
    re-raised so Celery records the task as FAILURE; that exception is then
    caught below and returned as a failure dict.
    NOTE(review): max_retries=3 is declared but self.retry() is never called,
    so failures are not actually retried - confirm whether retry was intended.
    """
    logger.info("=" * 80)
    logger.info(f"run_ai_task STARTED: {function_name}")
    logger.info(f" - Task ID: {self.request.id}")
    logger.info(f" - Function: {function_name}")
    logger.info(f" - Account ID: {account_id}")
    logger.info(f" - Payload keys: {list(payload.keys())}")
    logger.info("=" * 80)
    try:
        # Get account (execution continues with account=None if missing).
        account = None
        if account_id:
            from igny8_core.auth.models import Account
            try:
                account = Account.objects.get(id=account_id)
            except Account.DoesNotExist:
                logger.warning(f"Account {account_id} not found")
        # Get function from registry (lazy-imports it on first use).
        fn = get_function_instance(function_name)
        if not fn:
            error_msg = f'Function {function_name} not found in registry'
            logger.error(error_msg)
            return {
                'success': False,
                'error': error_msg
            }
        # Create engine and execute
        engine = AIEngine(celery_task=self, account=account)
        result = engine.execute(fn, payload)
        logger.info("=" * 80)
        logger.info(f"run_ai_task COMPLETED: {function_name}")
        logger.info(f" - Success: {result.get('success')}")
        if not result.get('success'):
            logger.error(f" - Error: {result.get('error')}")
        logger.info("=" * 80)
        # If execution failed, raise exception so Celery marks it as FAILURE
        if not result.get('success'):
            error_msg = result.get('error', 'Task execution failed')
            error_type = result.get('error_type', 'ExecutionError')
            # Update task state before raising
            try:
                self.update_state(
                    state='FAILURE',
                    meta={
                        'error': error_msg,
                        'error_type': error_type,
                        'function_name': function_name,
                        'phase': result.get('phase', 'ERROR'),
                        'percentage': 0,
                        'message': f'Error: {error_msg}',
                        'request_steps': result.get('request_steps', []),
                        'response_steps': result.get('response_steps', [])
                    }
                )
            except Exception:
                # State update is best-effort; the raise below still signals failure.
                pass
            # Raise exception so Celery properly tracks failure
            raise Exception(f"{error_type}: {error_msg}")
        return result
    except Exception as e:
        # Catches both unexpected errors and the deliberate re-raise above.
        error_type = type(e).__name__
        error_msg = str(e)
        logger.error("=" * 80)
        logger.error(f"run_ai_task FAILED: {function_name}")
        logger.error(f" - Error: {error_type}: {error_msg}")
        logger.error("=" * 80, exc_info=True)
        # Update task state with error details
        try:
            self.update_state(
                state='FAILURE',
                meta={
                    'error': error_msg,
                    'error_type': error_type,
                    'function_name': function_name,
                    'phase': 'ERROR',
                    'percentage': 0,
                    'message': f'Error: {error_msg}'
                }
            )
        except Exception:
            pass  # Don't fail if state update fails
        return {
            'success': False,
            'error': error_msg,
            'error_type': error_type,
            'function_name': function_name
        }

View File

@@ -0,0 +1,223 @@
"""
Progress and Step Tracking utilities for AI framework
"""
import time
import logging
from typing import List, Dict, Any, Optional, Callable
from igny8_core.ai.types import StepLog, ProgressState
logger = logging.getLogger(__name__)
class StepTracker:
    """Tracks detailed request and response steps for debugging.

    Steps are stored as plain dicts with camelCase keys so they can be
    serialized directly into Celery task metadata / API responses.
    Request and response steps share a single monotonically increasing
    step counter, so step numbers are unique across both lists.
    """

    def __init__(self, function_name: str):
        self.function_name = function_name
        self.request_steps: List[Dict] = []
        self.response_steps: List[Dict] = []
        # Shared counter: request and response steps are numbered in one sequence
        self.step_counter = 0

    def _build_step(
        self,
        step_name: str,
        status: str,
        message: str,
        error: Optional[str],
        duration: Optional[int],
    ) -> Dict:
        """Build a step dict with the next sequence number (shared by both step kinds)."""
        self.step_counter += 1
        step = {
            'stepNumber': self.step_counter,
            'stepName': step_name,
            'functionName': self.function_name,
            'status': status,
            'message': message,
            # Duration is measured by the caller (milliseconds); may be None
            'duration': duration,
        }
        # Only attach 'error' when a non-empty error string was given,
        # matching the serialized shape consumers expect
        if error:
            step['error'] = error
        return step

    def add_request_step(
        self,
        step_name: str,
        status: str = 'success',
        message: str = '',
        error: Optional[str] = None,
        duration: Optional[int] = None
    ) -> Dict:
        """Record a request-phase step and return the created step dict."""
        step = self._build_step(step_name, status, message, error, duration)
        self.request_steps.append(step)
        return step

    def add_response_step(
        self,
        step_name: str,
        status: str = 'success',
        message: str = '',
        error: Optional[str] = None,
        duration: Optional[int] = None
    ) -> Dict:
        """Record a response-phase step and return the created step dict."""
        step = self._build_step(step_name, status, message, error, duration)
        self.response_steps.append(step)
        return step

    def get_meta(self) -> Dict:
        """Get the request/response step lists as progress-callback metadata."""
        return {
            'request_steps': self.request_steps,
            'response_steps': self.response_steps
        }
class ProgressTracker:
    """Tracks progress updates for AI tasks.

    Wraps an optional Celery task and mirrors every progress change into
    the task's state (PROGRESS / SUCCESS / FAILURE), while remembering the
    most recent phase, message and percentage locally.
    """

    def __init__(self, celery_task=None):
        self.task = celery_task
        self.current_phase = 'INIT'
        self.current_message = 'Initializing...'
        self.current_percentage = 0
        self.start_time = time.time()
        self.current = 0
        self.total = 0

    def _push_state(self, state: str, state_meta: Dict):
        """Best-effort push of state + metadata to Celery; never raises."""
        if not self.task:
            return
        try:
            self.task.update_state(state=state, meta=state_meta)
        except Exception as e:
            logger.warning(f"Failed to update Celery task state: {e}")

    def update(
        self,
        phase: str,
        percentage: int,
        message: str,
        current: int = None,
        total: int = None,
        current_item: str = None,
        meta: Dict = None
    ):
        """Record a progress update and mirror it to Celery as PROGRESS."""
        # Remember the latest state locally for later fallbacks
        self.current_phase = phase
        self.current_message = message
        self.current_percentage = percentage
        if current is not None:
            self.current = current
        if total is not None:
            self.total = total

        payload = {
            'phase': phase,
            'percentage': percentage,
            'message': message,
            'current': self.current,
            'total': self.total,
        }
        if current_item:
            payload['current_item'] = current_item
        # Caller-supplied metadata wins over the standard keys
        if meta:
            payload.update(meta)

        self._push_state('PROGRESS', payload)
        logger.info(f"[{phase}] {percentage}%: {message}")

    def set_phase(self, phase: str, percentage: int, message: str, meta: Dict = None):
        """Shorthand for update() without item counters."""
        self.update(phase, percentage, message, meta=meta)

    def complete(self, message: str = "Task complete!", meta: Dict = None):
        """Mark the task as complete (SUCCESS state, 100%)."""
        payload = {
            'phase': 'DONE',
            'percentage': 100,
            'message': message,
            'status': 'success'
        }
        payload.update(meta or {})
        self._push_state('SUCCESS', payload)

    def error(self, error_message: str, meta: Dict = None):
        """Mark the task as failed (FAILURE state)."""
        payload = {
            'phase': 'ERROR',
            'percentage': 0,
            'message': f'Error: {error_message}',
            'status': 'error',
            'error': error_message
        }
        payload.update(meta or {})
        self._push_state('FAILURE', payload)

    def get_duration(self) -> int:
        """Get elapsed time since construction, in milliseconds."""
        elapsed = time.time() - self.start_time
        return int(elapsed * 1000)

    def update_ai_progress(self, state: str, meta: Dict):
        """Callback for AI processor progress updates (non-dict meta is ignored)."""
        if not isinstance(meta, dict):
            return
        self.update(
            meta.get('phase', self.current_phase),
            meta.get('percentage', self.current_percentage),
            meta.get('message', self.current_message),
            meta=meta,
        )
class CostTracker:
    """Tracks API costs and token usage across a task's operations."""

    def __init__(self):
        self.total_cost = 0.0
        self.total_tokens = 0
        self.operations = []

    def record(self, function_name: str, cost: float, tokens: int, model: str = None):
        """Record a single API call's cost, token count and model."""
        entry = {
            'function': function_name,
            'cost': cost,
            'tokens': tokens,
            'model': model,
        }
        self.operations.append(entry)
        # Keep running totals so callers don't have to re-sum the log
        self.total_cost += cost
        self.total_tokens += tokens

    def get_total(self) -> float:
        """Total accumulated cost across all recorded operations."""
        return self.total_cost

    def get_total_tokens(self) -> int:
        """Total accumulated token count across all recorded operations."""
        return self.total_tokens

    def get_operations(self) -> List[Dict]:
        """All recorded operations, in insertion order."""
        return self.operations

View File

@@ -0,0 +1,44 @@
"""
Shared types and dataclasses for AI framework
"""
from dataclasses import dataclass
from typing import Dict, List, Any, Optional
from datetime import datetime
@dataclass
class StepLog:
    """Single step in request/response tracking.

    Field names are camelCase so instances serialize with the same keys
    as the step dicts consumed by the frontend.
    """
    stepNumber: int  # 1-based position in the step sequence
    stepName: str  # short label identifying this step
    functionName: str  # AI function this step belongs to
    status: str  # 'success' or 'error'
    message: str  # human-readable detail for the step
    error: Optional[str] = None  # error description, set when status == 'error'
    duration: Optional[int] = None  # milliseconds
@dataclass
class ProgressState:
    """Progress state for AI tasks.

    Snapshot of a task's current phase and completion percentage, with
    optional per-item counters for batch-style work.
    """
    phase: str  # INIT, PREP, AI_CALL, PARSE, SAVE, DONE
    percentage: int  # 0-100
    message: str  # human-readable progress message
    current: Optional[int] = None  # items processed so far, when known
    total: Optional[int] = None  # total items to process, when known
    current_item: Optional[str] = None  # label of the item currently in progress
@dataclass
class AITaskResult:
    """Result from AI function execution.

    Aggregates the function's output together with the step logs and
    cost/usage accounting gathered while it ran.
    """
    success: bool  # True when execution completed without error
    function_name: str  # name of the executed AI function
    result_data: Dict[str, Any]  # function-specific result payload
    request_steps: List[StepLog]  # ordered request-phase step logs
    response_steps: List[StepLog]  # ordered response-phase step logs
    cost: float = 0.0  # total API cost (currency unit not specified here — TODO confirm)
    tokens: int = 0  # total tokens consumed
    error: Optional[str] = None  # error description when success is False
    duration: Optional[int] = None  # milliseconds