ai update

This commit is contained in:
alorig
2025-11-09 22:55:37 +05:00
parent c08a4c7bd6
commit 73e10ecb99
4 changed files with 315 additions and 14 deletions

View File

@@ -4,7 +4,7 @@ AI Engine - Central orchestrator for all AI functions
import logging
from typing import Dict, Any, Optional
from igny8_core.ai.base import BaseAIFunction
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker, ConsoleStepTracker
from igny8_core.ai.ai_core import AICore
from igny8_core.ai.settings import get_model_config
@@ -21,7 +21,8 @@ class AIEngine:
self.task = celery_task
self.account = account
self.tracker = ProgressTracker(celery_task)
self.step_tracker = StepTracker('ai_engine')
self.step_tracker = StepTracker('ai_engine') # For Celery progress callbacks
self.console_tracker = None # Will be initialized per function
self.cost_tracker = CostTracker()
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
@@ -39,16 +40,24 @@ class AIEngine:
function_name = fn.get_name()
self.step_tracker.function_name = function_name
# Initialize console tracker for logging (Stage 3 requirement)
self.console_tracker = ConsoleStepTracker(function_name)
self.console_tracker.init(f"Starting {function_name} execution")
try:
# Phase 1: INIT - Validation & Setup (0-10%)
self.console_tracker.prep("Validating input payload")
validated = fn.validate(payload, self.account)
if not validated['valid']:
self.console_tracker.error('ValidationError', validated['error'])
return self._handle_error(validated['error'], fn)
self.console_tracker.prep("Validation complete")
self.step_tracker.add_request_step("INIT", "success", "Validation complete")
self.tracker.update("INIT", 10, "Validation complete", meta=self.step_tracker.get_meta())
# Phase 2: PREP - Data Loading & Prompt Building (10-25%)
self.console_tracker.prep("Loading data from database")
data = fn.prepare(payload, self.account)
if isinstance(data, (list, tuple)):
data_count = len(data)
@@ -57,7 +66,9 @@ class AIEngine:
else:
data_count = 1
self.console_tracker.prep(f"Building prompt from {data_count} items")
prompt = fn.build_prompt(data, self.account)
self.console_tracker.prep(f"Prompt built: {len(prompt)} characters")
self.step_tracker.add_request_step("PREP", "success", f"Loaded {data_count} items, built prompt ({len(prompt)} chars)")
self.tracker.update("PREP", 25, f"Data prepared: {data_count} items", meta=self.step_tracker.get_meta())
@@ -65,23 +76,27 @@ class AIEngine:
ai_core = AICore(account=self.account)
function_name = fn.get_name()
# Get model config from settings
# Get model config from settings (Stage 4 requirement)
model_config = get_model_config(function_name)
model = model_config.get('model')
self.console_tracker.ai_call(f"Calling {model or 'default'} model with {len(prompt)} char prompt")
# Track AI call start
self.step_tracker.add_response_step("AI_CALL", "success", f"Calling {model or 'default'} model...")
self.tracker.update("AI_CALL", 30, f"Sending to {model or 'default'}...", meta=self.step_tracker.get_meta())
try:
# Use centralized run_ai_request() with console logging
# Use centralized run_ai_request() with console logging (Stage 2 & 3 requirement)
# Pass console_tracker for unified logging
raw_response = ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=model_config.get('max_tokens'),
temperature=model_config.get('temperature'),
response_format=model_config.get('response_format'),
function_name=function_name
function_name=function_name,
tracker=self.console_tracker # Pass console tracker for logging
)
except Exception as e:
error_msg = f"AI call failed: {str(e)}"
@@ -116,6 +131,7 @@ class AIEngine:
# Phase 4: PARSE - Response Parsing (70-85%)
try:
self.console_tracker.parse("Parsing AI response")
response_content = raw_response.get('content', '')
parsed = fn.parse_response(response_content, self.step_tracker)
@@ -126,6 +142,7 @@ class AIEngine:
else:
parsed_count = 1
self.console_tracker.parse(f"Successfully parsed {parsed_count} items from response")
self.step_tracker.add_response_step("PARSE", "success", f"Parsed {parsed_count} items from AI response")
self.tracker.update("PARSE", 85, f"Parsed {parsed_count} items", meta=self.step_tracker.get_meta())
except Exception as parse_error:
@@ -135,12 +152,27 @@ class AIEngine:
return self._handle_error(error_msg, fn)
# Phase 5: SAVE - Database Operations (85-98%)
self.console_tracker.save("Saving results to database")
# Pass step_tracker to save_output so it can add validation steps
save_result = fn.save_output(parsed, data, self.account, self.tracker, step_tracker=self.step_tracker)
clusters_created = save_result.get('clusters_created', 0)
keywords_updated = save_result.get('keywords_updated', 0)
self.step_tracker.add_request_step("SAVE", "success", f"Created {clusters_created} clusters, updated {keywords_updated} keywords")
self.tracker.update("SAVE", 98, f"Saved: {clusters_created} clusters, {keywords_updated} keywords", meta=self.step_tracker.get_meta())
count = save_result.get('count', 0)
# Build success message based on function type
if clusters_created:
save_msg = f"Created {clusters_created} clusters, updated {keywords_updated} keywords"
elif count:
save_msg = f"Saved {count} items"
else:
save_msg = "Results saved successfully"
self.console_tracker.save(save_msg)
self.step_tracker.add_request_step("SAVE", "success", save_msg)
self.tracker.update("SAVE", 98, save_msg, meta=self.step_tracker.get_meta())
# Store save_msg for use in DONE phase
final_save_msg = save_msg
# Track credit usage after successful save
if self.account and raw_response:
@@ -175,6 +207,8 @@ class AIEngine:
logger.warning(f"Failed to log credit usage: {e}", exc_info=True)
# Phase 6: DONE - Finalization (98-100%)
success_msg = f"Task completed: {final_save_msg}" if 'final_save_msg' in locals() else "Task completed successfully"
self.console_tracker.done(success_msg)
self.step_tracker.add_request_step("DONE", "success", "Task completed successfully")
self.tracker.update("DONE", 100, "Task complete!", meta=self.step_tracker.get_meta())
@@ -197,6 +231,12 @@ class AIEngine:
def _handle_error(self, error: str, fn: BaseAIFunction = None, exc_info=False):
"""Centralized error handling"""
function_name = fn.get_name() if fn else 'unknown'
# Log to console tracker if available (Stage 3 requirement)
if self.console_tracker:
error_type = type(error).__name__ if isinstance(error, Exception) else 'Error'
self.console_tracker.error(error_type, str(error), exception=error if isinstance(error, Exception) else None)
self.step_tracker.add_request_step("Error", "error", error, error=error)
error_meta = {

View File

@@ -8,6 +8,7 @@ from django.db import transaction
from igny8_core.modules.planner.models import Keywords, Clusters, ContentIdeas
from igny8_core.utils.ai_processor import ai_processor
from igny8_core.ai.functions.generate_ideas import generate_ideas_core
from igny8_core.ai.tracker import ConsoleStepTracker
logger = logging.getLogger(__name__)
@@ -34,7 +35,11 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
account_id: Account ID for account isolation
progress_callback: Optional function to call for progress updates (for Celery tasks)
"""
# Track request and response steps
# Initialize console step tracker for logging
tracker = ConsoleStepTracker('auto_cluster')
tracker.init(f"Starting keyword clustering for {len(keyword_ids)} keywords")
# Track request and response steps (for Celery progress callbacks)
request_steps = []
response_steps = []
@@ -57,6 +62,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
# Step 4: Keyword Loading & Validation
tracker.prep(f"Loading {len(keyword_ids)} keywords from database")
step_start = time.time()
keywords_queryset = Keywords.objects.filter(id__in=keyword_ids)
if account_id:
@@ -67,7 +73,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
keywords = list(keywords_queryset.select_related('account', 'site', 'site__account', 'sector', 'sector__site'))
if not keywords:
logger.warning(f"No keywords found for clustering: {keyword_ids}")
error_msg = f"No keywords found for clustering: {keyword_ids}"
logger.warning(error_msg)
tracker.error('Validation', error_msg)
request_steps.append({
'stepNumber': 4,
'stepName': 'Keyword Loading & Validation',
@@ -84,6 +92,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
return {'success': False, 'error': 'No keywords found', 'request_steps': request_steps, 'response_steps': response_steps}
tracker.prep(f"Loaded {len(keywords)} keywords successfully")
request_steps.append({
'stepNumber': 4,
'stepName': 'Keyword Loading & Validation',
@@ -330,10 +339,20 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
return {'success': False, 'error': f'Error preparing AI call: {str(e)}', 'request_steps': request_steps, 'response_steps': response_steps}
# Call AI with step tracking
result = processor.cluster_keywords(keyword_data, sector_name=sector_name, account=account, response_steps=response_steps, progress_callback=progress_callback)
tracker.ai_call(f"Sending {len(keyword_data)} keywords to AI for clustering")
result = processor.cluster_keywords(
keyword_data,
sector_name=sector_name,
account=account,
response_steps=response_steps,
progress_callback=progress_callback,
tracker=tracker # Pass tracker for console logging
)
if result.get('error'):
logger.error(f"AI clustering error: {result['error']}")
error_msg = f"AI clustering error: {result['error']}"
logger.error(error_msg)
tracker.error('AI_CALL', error_msg)
if progress_callback:
progress_callback(
state='FAILURE',
@@ -346,6 +365,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
)
return {'success': False, 'error': result['error'], 'request_steps': request_steps, 'response_steps': response_steps}
# Parse response
tracker.parse("Parsing AI response into cluster data")
# Update response_steps from result if available
if result.get('response_steps'):
response_steps.extend(result.get('response_steps', []))
@@ -370,6 +392,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
keywords_updated = 0
# Step 13: Database Transaction Start
tracker.save(f"Creating {len(clusters_data)} clusters in database")
step_start = time.time()
# Create/update clusters and assign keywords
# Note: account and sector are already extracted above to avoid database queries inside transaction
@@ -567,6 +590,7 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
# Final progress update
final_message = f"Clustering complete: {clusters_created} clusters created, {keywords_updated} keywords updated"
logger.info(final_message)
tracker.done(final_message)
if progress_callback:
progress_callback(
@@ -588,7 +612,9 @@ def _auto_cluster_keywords_core(keyword_ids: List[int], sector_id: int = None, a
}
except Exception as e:
logger.error(f"Error in auto_cluster_keywords_core: {str(e)}", exc_info=True)
error_msg = f"Error in auto_cluster_keywords_core: {str(e)}"
logger.error(error_msg, exc_info=True)
tracker.error('Exception', error_msg, exception=e)
if progress_callback:
progress_callback(
state='FAILURE',

View File

@@ -1049,6 +1049,7 @@ Make sure each prompt is detailed enough for image generation, describing the vi
account=None,
response_steps=None,
progress_callback=None,
tracker=None, # Optional ConsoleStepTracker for logging
**kwargs
) -> Dict[str, Any]:
"""
@@ -1075,20 +1076,41 @@ Make sure each prompt is detailed enough for image generation, describing the vi
for kw in keywords
])
if tracker:
tracker.prep(f"Formatted {len(keywords)} keywords for prompt")
account_obj = account or self.account
# Get prompt template from database or default
# NOTE: This is legacy code. New code should use PromptRegistry.get_prompt()
# Keeping this for backward compatibility with old tasks
prompt_template = self.get_prompt('clustering', account=account_obj)
# Replace placeholders in prompt template
if '[IGNY8_KEYWORDS]' not in prompt_template:
error_msg = "Prompt template missing [IGNY8_KEYWORDS] placeholder"
logger.error(error_msg)
if tracker:
tracker.error('Prompt', error_msg)
return {
'clusters': [],
'error': error_msg,
}
prompt = prompt_template.replace('[IGNY8_KEYWORDS]', keywords_text)
if tracker:
tracker.prep(f"Prompt prepared: {len(prompt)} characters (keywords: {len(keywords_text)} chars)")
if sector_name:
prompt += f"\n\nNote: These keywords are for the '{sector_name}' sector."
logger.info(f"Clustering {len(keywords)} keywords using AI")
logger.info(f"AIProcessor.cluster_keywords: About to call OpenAI API with {len(keywords)} keywords")
if tracker:
tracker.ai_call(f"Calling OpenAI API with model: {self.default_model}")
# Initialize response_steps if not provided
if response_steps is None:
response_steps = []
@@ -1109,6 +1131,12 @@ Make sure each prompt is detailed enough for image generation, describing the vi
response_format=response_format,
response_steps=response_steps
)
if tracker:
if result.get('error'):
tracker.error('AI_CALL', f"OpenAI API error: {result['error']}")
else:
tracker.ai_call(f"Received response: {result.get('total_tokens', 0)} tokens")
logger.info(f"AIProcessor.cluster_keywords: OpenAI API call completed. Error: {result.get('error')}, Has content: {bool(result.get('content'))}")
except Exception as e:
logger.error(f"AIProcessor.cluster_keywords: Exception calling OpenAI API: {type(e).__name__}: {str(e)}", exc_info=True)
@@ -1141,11 +1169,16 @@ Make sure each prompt is detailed enough for image generation, describing the vi
}
# Step 11: JSON Extraction & Parsing
if tracker:
tracker.parse("Extracting JSON from AI response")
step_start = time.time()
json_data = self._extract_json_from_response(result['content'])
if not json_data:
logger.error(f"Failed to parse clustering response: {result.get('content', '')[:200]}")
error_msg = f"Failed to parse clustering response: {result.get('content', '')[:200]}"
logger.error(error_msg)
if tracker:
tracker.error('Parse', error_msg)
if response_steps is not None:
response_steps.append({
'stepNumber': 11,
@@ -1194,7 +1227,10 @@ Make sure each prompt is detailed enough for image generation, describing the vi
clusters = json_data
if not clusters:
logger.error(f"No clusters found in response: {json_data}")
error_msg = f"No clusters found in response: {json_data}"
logger.error(error_msg)
if tracker:
tracker.error('Parse', error_msg)
if response_steps is not None:
response_steps.append({
'stepNumber': 12,
@@ -1216,6 +1252,10 @@ Make sure each prompt is detailed enough for image generation, describing the vi
'response_steps': response_steps
}
logger.info(f"Successfully parsed {len(clusters)} clusters from AI response")
if tracker:
tracker.parse(f"Successfully extracted {len(clusters)} clusters from response")
if response_steps is not None:
response_steps.append({
'stepNumber': 12,