prep
This commit is contained in:
@@ -0,0 +1,5 @@
|
||||
"""
|
||||
Workflow Functions
|
||||
New AI functions using the unified template pattern.
|
||||
"""
|
||||
|
||||
755
backend/igny8_core/ai/helpers/ai_core.py
Normal file
755
backend/igny8_core/ai/helpers/ai_core.py
Normal file
@@ -0,0 +1,755 @@
|
||||
"""
|
||||
AI Core - Centralized execution and logging layer for all AI requests
|
||||
Handles API calls, model selection, response parsing, and console logging
|
||||
"""
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
import requests
|
||||
import time
|
||||
from typing import Dict, Any, Optional, List
|
||||
from django.conf import settings
|
||||
|
||||
from igny8_core.ai.constants import (
|
||||
DEFAULT_AI_MODEL,
|
||||
JSON_MODE_MODELS,
|
||||
MODEL_RATES,
|
||||
IMAGE_MODEL_RATES,
|
||||
VALID_OPENAI_IMAGE_MODELS,
|
||||
VALID_SIZES_BY_MODEL,
|
||||
DEBUG_MODE,
|
||||
)
|
||||
from .tracker import ConsoleStepTracker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AICore:
|
||||
"""
|
||||
Centralized AI operations handler with console logging.
|
||||
All AI requests go through run_ai_request() for consistent execution and logging.
|
||||
"""
|
||||
|
||||
def __init__(self, account=None):
    """
    Create an AICore bound to an (optional) account.

    Args:
        account: Optional account object; when present, API keys and the
            default model are looked up in its IntegrationSettings.
    """
    self.account = account
    # Start with nothing configured; _load_account_settings() fills these
    # in from IntegrationSettings and/or Django settings.
    self._openai_api_key = None
    self._runware_api_key = None
    self._default_model = None
    self._load_account_settings()
|
||||
|
||||
def _load_account_settings(self):
    """
    Resolve API keys and the default model.

    Account-level IntegrationSettings take precedence; anything still
    unset afterwards falls back to the Django settings module.
    """
    if self.account:
        try:
            from igny8_core.modules.system.models import IntegrationSettings

            def _active_config(integration_type):
                # First active settings row for this account, or None when
                # absent / when the row carries no config payload.
                row = IntegrationSettings.objects.filter(
                    integration_type=integration_type,
                    account=self.account,
                    is_active=True
                ).first()
                return row.config if (row and row.config) else None

            # OpenAI: key plus (optionally) the account's default model.
            openai_config = _active_config('openai')
            if openai_config:
                self._openai_api_key = openai_config.get('apiKey')
                model = openai_config.get('model')
                if not model:
                    logger.warning(f"No model configured in IntegrationSettings for account {self.account.id}, will use fallback")
                elif model in MODEL_RATES:
                    self._default_model = model
                    logger.info(f"Loaded model '{model}' from IntegrationSettings for account {self.account.id}")
                else:
                    error_msg = f"Model '{model}' from IntegrationSettings is not in supported models list. Supported models: {list(MODEL_RATES.keys())}"
                    logger.error(f"[AICore] {error_msg}")
                    logger.error(f"[AICore] Account {self.account.id} has invalid model configuration. Please update Integration Settings.")
                    # Leave _default_model unset so the Django fallback applies.

            # Runware: key only.
            runware_config = _active_config('runware')
            if runware_config:
                self._runware_api_key = runware_config.get('apiKey')
        except Exception as e:
            # Best-effort: a broken settings lookup must not prevent the
            # Django-settings fallback below.
            logger.warning(f"Could not load account settings: {e}", exc_info=True)

    # Fallback to Django settings for anything still missing.
    if not self._openai_api_key:
        self._openai_api_key = getattr(settings, 'OPENAI_API_KEY', None)
    if not self._runware_api_key:
        self._runware_api_key = getattr(settings, 'RUNWARE_API_KEY', None)
    if not self._default_model:
        self._default_model = getattr(settings, 'DEFAULT_AI_MODEL', DEFAULT_AI_MODEL)
|
||||
|
||||
def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
    """Return the stored API key for *integration_type*, or None if unknown."""
    keys_by_type = {
        'openai': self._openai_api_key,
        'runware': self._runware_api_key,
    }
    return keys_by_type.get(integration_type)
|
||||
|
||||
def get_model(self, integration_type: str = 'openai') -> str:
    """Return the configured model for *integration_type* (default for others)."""
    if integration_type != 'openai':
        # Only OpenAI has a per-account model; everything else gets the default.
        return DEFAULT_AI_MODEL
    return self._default_model
|
||||
|
||||
def run_ai_request(
    self,
    prompt: str,
    model: Optional[str] = None,
    max_tokens: int = 4000,
    temperature: float = 0.7,
    response_format: Optional[Dict] = None,
    api_key: Optional[str] = None,
    function_name: str = 'ai_request',
    function_id: Optional[str] = None,
    tracker: Optional[ConsoleStepTracker] = None
) -> Dict[str, Any]:
    """
    Centralized AI request handler with console logging.
    All AI text generation requests go through this method.

    Args:
        prompt: Prompt text
        model: Model name (defaults to account's default)
        max_tokens: Maximum tokens
        temperature: Temperature (0-1)
        response_format: Optional response format dict (for JSON mode)
        api_key: Optional API key override
        function_name: Function name for logging (e.g., 'cluster_keywords')
        function_id: Optional id prefixed into the prompt for tracking
        tracker: Optional ConsoleStepTracker instance for logging

    Returns:
        Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
        'model', 'cost', 'error', 'api_id' ('duration' is added on success)
    """

    def _failure(error_msg: str, model_value, api_id_value=None) -> Dict[str, Any]:
        # Every failure path returns this same shape; centralized here so
        # the many early-return sites cannot drift apart.
        return {
            'content': None,
            'error': error_msg,
            'input_tokens': 0,
            'output_tokens': 0,
            'total_tokens': 0,
            'model': model_value,
            'cost': 0.0,
            'api_id': api_id_value,
        }

    # Use provided tracker or create a new one
    if tracker is None:
        tracker = ConsoleStepTracker(function_name)

    tracker.ai_call("Preparing request...")

    # Step 1: Validate API key
    api_key = api_key or self._openai_api_key
    if not api_key:
        error_msg = 'OpenAI API key not configured'
        tracker.error('ConfigurationError', error_msg)
        return _failure(error_msg, model or self._default_model)

    # Step 2: Determine model
    active_model = model or self._default_model

    # Debug logging: Show model from settings vs model used
    model_from_settings = self._default_model
    model_used = active_model
    logger.info("[AICore] Model Configuration Debug:")
    logger.info(f" - Model from IntegrationSettings: {model_from_settings}")
    logger.info(f" - Model parameter passed: {model}")
    logger.info(f" - Model actually used in request: {model_used}")
    tracker.ai_call(f"Model Debug - Settings: {model_from_settings}, Parameter: {model}, Using: {model_used}")

    # Validate model is available and supported
    if not active_model:
        error_msg = 'No AI model configured. Please configure a model in Integration Settings or Django settings.'
        logger.error(f"[AICore] {error_msg}")
        tracker.error('ConfigurationError', error_msg)
        return _failure(error_msg, None)

    if active_model not in MODEL_RATES:
        error_msg = f"Model '{active_model}' is not supported. Supported models: {list(MODEL_RATES.keys())}"
        logger.error(f"[AICore] {error_msg}")
        tracker.error('ConfigurationError', error_msg)
        return _failure(error_msg, active_model)

    tracker.ai_call(f"Using model: {active_model}")

    # Step 3: Auto-enable JSON mode for supported models
    if response_format is None and active_model in JSON_MODE_MODELS:
        response_format = {'type': 'json_object'}
        tracker.ai_call(f"Auto-enabled JSON mode for {active_model}")
    elif response_format:
        tracker.ai_call(f"Using custom response format: {response_format}")
    else:
        tracker.ai_call("Using text response format")

    # Step 4: Validate prompt length and add function_id
    prompt_length = len(prompt)
    tracker.ai_call(f"Prompt length: {prompt_length} characters")

    # Add function_id to prompt if provided (for tracking)
    final_prompt = prompt
    if function_id:
        function_id_prefix = f'function_id: "{function_id}"\n\n'
        final_prompt = function_id_prefix + prompt
        tracker.ai_call(f"Added function_id to prompt: {function_id}")

    # Step 5: Build request payload
    url = 'https://api.openai.com/v1/chat/completions'
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }

    body_data = {
        'model': active_model,
        'messages': [{'role': 'user', 'content': final_prompt}],
        'temperature': temperature,
    }

    if max_tokens:
        body_data['max_tokens'] = max_tokens

    if response_format:
        body_data['response_format'] = response_format

    tracker.ai_call(f"Request payload prepared (model={active_model}, max_tokens={max_tokens}, temp={temperature})")

    # Step 6: Send request
    tracker.ai_call("Sending request to OpenAI API...")
    request_start = time.time()

    try:
        response = requests.post(url, headers=headers, json=body_data, timeout=60)
        request_duration = time.time() - request_start
        tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")

        # Step 7: Validate HTTP response
        if response.status_code != 200:
            error_data = response.json() if response.headers.get('content-type', '').startswith('application/json') else {}
            error_message = f"HTTP {response.status_code} error"

            if isinstance(error_data, dict) and 'error' in error_data:
                if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                    error_message += f": {error_data['error']['message']}"

            # Check for rate limit
            if response.status_code == 429:
                retry_after = response.headers.get('retry-after', '60')
                tracker.rate_limit(retry_after)
                error_message += f" (Rate limit - retry after {retry_after}s)"
            else:
                tracker.error('HTTPError', error_message)

            logger.error(f"OpenAI API HTTP error {response.status_code}: {error_message}")
            return _failure(error_message, active_model)

        # Step 8: Parse response JSON
        try:
            data = response.json()
        except json.JSONDecodeError as e:
            error_msg = f'Failed to parse JSON response: {str(e)}'
            tracker.malformed_json(str(e))
            logger.error(error_msg)
            return _failure(error_msg, active_model)

        api_id = data.get('id')

        # Step 9: Extract content
        if 'choices' in data and len(data['choices']) > 0:
            content = data['choices'][0]['message']['content']
            usage = data.get('usage', {})
            input_tokens = usage.get('prompt_tokens', 0)
            output_tokens = usage.get('completion_tokens', 0)
            total_tokens = usage.get('total_tokens', 0)

            tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
            tracker.parse(f"Content length: {len(content)} characters")

            # Step 10: Calculate cost (MODEL_RATES values are USD per million tokens)
            rates = MODEL_RATES.get(active_model, {'input': 2.00, 'output': 8.00})
            cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
            tracker.parse(f"Cost calculated: ${cost:.6f}")

            tracker.done("Request completed successfully")

            return {
                'content': content,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'total_tokens': total_tokens,
                'model': active_model,
                'cost': cost,
                'error': None,
                'api_id': api_id,
                'duration': request_duration,  # wall-clock seconds for the HTTP call
            }
        else:
            error_msg = 'No content in OpenAI response'
            tracker.error('EmptyResponse', error_msg)
            logger.error(error_msg)
            # Preserve api_id so the failed request can still be correlated.
            return _failure(error_msg, active_model, api_id)

    except requests.exceptions.Timeout:
        error_msg = 'Request timeout (60s exceeded)'
        tracker.timeout(60)
        logger.error(error_msg)
        return _failure(error_msg, active_model)
    except requests.exceptions.RequestException as e:
        error_msg = f'Request exception: {str(e)}'
        tracker.error('RequestException', error_msg, e)
        logger.error(f"OpenAI API error: {error_msg}", exc_info=True)
        return _failure(error_msg, active_model)
    except Exception as e:
        error_msg = f'Unexpected error: {str(e)}'
        logger.error(f"[AI][{function_name}][Error] {error_msg}", exc_info=True)
        # tracker is always bound by this point (assigned at the top).
        tracker.error('UnexpectedError', error_msg, e)
        return _failure(error_msg, active_model)
|
||||
|
||||
def extract_json(self, response_text: str) -> Optional[Dict]:
    """
    Extract JSON from response text.

    Candidates are tried in order: the whole (stripped) text, the first
    fenced ```json code block, then every brace/bracket-delimited span
    found in the text. The first candidate that parses wins.

    Args:
        response_text: Raw response text from AI

    Returns:
        Parsed JSON dict or None
    """
    if not response_text or not response_text.strip():
        return None

    candidates = [response_text.strip()]

    # First fenced code block, if any (non-greedy so blocks don't merge).
    fenced = re.findall(r'```(?:json)?\s*(\{.*?\}|\[.*?\])\s*```', response_text, re.DOTALL)
    if fenced:
        candidates.append(fenced[0])

    # Any bare object/array spans embedded in surrounding prose.
    candidates.extend(re.findall(r'(\{.*\}|\[.*\])', response_text, re.DOTALL))

    for candidate in candidates:
        try:
            return json.loads(candidate)
        except json.JSONDecodeError:
            continue

    return None
|
||||
|
||||
def generate_image(
    self,
    prompt: str,
    provider: str = 'openai',
    model: Optional[str] = None,
    size: str = '1024x1024',
    n: int = 1,
    api_key: Optional[str] = None,
    negative_prompt: Optional[str] = None,
    function_name: str = 'generate_image'
) -> Dict[str, Any]:
    """
    Generate an image with AI, routed to the requested provider.

    Args:
        prompt: Image prompt
        provider: 'openai' or 'runware'
        model: Model name
        size: Image size
        n: Number of images
        api_key: Optional API key override
        negative_prompt: Optional negative prompt
        function_name: Function name for logging

    Returns:
        Dict with 'url', 'revised_prompt', 'cost', 'error', etc.
    """
    print(f"[AI][{function_name}] Step 1: Preparing image generation request...")

    # Route to the provider-specific implementation.
    if provider == 'openai':
        return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name)
    if provider == 'runware':
        return self._generate_image_runware(prompt, model, size, n, api_key, negative_prompt, function_name)

    # Unknown provider: report it without raising so callers always get
    # the same result shape back.
    error_msg = f'Unknown provider: {provider}'
    print(f"[AI][{function_name}][Error] {error_msg}")
    return {
        'url': None,
        'revised_prompt': None,
        'provider': provider,
        'cost': 0.0,
        'error': error_msg,
    }
|
||||
|
||||
def _generate_image_openai(
    self,
    prompt: str,
    model: Optional[str],
    size: str,
    n: int,
    api_key: Optional[str],
    negative_prompt: Optional[str],
    function_name: str
) -> Dict[str, Any]:
    """
    Generate image(s) via the OpenAI Images API (DALL-E).

    Args:
        prompt: Image prompt text.
        model: Image model; defaults to 'dall-e-3'.
        size: Size string validated against VALID_SIZES_BY_MODEL.
        n: Number of images requested.
        api_key: Optional key override; falls back to the account key.
        negative_prompt: Logged only - the DALL-E API has no such parameter.
        function_name: Label used in console log lines.

    Returns:
        Dict with 'url', 'revised_prompt', 'provider', 'cost', 'error'.
    """

    def _failure(error_msg: str) -> Dict[str, Any]:
        # Single error shape shared by every failure path in this method.
        return {
            'url': None,
            'revised_prompt': None,
            'provider': 'openai',
            'cost': 0.0,
            'error': error_msg,
        }

    print(f"[AI][{function_name}] Provider: OpenAI")

    api_key = api_key or self._openai_api_key
    if not api_key:
        error_msg = 'OpenAI API key not configured'
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)

    model = model or 'dall-e-3'
    print(f"[AI][{function_name}] Step 2: Using model: {model}, size: {size}")

    # Validate model
    if model not in VALID_OPENAI_IMAGE_MODELS:
        error_msg = f"Model '{model}' is not valid for OpenAI image generation. Only {', '.join(VALID_OPENAI_IMAGE_MODELS)} are supported."
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)

    # Validate size
    valid_sizes = VALID_SIZES_BY_MODEL.get(model, [])
    if size not in valid_sizes:
        error_msg = f"Image size '{size}' is not valid for model '{model}'. Valid sizes: {', '.join(valid_sizes)}"
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)

    url = 'https://api.openai.com/v1/images/generations'
    print(f"[AI][{function_name}] Step 3: Sending request to OpenAI Images API...")

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }

    data = {
        'model': model,
        'prompt': prompt,
        'n': n,
        'size': size
    }

    if negative_prompt:
        # Note: OpenAI DALL-E doesn't support negative_prompt in API, but we log it
        print(f"[AI][{function_name}] Note: Negative prompt provided but OpenAI DALL-E doesn't support it")

    request_start = time.time()
    try:
        response = requests.post(url, headers=headers, json=data, timeout=150)
        request_duration = time.time() - request_start
        print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")

        if response.status_code != 200:
            error_data = response.json() if response.headers.get('content-type', '').startswith('application/json') else {}
            error_message = f"HTTP {response.status_code} error"
            if isinstance(error_data, dict) and 'error' in error_data:
                if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                    error_message += f": {error_data['error']['message']}"

            print(f"[AI][{function_name}][Error] {error_message}")
            return _failure(error_message)

        body = response.json()
        if 'data' in body and len(body['data']) > 0:
            image_data = body['data'][0]
            image_url = image_data.get('url')
            revised_prompt = image_data.get('revised_prompt')

            # Flat per-image rate; 0.040 is the fallback for unknown models.
            cost = IMAGE_MODEL_RATES.get(model, 0.040) * n
            print(f"[AI][{function_name}] Step 5: Image generated successfully")
            print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
            print(f"[AI][{function_name}][Success] Image generation completed")

            return {
                'url': image_url,
                'revised_prompt': revised_prompt,
                'provider': 'openai',
                'cost': cost,
                'error': None,
            }
        else:
            error_msg = 'No image data in response'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return _failure(error_msg)

    except requests.exceptions.Timeout:
        error_msg = 'Request timeout (150s exceeded)'
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)
    except Exception as e:
        error_msg = f'Unexpected error: {str(e)}'
        print(f"[AI][{function_name}][Error] {error_msg}")
        logger.error(error_msg, exc_info=True)
        return _failure(error_msg)
|
||||
|
||||
def _generate_image_runware(
    self,
    prompt: str,
    model: Optional[str],
    size: str,
    n: int,
    api_key: Optional[str],
    negative_prompt: Optional[str],
    function_name: str
) -> Dict[str, Any]:
    """
    Generate image(s) via the Runware API.

    Args:
        prompt: Image prompt text.
        model: Runware model id; defaults to 'runware:97@1'.
        size: 'WIDTHxHEIGHT' string, parsed into integer dimensions.
        n: Number of images (affects cost estimate only).
        api_key: Optional key override; falls back to the account key.
        negative_prompt: Forwarded as 'negativePrompt' when provided.
        function_name: Label used in console log lines.

    Returns:
        Dict with 'url', 'provider', 'cost', 'error' (no 'revised_prompt' -
        Runware does not return one).
    """

    def _failure(error_msg: str) -> Dict[str, Any]:
        # Single error shape shared by every failure path in this method.
        return {
            'url': None,
            'provider': 'runware',
            'cost': 0.0,
            'error': error_msg,
        }

    print(f"[AI][{function_name}] Provider: Runware")

    api_key = api_key or self._runware_api_key
    if not api_key:
        error_msg = 'Runware API key not configured'
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)

    runware_model = model or 'runware:97@1'
    print(f"[AI][{function_name}] Step 2: Using model: {runware_model}, size: {size}")

    # Parse size
    try:
        width, height = map(int, size.split('x'))
    except ValueError:
        error_msg = f"Invalid size format: {size}. Expected format: WIDTHxHEIGHT"
        print(f"[AI][{function_name}][Error] {error_msg}")
        return _failure(error_msg)

    url = 'https://api.runware.ai/v1'
    print(f"[AI][{function_name}] Step 3: Sending request to Runware API...")

    # Runware uses array payload
    payload = [{
        'taskType': 'imageInference',
        'model': runware_model,
        'prompt': prompt,
        'width': width,
        'height': height,
        'apiKey': api_key
    }]

    if negative_prompt:
        payload[0]['negativePrompt'] = negative_prompt

    request_start = time.time()
    try:
        response = requests.post(url, json=payload, timeout=150)
        request_duration = time.time() - request_start
        print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")

        if response.status_code != 200:
            error_msg = f"HTTP {response.status_code} error"
            print(f"[AI][{function_name}][Error] {error_msg}")
            return _failure(error_msg)

        body = response.json()
        # Runware returns array with image data
        if isinstance(body, list) and len(body) > 0:
            image_data = body[0]
            image_url = image_data.get('imageURL') or image_data.get('url')

            cost = 0.036 * n  # Runware pricing
            print(f"[AI][{function_name}] Step 5: Image generated successfully")
            print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
            print(f"[AI][{function_name}][Success] Image generation completed")

            return {
                'url': image_url,
                'provider': 'runware',
                'cost': cost,
                'error': None,
            }
        else:
            error_msg = 'No image data in Runware response'
            print(f"[AI][{function_name}][Error] {error_msg}")
            return _failure(error_msg)

    except Exception as e:
        # Catch-all (timeouts included): log and return the uniform shape.
        error_msg = f'Unexpected error: {str(e)}'
        print(f"[AI][{function_name}][Error] {error_msg}")
        logger.error(error_msg, exc_info=True)
        return _failure(error_msg)
|
||||
|
||||
def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
    """Calculate the USD cost of an API call for the given model type."""
    if model_type == 'text':
        # MODEL_RATES values are USD per million tokens; unknown models
        # fall back to a conservative default rate.
        rates = MODEL_RATES.get(model, {'input': 2.00, 'output': 8.00})
        input_cost = (input_tokens / 1_000_000) * rates['input']
        output_cost = (output_tokens / 1_000_000) * rates['output']
        return input_cost + output_cost
    if model_type == 'image':
        # Flat per-image rate with a default fallback.
        return IMAGE_MODEL_RATES.get(model, 0.040)
    return 0.0
|
||||
|
||||
# Legacy method names for backward compatibility
|
||||
def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 4000,
                temperature: float = 0.7, response_format: Optional[Dict] = None,
                api_key: Optional[str] = None) -> Dict[str, Any]:
    """Deprecated alias for run_ai_request(), kept for backward compatibility."""
    # Forward everything unchanged, tagging the log stream with the legacy name.
    forwarded = dict(
        prompt=prompt,
        model=model,
        max_tokens=max_tokens,
        temperature=temperature,
        response_format=response_format,
        api_key=api_key,
        function_name='call_openai',
    )
    return self.run_ai_request(**forwarded)
|
||||
94
backend/igny8_core/ai/helpers/base.py
Normal file
94
backend/igny8_core/ai/helpers/base.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""
|
||||
Base class for all AI functions
|
||||
"""
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
|
||||
class BaseAIFunction(ABC):
    """
    Base class for all AI functions.

    Subclasses implement only their function-specific logic (name, data
    preparation, prompt building, response parsing, persistence).
    """

    @abstractmethod
    def get_name(self) -> str:
        """Return function name (e.g., 'auto_cluster')"""

    def get_metadata(self) -> Dict:
        """Return function metadata (display name, description, phases)."""
        name = self.get_name()
        return {
            'display_name': name.replace('_', ' ').title(),
            'description': f'{name} AI function',
            'phases': {
                'INIT': 'Initializing...',
                'PREP': 'Preparing data...',
                'AI_CALL': 'Processing with AI...',
                'PARSE': 'Parsing response...',
                'SAVE': 'Saving results...',
                'DONE': 'Complete!'
            }
        }

    def validate(self, payload: dict, account=None) -> Dict[str, Any]:
        """
        Validate the input payload.

        Default behavior requires a non-empty 'ids' array; override for
        custom validation. No max-items limit is enforced.
        """
        if not payload.get('ids', []):
            return {'valid': False, 'error': 'No IDs provided'}
        return {'valid': True}

    def get_max_items(self) -> Optional[int]:
        """Override to impose a max-items limit (None means unlimited)."""
        return None

    @abstractmethod
    def prepare(self, payload: dict, account=None) -> Any:
        """Load and prepare data for AI processing; returns the prepared structure."""

    @abstractmethod
    def build_prompt(self, data: Any, account=None) -> str:
        """Build the AI prompt string from prepared data."""

    def get_model(self, account=None) -> Optional[str]:
        """Override to pin a model; None means the account default is used (AIProcessor)."""
        return None

    @abstractmethod
    def parse_response(self, response: str, step_tracker=None) -> Any:
        """Parse the raw AI response into a structured result."""

    @abstractmethod
    def save_output(
        self,
        parsed: Any,
        original_data: Any,
        account=None,
        progress_tracker=None,
        step_tracker=None
    ) -> Dict:
        """Persist parsed results; returns a dict with 'count', 'items_created', etc."""
|
||||
|
||||
52
backend/igny8_core/ai/helpers/models.py
Normal file
52
backend/igny8_core/ai/helpers/models.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""
|
||||
AI Framework Models
|
||||
"""
|
||||
from django.db import models
|
||||
from igny8_core.auth.models import AccountBaseModel
|
||||
|
||||
|
||||
class AITaskLog(AccountBaseModel):
    """
    Unified logging table for all AI tasks.
    Stores request/response steps, costs, tokens, and results.

    One row per AI function invocation; account scoping comes from
    AccountBaseModel (assumed to provide `account` and `created_at` —
    both are referenced in Meta below; confirm against the base model).
    """
    # Correlation id for an async task run; nullable for ad-hoc/synchronous calls.
    task_id = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    # AI function that produced this row, e.g. 'auto_cluster'.
    function_name = models.CharField(max_length=100, db_index=True)
    # Pipeline phase at the time of logging; starts at 'INIT'.
    phase = models.CharField(max_length=50, default='INIT')
    # Free-form human-readable progress/diagnostic message.
    message = models.TextField(blank=True)
    status = models.CharField(max_length=20, choices=[
        ('success', 'Success'),
        ('error', 'Error'),
        ('pending', 'Pending'),
    ], default='pending')

    # Timing
    duration = models.IntegerField(null=True, blank=True, help_text="Duration in milliseconds")

    # Cost tracking (6 decimal places to keep sub-cent per-token costs exact)
    cost = models.DecimalField(max_digits=10, decimal_places=6, default=0.0)
    tokens = models.IntegerField(default=0)

    # Step tracking: ordered lists of step entries captured during the run
    request_steps = models.JSONField(default=list, blank=True)
    response_steps = models.JSONField(default=list, blank=True)

    # Error tracking
    error = models.TextField(null=True, blank=True)

    # Data: raw input payload and final structured result of the task
    payload = models.JSONField(null=True, blank=True)
    result = models.JSONField(null=True, blank=True)

    class Meta:
        db_table = 'igny8_ai_task_logs'
        # Newest rows first by default.
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['task_id']),
            models.Index(fields=['function_name', 'account']),
            models.Index(fields=['status', 'created_at']),
        ]

    def __str__(self):
        # Human-readable summary for admin/list displays.
        return f"{self.function_name} - {self.status} - {self.created_at}"
|
||||
|
||||
116
backend/igny8_core/ai/helpers/settings.py
Normal file
116
backend/igny8_core/ai/helpers/settings.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""
|
||||
AI Settings - Centralized model configurations and limits
|
||||
"""
|
||||
from typing import Dict, Any
|
||||
|
||||
# Model configurations for each AI function.
# These are code-level defaults; per-account model overrides (from
# IntegrationSettings) are applied in get_model_config().
MODEL_CONFIG = {
    "auto_cluster": {
        "model": "gpt-4o-mini",
        "max_tokens": 3000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},  # Auto-enabled for JSON mode models
    },
    "generate_ideas": {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},  # JSON output
    },
    "generate_content": {
        "model": "gpt-4.1",
        "max_tokens": 8000,  # larger budget: full content bodies
        "temperature": 0.7,
        "response_format": {"type": "json_object"},  # JSON output
    },
    "generate_images": {
        # Image generation has no token/temperature settings
        "model": "dall-e-3",
        "size": "1024x1024",
        "provider": "openai",
    },
    "extract_image_prompts": {
        "model": "gpt-4o-mini",
        "max_tokens": 1000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},
    },
}

# Function name aliases (for backward compatibility with older callers)
FUNCTION_ALIASES = {
    "cluster_keywords": "auto_cluster",
    "auto_cluster_keywords": "auto_cluster",
    "auto_generate_ideas": "generate_ideas",
    "auto_generate_content": "generate_content",
    "auto_generate_images": "generate_images",
}
|
||||
|
||||
|
||||
def get_model_config(function_name: str, account=None) -> Dict[str, Any]:
    """
    Get model configuration for an AI function.

    Reads the model from IntegrationSettings if an account is provided,
    otherwise uses the code-level defaults from MODEL_CONFIG.

    Args:
        function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas');
            legacy aliases from FUNCTION_ALIASES are resolved automatically.
        account: Optional account object to read a model override from
            IntegrationSettings.

    Returns:
        Dict with at least model, max_tokens, temperature, response_format.
    """
    # Resolve legacy aliases first (e.g. 'cluster_keywords' -> 'auto_cluster')
    actual_name = FUNCTION_ALIASES.get(function_name, function_name)

    # Copy so account-specific overrides never mutate MODEL_CONFIG
    config = MODEL_CONFIG.get(actual_name, {}).copy()

    # Best-effort account-level model override from IntegrationSettings;
    # any failure here falls back silently to the defaults.
    if account:
        try:
            from igny8_core.modules.system.models import IntegrationSettings
            openai_settings = IntegrationSettings.objects.filter(
                integration_type='openai',
                account=account,
                is_active=True
            ).first()
            if openai_settings and openai_settings.config:
                model_from_settings = openai_settings.config.get('model')
                if model_from_settings:
                    # Only accept models we have pricing for.
                    # NOTE: import MODEL_RATES from ai.constants to stay
                    # consistent with ai_core.py (was igny8_core.utils.ai_processor).
                    from igny8_core.ai.constants import MODEL_RATES
                    if model_from_settings in MODEL_RATES:
                        config['model'] = model_from_settings
        except Exception as e:
            import logging
            logging.getLogger(__name__).warning(
                f"Could not load model from IntegrationSettings: {e}", exc_info=True
            )

    # Fill any keys the function config omitted with safe defaults
    default_config = {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": None,
    }

    return {**default_config, **config}
|
||||
|
||||
|
||||
def get_model(function_name: str) -> str:
    """Return the configured model name for *function_name*."""
    return get_model_config(function_name).get("model", "gpt-4.1")
|
||||
|
||||
|
||||
def get_max_tokens(function_name: str) -> int:
    """Return the configured max token budget for *function_name*."""
    return get_model_config(function_name).get("max_tokens", 4000)
|
||||
|
||||
|
||||
def get_temperature(function_name: str) -> float:
    """Return the configured sampling temperature for *function_name*."""
    return get_model_config(function_name).get("temperature", 0.7)
|
||||
|
||||
347
backend/igny8_core/ai/helpers/tracker.py
Normal file
347
backend/igny8_core/ai/helpers/tracker.py
Normal file
@@ -0,0 +1,347 @@
|
||||
"""
|
||||
Progress and Step Tracking utilities for AI framework
|
||||
"""
|
||||
import time
|
||||
import logging
|
||||
from typing import List, Dict, Any, Optional, Callable
|
||||
from datetime import datetime
|
||||
from igny8_core.ai.types import StepLog, ProgressState
|
||||
from igny8_core.ai.constants import DEBUG_MODE
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StepTracker:
    """Tracks detailed request and response steps for debugging.

    Request and response steps share one sequential step counter so the
    combined log reads in execution order.
    """

    def __init__(self, function_name: str):
        self.function_name = function_name
        self.request_steps: List[Dict] = []
        self.response_steps: List[Dict] = []
        self.step_counter = 0  # shared across request and response steps

    def _build_step(
        self,
        step_name: str,
        status: str,
        message: str,
        error: Optional[str],
        duration: Optional[int],
    ) -> Dict:
        """Build a step record with the next sequential step number.

        Shared by add_request_step/add_response_step (previously duplicated).
        """
        self.step_counter += 1
        step = {
            'stepNumber': self.step_counter,
            'stepName': step_name,
            'functionName': self.function_name,
            'status': status,
            'message': message,
            'duration': duration
        }
        if error:
            step['error'] = error
        return step

    def add_request_step(
        self,
        step_name: str,
        status: str = 'success',
        message: str = '',
        error: str = None,
        duration: int = None
    ) -> Dict:
        """Record a request-side step and return the step dict."""
        step = self._build_step(step_name, status, message, error, duration)
        self.request_steps.append(step)
        return step

    def add_response_step(
        self,
        step_name: str,
        status: str = 'success',
        message: str = '',
        error: str = None,
        duration: int = None
    ) -> Dict:
        """Record a response-side step and return the step dict."""
        step = self._build_step(step_name, status, message, error, duration)
        self.response_steps.append(step)
        return step

    def get_meta(self) -> Dict:
        """Get metadata payload for progress callbacks."""
        return {
            'request_steps': self.request_steps,
            'response_steps': self.response_steps
        }
|
||||
|
||||
|
||||
class ProgressTracker:
    """Tracks progress updates for AI tasks.

    Optionally mirrors every update to a bound Celery task via
    ``update_state``; transport failures are logged and swallowed so
    progress reporting can never break the task itself.
    """

    def __init__(self, celery_task=None):
        """
        Args:
            celery_task: Optional bound Celery task to mirror updates to.
        """
        self.task = celery_task
        self.current_phase = 'INIT'
        self.current_message = 'Initializing...'
        self.current_percentage = 0
        self.start_time = time.time()
        self.current = 0
        self.total = 0

    def _push_state(self, state: str, meta: Dict):
        """Forward a state update to Celery, logging (not raising) failures.

        Extracted from update/complete/error, which previously triplicated
        the same try/except block.
        """
        if self.task:
            try:
                self.task.update_state(state=state, meta=meta)
            except Exception as e:
                logger.warning(f"Failed to update Celery task state: {e}")

    def update(
        self,
        phase: str,
        percentage: int,
        message: str,
        current: int = None,
        total: int = None,
        current_item: str = None,
        meta: Dict = None
    ):
        """Update progress with a consistent meta format."""
        self.current_phase = phase
        self.current_message = message
        self.current_percentage = percentage

        if current is not None:
            self.current = current
        if total is not None:
            self.total = total

        progress_meta = {
            'phase': phase,
            'percentage': percentage,
            'message': message,
            'current': self.current,
            'total': self.total,
        }

        if current_item:
            progress_meta['current_item'] = current_item

        if meta:
            progress_meta.update(meta)

        self._push_state('PROGRESS', progress_meta)

        logger.info(f"[{phase}] {percentage}%: {message}")

    def set_phase(self, phase: str, percentage: int, message: str, meta: Dict = None):
        """Set progress phase (thin wrapper over update)."""
        self.update(phase, percentage, message, meta=meta)

    def complete(self, message: str = "Task complete!", meta: Dict = None):
        """Mark task as complete (SUCCESS state, 100%)."""
        final_meta = {
            'phase': 'DONE',
            'percentage': 100,
            'message': message,
            'status': 'success'
        }
        if meta:
            final_meta.update(meta)

        self._push_state('SUCCESS', final_meta)

    def error(self, error_message: str, meta: Dict = None):
        """Mark task as failed (FAILURE state)."""
        error_meta = {
            'phase': 'ERROR',
            'percentage': 0,
            'message': f'Error: {error_message}',
            'status': 'error',
            'error': error_message
        }
        if meta:
            error_meta.update(meta)

        self._push_state('FAILURE', error_meta)

    def get_duration(self) -> int:
        """Get elapsed time since construction, in milliseconds."""
        return int((time.time() - self.start_time) * 1000)

    def update_ai_progress(self, state: str, meta: Dict):
        """Callback for AI processor progress updates.

        Missing keys fall back to the last known phase/percentage/message.
        """
        if isinstance(meta, dict):
            percentage = meta.get('percentage', self.current_percentage)
            message = meta.get('message', self.current_message)
            phase = meta.get('phase', self.current_phase)
            self.update(phase, percentage, message, meta=meta)
|
||||
|
||||
|
||||
class CostTracker:
    """Accumulates API cost and token usage across multiple operations."""

    def __init__(self):
        self.total_cost = 0.0
        self.total_tokens = 0
        self.operations = []

    def record(self, function_name: str, cost: float, tokens: int, model: str = None):
        """Record one API call's cost and token usage."""
        self.total_cost += cost
        self.total_tokens += tokens
        entry = {
            'function': function_name,
            'cost': cost,
            'tokens': tokens,
            'model': model,
        }
        self.operations.append(entry)

    def get_total(self) -> float:
        """Total accumulated cost."""
        return self.total_cost

    def get_total_tokens(self) -> int:
        """Total accumulated token count."""
        return self.total_tokens

    def get_operations(self) -> List[Dict]:
        """All recorded operations, in insertion order."""
        return self.operations
|
||||
|
||||
|
||||
class ConsoleStepTracker:
    """
    Lightweight console-based step tracker for AI functions.

    Logs each step to console with timestamps and clear labels.
    Only logs if DEBUG_MODE is True (errors still go through the logger).
    Every step is also kept in ``self.steps`` for later inspection.
    """

    def __init__(self, function_name: str):
        self.function_name = function_name
        self.start_time = time.time()
        self.steps = []               # recorded step dicts, in order
        self.current_phase = None     # last phase logged; reused by error()

        # Debug: announce whether DEBUG_MODE is enabled so a silent tracker
        # is diagnosable from the worker logs.
        import sys
        if DEBUG_MODE:
            init_msg = f"[DEBUG] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is ENABLED"
            logger.info(init_msg)
            print(init_msg, flush=True, file=sys.stdout)
        else:
            init_msg = f"[WARNING] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is DISABLED"
            logger.warning(init_msg)
            print(init_msg, flush=True, file=sys.stdout)

    def _log(self, phase: str, message: str, status: str = 'info'):
        """Internal logging method; no-ops entirely when DEBUG_MODE is off."""
        if not DEBUG_MODE:
            return

        import sys
        timestamp = datetime.now().strftime('%H:%M:%S')
        phase_label = phase.upper()

        if status == 'error':
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] [ERROR] {message}"
            # Use logger.error for errors so they're always visible
            logger.error(log_msg)
        elif status == 'success':
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] ✅ {message}"
            logger.info(log_msg)
        else:
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] {message}"
            logger.info(log_msg)

        # Also print to stdout for immediate visibility (works in Celery worker logs)
        print(log_msg, flush=True, file=sys.stdout)

        self.steps.append({
            'timestamp': timestamp,
            'phase': phase,
            'message': message,
            'status': status
        })
        self.current_phase = phase

    def init(self, message: str = "Task started"):
        """Log initialization phase"""
        self._log('INIT', message)

    def prep(self, message: str):
        """Log preparation phase"""
        self._log('PREP', message)

    def ai_call(self, message: str):
        """Log AI call phase"""
        self._log('AI_CALL', message)

    def parse(self, message: str):
        """Log parsing phase"""
        self._log('PARSE', message)

    def save(self, message: str):
        """Log save phase"""
        self._log('SAVE', message)

    def done(self, message: str = "Execution completed"):
        """Log completion, including total elapsed time since construction."""
        duration = time.time() - self.start_time
        self._log('DONE', f"{message} (Duration: {duration:.2f}s)", status='success')
        if DEBUG_MODE:
            import sys
            complete_msg = f"[{self.function_name}] === AI Task Complete ==="
            logger.info(complete_msg)
            print(complete_msg, flush=True, file=sys.stdout)

    def error(self, error_type: str, message: str, exception: Exception = None):
        """Log an error in a standardized format.

        Uses the last logged phase as the error's phase when available,
        and dumps the stack trace when DEBUG_MODE is on and an exception
        object was supplied.
        """
        error_msg = f"{error_type} – {message}"
        if exception:
            error_msg += f" ({type(exception).__name__})"
        self._log(self.current_phase or 'ERROR', error_msg, status='error')
        if DEBUG_MODE and exception:
            import sys
            import traceback
            error_trace_msg = f"[{self.function_name}] [ERROR] Stack trace:"
            logger.error(error_trace_msg, exc_info=exception)
            print(error_trace_msg, flush=True, file=sys.stdout)
            traceback.print_exc(file=sys.stdout)

    def retry(self, attempt: int, max_attempts: int, reason: str = ""):
        """Log a retry attempt (logged as part of the AI_CALL phase)."""
        msg = f"Retry attempt {attempt}/{max_attempts}"
        if reason:
            msg += f" – {reason}"
        self._log('AI_CALL', msg, status='info')

    def timeout(self, timeout_seconds: int):
        """Log a request timeout as an error."""
        self.error('Timeout', f"Request timeout after {timeout_seconds}s")

    def rate_limit(self, retry_after: str):
        """Log an API rate-limit hit as an error."""
        self.error('RateLimit', f"OpenAI rate limit hit, retry in {retry_after}s")

    def malformed_json(self, details: str = ""):
        """Log a JSON parsing failure as an error."""
        msg = "Failed to parse model response: Unexpected JSON"
        if details:
            msg += f" – {details}"
        self.error('MalformedJSON', msg)
|
||||
|
||||
5
backend/igny8_core/ai/templates/__init__.py
Normal file
5
backend/igny8_core/ai/templates/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""
|
||||
AI Templates
|
||||
Template files for reference when creating new AI functions.
|
||||
"""
|
||||
|
||||
281
backend/igny8_core/ai/templates/ai_functions_template.py
Normal file
281
backend/igny8_core/ai/templates/ai_functions_template.py
Normal file
@@ -0,0 +1,281 @@
|
||||
"""
|
||||
AI Functions Template
|
||||
Template/Reference file showing the common pattern used by auto_cluster, generate_ideas, and generate_content.
|
||||
This is a reference template - do not modify existing functions, use this as a guide for new functions.
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
from igny8_core.auth.models import Account
|
||||
from igny8_core.ai.helpers.base import BaseAIFunction
|
||||
from igny8_core.ai.helpers.ai_core import AICore
|
||||
from igny8_core.ai.helpers.tracker import ConsoleStepTracker
|
||||
from igny8_core.ai.helpers.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ai_function_core_template(
    function_class: BaseAIFunction,
    function_name: str,
    payload: Dict[str, Any],
    account_id: Optional[int] = None,
    progress_callback: Optional[callable] = None,
    **kwargs
) -> Dict[str, Any]:
    """
    Template for AI function core logic (legacy function signature pattern).

    This template shows the common pattern used by:
    - generate_ideas_core
    - generate_content_core
    - auto_cluster (via engine, but similar pattern)

    Pipeline: validate -> prepare -> build_prompt -> AI call -> parse -> save.
    Every failure path returns {'success': False, 'error': ...} rather than
    raising, so callers (Celery tasks) get a uniform result shape.

    Usage Example:
        def my_function_core(item_id: int, account_id: int = None, progress_callback=None):
            fn = MyFunctionClass()
            payload = {'ids': [item_id]}
            return ai_function_core_template(
                function_class=fn,
                function_name='my_function',
                payload=payload,
                account_id=account_id,
                progress_callback=progress_callback
            )

    Args:
        function_class: Instance of the AI function class (e.g., GenerateIdeasFunction())
        function_name: Function name for config/tracking (e.g., 'generate_ideas')
        payload: Payload dict with 'ids' and other function-specific data
        account_id: Optional account ID for account isolation
        progress_callback: Optional progress callback for Celery tasks
        **kwargs: Additional function-specific parameters

    Returns:
        Dict with 'success', function-specific result fields, 'message', etc.
    """
    # Initialize tracker
    tracker = ConsoleStepTracker(function_name)
    tracker.init("Task started")

    try:
        # Load account (None means no account isolation)
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)

        tracker.prep("Loading account data...")

        # Store account on function instance so hooks can access it
        function_class.account = account

        # Validate
        tracker.prep("Validating input...")
        validated = function_class.validate(payload, account)
        if not validated['valid']:
            tracker.error('ValidationError', validated['error'])
            return {'success': False, 'error': validated['error']}

        # Prepare data
        tracker.prep("Preparing data...")
        data = function_class.prepare(payload, account)

        # Build prompt
        tracker.prep("Building prompt...")
        prompt = function_class.build_prompt(data, account)

        # Get model config from settings
        model_config = get_model_config(function_name)

        # Generate function_id for tracking (ai_ prefix with function name)
        function_id = f"ai_{function_name}"

        # Call AI using centralized request handler
        ai_core = AICore(account=account)
        result = ai_core.run_ai_request(
            prompt=prompt,
            model=model_config.get('model'),
            max_tokens=model_config.get('max_tokens'),
            temperature=model_config.get('temperature'),
            response_format=model_config.get('response_format'),
            function_name=function_name,
            function_id=function_id,
            tracker=tracker
        )

        if result.get('error'):
            return {'success': False, 'error': result['error']}

        # Parse response
        tracker.parse("Parsing AI response...")
        parsed = function_class.parse_response(result['content'], tracker)

        if not parsed:
            tracker.error('ParseError', 'No data parsed from AI response')
            return {'success': False, 'error': 'No data parsed from AI response'}

        # Handle list responses (count is logged only; parsed passed on as-is)
        if isinstance(parsed, list):
            parsed_count = len(parsed)
            tracker.parse(f"Parsed {parsed_count} item(s)")
        else:
            parsed_count = 1
            tracker.parse("Parsed response")

        # Save output
        tracker.save("Saving to database...")
        save_result = function_class.save_output(parsed, data, account, step_tracker=tracker)
        tracker.save(f"Saved {save_result.get('count', 0)} item(s)")

        # Build success message; name the first created item when available
        if isinstance(parsed, list) and len(parsed) > 0:
            first_item = parsed[0]
            item_name = first_item.get('title') or first_item.get('name') or 'item'
            tracker.done(f"Successfully created {item_name}")
            message = f"Successfully created {item_name}"
        else:
            tracker.done("Task completed successfully")
            message = "Task completed successfully"

        return {
            'success': True,
            **save_result,
            'message': message
        }

    except Exception as e:
        tracker.error('Exception', str(e), e)
        logger.error(f"Error in {function_name}_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}
|
||||
|
||||
|
||||
def ai_function_batch_template(
    function_class: BaseAIFunction,
    function_name: str,
    payload: Dict[str, Any],
    account_id: Optional[int] = None,
    progress_callback: Optional[callable] = None,
    **kwargs
) -> Dict[str, Any]:
    """
    Template for AI function batch processing (like generate_content_core).

    This template shows the pattern for functions that process multiple
    items in a loop. Each item is processed independently: a failure on one
    item is logged and skipped, never aborting the whole batch.

    Usage Example:
        def my_batch_function_core(item_ids: List[int], account_id: int = None, progress_callback=None):
            fn = MyFunctionClass()
            payload = {'ids': item_ids}
            return ai_function_batch_template(
                function_class=fn,
                function_name='my_function',
                payload=payload,
                account_id=account_id,
                progress_callback=progress_callback
            )

    Args:
        function_class: Instance of the AI function class
        function_name: Function name for config/tracking
        payload: Payload dict with 'ids' list
        account_id: Optional account ID for account isolation
        progress_callback: Optional progress callback for Celery tasks
        **kwargs: Additional function-specific parameters

    Returns:
        Dict with 'success', 'count', 'tasks_updated', 'message', etc.
    """
    tracker = ConsoleStepTracker(function_name)
    tracker.init("Task started")

    try:
        # Load account (None means no account isolation)
        account = None
        if account_id:
            account = Account.objects.get(id=account_id)

        tracker.prep("Loading account data...")

        # Store account on function instance so hooks can access it
        function_class.account = account

        # Validate
        tracker.prep("Validating input...")
        validated = function_class.validate(payload, account)
        if not validated['valid']:
            tracker.error('ValidationError', validated['error'])
            return {'success': False, 'error': validated['error']}

        # Prepare data; coerce a single item to a one-element list so the
        # loop below always works with a list.
        tracker.prep("Preparing data...")
        items = function_class.prepare(payload, account)
        if not isinstance(items, list):
            items = [items]

        total_items = len(items)
        processed_count = 0

        tracker.prep(f"Processing {total_items} item(s)...")

        # Get model config once (shared across all items)
        model_config = get_model_config(function_name)
        # Generate function_id for tracking (ai_ prefix with function name)
        function_id = f"ai_{function_name}"
        ai_core = AICore(account=account)

        # Process each item independently
        for idx, item in enumerate(items):
            try:
                # `items` is guaranteed to be a list at this point, so the
                # hooks always receive a single-item list. (The previous
                # `item if not isinstance(items, list) else [item]` had an
                # unreachable first branch.)
                prompt = function_class.build_prompt([item], account)

                # Call AI
                result = ai_core.run_ai_request(
                    prompt=prompt,
                    model=model_config.get('model'),
                    max_tokens=model_config.get('max_tokens'),
                    temperature=model_config.get('temperature'),
                    response_format=model_config.get('response_format'),
                    function_name=function_name,
                    function_id=function_id,
                    tracker=tracker
                )

                if result.get('error'):
                    logger.error(f"AI error for item {idx + 1}/{total_items}: {result['error']}")
                    continue

                # Parse response
                parsed = function_class.parse_response(result['content'], tracker)

                if not parsed:
                    logger.warning(f"No data parsed for item {idx + 1}/{total_items}")
                    continue

                # Save output
                save_result = function_class.save_output(
                    parsed,
                    [item],
                    account,
                    step_tracker=tracker
                )

                # save_output may report either 'count' or 'tasks_updated'
                processed_count += save_result.get('count', 0) or save_result.get('tasks_updated', 0) or 0

            except Exception as e:
                logger.error(f"Error processing item {idx + 1}/{total_items}: {str(e)}", exc_info=True)
                continue

        tracker.done(f"Processed {processed_count} item(s) successfully")

        return {
            'success': True,
            'count': processed_count,
            'tasks_updated': processed_count,
            'message': f'Task completed: {processed_count} item(s) processed'
        }

    except Exception as e:
        tracker.error('Exception', str(e), e)
        logger.error(f"Error in {function_name}_core: {str(e)}", exc_info=True)
        return {'success': False, 'error': str(e)}
|
||||
|
||||
80
backend/igny8_core/ai/templates/modals_template.py
Normal file
80
backend/igny8_core/ai/templates/modals_template.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""
|
||||
Modal Configuration Templates for AI Functions
|
||||
Each function uses the same AIProgressModal component with different configs.
|
||||
"""
|
||||
|
||||
# Modal configuration templates for each AI function.
# All functions share the same AIProgressModal component; only the copy and
# function_id differ. The *_message_template strings are filled via
# str.format with the function's result dict (see format_success_message).
MODAL_CONFIGS = {
    'auto_cluster': {
        'title': 'Auto Cluster Keywords',
        'function_id': 'ai_auto_cluster',
        'success_title': 'Clustering Complete!',
        'success_message_template': 'Successfully created {clusters_created} clusters and updated {keywords_updated} keywords.',
        'error_title': 'Clustering Failed',
        'error_message_template': 'An error occurred while clustering keywords. Please try again.',
    },
    'generate_ideas': {
        'title': 'Generating Ideas',
        'function_id': 'ai_generate_ideas',
        'success_title': 'Ideas Generated!',
        'success_message_template': 'Successfully generated {ideas_created} content idea(s).',
        'error_title': 'Idea Generation Failed',
        'error_message_template': 'An error occurred while generating ideas. Please try again.',
    },
    'generate_content': {
        'title': 'Generating Content',
        'function_id': 'ai_generate_content',
        'success_title': 'Content Generated!',
        'success_message_template': 'Successfully generated content for {tasks_updated} task(s).',
        'error_title': 'Content Generation Failed',
        'error_message_template': 'An error occurred while generating content. Please try again.',
    },
}

# Legacy function IDs (for backward compatibility); applied by
# get_modal_config when is_legacy=True.
LEGACY_FUNCTION_IDS = {
    'generate_ideas': 'ai_generate_ideas',
    'generate_content': 'ai_generate_content',
}
|
||||
|
||||
|
||||
def get_modal_config(function_name: str, is_legacy: bool = False) -> dict:
    """
    Get modal configuration for an AI function.

    Args:
        function_name: Function name (e.g., 'auto_cluster', 'generate_ideas', 'generate_content')
        is_legacy: Whether this is a legacy function path

    Returns:
        Dict with modal configuration
    """
    # Work on a copy so callers can mutate the result freely.
    modal_config = dict(MODAL_CONFIGS.get(function_name, {}))

    # Legacy paths keep their historical function_id.
    legacy_id = LEGACY_FUNCTION_IDS.get(function_name)
    if is_legacy and legacy_id is not None:
        modal_config['function_id'] = legacy_id

    return modal_config
|
||||
|
||||
|
||||
def format_success_message(function_name: str, result: dict) -> str:
    """
    Format success message based on function result.

    Args:
        function_name: Function name
        result: Result dict from function execution

    Returns:
        Formatted success message
    """
    config = MODAL_CONFIGS.get(function_name, {})
    template = config.get('success_message_template', 'Task completed successfully.')

    try:
        return template.format(**result)
    except (KeyError, IndexError):
        # Template placeholders missing from result: fall back to a generic
        # message. (Previously this returned the raw template, showing
        # unfilled '{placeholders}' to the user.)
        return 'Task completed successfully.'
|
||||
|
||||
458
frontend/src/components/common/AIProgressModal.tsx
Normal file
458
frontend/src/components/common/AIProgressModal.tsx
Normal file
@@ -0,0 +1,458 @@
|
||||
import React, { useEffect, useRef } from 'react';
|
||||
import { Modal } from '../ui/modal';
|
||||
import { ProgressBar } from '../ui/progress';
|
||||
import Button from '../ui/button/Button';
|
||||
|
||||
/** Props for the shared AI progress modal used by all AI functions. */
export interface AIProgressModalProps {
  /** Whether the modal is visible. */
  isOpen: boolean;
  /** Modal heading. */
  title: string;
  percentage: number; // 0-100
  /** Drives which screen is shown (spinner, success, error). */
  status: 'pending' | 'processing' | 'completed' | 'error';
  /** Current progress message. */
  message: string;
  /** Optional batch-progress details. */
  details?: {
    current: number;
    total: number;
    completed: number;
    currentItem?: string;
    phase?: string;
  };
  onClose?: () => void;
  onCancel?: () => void;
  /** Backend task id, when available. */
  taskId?: string;
  functionId?: string; // AI function ID for tracking (e.g., "ai-cluster-01")
  stepLogs?: Array<{
    stepNumber: number;
    stepName: string;
    status: string;
    message: string;
    timestamp?: number;
  }>; // Step logs for debugging
  /** Per-function overrides for success/error copy. */
  config?: {
    successTitle?: string;
    successMessage?: string;
    errorTitle?: string;
    errorMessage?: string;
  };
}
|
||||
|
||||
// Generate modal instance ID (increments per modal instance)
|
||||
let modalInstanceCounter = 0;
|
||||
const getModalInstanceId = () => {
|
||||
modalInstanceCounter++;
|
||||
return `modal-${String(modalInstanceCounter).padStart(2, '0')}`;
|
||||
};
|
||||
|
||||
export default function AIProgressModal({
|
||||
isOpen,
|
||||
title,
|
||||
percentage,
|
||||
status,
|
||||
message,
|
||||
details,
|
||||
onClose,
|
||||
onCancel,
|
||||
taskId,
|
||||
functionId,
|
||||
stepLogs = [],
|
||||
config = {},
|
||||
}: AIProgressModalProps) {
|
||||
// Generate modal instance ID on first render
|
||||
const modalInstanceIdRef = React.useRef<string | null>(null);
|
||||
React.useEffect(() => {
|
||||
if (!modalInstanceIdRef.current) {
|
||||
modalInstanceIdRef.current = getModalInstanceId();
|
||||
}
|
||||
}, []);
|
||||
|
||||
const modalInstanceId = modalInstanceIdRef.current || 'modal-01';
|
||||
|
||||
// Build full function ID with modal instance
|
||||
const fullFunctionId = functionId ? `${functionId}-${modalInstanceId}` : null;
|
||||
|
||||
// Determine color based on status
|
||||
const getProgressColor = (): 'primary' | 'success' | 'error' | 'warning' => {
|
||||
if (status === 'error') return 'error';
|
||||
if (status === 'completed') return 'success';
|
||||
if (status === 'processing') return 'primary';
|
||||
return 'primary';
|
||||
};
|
||||
|
||||
// Success icon (from AlertModal style)
|
||||
const SuccessIcon = () => (
|
||||
<div className="relative flex items-center justify-center w-24 h-24 mx-auto mb-6">
|
||||
{/* Light green flower-like outer shape with rounded petals */}
|
||||
<div
|
||||
className="absolute inset-0 bg-success-100 rounded-full"
|
||||
style={{
|
||||
clipPath: 'polygon(50% 0%, 61% 35%, 98% 35%, 68% 57%, 79% 91%, 50% 70%, 21% 91%, 32% 57%, 2% 35%, 39% 35%)',
|
||||
width: '80px',
|
||||
height: '80px'
|
||||
}}
|
||||
/>
|
||||
{/* Dark green inner circle */}
|
||||
<div className="relative bg-success-600 rounded-full w-16 h-16 flex items-center justify-center shadow-lg">
|
||||
<svg
|
||||
className="w-8 h-8 text-white"
|
||||
fill="none"
|
||||
stroke="currentColor"
|
||||
strokeWidth={3}
|
||||
viewBox="0 0 24 24"
|
||||
>
|
||||
<path
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
d="M5 13l4 4L19 7"
|
||||
/>
|
||||
</svg>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
// Error icon.
// Decorative failure badge: a blurred, semi-transparent light red glow behind
// a light red circle containing a red X. Purely presentational — takes no
// props and reads no state.
const ErrorIcon = () => (
  <div className="relative flex items-center justify-center w-24 h-24 mx-auto mb-6">
    {/* Light red cloud-like background (blur-2xl + opacity-50 soft glow) */}
    <div
      className="absolute inset-0 bg-error-100 rounded-full blur-2xl opacity-50"
      style={{
        width: '90px',
        height: '90px',
        transform: 'scale(1.1)'
      }}
    />
    {/* Light red circle with red X */}
    <div className="relative bg-error-100 rounded-full w-16 h-16 flex items-center justify-center shadow-lg">
      <svg
        className="w-10 h-10 text-error-500"
        fill="currentColor"
        viewBox="0 0 24 24"
      >
        {/* X (cross) path */}
        <path
          fillRule="evenodd"
          d="M18.364 5.636a1 1 0 010 1.414L13.414 12l4.95 4.95a1 1 0 11-1.414 1.414L12 13.414l-4.95 4.95a1 1 0 01-1.414-1.414L10.586 12 5.636 7.05a1 1 0 011.414-1.414L12 10.586l4.95-4.95a1 1 0 011.414 0z"
          clipRule="evenodd"
        />
      </svg>
    </div>
  </div>
);
|
||||
|
||||
// Processing spinner.
// Indeterminate activity indicator: the common Tailwind SVG spinner (a solid
// quarter arc over a faint full circle) rotated by `animate-spin`. Purely
// presentational — takes no props and reads no state.
const ProcessingIcon = () => (
  <div className="flex items-center justify-center w-16 h-16 mx-auto mb-6">
    <svg
      className="w-16 h-16 text-brand-500 animate-spin"
      fill="none"
      viewBox="0 0 24 24"
    >
      {/* Faint full circle acting as the spinner track */}
      <circle
        className="opacity-25"
        cx="12"
        cy="12"
        r="10"
        stroke="currentColor"
        strokeWidth="4"
      />
      {/* Solid quarter arc that visually chases around the track */}
      <path
        className="opacity-75"
        fill="currentColor"
        d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"
      />
    </svg>
  </div>
);
|
||||
|
||||
// Show completion screen with big success icon.
// Early return: renders a closable success modal (config.successTitle /
// successMessage override the title/message props) and skips the
// progress-bar layout below.
if (status === 'completed') {
  return (
    <Modal
      isOpen={isOpen}
      onClose={onClose || (() => {})}
      className="max-w-md"
      showCloseButton={true}
    >
      <div className="px-8 py-10 text-center">
        {/* Big Success Icon */}
        <SuccessIcon />

        {/* Title — config override, then title prop, then default */}
        <h2 className="text-2xl font-bold text-gray-800 dark:text-white mb-4">
          {config.successTitle || title || 'Task Completed!'}
        </h2>

        {/* Message — config override, then message prop */}
        <p className="text-gray-600 dark:text-gray-400 mb-6 text-sm leading-relaxed">
          {config.successMessage || message}
        </p>

        {/* Details if available — "<completed> / <total> items completed" */}
        {details && details.total > 0 && (
          <div className="mb-6 p-4 bg-gray-50 dark:bg-gray-800 rounded-lg">
            <div className="text-sm text-gray-600 dark:text-gray-400">
              <span className="font-semibold text-gray-900 dark:text-white">
                {details.completed || details.current}
              </span>
              {' / '}
              <span className="text-gray-500 dark:text-gray-400">
                {details.total}
              </span>
              {' items completed'}
            </div>
          </div>
        )}

        {/* Function ID and Task ID (for debugging) */}
        {(fullFunctionId || taskId) && (
          <div className="mb-6 space-y-1 text-xs text-gray-400 dark:text-gray-600">
            {fullFunctionId && <div>Function ID: {fullFunctionId}</div>}
            {taskId && <div>Task ID: {taskId}</div>}
          </div>
        )}

        {/* Step Logs / Debug Logs — scrollable list, color-coded per step.status */}
        {stepLogs.length > 0 && (
          <div className="mb-6 max-h-48 overflow-y-auto bg-gray-50 dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700 p-3">
            <div className="flex items-center justify-between mb-2">
              <h4 className="text-xs font-semibold text-gray-700 dark:text-gray-300">
                Step Logs
              </h4>
              <span className="text-xs text-gray-500 dark:text-gray-400">
                {stepLogs.length} step{stepLogs.length !== 1 ? 's' : ''}
              </span>
            </div>
            <div className="space-y-1">
              {stepLogs.map((step, index) => (
                <div
                  key={index}
                  className={`text-xs p-2 rounded border ${
                    step.status === 'success'
                      ? 'bg-green-50 dark:bg-green-900/20 border-green-200 dark:border-green-800 text-green-800 dark:text-green-300'
                      : step.status === 'error'
                      ? 'bg-red-50 dark:bg-red-900/20 border-red-200 dark:border-red-800 text-red-800 dark:text-red-300'
                      : 'bg-blue-50 dark:bg-blue-900/20 border-blue-200 dark:border-blue-800 text-blue-800 dark:text-blue-300'
                  }`}
                >
                  <div className="flex items-center gap-2">
                    <span className="font-mono font-semibold">
                      [{step.stepNumber}]
                    </span>
                    <span className="font-semibold">{step.stepName}:</span>
                    <span>{step.message}</span>
                  </div>
                </div>
              ))}
            </div>
          </div>
        )}

        {/* Close Button — success-colored variant */}
        <div className="flex justify-center">
          <button
            onClick={onClose}
            className="px-6 py-3 rounded-lg font-medium text-sm transition-colors shadow-sm bg-success-500 hover:bg-success-600 text-white"
          >
            Close
          </button>
        </div>
      </div>
    </Modal>
  );
}
|
||||
|
||||
// Show error screen with big error icon.
// Early return: closable error modal mirroring the success branch, with
// config.errorTitle / errorMessage overrides and an error-colored Close
// button.
if (status === 'error') {
  return (
    <Modal
      isOpen={isOpen}
      onClose={onClose || (() => {})}
      className="max-w-md"
      showCloseButton={true}
    >
      <div className="px-8 py-10 text-center">
        {/* Big Error Icon */}
        <ErrorIcon />

        {/* Title — config override, else a generic default */}
        <h2 className="text-2xl font-bold text-gray-800 dark:text-white mb-4">
          {config.errorTitle || 'Error Occurred'}
        </h2>

        {/* Message — config override, then message prop */}
        <p className="text-gray-600 dark:text-gray-400 mb-6 text-sm leading-relaxed">
          {config.errorMessage || message}
        </p>

        {/* Function ID and Task ID (for debugging) */}
        {(fullFunctionId || taskId) && (
          <div className="mb-6 space-y-1 text-xs text-gray-400 dark:text-gray-600">
            {fullFunctionId && <div>Function ID: {fullFunctionId}</div>}
            {taskId && <div>Task ID: {taskId}</div>}
          </div>
        )}

        {/* Step Logs / Debug Logs — same color-coded panel as the success branch */}
        {stepLogs.length > 0 && (
          <div className="mb-6 max-h-48 overflow-y-auto bg-gray-50 dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700 p-3">
            <div className="flex items-center justify-between mb-2">
              <h4 className="text-xs font-semibold text-gray-700 dark:text-gray-300">
                Step Logs
              </h4>
              <span className="text-xs text-gray-500 dark:text-gray-400">
                {stepLogs.length} step{stepLogs.length !== 1 ? 's' : ''}
              </span>
            </div>
            <div className="space-y-1">
              {stepLogs.map((step, index) => (
                <div
                  key={index}
                  className={`text-xs p-2 rounded border ${
                    step.status === 'success'
                      ? 'bg-green-50 dark:bg-green-900/20 border-green-200 dark:border-green-800 text-green-800 dark:text-green-300'
                      : step.status === 'error'
                      ? 'bg-red-50 dark:bg-red-900/20 border-red-200 dark:border-red-800 text-red-800 dark:text-red-300'
                      : 'bg-blue-50 dark:bg-blue-900/20 border-blue-200 dark:border-blue-800 text-blue-800 dark:text-blue-300'
                  }`}
                >
                  <div className="flex items-center gap-2">
                    <span className="font-mono font-semibold">
                      [{step.stepNumber}]
                    </span>
                    <span className="font-semibold">{step.stepName}:</span>
                    <span>{step.message}</span>
                  </div>
                </div>
              ))}
            </div>
          </div>
        )}

        {/* Close Button — error-colored variant */}
        <div className="flex justify-center">
          <button
            onClick={onClose}
            className="px-6 py-3 rounded-lg font-medium text-sm transition-colors shadow-sm bg-error-500 hover:bg-error-600 text-white"
          >
            Close
          </button>
        </div>
      </div>
    </Modal>
  );
}
|
||||
|
||||
// Processing/Pending state - show progress modal.
// Default render path (status is neither 'completed' nor 'error'): wider,
// non-dismissable modal (showCloseButton={false}) with spinner, progress
// bar, optional details, debug ids, step logs, and an optional Cancel button.
return (
  <Modal
    isOpen={isOpen}
    onClose={onClose || (() => {})}
    className="max-w-lg"
    showCloseButton={false}
  >
    <div className="p-6 min-h-[300px]">
      {/* Header with Processing Icon */}
      <div className="flex flex-col items-center mb-6">
        <ProcessingIcon />
        <h3 className="text-xl font-semibold text-gray-900 dark:text-white mb-2 text-center">
          {title}
        </h3>
        <p className="text-sm text-gray-600 dark:text-gray-400 text-center">
          {message}
        </p>
      </div>

      {/* Progress Bar — labeled with the rounded percentage */}
      <div className="mb-6">
        <ProgressBar
          value={percentage}
          color={getProgressColor()}
          size="lg"
          showLabel={true}
          label={`${Math.round(percentage)}%`}
        />
      </div>

      {/* Details (current/total) — note the fallback order here is
          current || completed, the reverse of the completed branch */}
      {details && details.total > 0 && (
        <div className="mb-4 p-3 bg-gray-50 dark:bg-gray-800 rounded-lg">
          <div className="flex items-center justify-between text-sm">
            <span className="text-gray-600 dark:text-gray-400">
              Progress
            </span>
            <span className="font-semibold text-gray-900 dark:text-white">
              {details.current || details.completed || 0} / {details.total}
            </span>
          </div>
          {details.currentItem && (
            <div className="mt-2 text-xs text-gray-500 dark:text-gray-400 truncate">
              Current: {details.currentItem}
            </div>
          )}
        </div>
      )}

      {/* Function ID and Task ID (for debugging) */}
      {(fullFunctionId || taskId) && (
        <div className="mb-4 space-y-1 text-xs text-gray-400 dark:text-gray-600">
          {fullFunctionId && (
            <div>Function ID: {fullFunctionId}</div>
          )}
          {taskId && (
            <div>Task ID: {taskId}</div>
          )}
        </div>
      )}

      {/* Step Logs / Debug Logs — same color-coded panel as the end screens */}
      {stepLogs.length > 0 && (
        <div className="mb-4 max-h-48 overflow-y-auto bg-gray-50 dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700 p-3">
          <div className="flex items-center justify-between mb-2">
            <h4 className="text-xs font-semibold text-gray-700 dark:text-gray-300">
              Step Logs
            </h4>
            <span className="text-xs text-gray-500 dark:text-gray-400">
              {stepLogs.length} step{stepLogs.length !== 1 ? 's' : ''}
            </span>
          </div>
          <div className="space-y-1">
            {stepLogs.map((step, index) => (
              <div
                key={index}
                className={`text-xs p-2 rounded border ${
                  step.status === 'success'
                    ? 'bg-green-50 dark:bg-green-900/20 border-green-200 dark:border-green-800 text-green-800 dark:text-green-300'
                    : step.status === 'error'
                    ? 'bg-red-50 dark:bg-red-900/20 border-red-200 dark:border-red-800 text-red-800 dark:text-red-300'
                    : 'bg-blue-50 dark:bg-blue-900/20 border-blue-200 dark:border-blue-800 text-blue-800 dark:text-blue-300'
                }`}
              >
                <div className="flex items-center gap-2">
                  <span className="font-mono font-semibold">
                    [{step.stepNumber}]
                  </span>
                  <span className="font-semibold">{step.stepName}:</span>
                  <span>{step.message}</span>
                </div>
              </div>
            ))}
          </div>
        </div>
      )}

      {/* Footer.
          NOTE(review): Cancel renders only while status is not
          completed/error, yet is disabled when status === 'processing' —
          so it is effectively clickable only in the pending state.
          Confirm that is intended. */}
      <div className="flex justify-end gap-3">
        {onCancel && status !== 'completed' && status !== 'error' && (
          <Button
            variant="secondary"
            size="sm"
            onClick={onCancel}
            disabled={status === 'processing'}
          >
            Cancel
          </Button>
        )}
      </div>
    </div>
  </Modal>
);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user