# Changelog (from last commit):
# - Added primary_keyword, secondary_keywords, tags, and categories fields to Tasks model
# - Updated generate_content function to handle full JSON response with all SEO fields
# - Improved progress bar animation: smooth 1% increments every 300ms
# - Enhanced step detection for content generation vs clustering vs ideas
# - Fixed progress modal to show correct messages for each function type
# - Added comprehensive logging to Keywords and Tasks pages for AI functions
# - Fixed error handling to show meaningful error messages instead of generic failures
"""
|
|
AI Processor wrapper for the framework
|
|
DEPRECATED: Use AICore.run_ai_request() instead for all new code.
|
|
This file is kept for backward compatibility only.
|
|
"""
|
|
import warnings
from typing import Any, Dict, List, Optional

from igny8_core.ai.ai_core import AICore
from igny8_core.utils.ai_processor import AIProcessor as BaseAIProcessor


class AIProcessor:
    """
    Framework-compatible wrapper around existing AIProcessor.

    DEPRECATED: Use AICore.run_ai_request() instead.

    This class redirects to AICore for consistency. It is kept only so
    existing call sites continue to work; constructing it emits a
    DeprecationWarning to steer new code toward AICore directly.
    """

    def __init__(self, account=None):
        """
        Build the wrapper around an internal AICore instance.

        Args:
            account: Optional account object forwarded to both AICore and
                the legacy BaseAIProcessor.
        """
        # Surface the deprecation at construction time so callers have a
        # runtime signal to migrate (the docstring alone is easy to miss).
        warnings.warn(
            "AIProcessor is deprecated; use AICore.run_ai_request() instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Use AICore internally for all requests
        self.ai_core = AICore(account=account)
        self.account = account
        # Keep old processor for backward compatibility only
        self.processor = BaseAIProcessor(account=account)

    def call(
        self,
        prompt: str,
        model: Optional[str] = None,
        max_tokens: int = 4000,
        temperature: float = 0.7,
        response_format: Optional[Dict] = None,
        response_steps: Optional[List] = None,
        progress_callback=None,
    ) -> Dict[str, Any]:
        """
        Call AI provider with prompt.

        DEPRECATED: Use AICore.run_ai_request() instead.

        Args:
            prompt: Prompt text sent to the provider.
            model: Optional model identifier; AICore picks a default if None.
            max_tokens: Maximum tokens for the response.
            temperature: Sampling temperature.
            response_format: Optional provider response-format specification.
            response_steps: Accepted for backward compatibility only —
                NOT forwarded to AICore and has no effect.
            progress_callback: Accepted for backward compatibility only —
                NOT forwarded to AICore and has no effect.

        Returns:
            Dict with 'content', 'error', 'input_tokens', 'output_tokens',
            'total_tokens', 'model', 'cost', 'api_id'
        """
        # Redirect to AICore for centralized execution.
        # NOTE: response_steps and progress_callback are intentionally
        # dropped here; the AICore path does not support them.
        return self.ai_core.run_ai_request(
            prompt=prompt,
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            response_format=response_format,
            function_name='AIProcessor.call',
        )

    def extract_json(self, response_text: str) -> Optional[Dict]:
        """Extract JSON from response text (delegates to AICore)."""
        return self.ai_core.extract_json(response_text)

    def generate_image(
        self,
        prompt: str,
        model: str = 'dall-e-3',
        size: str = '1024x1024',
        n: int = 1,
        account=None,
    ) -> Dict[str, Any]:
        """
        Generate image using AI.

        Args:
            prompt: Image description prompt.
            model: Image model name (default 'dall-e-3').
            size: Image dimensions as 'WxH'.
            n: Number of images to generate.
            account: Optional per-call account override; falls back to the
                account given at construction time.

        Returns:
            Dict result from AICore.generate_image.
        """
        return self.ai_core.generate_image(
            prompt=prompt,
            provider='openai',
            model=model,
            size=size,
            n=n,
            account=account or self.account,
            function_name='AIProcessor.generate_image',
        )