Add SEO fields to Tasks model, improve content generation response handling, and enhance progress bar animation

- Added primary_keyword, secondary_keywords, tags, and categories fields to Tasks model
- Updated generate_content function to handle full JSON response with all SEO fields
- Improved progress bar animation: smooth 1% increments every 300ms
- Enhanced step detection for content generation vs clustering vs ideas
- Fixed progress modal to show correct messages for each function type
- Added comprehensive logging to Keywords and Tasks pages for AI functions
- Fixed error handling to show meaningful error messages instead of generic failures
This commit is contained in:
Gitea Deploy
2025-11-09 21:22:34 +00:00
parent 09d22ab0e2
commit 961362e088
17340 changed files with 10636 additions and 2248776 deletions

View File

@@ -1,20 +1,26 @@
"""
AI Processor wrapper for the framework
Reuses existing AIProcessor but provides framework-compatible interface
DEPRECATED: Use AICore.run_ai_request() instead for all new code.
This file is kept for backward compatibility only.
"""
from typing import Dict, Any, Optional, List
from igny8_core.utils.ai_processor import AIProcessor as BaseAIProcessor
from igny8_core.ai.ai_core import AICore
class AIProcessor:
"""
Framework-compatible wrapper around existing AIProcessor.
Provides consistent interface for all AI functions.
DEPRECATED: Use AICore.run_ai_request() instead.
This class redirects to AICore for consistency.
"""
def __init__(self, account=None):
    """Initialize the deprecated wrapper.

    Args:
        account: Optional account object forwarded to both the new
            AICore and the legacy BaseAIProcessor.
    """
    # Use AICore internally for all requests
    self.ai_core = AICore(account=account)
    self.account = account
    # Keep old processor for backward compatibility only.
    # NOTE(review): the original text assigned self.processor twice
    # (leftover of old + new lines in the diff); a single assignment
    # is kept here.
    self.processor = BaseAIProcessor(account=account)
def call(
self,
@@ -28,35 +34,25 @@ class AIProcessor:
) -> Dict[str, Any]:
"""
Call AI provider with prompt.
DEPRECATED: Use AICore.run_ai_request() instead.
Returns:
Dict with 'content', 'error', 'input_tokens', 'output_tokens',
'total_tokens', 'model', 'cost', 'api_id'
"""
# Use specified model or account's default
active_model = model or self.processor.default_model
# Check if model supports JSON mode
json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
if response_format is None and active_model in json_models:
response_format = {'type': 'json_object'}
# Call OpenAI - don't pass response_steps to old processor
# The new framework handles all step tracking at the engine level
result = self.processor._call_openai(
prompt,
model=active_model,
# Redirect to AICore for centralized execution
return self.ai_core.run_ai_request(
prompt=prompt,
model=model,
max_tokens=max_tokens,
temperature=temperature,
response_format=response_format,
response_steps=None # Disable old processor's step tracking
function_name='AIProcessor.call'
)
return result
def extract_json(self, response_text: str) -> Optional[Dict]:
    """Extract a JSON object from raw AI response text.

    Args:
        response_text: Raw text returned by the AI provider.

    Returns:
        Parsed dict if JSON could be extracted, otherwise None
        (per the Optional[Dict] annotation).
    """
    # The original text contained two consecutive returns (old
    # self.processor._extract_json_from_response followed by the new
    # delegation), making the second unreachable. Only the AICore
    # delegation is kept, consistent with the class's stated redirect
    # to AICore.
    return self.ai_core.extract_json(response_text)
def generate_image(
self,
@@ -67,11 +63,13 @@ class AIProcessor:
account=None
) -> Dict[str, Any]:
"""Generate image using AI"""
return self.processor.generate_image(
return self.ai_core.generate_image(
prompt=prompt,
provider='openai',
model=model,
size=size,
n=n,
account=account or self.account
account=account or self.account,
function_name='AIProcessor.generate_image'
)