"""
AI Processor wrapper for the framework

Reuses existing AIProcessor but provides framework-compatible interface
"""

from typing import Dict, Any, Optional, List

from igny8_core.utils.ai_processor import AIProcessor as BaseAIProcessor


class AIProcessor:
    """
    Framework-compatible wrapper around the existing AIProcessor.

    Delegates all work to ``igny8_core.utils.ai_processor.AIProcessor`` while
    providing a consistent interface (``call``, ``extract_json``,
    ``generate_image``) for all AI functions in the framework.
    """

    # Models known to accept OpenAI JSON mode (response_format={'type': 'json_object'}).
    # Class-level frozenset: built once instead of rebuilding a list on every call(),
    # with O(1) membership testing.
    JSON_MODE_MODELS = frozenset({'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview'})

    def __init__(self, account=None):
        """
        Args:
            account: Optional account object forwarded to the underlying
                processor; also kept as the default account for
                ``generate_image()``.
        """
        self.processor = BaseAIProcessor(account=account)
        self.account = account

    def call(
        self,
        prompt: str,
        model: Optional[str] = None,
        max_tokens: int = 4000,
        temperature: float = 0.7,
        response_format: Optional[Dict] = None,
        response_steps: Optional[List] = None,
        progress_callback=None
    ) -> Dict[str, Any]:
        """
        Call the AI provider with a prompt.

        Args:
            prompt: Prompt text to send.
            model: Model name; falls back to the wrapped processor's
                ``default_model`` when omitted.
            max_tokens: Maximum tokens to generate.
            temperature: Sampling temperature.
            response_format: Optional response-format dict. When omitted and
                the active model supports JSON mode, defaults to
                ``{'type': 'json_object'}``.
            response_steps: Accepted for interface compatibility but NOT
                forwarded — the new framework handles all step tracking at
                the engine level.
            progress_callback: Accepted for interface compatibility; unused here.

        Returns:
            Dict with 'content', 'error', 'input_tokens', 'output_tokens',
            'total_tokens', 'model', 'cost', 'api_id'.
        """
        # Use the explicitly requested model, or fall back to the account's default.
        active_model = model or self.processor.default_model

        # Default to JSON mode only when the caller did not specify a format
        # and the model is known to support it.
        if response_format is None and active_model in self.JSON_MODE_MODELS:
            response_format = {'type': 'json_object'}

        # Don't pass response_steps to the old processor — the new framework
        # handles all step tracking at the engine level.
        return self.processor._call_openai(
            prompt,
            model=active_model,
            max_tokens=max_tokens,
            temperature=temperature,
            response_format=response_format,
            response_steps=None  # Disable old processor's step tracking
        )

    def extract_json(self, response_text: str) -> Optional[Dict]:
        """Extract JSON from response text.

        NOTE(review): delegates to a private helper on the wrapped processor;
        exact failure semantics (None vs. raise) depend on that implementation.
        """
        return self.processor._extract_json_from_response(response_text)

    def generate_image(
        self,
        prompt: str,
        model: str = 'dall-e-3',
        size: str = '1024x1024',
        n: int = 1,
        account=None
    ) -> Dict[str, Any]:
        """
        Generate image(s) using AI.

        Args:
            prompt: Image description.
            model: Image model name.
            size: Output resolution string, e.g. '1024x1024'.
            n: Number of images to generate.
            account: Optional per-call account override; defaults to the
                account supplied at construction time.

        Returns:
            Dict describing the generated image(s), as returned by the
            underlying processor.
        """
        return self.processor.generate_image(
            prompt=prompt,
            model=model,
            size=size,
            n=n,
            account=account or self.account
        )