Fix: standardize AI token limit to 8192
This commit is contained in:
@@ -853,10 +853,10 @@ class AICore:
|
||||
return 0.0
|
||||
|
||||
# Legacy method names for backward compatibility
|
||||
def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 4000,
|
||||
def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 8192,
|
||||
temperature: float = 0.7, response_format: Optional[Dict] = None,
|
||||
api_key: Optional[str] = None) -> Dict[str, Any]:
|
||||
"""Legacy method - redirects to run_ai_request()"""
|
||||
"""DEPRECATED: Legacy method - redirects to run_ai_request(). Use run_ai_request() directly."""
|
||||
return self.run_ai_request(
|
||||
prompt=prompt,
|
||||
model=model,
|
||||
|
||||
@@ -99,8 +99,8 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
# MODEL_RATES not available - skip validation
|
||||
pass
|
||||
|
||||
# Get max_tokens and temperature from config (with reasonable defaults for API)
|
||||
max_tokens = config.get('max_tokens', 16384) # Maximum for long-form content generation (2000-3000 words)
|
||||
# Get max_tokens and temperature from config (standardized to 8192)
|
||||
max_tokens = config.get('max_tokens', 8192) # Standardized across entire codebase
|
||||
temperature = config.get('temperature', 0.7) # Reasonable default
|
||||
|
||||
# Build response format based on model (JSON mode for supported models)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,9 +1,18 @@
|
||||
"""
|
||||
AI Processor - Unified AI interface for content generation, images, clustering
|
||||
Based on reference plugin's OpenAI integration (ai/openai-api.php)
|
||||
Matches exact endpoints and request formats from reference plugin.
|
||||
AI Processor - LEGACY - Use igny8_core.ai.engine.AIEngine instead
|
||||
|
||||
DEPRECATION WARNING: This module is deprecated and maintained only for:
|
||||
1. MODEL_RATES constant (imported by settings.py and integration_views.py)
|
||||
2. Integration test views
|
||||
|
||||
For all AI function execution, use the new AI framework:
|
||||
- igny8_core.ai.engine.AIEngine
|
||||
- igny8_core.ai.functions.*
|
||||
|
||||
This file will be removed in a future version after extracting MODEL_RATES to constants.py.
|
||||
"""
|
||||
import logging
|
||||
import warnings
|
||||
import json
|
||||
import re
|
||||
import requests
|
||||
@@ -434,7 +443,7 @@ class AIProcessor:
|
||||
self,
|
||||
prompt: str,
|
||||
model: Optional[str] = None,
|
||||
max_tokens: int = 4000,
|
||||
max_tokens: int = 8192,
|
||||
temperature: float = 0.7,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]:
|
||||
@@ -528,7 +537,7 @@ Make sure each prompt is detailed enough for image generation, describing the vi
|
||||
)
|
||||
|
||||
# Call OpenAI to extract prompts
|
||||
result = self.generate_content(prompt, max_tokens=1000, temperature=0.7)
|
||||
result = self.generate_content(prompt, max_tokens=8192, temperature=0.7)
|
||||
|
||||
if result.get('error'):
|
||||
return {'error': result['error']}
|
||||
@@ -1130,7 +1139,7 @@ Make sure each prompt is detailed enough for image generation, describing the vi
|
||||
result = self._call_openai(
|
||||
prompt,
|
||||
model=active_model, # Explicitly pass to ensure consistency
|
||||
max_tokens=3000,
|
||||
max_tokens=8192,
|
||||
temperature=0.7,
|
||||
response_format=response_format,
|
||||
response_steps=response_steps
|
||||
@@ -1337,7 +1346,7 @@ Make sure each prompt is detailed enough for image generation, describing the vi
|
||||
result = self._call_openai(
|
||||
prompt,
|
||||
model=active_model, # Explicitly pass to ensure consistency
|
||||
max_tokens=4000,
|
||||
max_tokens=8192,
|
||||
temperature=0.7,
|
||||
response_format=response_format
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user