Fix AI token limit: standardize max_tokens default to 8192
@@ -853,10 +853,10 @@ class AICore:
         return 0.0

     # Legacy method names for backward compatibility
-    def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 4000,
+    def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 8192,
                     temperature: float = 0.7, response_format: Optional[Dict] = None,
                     api_key: Optional[str] = None) -> Dict[str, Any]:
-        """Legacy method - redirects to run_ai_request()"""
+        """DEPRECATED: Legacy method - redirects to run_ai_request(). Use run_ai_request() directly."""
         return self.run_ai_request(
             prompt=prompt,
             model=model,
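For context, a minimal sketch of the legacy-wrapper pattern this hunk touches, assuming the surrounding class shape; the runtime warnings.warn() call and the stub body of run_ai_request are illustrative additions, not part of the commit, which only changes the default and the docstring:

    import warnings
    from typing import Any, Dict, Optional

    class AICore:
        def run_ai_request(self, prompt: str, model: Optional[str] = None,
                           max_tokens: int = 8192, temperature: float = 0.7,
                           response_format: Optional[Dict] = None,
                           api_key: Optional[str] = None) -> Dict[str, Any]:
            # Stub for illustration: the real method dispatches to the provider API.
            return {}

        def call_openai(self, prompt: str, model: Optional[str] = None,
                        max_tokens: int = 8192, temperature: float = 0.7,
                        response_format: Optional[Dict] = None,
                        api_key: Optional[str] = None) -> Dict[str, Any]:
            """DEPRECATED: Legacy method - redirects to run_ai_request()."""
            # Hypothetical addition: surface the deprecation at runtime,
            # not only in the docstring.
            warnings.warn("call_openai() is deprecated; use run_ai_request()",
                          DeprecationWarning, stacklevel=2)
            return self.run_ai_request(prompt=prompt, model=model,
                                       max_tokens=max_tokens, temperature=temperature,
                                       response_format=response_format, api_key=api_key)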
@@ -99,8 +99,8 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
         # MODEL_RATES not available - skip validation
         pass

-    # Get max_tokens and temperature from config (with reasonable defaults for API)
-    max_tokens = config.get('max_tokens', 16384)  # Maximum for long-form content generation (2000-3000 words)
+    # Get max_tokens and temperature from config (standardized to 8192)
+    max_tokens = config.get('max_tokens', 8192)  # Standardized across entire codebase
     temperature = config.get('temperature', 0.7)  # Reasonable default

     # Build response format based on model (JSON mode for supported models)
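A small sketch of the default-resolution behavior after this hunk, assuming config is a plain per-function dict; the helper name resolve_generation_params is hypothetical, chosen only to isolate the two lines the diff changes:

    def resolve_generation_params(config: dict) -> tuple[int, float]:
        # A missing key now falls back to the codebase-wide 8192 cap
        # (previously 16384 for long-form generation); temperature keeps 0.7.
        max_tokens = config.get('max_tokens', 8192)
        temperature = config.get('temperature', 0.7)
        return max_tokens, temperature

    assert resolve_generation_params({}) == (8192, 0.7)
    assert resolve_generation_params({'max_tokens': 2048, 'temperature': 0.2}) == (2048, 0.2)

Note that an explicit max_tokens in config still wins; only the fallback default changes.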