Add support for GPT-5.1 and GPT-5.2: update token limits and pricing

This commit is contained in:
IGNY8 VPS (Salman)
2025-12-17 11:11:11 +00:00
parent e43f8553b6
commit 62e55389f9
7 changed files with 40 additions and 15 deletions

View File

@@ -99,8 +99,10 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
        # MODEL_RATES not available - skip validation
        pass
-    # Get max_tokens and temperature from config (standardized to 8192)
-    max_tokens = config.get('max_tokens', 8192)  # Standardized across entire codebase
+    # Get max_tokens and temperature from config (standardized to 8192, 16384 for GPT-5.x)
+    # GPT-5.1 and GPT-5.2 use 16384 max_tokens by default
+    default_max_tokens = 16384 if model in ['gpt-5.1', 'gpt-5.2'] else 8192
+    max_tokens = config.get('max_tokens', default_max_tokens)
     temperature = config.get('temperature', 0.7)  # Reasonable default
     # Build response format based on model (JSON mode for supported models)