Add support for GPT-5.1 and GPT-5.2: update token limits and pricing
This commit is contained in:
@@ -174,8 +174,12 @@ class AIProcessor:
         }
 
         # Add max_tokens if specified (reference plugin uses 4000 default)
+        # GPT-5.1 and GPT-5.2 use max_completion_tokens instead of max_tokens
         if max_tokens:
-            body_data['max_tokens'] = max_tokens
+            if model in ['gpt-5.1', 'gpt-5.2']:
+                body_data['max_completion_tokens'] = max_tokens
+            else:
+                body_data['max_tokens'] = max_tokens
 
         # Add response_format if specified (for JSON mode)
         if response_format:
@@ -1131,15 +1135,17 @@ Make sure each prompt is detailed enough for image generation, describing the vi
         # Call OpenAI with JSON response format - Steps 8-10 happen in _call_openai
         # Use the active model from integration settings (self.default_model)
         # Check if model supports JSON mode
-        json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
         active_model = self.default_model  # This is the active model from IntegrationSettings
-        response_format = {'type': 'json_object'} if active_model in json_models else None
+        response_format = {'type': 'json_object'} if active_model in JSON_MODE_MODELS else None
+
+        # GPT-5.1 and GPT-5.2 use 16384 max_tokens
+        max_tokens = 16384 if active_model in ['gpt-5.1', 'gpt-5.2'] else 8192
 
         try:
             result = self._call_openai(
                 prompt,
                 model=active_model,  # Explicitly pass to ensure consistency
-                max_tokens=8192,
+                max_tokens=max_tokens,
                 temperature=0.7,
                 response_format=response_format,
                 response_steps=response_steps
@@ -1339,14 +1345,16 @@ Make sure each prompt is detailed enough for image generation, describing the vi
 
         # Use the active model from integration settings (self.default_model)
         # Check if model supports JSON mode
-        json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
         active_model = self.default_model  # This is the active model from IntegrationSettings
-        response_format = {'type': 'json_object'} if active_model in json_models else None
+        response_format = {'type': 'json_object'} if active_model in JSON_MODE_MODELS else None
+
+        # GPT-5.1 and GPT-5.2 use 16384 max_tokens
+        max_tokens = 16384 if active_model in ['gpt-5.1', 'gpt-5.2'] else 8192
 
         result = self._call_openai(
             prompt,
             model=active_model,
-            max_tokens=8192,
+            max_tokens=max_tokens,
             temperature=0.7,
             response_format=response_format
         )
 
Reference in New Issue
Block a user