testing prompts

This commit is contained in:
IGNY8 VPS (Salman)
2025-12-17 00:09:07 +00:00
parent 84fd4bc11a
commit 8f97666522
11 changed files with 1149 additions and 3 deletions

View File

@@ -113,7 +113,7 @@ class AICore:
self,
prompt: str,
model: str,
max_tokens: int = 4000,
max_tokens: int = 8192,
temperature: float = 0.7,
response_format: Optional[Dict] = None,
api_key: Optional[str] = None,

View File

@@ -100,7 +100,7 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
pass
# Get max_tokens and temperature from config (with reasonable defaults for API)
max_tokens = config.get('max_tokens', 4000) # Reasonable default for API limits
max_tokens = config.get('max_tokens', 16384) # Maximum for long-form content generation (2000-3000 words)
temperature = config.get('temperature', 0.7) # Reasonable default
# Build response format based on model (JSON mode for supported models)

View File

@@ -191,6 +191,9 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
# Get model from config or use default (reference plugin: get_option('igny8_model', 'gpt-4.1'))
model = (config or {}).get('model', 'gpt-4.1') if config else 'gpt-4.1'
# Get max_tokens from config or use default
max_tokens = (config or {}).get('max_tokens', 8192) if config else 8192
# Check if test with response is requested (reference plugin: $with_response parameter)
with_response = (config or {}).get('with_response', False) if config else False
@@ -201,9 +204,10 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
'messages': [
{
'role': 'user',
'content': 'test ping, reply with: OK! Ping Received. Also tell me: what is your maximum token limit that I can use in 1 request?'
'content': 'Reply with exactly: OK! Ping Received'
}
],
'max_tokens': max_tokens,
'temperature': 0.7,
}
@@ -239,6 +243,7 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
data={
'message': 'API connection and response test successful!',
'model_used': model,
'max_tokens_limit': max_tokens,
'response': response_text,
'tokens_used': f"{input_tokens} / {output_tokens}",
'total_tokens': total_tokens,