testing prompts

This commit is contained in:
IGNY8 VPS (Salman)
2025-12-17 00:09:07 +00:00
parent 84fd4bc11a
commit 8f97666522
11 changed files with 1149 additions and 3 deletions

118
test_idea_generation.py Normal file
View File

@@ -0,0 +1,118 @@
#!/usr/bin/env python
"""
Test idea generation with 16384 max_tokens to ensure it works correctly.

Loads the active OpenAI integration settings for the 'aws-admin' account,
reads the production idea-generation prompt template from disk, then (below)
fires a single chat-completion request to verify the configured max_tokens
is sufficient.
"""
import os, sys, django
sys.path.insert(0, '/app')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()
import requests, json
from igny8_core.modules.system.models import IntegrationSettings
from igny8_core.auth.models import Account

# Fail fast with a clear message instead of an AttributeError on None
# when the account or its active OpenAI integration row is missing.
account = Account.objects.filter(slug='aws-admin').first()
if account is None:
    sys.exit("✗ Account 'aws-admin' not found")
settings = IntegrationSettings.objects.filter(integration_type='openai', account=account, is_active=True).first()
if settings is None:
    sys.exit("✗ No active OpenAI IntegrationSettings for account 'aws-admin'")
config = settings.config or {}
api_key = config.get('apiKey')
if not api_key:
    sys.exit("✗ OpenAI integration has no 'apiKey' configured")
model = config.get('model', 'gpt-4o')
max_tokens = config.get('max_tokens', 16384)

# Read the actual idea generation prompt used in production.
with open('/data/idea-generation-prompt.md', 'r') as f:
    prompt_template = f.read()
# Simulated idea-generation input: one sample cluster plus its keyword list.
cluster_data = """
Cluster ID: 1 | Name: Organic Cotton Bedding | Description: Keywords related to organic and eco-friendly bedding products
"""
cluster_keywords = """
Cluster ID: 1 | Name: Organic Cotton Bedding | Keywords: organic cotton sheets, eco-friendly bedding, sustainable duvet covers, GOTS certified bedding, organic mattress protector, chemical-free bedding, organic pillowcases, hypoallergenic sheets
"""

# Fill both template placeholders in one chained expression.
prompt = (
    prompt_template
    .replace('[IGNY8_CLUSTERS]', cluster_data)
    .replace('[IGNY8_CLUSTER_KEYWORDS]', cluster_keywords)
)

# Banner: configured budget plus a rough characters/4 token estimate.
approx_prompt_tokens = len(prompt) // 4
print(f"🧪 Testing Idea Generation with max_tokens={max_tokens:,}")
print("=" * 70)
print(f"Prompt length: {len(prompt):,} characters (~{approx_prompt_tokens:,} tokens)")
print()
try:
    # Single chat-completion call using the production model/config.
    response = requests.post(
        'https://api.openai.com/v1/chat/completions',
        headers={'Authorization': f'Bearer {api_key}', 'Content-Type': 'application/json'},
        json={
            'model': model,
            'messages': [{'role': 'user', 'content': prompt}],
            'max_tokens': max_tokens,
            'temperature': 0.7,
            # Forces the model to emit a single valid JSON object.
            'response_format': {"type": "json_object"}
        },
        timeout=60
    )
    if response.status_code == 200:
        data = response.json()
        usage = data.get('usage', {})
        finish_reason = data['choices'][0].get('finish_reason')
        prompt_tokens = usage.get('prompt_tokens', 0)
        completion_tokens = usage.get('completion_tokens', 0)
        total_tokens = usage.get('total_tokens', 0)
        print("✓ Response received")
        print(f" Input Tokens: {prompt_tokens:>6,}")
        print(f" Output Tokens: {completion_tokens:>6,}")
        print(f" Total Tokens: {total_tokens:>6,}")
        print(f" Finish Reason: {finish_reason}")
        print()
        # Try to parse the model's JSON payload and summarize the first idea.
        try:
            response_text = data['choices'][0]['message']['content']
            ideas_data = json.loads(response_text)
            ideas_count = len(ideas_data.get('ideas', []))
            print(f"✅ Generated {ideas_count} ideas successfully")
            if ideas_count > 0:
                first_idea = ideas_data['ideas'][0]
                print()
                print("Sample Idea:")
                print(f" Title: {first_idea.get('title', 'N/A')}")
                desc = first_idea.get('description', {})
                if isinstance(desc, dict) and 'H2' in desc:
                    h2_count = len(desc['H2'])
                    print(f" H2 Sections: {h2_count}")
                print(f" Keywords: {first_idea.get('covered_keywords', 'N/A')[:60]}...")
        except json.JSONDecodeError:
            print("⚠️ Could not parse JSON response")
        print()
        # finish_reason == 'length' means the completion hit max_tokens.
        if finish_reason == 'length':
            print("🚨 WARNING: Response was TRUNCATED!")
            print(" Consider increasing max_tokens further")
        else:
            print("✅ Response completed successfully")
            # Thousands separator on both numbers for consistency with the
            # token prints above (original mixed formatted/unformatted).
            print(f" Used {completion_tokens:,} of {max_tokens:,} available tokens")
            print(f" Headroom: {((max_tokens - completion_tokens) / max_tokens * 100):.1f}%")
    else:
        print(f"✗ API Error: {response.status_code}")
        # Error bodies are not guaranteed to be JSON (e.g. an HTML page from
        # a proxy/load balancer); requests raises a ValueError subclass then.
        try:
            error_message = response.json().get('error', {}).get('message', '')
        except ValueError:
            error_message = response.text
        print(error_message[:200])
except Exception as e:
    print(f"✗ Exception: {str(e)}")
# Closing summary banner (single write; stdout is identical to the
# original per-line prints).
divider = "=" * 70
summary_lines = [
    "",
    divider,
    "CONCLUSION:",
    divider,
    "Idea generation typically uses 1500-2500 tokens for 3 ideas with outlines.",
    f"Current max_tokens={max_tokens:,} provides plenty of headroom.",
    "✅ Configuration is optimal for idea generation.",
]
print("\n".join(summary_lines))