Stage 4
This commit is contained in:
@@ -25,6 +25,14 @@ from igny8_core.ai.constants import (
|
||||
DEFAULT_AI_MODEL,
|
||||
JSON_MODE_MODELS,
|
||||
)
|
||||
from igny8_core.ai.prompts import PromptRegistry, get_prompt
|
||||
from igny8_core.ai.settings import (
|
||||
MODEL_CONFIG,
|
||||
get_model_config,
|
||||
get_model,
|
||||
get_max_tokens,
|
||||
get_temperature,
|
||||
)
|
||||
|
||||
# Don't auto-import functions here - let apps.py handle it lazily
|
||||
# This prevents circular import issues during Django startup
|
||||
@@ -52,5 +60,14 @@ __all__ = [
|
||||
'VALID_SIZES_BY_MODEL',
|
||||
'DEFAULT_AI_MODEL',
|
||||
'JSON_MODE_MODELS',
|
||||
# Prompts
|
||||
'PromptRegistry',
|
||||
'get_prompt',
|
||||
# Settings
|
||||
'MODEL_CONFIG',
|
||||
'get_model_config',
|
||||
'get_model',
|
||||
'get_max_tokens',
|
||||
'get_temperature',
|
||||
]
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ from typing import Dict, Any, Optional
|
||||
from igny8_core.ai.base import BaseAIFunction
|
||||
from igny8_core.ai.tracker import StepTracker, ProgressTracker, CostTracker
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -62,7 +63,11 @@ class AIEngine:
|
||||
|
||||
# Phase 3: AI_CALL - Provider API Call (25-70%)
|
||||
ai_core = AICore(account=self.account)
|
||||
model = fn.get_model(self.account)
|
||||
function_name = fn.get_name()
|
||||
|
||||
# Get model config from settings
|
||||
model_config = get_model_config(function_name)
|
||||
model = model_config.get('model')
|
||||
|
||||
# Track AI call start
|
||||
self.step_tracker.add_response_step("AI_CALL", "success", f"Calling {model or 'default'} model...")
|
||||
@@ -73,9 +78,10 @@ class AIEngine:
|
||||
raw_response = ai_core.run_ai_request(
|
||||
prompt=prompt,
|
||||
model=model,
|
||||
max_tokens=4000,
|
||||
temperature=0.7,
|
||||
function_name=fn.get_name()
|
||||
max_tokens=model_config.get('max_tokens'),
|
||||
temperature=model_config.get('temperature'),
|
||||
response_format=model_config.get('response_format'),
|
||||
function_name=function_name
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = f"AI call failed: {str(e)}"
|
||||
|
||||
@@ -6,8 +6,9 @@ from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.base import BaseAIFunction
|
||||
from igny8_core.modules.planner.models import Keywords, Clusters
|
||||
from igny8_core.modules.system.utils import get_prompt_value
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -90,20 +91,18 @@ class AutoClusterFunction(BaseAIFunction):
|
||||
}
|
||||
|
||||
def build_prompt(self, data: Dict, account=None) -> str:
|
||||
"""Build clustering prompt"""
|
||||
"""Build clustering prompt using registry"""
|
||||
keyword_data = data['keyword_data']
|
||||
sector_id = data.get('sector_id')
|
||||
|
||||
# Get prompt template
|
||||
prompt_template = get_prompt_value(account, 'clustering')
|
||||
|
||||
# Format keywords
|
||||
keywords_text = '\n'.join([
|
||||
f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']}, Intent: {kw['intent']})"
|
||||
for kw in keyword_data
|
||||
])
|
||||
|
||||
prompt = prompt_template.replace('[IGNY8_KEYWORDS]', keywords_text)
|
||||
# Build context
|
||||
context = {'KEYWORDS': keywords_text}
|
||||
|
||||
# Add sector context if available
|
||||
if sector_id:
|
||||
@@ -111,14 +110,20 @@ class AutoClusterFunction(BaseAIFunction):
|
||||
from igny8_core.auth.models import Sector
|
||||
sector = Sector.objects.get(id=sector_id)
|
||||
if sector:
|
||||
prompt += f"\n\nNote: These keywords are for the '{sector.name}' sector."
|
||||
context['SECTOR'] = sector.name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Get prompt from registry
|
||||
prompt = PromptRegistry.get_prompt(
|
||||
function_name='auto_cluster',
|
||||
account=account,
|
||||
context=context
|
||||
)
|
||||
|
||||
# IMPORTANT: When using JSON mode, OpenAI requires explicit JSON instruction
|
||||
# The prompt template already includes "Format the output as a JSON object"
|
||||
# but we need to ensure it's explicit for JSON mode compliance
|
||||
# Check if prompt already explicitly requests JSON (case-insensitive)
|
||||
prompt_lower = prompt.lower()
|
||||
has_json_request = (
|
||||
'json' in prompt_lower and
|
||||
|
||||
@@ -8,9 +8,10 @@ from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.base import BaseAIFunction
|
||||
from igny8_core.modules.writer.models import Tasks
|
||||
from igny8_core.modules.system.utils import get_prompt_value, get_default_prompt
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.validators import validate_tasks_exist
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -72,7 +73,7 @@ class GenerateContentFunction(BaseAIFunction):
|
||||
return tasks
|
||||
|
||||
def build_prompt(self, data: Any, account=None) -> str:
|
||||
"""Build content generation prompt for a single task"""
|
||||
"""Build content generation prompt for a single task using registry"""
|
||||
if isinstance(data, list):
|
||||
# For now, handle single task (will be called per task)
|
||||
if not data:
|
||||
@@ -81,10 +82,7 @@ class GenerateContentFunction(BaseAIFunction):
|
||||
else:
|
||||
task = data
|
||||
|
||||
# Get prompt template
|
||||
prompt_template = get_prompt_value(account or task.account, 'content_generation')
|
||||
if not prompt_template:
|
||||
prompt_template = get_default_prompt('content_generation')
|
||||
account = account or task.account
|
||||
|
||||
# Build idea data string
|
||||
idea_data = f"Title: {task.title or 'Untitled'}\n"
|
||||
@@ -132,10 +130,17 @@ class GenerateContentFunction(BaseAIFunction):
|
||||
if not keywords_data and task.idea:
|
||||
keywords_data = task.idea.target_keywords or ''
|
||||
|
||||
# Replace placeholders
|
||||
prompt = prompt_template.replace('[IGNY8_IDEA]', idea_data)
|
||||
prompt = prompt.replace('[IGNY8_CLUSTER]', cluster_data)
|
||||
prompt = prompt.replace('[IGNY8_KEYWORDS]', keywords_data)
|
||||
# Get prompt from registry with context
|
||||
prompt = PromptRegistry.get_prompt(
|
||||
function_name='generate_content',
|
||||
account=account,
|
||||
task=task,
|
||||
context={
|
||||
'IDEA': idea_data,
|
||||
'CLUSTER': cluster_data,
|
||||
'KEYWORDS': keywords_data,
|
||||
}
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
@@ -228,11 +233,17 @@ def generate_content_core(task_ids: List[int], account_id: int = None, progress_
|
||||
# Build prompt for this task
|
||||
prompt = fn.build_prompt([task], account)
|
||||
|
||||
# Get model config from settings
|
||||
model_config = get_model_config('generate_content')
|
||||
|
||||
# Call AI using centralized request handler
|
||||
ai_core = AICore(account=account)
|
||||
result = ai_core.run_ai_request(
|
||||
prompt=prompt,
|
||||
max_tokens=4000,
|
||||
model=model_config.get('model'),
|
||||
max_tokens=model_config.get('max_tokens'),
|
||||
temperature=model_config.get('temperature'),
|
||||
response_format=model_config.get('response_format'),
|
||||
function_name='generate_content'
|
||||
)
|
||||
|
||||
|
||||
@@ -8,10 +8,11 @@ from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.base import BaseAIFunction
|
||||
from igny8_core.modules.planner.models import Clusters, ContentIdeas
|
||||
from igny8_core.modules.system.utils import get_prompt_value
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.validators import validate_cluster_exists, validate_cluster_limits
|
||||
from igny8_core.ai.tracker import ConsoleStepTracker
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -96,11 +97,9 @@ class GenerateIdeasFunction(BaseAIFunction):
|
||||
}
|
||||
|
||||
def build_prompt(self, data: Dict, account=None) -> str:
|
||||
"""Build ideas generation prompt"""
|
||||
"""Build ideas generation prompt using registry"""
|
||||
cluster_data = data['cluster_data']
|
||||
|
||||
# Get prompt template
|
||||
prompt_template = get_prompt_value(account or data.get('account'), 'ideas')
|
||||
account = account or data.get('account')
|
||||
|
||||
# Format clusters text
|
||||
clusters_text = '\n'.join([
|
||||
@@ -114,9 +113,15 @@ class GenerateIdeasFunction(BaseAIFunction):
|
||||
for c in cluster_data
|
||||
])
|
||||
|
||||
# Replace placeholders
|
||||
prompt = prompt_template.replace('[IGNY8_CLUSTERS]', clusters_text)
|
||||
prompt = prompt.replace('[IGNY8_CLUSTER_KEYWORDS]', cluster_keywords_text)
|
||||
# Get prompt from registry with context
|
||||
prompt = PromptRegistry.get_prompt(
|
||||
function_name='generate_ideas',
|
||||
account=account,
|
||||
context={
|
||||
'CLUSTERS': clusters_text,
|
||||
'CLUSTER_KEYWORDS': cluster_keywords_text,
|
||||
}
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
@@ -231,11 +236,17 @@ def generate_ideas_core(cluster_id: int, account_id: int = None, progress_callba
|
||||
tracker.prep("Building prompt...")
|
||||
prompt = fn.build_prompt(data, account)
|
||||
|
||||
# Get model config from settings
|
||||
model_config = get_model_config('generate_ideas')
|
||||
|
||||
# Call AI using centralized request handler
|
||||
ai_core = AICore(account=account)
|
||||
result = ai_core.run_ai_request(
|
||||
prompt=prompt,
|
||||
max_tokens=4000,
|
||||
model=model_config.get('model'),
|
||||
max_tokens=model_config.get('max_tokens'),
|
||||
temperature=model_config.get('temperature'),
|
||||
response_format=model_config.get('response_format'),
|
||||
function_name='generate_ideas',
|
||||
tracker=tracker
|
||||
)
|
||||
|
||||
@@ -7,9 +7,10 @@ from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.base import BaseAIFunction
|
||||
from igny8_core.modules.writer.models import Tasks, Images
|
||||
from igny8_core.modules.system.utils import get_prompt_value, get_default_prompt
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.validators import validate_tasks_exist
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -107,23 +108,29 @@ class GenerateImagesFunction(BaseAIFunction):
|
||||
|
||||
# Use AI to extract image prompts
|
||||
ai_core = AICore(account=account or data.get('account'))
|
||||
account_obj = account or data.get('account')
|
||||
|
||||
# Get prompt template
|
||||
prompt_template = get_prompt_value(account or data.get('account'), 'image_prompt_extraction')
|
||||
if not prompt_template:
|
||||
prompt_template = get_default_prompt('image_prompt_extraction')
|
||||
|
||||
# Format prompt
|
||||
prompt = prompt_template.format(
|
||||
title=task.title,
|
||||
content=task.content[:5000], # Limit content length
|
||||
max_images=max_images
|
||||
# Get prompt from registry
|
||||
prompt = PromptRegistry.get_prompt(
|
||||
function_name='extract_image_prompts',
|
||||
account=account_obj,
|
||||
context={
|
||||
'title': task.title,
|
||||
'content': task.content[:5000], # Limit content length
|
||||
'max_images': max_images
|
||||
}
|
||||
)
|
||||
|
||||
# Get model config
|
||||
model_config = get_model_config('extract_image_prompts')
|
||||
|
||||
# Call AI to extract prompts using centralized request handler
|
||||
result = ai_core.run_ai_request(
|
||||
prompt=prompt,
|
||||
max_tokens=1000,
|
||||
model=model_config.get('model'),
|
||||
max_tokens=model_config.get('max_tokens'),
|
||||
temperature=model_config.get('temperature'),
|
||||
response_format=model_config.get('response_format'),
|
||||
function_name='extract_image_prompts'
|
||||
)
|
||||
|
||||
@@ -214,14 +221,9 @@ def generate_images_core(task_ids: List[int], account_id: int = None, progress_c
|
||||
data = fn.prepare(payload, account)
|
||||
tasks = data['tasks']
|
||||
|
||||
# Get prompts
|
||||
image_prompt_template = get_prompt_value(account, 'image_prompt_template')
|
||||
if not image_prompt_template:
|
||||
image_prompt_template = get_default_prompt('image_prompt_template')
|
||||
|
||||
negative_prompt = get_prompt_value(account, 'negative_prompt')
|
||||
if not negative_prompt:
|
||||
negative_prompt = get_default_prompt('negative_prompt')
|
||||
# Get prompts from registry
|
||||
image_prompt_template = PromptRegistry.get_image_prompt_template(account)
|
||||
negative_prompt = PromptRegistry.get_negative_prompt(account)
|
||||
|
||||
ai_core = AICore(account=account)
|
||||
images_created = 0
|
||||
|
||||
270
backend/igny8_core/ai/prompts.py
Normal file
270
backend/igny8_core/ai/prompts.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""
|
||||
Prompt Registry - Centralized prompt management with override hierarchy
|
||||
Supports: task-level overrides → DB prompts → default fallbacks
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
from django.db import models
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PromptRegistry:
|
||||
"""
|
||||
Centralized prompt registry with hierarchical resolution:
|
||||
1. Task-level prompt_override (if exists)
|
||||
2. DB prompt for (account, function)
|
||||
3. Default fallback from registry
|
||||
"""
|
||||
|
||||
# Default prompts stored in registry
|
||||
DEFAULT_PROMPTS = {
|
||||
'clustering': """Analyze the following keywords and group them into topic clusters.
|
||||
|
||||
Each cluster should include:
|
||||
- "name": A clear, descriptive topic name
|
||||
- "description": A brief explanation of what the cluster covers
|
||||
- "keywords": A list of related keywords that belong to this cluster
|
||||
|
||||
Format the output as a JSON object with a "clusters" array.
|
||||
|
||||
IMPORTANT: In the "keywords" array, you MUST use the EXACT keyword strings from the input list below. Do not modify, paraphrase, or create variations of the keywords. Only use the exact keywords as they appear in the input list.
|
||||
|
||||
Clustering rules:
|
||||
- Group keywords based on strong semantic or topical relationships (intent, use-case, function, audience, etc.)
|
||||
- Clusters should reflect how people actually search — problem ➝ solution, general ➝ specific, product ➝ benefit, etc.
|
||||
- Avoid grouping keywords just because they share similar words — focus on meaning
|
||||
- Include 3–10 keywords per cluster where appropriate
|
||||
- Skip unrelated or outlier keywords that don't fit a clear theme
|
||||
- CRITICAL: Only return keywords that exactly match the input keywords (case-insensitive matching is acceptable)
|
||||
|
||||
Keywords to process:
|
||||
[IGNY8_KEYWORDS]""",
|
||||
|
||||
'ideas': """Generate SEO-optimized, high-quality content ideas and detailed outlines for each of the following keyword clusters.
|
||||
|
||||
Clusters to analyze:
|
||||
[IGNY8_CLUSTERS]
|
||||
|
||||
Keywords in each cluster:
|
||||
[IGNY8_CLUSTER_KEYWORDS]
|
||||
|
||||
Return your response as JSON with an "ideas" array.
|
||||
For each cluster, generate 1-3 content ideas.
|
||||
|
||||
Each idea must include:
|
||||
- "title": compelling blog/article title that naturally includes a primary keyword
|
||||
- "description": detailed content outline with H2/H3 structure (as plain text or structured JSON)
|
||||
- "content_type": the type of content (blog_post, article, guide, tutorial)
|
||||
- "content_structure": the editorial structure (cluster_hub, landing_page, pillar_page, supporting_page)
|
||||
- "estimated_word_count": estimated total word count (1500-2200 words)
|
||||
- "target_keywords": comma-separated list of keywords that will be covered (or "covered_keywords")
|
||||
- "cluster_name": name of the cluster this idea belongs to (REQUIRED)
|
||||
- "cluster_id": ID of the cluster this idea belongs to (REQUIRED - use the exact cluster ID from the input)
|
||||
|
||||
IMPORTANT: You MUST include the exact "cluster_id" from the cluster data provided. Match the cluster name to find the correct cluster_id.
|
||||
|
||||
Return only valid JSON with an "ideas" array.""",
|
||||
|
||||
'content_generation': """You are an editorial content strategist. Generate a complete blog post/article based on the provided content idea.
|
||||
|
||||
CONTENT IDEA DETAILS:
|
||||
[IGNY8_IDEA]
|
||||
|
||||
KEYWORD CLUSTER:
|
||||
[IGNY8_CLUSTER]
|
||||
|
||||
ASSOCIATED KEYWORDS:
|
||||
[IGNY8_KEYWORDS]
|
||||
|
||||
Generate well-structured, SEO-optimized content with:
|
||||
- Engaging introduction
|
||||
- 5-8 H2 sections with H3 subsections
|
||||
- Natural keyword integration
|
||||
- 1500-2000 words total
|
||||
- Proper HTML formatting (h2, h3, p, ul, ol, table tags)
|
||||
|
||||
Return the content as plain text with HTML tags.""",
|
||||
|
||||
'image_prompt_extraction': """Extract image prompts from the following article content.
|
||||
|
||||
ARTICLE TITLE: {title}
|
||||
|
||||
ARTICLE CONTENT:
|
||||
{content}
|
||||
|
||||
Extract image prompts for:
|
||||
1. Featured Image: One main image that represents the article topic
|
||||
2. In-Article Images: Up to {max_images} images that would be useful within the article content
|
||||
|
||||
Return a JSON object with this structure:
|
||||
{{
|
||||
"featured_prompt": "Detailed description of the featured image",
|
||||
"in_article_prompts": [
|
||||
"Description of first in-article image",
|
||||
"Description of second in-article image",
|
||||
...
|
||||
]
|
||||
}}
|
||||
|
||||
Make sure each prompt is detailed enough for image generation, describing the visual elements, style, mood, and composition.""",
|
||||
|
||||
'image_prompt_template': 'Create a high-quality {image_type} image to use as a featured photo for a blog post titled "{post_title}". The image should visually represent the theme, mood, and subject implied by the image prompt: {image_prompt}. Focus on a realistic, well-composed scene that naturally communicates the topic without text or logos. Use balanced lighting, pleasing composition, and photographic detail suitable for lifestyle or editorial web content. Avoid adding any visible or readable text, brand names, or illustrative effects. **And make sure image is not blurry.**',
|
||||
|
||||
'negative_prompt': 'text, watermark, logo, overlay, title, caption, writing on walls, writing on objects, UI, infographic elements, post title',
|
||||
}
|
||||
|
||||
# Mapping from function names to prompt types
|
||||
FUNCTION_TO_PROMPT_TYPE = {
|
||||
'auto_cluster': 'clustering',
|
||||
'generate_ideas': 'ideas',
|
||||
'generate_content': 'content_generation',
|
||||
'generate_images': 'image_prompt_extraction',
|
||||
'extract_image_prompts': 'image_prompt_extraction',
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def get_prompt(
|
||||
cls,
|
||||
function_name: str,
|
||||
account: Optional[Any] = None,
|
||||
task: Optional[Any] = None,
|
||||
context: Optional[Dict[str, Any]] = None
|
||||
) -> str:
|
||||
"""
|
||||
Get prompt for a function with hierarchical resolution.
|
||||
|
||||
Priority:
|
||||
1. task.prompt_override (if task provided and has override)
|
||||
2. DB prompt for (account, function)
|
||||
3. Default fallback from registry
|
||||
|
||||
Args:
|
||||
function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')
|
||||
account: Account object (optional)
|
||||
task: Task object with optional prompt_override (optional)
|
||||
context: Additional context for prompt rendering (optional)
|
||||
|
||||
Returns:
|
||||
Prompt string ready for formatting
|
||||
"""
|
||||
# Step 1: Check task-level override
|
||||
if task and hasattr(task, 'prompt_override') and task.prompt_override:
|
||||
logger.info(f"Using task-level prompt override for {function_name}")
|
||||
prompt = task.prompt_override
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
|
||||
# Step 2: Get prompt type
|
||||
prompt_type = cls.FUNCTION_TO_PROMPT_TYPE.get(function_name, function_name)
|
||||
|
||||
# Step 3: Try DB prompt
|
||||
if account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import AIPrompt
|
||||
db_prompt = AIPrompt.objects.get(
|
||||
account=account,
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
logger.info(f"Using DB prompt for {function_name} (account {account.id})")
|
||||
prompt = db_prompt.prompt_value
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
except Exception as e:
|
||||
logger.debug(f"No DB prompt found for {function_name}: {e}")
|
||||
|
||||
# Step 4: Use default fallback
|
||||
prompt = cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
if not prompt:
|
||||
logger.warning(f"No default prompt found for {prompt_type}, using empty string")
|
||||
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
|
||||
@classmethod
|
||||
def _render_prompt(cls, prompt_template: str, context: Dict[str, Any]) -> str:
|
||||
"""
|
||||
Render prompt template with context variables.
|
||||
Supports both .format() style ({variable}) and placeholder replacement ([IGNY8_*]).
|
||||
|
||||
Args:
|
||||
prompt_template: Prompt template string
|
||||
context: Context variables for rendering
|
||||
|
||||
Returns:
|
||||
Rendered prompt string
|
||||
"""
|
||||
if not context:
|
||||
return prompt_template
|
||||
|
||||
# Normalize context keys - convert UPPER to lowercase for .format()
|
||||
normalized_context = {}
|
||||
for key, value in context.items():
|
||||
# Try both original key and lowercase version
|
||||
normalized_context[key] = value
|
||||
normalized_context[key.lower()] = value
|
||||
|
||||
# Try .format() style first (for {variable} placeholders)
|
||||
try:
|
||||
return prompt_template.format(**normalized_context)
|
||||
except (KeyError, ValueError):
|
||||
# Fall back to simple string replacement for [IGNY8_*] placeholders
|
||||
rendered = prompt_template
|
||||
for key, value in context.items():
|
||||
placeholder = f'[IGNY8_{key.upper()}]'
|
||||
if placeholder in rendered:
|
||||
rendered = rendered.replace(placeholder, str(value))
|
||||
return rendered
|
||||
|
||||
@classmethod
|
||||
def get_image_prompt_template(cls, account: Optional[Any] = None) -> str:
|
||||
"""
|
||||
Get image prompt template.
|
||||
Returns template string (not rendered) - caller should format with .format()
|
||||
"""
|
||||
prompt_type = 'image_prompt_template'
|
||||
|
||||
# Try DB prompt
|
||||
if account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import AIPrompt
|
||||
db_prompt = AIPrompt.objects.get(
|
||||
account=account,
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
return db_prompt.prompt_value
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Use default
|
||||
return cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
|
||||
@classmethod
|
||||
def get_negative_prompt(cls, account: Optional[Any] = None) -> str:
|
||||
"""
|
||||
Get negative prompt.
|
||||
Returns template string (not rendered).
|
||||
"""
|
||||
prompt_type = 'negative_prompt'
|
||||
|
||||
# Try DB prompt
|
||||
if account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import AIPrompt
|
||||
db_prompt = AIPrompt.objects.get(
|
||||
account=account,
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
return db_prompt.prompt_value
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Use default
|
||||
return cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
|
||||
|
||||
# Convenience function for backward compatibility
|
||||
def get_prompt(function_name: str, account=None, task=None, context=None) -> str:
    """Backward-compatible module-level shim around PromptRegistry.get_prompt."""
    return PromptRegistry.get_prompt(
        function_name,
        account=account,
        task=task,
        context=context,
    )
|
||||
|
||||
92
backend/igny8_core/ai/settings.py
Normal file
92
backend/igny8_core/ai/settings.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""
|
||||
AI Settings - Centralized model configurations and limits
|
||||
"""
|
||||
from typing import Dict, Any
|
||||
|
||||
# Model configurations for each AI function.
# Each entry maps a canonical function name to the OpenAI call parameters
# (model, max_tokens, temperature, response_format) consumed via
# get_model_config(); missing keys are filled with defaults there.
MODEL_CONFIG = {
    "auto_cluster": {
        "model": "gpt-4o-mini",
        "max_tokens": 3000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},  # Auto-enabled for JSON mode models
    },
    "generate_ideas": {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},
    },
    "generate_content": {
        "model": "gpt-4.1",
        # Larger budget: full articles run 1500-2000 words
        "max_tokens": 8000,
        "temperature": 0.7,
        "response_format": None,  # Text output
    },
    "generate_images": {
        # Image generation config - uses size/provider instead of token limits
        "model": "dall-e-3",
        "size": "1024x1024",
        "provider": "openai",
    },
    "extract_image_prompts": {
        "model": "gpt-4o-mini",
        "max_tokens": 1000,
        "temperature": 0.7,
        "response_format": {"type": "json_object"},
    },
}


# Function name aliases (for backward compatibility)
# Older call sites use these names; get_model_config() resolves them to the
# canonical MODEL_CONFIG keys above.
FUNCTION_ALIASES = {
    "cluster_keywords": "auto_cluster",
    "auto_cluster_keywords": "auto_cluster",
    "auto_generate_ideas": "generate_ideas",
    "auto_generate_content": "generate_content",
    "auto_generate_images": "generate_images",
}
|
||||
|
||||
|
||||
def get_model_config(function_name: str) -> Dict[str, Any]:
    """
    Get model configuration for an AI function.

    Resolves backward-compatibility aliases first, then overlays the
    function-specific entry from MODEL_CONFIG on top of sane defaults.

    Args:
        function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')

    Returns:
        Dict with model, max_tokens, temperature, response_format (and any
        extra per-function keys such as size/provider).
    """
    canonical_name = FUNCTION_ALIASES.get(function_name, function_name)
    overrides = MODEL_CONFIG.get(canonical_name, {})

    # Baseline values used for unknown functions or missing keys
    defaults = {
        "model": "gpt-4.1",
        "max_tokens": 4000,
        "temperature": 0.7,
        "response_format": None,
    }

    return {**defaults, **overrides}
|
||||
|
||||
|
||||
def get_model(function_name: str) -> str:
    """Return the configured model name for *function_name* (default gpt-4.1)."""
    return get_model_config(function_name).get("model", "gpt-4.1")
|
||||
|
||||
|
||||
def get_max_tokens(function_name: str) -> int:
    """Return the configured max_tokens for *function_name* (default 4000)."""
    return get_model_config(function_name).get("max_tokens", 4000)
|
||||
|
||||
|
||||
def get_temperature(function_name: str) -> float:
    """Return the configured sampling temperature for *function_name* (default 0.7)."""
    return get_model_config(function_name).get("temperature", 0.7)
|
||||
|
||||
Reference in New Issue
Block a user