feat: Add Global Module Settings and Caption to Images

- Introduced GlobalModuleSettings model for platform-wide module enable/disable settings.
- Added 'caption' field to Images model to store image captions.
- Updated GenerateImagePromptsFunction to handle new caption structure in prompts.
- Enhanced AIPromptViewSet to return global prompt types and validate active prompts.
- Modified serializers and views to accommodate new caption field and global settings.
- Updated frontend components to display captions and filter prompts based on active types.
- Created migrations for GlobalModuleSettings and added caption field to Images.
This commit is contained in:
IGNY8 VPS (Salman)
2025-12-20 21:34:59 +00:00
parent 9e8ff4fbb1
commit 7a1e952a57
16 changed files with 370 additions and 383 deletions

View File

@@ -160,6 +160,7 @@ class Igny8AdminSite(UnfoldAdminSite):
'Global Settings': { 'Global Settings': {
'models': [ 'models': [
('system', 'GlobalIntegrationSettings'), ('system', 'GlobalIntegrationSettings'),
('system', 'GlobalModuleSettings'),
('system', 'GlobalAIPrompt'), ('system', 'GlobalAIPrompt'),
('system', 'GlobalAuthorProfile'), ('system', 'GlobalAuthorProfile'),
('system', 'GlobalStrategy'), ('system', 'GlobalStrategy'),

View File

@@ -112,7 +112,7 @@ class GenerateImagePromptsFunction(BaseAIFunction):
return prompt return prompt
def parse_response(self, response: str, step_tracker=None) -> Dict: def parse_response(self, response: str, step_tracker=None) -> Dict:
"""Parse AI response - same pattern as other functions""" """Parse AI response with new structure including captions"""
ai_core = AICore(account=getattr(self, 'account', None)) ai_core = AICore(account=getattr(self, 'account', None))
json_data = ai_core.extract_json(response) json_data = ai_core.extract_json(response)
@@ -123,9 +123,28 @@ class GenerateImagePromptsFunction(BaseAIFunction):
if 'featured_prompt' not in json_data: if 'featured_prompt' not in json_data:
raise ValueError("Missing 'featured_prompt' in AI response") raise ValueError("Missing 'featured_prompt' in AI response")
if 'featured_caption' not in json_data:
raise ValueError("Missing 'featured_caption' in AI response")
if 'in_article_prompts' not in json_data: if 'in_article_prompts' not in json_data:
raise ValueError("Missing 'in_article_prompts' in AI response") raise ValueError("Missing 'in_article_prompts' in AI response")
# Validate in_article_prompts structure (should be list of objects with prompt & caption)
in_article_prompts = json_data.get('in_article_prompts', [])
if in_article_prompts:
for idx, item in enumerate(in_article_prompts):
if isinstance(item, dict):
if 'prompt' not in item:
raise ValueError(f"Missing 'prompt' in in_article_prompts[{idx}]")
if 'caption' not in item:
raise ValueError(f"Missing 'caption' in in_article_prompts[{idx}]")
else:
# Legacy format (just string) - convert to new format
in_article_prompts[idx] = {
'prompt': str(item),
'caption': '' # Empty caption for legacy data
}
return json_data return json_data
def save_output( def save_output(
@@ -151,23 +170,33 @@ class GenerateImagePromptsFunction(BaseAIFunction):
prompts_created = 0 prompts_created = 0
with transaction.atomic(): with transaction.atomic():
# Save featured image prompt - use content instead of task # Save featured image prompt with caption
Images.objects.update_or_create( Images.objects.update_or_create(
content=content, content=content,
image_type='featured', image_type='featured',
defaults={ defaults={
'prompt': parsed['featured_prompt'], 'prompt': parsed['featured_prompt'],
'caption': parsed.get('featured_caption', ''),
'status': 'pending', 'status': 'pending',
'position': 0, 'position': 0,
} }
) )
prompts_created += 1 prompts_created += 1
# Save in-article image prompts # Save in-article image prompts with captions
in_article_prompts = parsed.get('in_article_prompts', []) in_article_prompts = parsed.get('in_article_prompts', [])
h2_headings = extracted.get('h2_headings', []) h2_headings = extracted.get('h2_headings', [])
for idx, prompt_text in enumerate(in_article_prompts[:max_images]): for idx, prompt_data in enumerate(in_article_prompts[:max_images]):
# Handle both new format (dict with prompt & caption) and legacy format (string)
if isinstance(prompt_data, dict):
prompt_text = prompt_data.get('prompt', '')
caption_text = prompt_data.get('caption', '')
else:
# Legacy format - just a string prompt
prompt_text = str(prompt_data)
caption_text = ''
heading = h2_headings[idx] if idx < len(h2_headings) else f"Section {idx + 1}" heading = h2_headings[idx] if idx < len(h2_headings) else f"Section {idx + 1}"
Images.objects.update_or_create( Images.objects.update_or_create(
@@ -176,6 +205,7 @@ class GenerateImagePromptsFunction(BaseAIFunction):
position=idx + 1, position=idx + 1,
defaults={ defaults={
'prompt': prompt_text, 'prompt': prompt_text,
'caption': caption_text,
'status': 'pending', 'status': 'pending',
} }
) )

View File

@@ -436,6 +436,7 @@ class Images(SoftDeletableModel, SiteSectorBaseModel):
image_url = models.CharField(max_length=500, blank=True, null=True, help_text="URL of the generated/stored image") image_url = models.CharField(max_length=500, blank=True, null=True, help_text="URL of the generated/stored image")
image_path = models.CharField(max_length=500, blank=True, null=True, help_text="Local path if stored locally") image_path = models.CharField(max_length=500, blank=True, null=True, help_text="Local path if stored locally")
prompt = models.TextField(blank=True, null=True, help_text="Image generation prompt used") prompt = models.TextField(blank=True, null=True, help_text="Image generation prompt used")
caption = models.TextField(blank=True, null=True, help_text="Image caption (40-60 words) to display with the image")
status = models.CharField(max_length=50, default='pending', help_text="Status: pending, generated, failed") status = models.CharField(max_length=50, default='pending', help_text="Status: pending, generated, failed")
position = models.IntegerField(default=0, help_text="Position for in-article images ordering") position = models.IntegerField(default=0, help_text="Position for in-article images ordering")
created_at = models.DateTimeField(auto_now_add=True) created_at = models.DateTimeField(auto_now_add=True)

View File

@@ -10,6 +10,7 @@ from .global_settings_models import (
GlobalAIPrompt, GlobalAIPrompt,
GlobalAuthorProfile, GlobalAuthorProfile,
GlobalStrategy, GlobalStrategy,
GlobalModuleSettings,
) )
from django.contrib import messages from django.contrib import messages
@@ -445,3 +446,55 @@ class GlobalStrategyAdmin(ImportExportMixin, Igny8ModelAdmin):
}), }),
) )
@admin.register(GlobalModuleSettings)
class GlobalModuleSettingsAdmin(Igny8ModelAdmin):
    """
    Admin for the platform-wide module enable/disable settings.

    The backing model is a singleton (a single record), so adding a
    second record and deleting the existing one are both blocked below.
    """

    readonly_fields = ['created_at', 'updated_at']

    # One row, so every toggle is surfaced directly on the changelist.
    list_display = [
        'id',
        'planner_enabled',
        'writer_enabled',
        'thinker_enabled',
        'automation_enabled',
        'site_builder_enabled',
        'linker_enabled',
        'optimizer_enabled',
        'publisher_enabled',
        'updated_at',
    ]

    fieldsets = (
        ('Module Availability (Platform-Wide)', {
            'fields': (
                'planner_enabled',
                'writer_enabled',
                'thinker_enabled',
                'automation_enabled',
                'site_builder_enabled',
                'linker_enabled',
                'optimizer_enabled',
                'publisher_enabled',
            ),
            'description': 'Control which modules are available across the entire platform. Disabled modules will not load for ANY user.'
        }),
        ('Metadata', {
            'fields': ('created_at', 'updated_at'),
            'classes': ('collapse',)
        }),
    )

    def has_add_permission(self, request):
        """Allow adding only while no settings record exists yet."""
        return not GlobalModuleSettings.objects.exists()

    def has_delete_permission(self, request, obj=None):
        """Deletion of the singleton is never allowed."""
        return False

View File

@@ -345,3 +345,74 @@ class GlobalStrategy(models.Model):
def __str__(self): def __str__(self):
return f"{self.name} ({self.get_category_display()})" return f"{self.name} ({self.get_category_display()})"
class GlobalModuleSettings(models.Model):
    """
    Global module enable/disable settings (platform-wide).

    Singleton model - only one record exists (pk=1): ``save`` pins the pk
    and ``delete`` is a no-op, so the row can never be removed.
    Controls which modules are available across the entire platform.
    No per-account overrides allowed - this is admin-only control.
    """

    planner_enabled = models.BooleanField(
        default=True,
        help_text="Enable Planner module platform-wide"
    )
    writer_enabled = models.BooleanField(
        default=True,
        help_text="Enable Writer module platform-wide"
    )
    thinker_enabled = models.BooleanField(
        default=True,
        help_text="Enable Thinker module platform-wide"
    )
    automation_enabled = models.BooleanField(
        default=True,
        help_text="Enable Automation module platform-wide"
    )
    site_builder_enabled = models.BooleanField(
        default=True,
        help_text="Enable Site Builder module platform-wide"
    )
    linker_enabled = models.BooleanField(
        default=True,
        help_text="Enable Linker module platform-wide"
    )
    optimizer_enabled = models.BooleanField(
        default=True,
        help_text="Enable Optimizer module platform-wide"
    )
    publisher_enabled = models.BooleanField(
        default=True,
        help_text="Enable Publisher module platform-wide"
    )
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = 'igny8_global_module_settings'
        verbose_name = 'Global Module Settings'
        verbose_name_plural = 'Global Module Settings'

    def __str__(self):
        return "Global Module Settings"

    def save(self, *args, **kwargs):
        """Enforce singleton pattern: every save writes to pk=1."""
        self.pk = 1
        super().save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """Prevent deletion of the singleton.

        Returns ``(0, {})`` — the same ``(deleted_count, per_model_counts)``
        shape Django's ``Model.delete`` returns — instead of ``None``, so
        callers that inspect the result keep working.
        """
        return (0, {})

    @classmethod
    def get_instance(cls):
        """Get or create the singleton instance (pk=1)."""
        obj, _created = cls.objects.get_or_create(pk=1)
        return obj

    def is_module_enabled(self, module_name: str) -> bool:
        """Check if a module is enabled.

        Unknown module names resolve to False rather than raising, so a
        typo disables instead of crashing; pass one of the known module
        keys (e.g. 'planner', 'writer').
        """
        return getattr(self, f"{module_name}_enabled", False)

View File

@@ -0,0 +1,36 @@
# Generated by Django 5.2.9 on 2025-12-20 21:11
from django.db import migrations, models


class Migration(migrations.Migration):
    # Follows the system app's variables fix so the migration chain stays linear.
    dependencies = [
        ('system', '0009_fix_variables_optional'),
    ]

    operations = [
        # Singleton table of platform-wide module toggles; application code
        # reads/writes the pk=1 row (enforced by the model's save()).
        migrations.CreateModel(
            name='GlobalModuleSettings',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('planner_enabled', models.BooleanField(default=True, help_text='Enable Planner module platform-wide')),
                ('writer_enabled', models.BooleanField(default=True, help_text='Enable Writer module platform-wide')),
                ('thinker_enabled', models.BooleanField(default=True, help_text='Enable Thinker module platform-wide')),
                ('automation_enabled', models.BooleanField(default=True, help_text='Enable Automation module platform-wide')),
                ('site_builder_enabled', models.BooleanField(default=True, help_text='Enable Site Builder module platform-wide')),
                ('linker_enabled', models.BooleanField(default=True, help_text='Enable Linker module platform-wide')),
                ('optimizer_enabled', models.BooleanField(default=True, help_text='Enable Optimizer module platform-wide')),
                ('publisher_enabled', models.BooleanField(default=True, help_text='Enable Publisher module platform-wide')),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Global Module Settings',
                'verbose_name_plural': 'Global Module Settings',
                'db_table': 'igny8_global_module_settings',
            },
        ),
        # AccountIntegrationOverride was already removed in migration 0004
        # migrations.DeleteModel(name='AccountIntegrationOverride'),
    ]

View File

@@ -9,6 +9,7 @@ class AIPromptSerializer(serializers.ModelSerializer):
"""Serializer for AI Prompts""" """Serializer for AI Prompts"""
prompt_type_display = serializers.CharField(source='get_prompt_type_display', read_only=True) prompt_type_display = serializers.CharField(source='get_prompt_type_display', read_only=True)
default_prompt = serializers.SerializerMethodField()
class Meta: class Meta:
model = AIPrompt model = AIPrompt
@@ -23,6 +24,18 @@ class AIPromptSerializer(serializers.ModelSerializer):
'created_at', 'created_at',
] ]
read_only_fields = ['id', 'created_at', 'updated_at', 'default_prompt'] read_only_fields = ['id', 'created_at', 'updated_at', 'default_prompt']
def get_default_prompt(self, obj):
    """Return the live platform default prompt for this prompt type.

    Reads the active GlobalAIPrompt row matching ``obj.prompt_type``.
    Uses ``filter(...).first()`` instead of ``get`` so duplicate active
    rows (data drift) degrade to the first match rather than raising
    MultipleObjectsReturned and turning serialization into a 500.
    """
    from .global_settings_models import GlobalAIPrompt

    global_prompt = GlobalAIPrompt.objects.filter(
        prompt_type=obj.prompt_type,
        is_active=True,
    ).first()
    if global_prompt is not None:
        return global_prompt.prompt_value
    # Same sentinel string the frontend already expects for unconfigured types.
    return f"ERROR: Global prompt '{obj.prompt_type}' not configured in admin"
class AuthorProfileSerializer(serializers.ModelSerializer): class AuthorProfileSerializer(serializers.ModelSerializer):

View File

@@ -293,41 +293,55 @@ class ModuleSettingsViewSet(AccountModelViewSet):
) )
class ModuleEnableSettingsViewSet(AccountModelViewSet): class ModuleEnableSettingsViewSet(AccountModelViewSet):
""" """
ViewSet for managing module enable/disable settings ViewSet for GLOBAL module enable/disable settings (read-only).
Unified API Standard v1.0 compliant Returns platform-wide module availability.
One record per account Only superadmin can modify via Django Admin.
Read access: All authenticated users
Write access: Admins/Owners only
""" """
queryset = ModuleEnableSettings.objects.all() queryset = ModuleEnableSettings.objects.all()
serializer_class = ModuleEnableSettingsSerializer serializer_class = ModuleEnableSettingsSerializer
http_method_names = ['get'] # Read-only
authentication_classes = [JWTAuthentication] authentication_classes = [JWTAuthentication]
throttle_scope = 'system' throttle_scope = 'system'
throttle_classes = [DebugScopedRateThrottle] throttle_classes = [DebugScopedRateThrottle]
def get_permissions(self): def get_permissions(self):
""" """Read-only for all authenticated users"""
Allow read access to all authenticated users, return [IsAuthenticatedAndActive(), HasTenantAccess()]
but restrict write access to admins/owners
"""
if self.action in ['list', 'retrieve', 'get_current']:
permission_classes = [IsAuthenticatedAndActive, HasTenantAccess]
else:
permission_classes = [IsAuthenticatedAndActive, HasTenantAccess, IsAdminOrOwner]
return [permission() for permission in permission_classes]
def get_queryset(self): def get_queryset(self):
"""Get module enable settings for current account""" """Return empty queryset (not used - we return global settings)"""
# Return queryset filtered by account - but list() will handle get_or_create return ModuleEnableSettings.objects.none()
queryset = super().get_queryset()
# Filter by account if available def list(self, request, *args, **kwargs):
account = getattr(self.request, 'account', None) """Return global module settings (platform-wide)"""
if not account: try:
user = getattr(self.request, 'user', None) from igny8_core.modules.system.global_settings_models import GlobalModuleSettings
if user: global_settings = GlobalModuleSettings.get_instance()
account = getattr(user, 'account', None)
if account: data = {
queryset = queryset.filter(account=account) 'id': 1,
'planner_enabled': global_settings.planner_enabled,
'writer_enabled': global_settings.writer_enabled,
'thinker_enabled': global_settings.thinker_enabled,
'automation_enabled': global_settings.automation_enabled,
'site_builder_enabled': global_settings.site_builder_enabled,
'linker_enabled': global_settings.linker_enabled,
'optimizer_enabled': global_settings.optimizer_enabled,
'publisher_enabled': global_settings.publisher_enabled,
'created_at': global_settings.created_at.isoformat() if global_settings.created_at else None,
'updated_at': global_settings.updated_at.isoformat() if global_settings.updated_at else None,
}
return success_response(data=data, request=request)
except Exception as e:
return error_response(
error=str(e),
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
request=request
)
def retrieve(self, request, pk=None, *args, **kwargs):
"""Same as list - return global settings"""
return self.list(request)
return queryset return queryset
@action(detail=False, methods=['get', 'put'], url_path='current', url_name='current') @action(detail=False, methods=['get', 'put'], url_path='current', url_name='current')

View File

@@ -5,335 +5,22 @@ from typing import Optional
def get_default_prompt(prompt_type: str) -> str: def get_default_prompt(prompt_type: str) -> str:
"""Get default prompt value from GlobalAIPrompt ONLY - single source of truth""" """
Get default prompt value from GlobalAIPrompt ONLY - single source of truth.
No hardcoded fallbacks. Admin must configure prompts in GlobalAIPrompt table.
"""
from .global_settings_models import GlobalAIPrompt from .global_settings_models import GlobalAIPrompt
try: try:
global_prompt = GlobalAIPrompt.objects.get(prompt_type=prompt_type, is_active=True) global_prompt = GlobalAIPrompt.objects.get(prompt_type=prompt_type, is_active=True)
return global_prompt.prompt_value return global_prompt.prompt_value
except GlobalAIPrompt.DoesNotExist: except GlobalAIPrompt.DoesNotExist:
return f"ERROR: Global prompt '{prompt_type}' not configured in admin. Please configure it at: admin/system/globalaiprompt/" error_msg = (
'clustering': """You are a semantic strategist and SEO architecture engine. Your task is to analyze the provided keyword list and group them into meaningful, intent-driven topic clusters that reflect how real users search, think, and act online. f"ERROR: Global prompt '{prompt_type}' not configured. "
f"Please configure it in Django admin at: /admin/system/globalaiprompt/"
Return a single JSON object with a "clusters" array. Each cluster must follow this structure: )
return error_msg
{ except Exception as e:
"name": "[Descriptive cluster name — natural, SEO-relevant, clearly expressing the topic]", error_msg = f"ERROR: Failed to load global prompt '{prompt_type}': {str(e)}"
"description": "[12 concise sentences explaining what this cluster covers and why these keywords belong together]", return error_msg
"keywords": ["keyword 1", "keyword 2", "keyword 3", "..."]
}
CLUSTERING STRATEGY:
1. Keyword-first, structure-follows:
- Do NOT rely on assumed categories or existing content structures.
- Begin purely from the meaning, intent, and behavioral connection between keywords.
2. Use multi-dimensional grouping logic:
- Group keywords by these behavioral dimensions:
• Search Intent → informational, commercial, transactional, navigational
• Use-Case or Problem → what the user is trying to achieve or solve
• Function or Feature → how something works or what it does
• Persona or Audience → who the content or product serves
• Context → location, time, season, platform, or device
- Combine 23 dimensions naturally where they make sense.
3. Model real search behavior:
- Favor clusters that form natural user journeys such as:
• Problem ➝ Solution
• General ➝ Specific
• Product ➝ Use-case
• Buyer ➝ Benefit
• Tool ➝ Function
• Task ➝ Method
- Each cluster should feel like a real topic hub users would explore in depth.
4. Avoid superficial groupings:
- Do not cluster keywords just because they share words.
- Do not force-fit outliers or unrelated keywords.
- Exclude keywords that don't logically connect to any cluster.
5. Quality rules:
- Each cluster should include between 3–10 strongly related keywords.
- Never duplicate a keyword across multiple clusters.
- Prioritize semantic strength, search intent, and usefulness for SEO-driven content structure.
- It's better to output fewer, high-quality clusters than many weak or shallow ones.
INPUT FORMAT:
{
"keywords": [IGNY8_KEYWORDS]
}
OUTPUT FORMAT:
Return ONLY the final JSON object in this format:
{
"clusters": [
{
"name": "...",
"description": "...",
"keywords": ["...", "...", "..."]
}
]
}
Do not include any explanations, text, or commentary outside the JSON output.
""",
'ideas': """Generate SEO-optimized, high-quality content ideas and outlines for each keyword cluster.
Input:
Clusters: [IGNY8_CLUSTERS]
Keywords: [IGNY8_CLUSTER_KEYWORDS]
Output: JSON with "ideas" array.
Each cluster → 1 cluster_hub + 24 supporting ideas.
Each idea must include:
title, description, content_type, content_structure, cluster_id, estimated_word_count (1500–2200), and covered_keywords.
Outline Rules:
Intro: 1 hook (30–40 words) + 2 intro paragraphs (50–60 words each).
5–8 H2 sections, each with 2–3 H3s.
Each H2 ≈ 250–300 words, mixed content (paragraphs, lists, tables, blockquotes).
Vary section format and tone; no bullets or lists at start.
Tables have columns; blockquotes = expert POV or data insight.
Use depth, examples, and real context.
Avoid repetitive structure.
Tone: Professional editorial flow. No generic phrasing. Use varied sentence openings and realistic examples.
Output JSON Example:
{
"ideas": [
{
"title": "Best Organic Cotton Duvet Covers for All Seasons",
"description": {
"introduction": {
"hook": "Transform your sleep with organic cotton that blends comfort and sustainability.",
"paragraphs": [
{"content_type": "paragraph", "details": "Overview of organic cotton's rise in bedding industry."},
{"content_type": "paragraph", "details": "Why consumers prefer organic bedding over synthetic alternatives."}
]
},
"H2": [
{
"heading": "Why Choose Organic Cotton for Bedding?",
"subsections": [
{"subheading": "Health and Skin Benefits", "content_type": "paragraph", "details": "Discuss hypoallergenic and chemical-free aspects."},
{"subheading": "Environmental Sustainability", "content_type": "list", "details": "Eco benefits like low water use, no pesticides."},
{"subheading": "Long-Term Cost Savings", "content_type": "table", "details": "Compare durability and pricing over time."}
]
}
]
},
"content_type": "post",
"content_structure": "review",
"cluster_id": 12,
"estimated_word_count": 1800,
"covered_keywords": "organic duvet covers, eco-friendly bedding, sustainable sheets"
}
]
}""",
'content_generation': """You are an editorial content strategist. Your task is to generate a complete JSON response object that includes all the fields listed below, based on the provided content idea, keyword cluster, and keyword list.
Only the `content` field should contain HTML inside JSON object.
==================
Generate a complete JSON response object matching this structure:
==================
{
"title": "[Blog title using the primary keyword — full sentence case]",
"meta_title": "[Meta title under 60 characters — natural, optimized, and compelling]",
"meta_description": "[Meta description under 160 characters — clear and enticing summary]",
"content": "[HTML content — full editorial structure with <p>, <h2>, <h3>, <ul>, <ol>, <table>]",
"word_count": [Exact integer — word count of HTML body only],
"primary_keyword": "[Single primary keyword used in title and first paragraph]",
"secondary_keywords": [
"[Keyword 1]",
"[Keyword 2]",
"[Keyword 3]"
],
"tags": [
"[24 word lowercase tag 1]",
"[24 word lowercase tag 2]",
"[24 word lowercase tag 3]",
"[24 word lowercase tag 4]",
"[24 word lowercase tag 5]"
],
"categories": [
"[Parent Category > Child Category]",
"[Optional Second Category > Optional Subcategory]"
]
}
===========================
CONTENT FLOW RULES
===========================
**INTRODUCTION:**
- Start with 1 italicized hook (30–40 words)
- Follow with 2 narrative paragraphs (each 50–60 words; 2–3 sentences max)
- No headings allowed in intro
**H2 SECTIONS (5–8 total):**
Each section should be 250–300 words and follow this format:
1. Two narrative paragraphs (80–120 words each, 2–3 sentences)
2. One list or table (must come *after* a paragraph)
3. Optional closing paragraph (40–60 words)
4. Insert 2–3 subsections naturally after main paragraphs
**Formatting Rules:**
- Vary use of unordered lists, ordered lists, and tables across sections
- Never begin any section or sub-section with a list or table
===========================
KEYWORD & SEO RULES
===========================
- **Primary keyword** must appear in:
- The title
- First paragraph of the introduction
- At least 2 H2 headings
- **Secondary keywords** must be used naturally, not forced
- **Tone & style guidelines:**
- No robotic or passive voice
- Avoid generic intros like "In today's world…"
- Don't repeat heading in opening sentence
- Vary sentence structure and length
===========================
INPUT VARIABLES
===========================
CONTENT IDEA DETAILS:
[IGNY8_IDEA]
KEYWORD CLUSTER:
[IGNY8_CLUSTER]
ASSOCIATED KEYWORDS:
[IGNY8_KEYWORDS]
===========================
OUTPUT FORMAT
===========================
Return ONLY the final JSON object.
Do NOT include any comments, formatting, or explanations.""",
'image_prompt_extraction': """Extract image prompts from the following article content.
ARTICLE TITLE: {title}
ARTICLE CONTENT:
{content}
Extract image prompts for:
1. Featured Image: One main image that represents the article topic
2. In-Article Images: Up to {max_images} images that would be useful within the article content
Return a JSON object with this structure:
{{
"featured_prompt": "Detailed description of the featured image",
"in_article_prompts": [
"Description of first in-article image",
"Description of second in-article image",
...
]
}}
Make sure each prompt is detailed enough for image generation, describing the visual elements, style, mood, and composition.""",
'image_prompt_template': '{image_type} image for blog post titled "{post_title}": {image_prompt}',
'negative_prompt': 'text, watermark, logo, overlay, title, caption, writing on walls, writing on objects, UI, infographic elements, post title',
'site_structure_generation': """You are the lead IA and conversion-focused strategist for a new marketing website. Use the inputs from the site builder wizard to craft an SEO-rich, user-journey friendly site architecture that a design and content team can build immediately.
INPUT DATA
----------
BUSINESS BRIEF:
[IGNY8_BUSINESS_BRIEF]
PRIMARY OBJECTIVES (rank by impact):
[IGNY8_OBJECTIVES]
DESIGN & STORY STYLE NOTES:
[IGNY8_STYLE]
SITE INFO (platform, audience, product/service category, any technical requirements):
[IGNY8_SITE_INFO]
TASK
----
1. Interpret the brief to define the core narrative arc (problem → solution → proof → conversion).
2. Output a JSON object that matches the SiteStructure schema:
{
"site": {
"name": "[Site or campaign name]",
"primary_navigation": ["..."],
"secondary_navigation": ["..."],
"hero_message": "[Top-level positioning statement]",
"tone": "[Voice guidance derived from style input]"
},
"pages": [
{
"slug": "kebab-case-slug",
"title": "SEO-friendly page title",
"type": "[page role e.g. hero, solution, proof, pricing, conversion]",
"status": "draft",
"objective": "Single measurable goal for this page aligned to objectives",
"primary_cta": "Primary call to action text",
"blocks": [
{
"type": "[block archetype e.g. hero, feature_grid, testimonial]",
"heading": "Section headline optimized for intent",
"subheading": "Support copy that clarifies value or context",
"layout": "Suggested layout pattern (full-bleed, two-column, cards, stats, etc.)",
"content": [
"Key talking points, proof, FAQs, offers, or data points"
]
}
]
}
]
}
3. Produce 610 total pages covering the full funnel (awareness, consideration, evaluation, conversion, post-conversion/support) unless the objectives explicitly demand fewer.
4. Every page must include at least three blocks ordered to tell a story, with clear internal logic (hook → build trust → guide action).
5. Craft navigation labels that mirror user language, avoid jargon, and reinforce topical authority.
6. Emphasize SEO signals: use keyword-rich yet natural titles, include topical coverage (solutions, use cases, proof, resources), and highlight schema-worthy elements (stats, FAQs, testimonials).
RESPONSE RULES
--------------
- Return ONLY the JSON object described above. Do not wrap it in markdown.
- Keep text human and specific; never say "Lorem ipsum" or "Example".
- When objectives mention specific offers, personas, or industries, reflect them in page titles, CTAs, and block content.
- If data is missing, infer the most logical assumption and note it inline with phrasing like "(assumed: ...)".
""",
}
return defaults.get(prompt_type, '')
def get_prompt_value(account, prompt_type: str) -> str:
"""Get prompt value for an account, or default if not set"""
try:
from .models import AIPrompt
prompt = AIPrompt.objects.get(account=account, prompt_type=prompt_type, is_active=True)
return prompt.prompt_value
except AIPrompt.DoesNotExist:
return get_default_prompt(prompt_type)

View File

@@ -48,22 +48,50 @@ class AIPromptViewSet(AccountModelViewSet):
"""Get prompts for the current account""" """Get prompts for the current account"""
return super().get_queryset().order_by('prompt_type') return super().get_queryset().order_by('prompt_type')
@action(detail=False, methods=['get'], url_path='active_types', url_name='active_types')
def get_active_types(self, request):
    """List the prompt types that are enabled platform-wide."""
    from .global_settings_models import GlobalAIPrompt

    # Flat list of type names for every globally active prompt row.
    type_names = list(
        GlobalAIPrompt.objects
        .filter(is_active=True)
        .values_list('prompt_type', flat=True)
    )
    return success_response(data={'active_types': type_names}, request=request)
@action(detail=False, methods=['get'], url_path='by_type/(?P<prompt_type>[^/.]+)', url_name='by_type') @action(detail=False, methods=['get'], url_path='by_type/(?P<prompt_type>[^/.]+)', url_name='by_type')
def get_by_type(self, request, prompt_type=None): def get_by_type(self, request, prompt_type=None):
"""Get prompt by type""" """Get prompt by type - only if globally active"""
from .global_settings_models import GlobalAIPrompt
# Check if this prompt type is globally active
try:
global_prompt = GlobalAIPrompt.objects.get(prompt_type=prompt_type)
if not global_prompt.is_active:
return error_response(
error=f'Prompt type "{prompt_type}" is not globally active',
status_code=http_status.HTTP_404_NOT_FOUND,
request=request
)
except GlobalAIPrompt.DoesNotExist:
return error_response(
error=f'Prompt type "{prompt_type}" not found in global settings',
status_code=http_status.HTTP_404_NOT_FOUND,
request=request
)
# Get account-specific prompt or return default
try: try:
prompt = self.get_queryset().get(prompt_type=prompt_type) prompt = self.get_queryset().get(prompt_type=prompt_type)
serializer = self.get_serializer(prompt) serializer = self.get_serializer(prompt)
return success_response(data=serializer.data, request=request) return success_response(data=serializer.data, request=request)
except AIPrompt.DoesNotExist: except AIPrompt.DoesNotExist:
# Return default if not found # Return default from GlobalAIPrompt
from .utils import get_default_prompt
default_value = get_default_prompt(prompt_type)
return success_response( return success_response(
data={ data={
'prompt_type': prompt_type, 'prompt_type': prompt_type,
'prompt_value': default_value, 'prompt_value': global_prompt.prompt_value,
'default_prompt': default_value, 'default_prompt': global_prompt.prompt_value,
'is_active': True, 'is_active': True,
}, },
request=request request=request
@@ -120,10 +148,13 @@ class AIPromptViewSet(AccountModelViewSet):
request=request request=request
) )
# Get default prompt value if creating new # Get default prompt value from GlobalAIPrompt
from .utils import get_default_prompt from .utils import get_default_prompt
default_value = get_default_prompt(prompt_type) default_value = get_default_prompt(prompt_type)
# Check if the prompt is customized (different from global default)
is_customized = (prompt_value != default_value)
# Get or create prompt # Get or create prompt
prompt, created = AIPrompt.objects.get_or_create( prompt, created = AIPrompt.objects.get_or_create(
prompt_type=prompt_type, prompt_type=prompt_type,
@@ -131,12 +162,15 @@ class AIPromptViewSet(AccountModelViewSet):
defaults={ defaults={
'prompt_value': prompt_value, 'prompt_value': prompt_value,
'default_prompt': default_value, 'default_prompt': default_value,
'is_customized': is_customized,
'is_active': True, 'is_active': True,
} }
) )
if not created: if not created:
prompt.prompt_value = prompt_value prompt.prompt_value = prompt_value
prompt.default_prompt = default_value
prompt.is_customized = is_customized
prompt.save() prompt.save()
serializer = self.get_serializer(prompt) serializer = self.get_serializer(prompt)
@@ -190,7 +224,7 @@ class AIPromptViewSet(AccountModelViewSet):
request=request request=request
) )
# Get default prompt # Get default prompt from GlobalAIPrompt
from .utils import get_default_prompt from .utils import get_default_prompt
default_value = get_default_prompt(prompt_type) default_value = get_default_prompt(prompt_type)
@@ -201,12 +235,15 @@ class AIPromptViewSet(AccountModelViewSet):
defaults={ defaults={
'prompt_value': default_value, 'prompt_value': default_value,
'default_prompt': default_value, 'default_prompt': default_value,
'is_customized': False,
'is_active': True, 'is_active': True,
} }
) )
if not created: if not created:
prompt.prompt_value = default_value prompt.prompt_value = default_value
prompt.default_prompt = default_value
prompt.is_customized = False
prompt.save() prompt.save()
serializer = self.get_serializer(prompt) serializer = self.get_serializer(prompt)

View File

@@ -0,0 +1,18 @@
# Generated by Django 5.2.9 on 2025-12-20 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('writer', '0012_soft_delete'),
]
operations = [
migrations.AddField(
model_name='images',
name='caption',
field=models.TextField(blank=True, help_text='Image caption (40-60 words) to display with the image', null=True),
),
]

View File

@@ -89,6 +89,7 @@ class ImagesSerializer(serializers.ModelSerializer):
'image_url', 'image_url',
'image_path', 'image_path',
'prompt', 'prompt',
'caption',
'status', 'status',
'position', 'position',
'created_at', 'created_at',
@@ -126,6 +127,7 @@ class ContentImageSerializer(serializers.ModelSerializer):
'image_url', 'image_url',
'image_path', 'image_path',
'prompt', 'prompt',
'caption',
'status', 'status',
'position', 'position',
'created_at', 'created_at',

View File

@@ -73,6 +73,7 @@ export default function Prompts() {
const [prompts, setPrompts] = useState<Record<string, PromptData>>({}); const [prompts, setPrompts] = useState<Record<string, PromptData>>({});
const [loading, setLoading] = useState(true); const [loading, setLoading] = useState(true);
const [saving, setSaving] = useState<Record<string, boolean>>({}); const [saving, setSaving] = useState<Record<string, boolean>>({});
const [activePromptTypes, setActivePromptTypes] = useState<string[]>([]);
// Load all prompts // Load all prompts
useEffect(() => { useEffect(() => {
@@ -82,7 +83,15 @@ export default function Prompts() {
const loadPrompts = async () => { const loadPrompts = async () => {
setLoading(true); setLoading(true);
try { try {
const promises = PROMPT_TYPES.map(async (type) => { // First, get the list of globally active prompt types
const activeTypesResponse = await fetchAPI('/v1/system/prompts/active_types/');
const activeTypes = activeTypesResponse.active_types || [];
setActivePromptTypes(activeTypes);
// Only load prompts that are globally active
const activePromptConfigs = PROMPT_TYPES.filter(type => activeTypes.includes(type.key));
const promises = activePromptConfigs.map(async (type) => {
try { try {
// fetchAPI extracts data from unified format {success: true, data: {...}} // fetchAPI extracts data from unified format {success: true, data: {...}}
// So response IS the data object // So response IS the data object
@@ -100,15 +109,6 @@ export default function Prompts() {
results.forEach(({ key, data }) => { results.forEach(({ key, data }) => {
if (data) { if (data) {
promptsMap[key] = data; promptsMap[key] = data;
} else {
// Use default if not found
promptsMap[key] = {
prompt_type: key,
prompt_type_display: PROMPT_TYPES.find(t => t.key === key)?.label || key,
prompt_value: '',
default_prompt: '',
is_active: true,
};
} }
}); });
@@ -218,6 +218,7 @@ export default function Prompts() {
<div className="p-6"> <div className="p-6">
{/* Planner Prompts Section */} {/* Planner Prompts Section */}
{PROMPT_TYPES.filter(t => ['clustering', 'ideas'].includes(t.key) && activePromptTypes.includes(t.key)).length > 0 && (
<div className="mb-8"> <div className="mb-8">
<div className="mb-4"> <div className="mb-4">
<h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1"> <h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1">
@@ -231,7 +232,7 @@ export default function Prompts() {
{/* 2-Column Grid for Planner Prompts */} {/* 2-Column Grid for Planner Prompts */}
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
{/* Clustering Prompt */} {/* Clustering Prompt */}
{PROMPT_TYPES.filter(t => ['clustering', 'ideas'].includes(t.key)).map((type) => { {PROMPT_TYPES.filter(t => ['clustering', 'ideas'].includes(t.key) && activePromptTypes.includes(t.key)).map((type) => {
const prompt = prompts[type.key] || { const prompt = prompts[type.key] || {
prompt_type: type.key, prompt_type: type.key,
prompt_type_display: type.label, prompt_type_display: type.label,
@@ -287,8 +288,10 @@ export default function Prompts() {
})} })}
</div> </div>
</div> </div>
)}
{/* Writer Prompts Section */} {/* Writer Prompts Section */}
{PROMPT_TYPES.filter(t => t.key === 'content_generation' && activePromptTypes.includes(t.key)).length > 0 && (
<div className="mb-8"> <div className="mb-8">
<div className="mb-4"> <div className="mb-4">
<h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1"> <h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1">
@@ -301,7 +304,7 @@ export default function Prompts() {
{/* Content Generation Prompt */} {/* Content Generation Prompt */}
<div className="rounded-2xl border border-gray-200 bg-white dark:border-gray-800 dark:bg-gray-900"> <div className="rounded-2xl border border-gray-200 bg-white dark:border-gray-800 dark:bg-gray-900">
{PROMPT_TYPES.filter(t => t.key === 'content_generation').map((type) => { {PROMPT_TYPES.filter(t => t.key === 'content_generation' && activePromptTypes.includes(t.key)).map((type) => {
const prompt = prompts[type.key] || { const prompt = prompts[type.key] || {
prompt_type: type.key, prompt_type: type.key,
prompt_type_display: type.label, prompt_type_display: type.label,
@@ -357,8 +360,10 @@ export default function Prompts() {
})} })}
</div> </div>
</div> </div>
)}
{/* Image Generation Section */} {/* Image Generation Section */}
{PROMPT_TYPES.filter(t => ['image_prompt_extraction', 'image_prompt_template', 'negative_prompt'].includes(t.key) && activePromptTypes.includes(t.key)).length > 0 && (
<div className="mb-8"> <div className="mb-8">
<div className="mb-4"> <div className="mb-4">
<h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1"> <h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1">
@@ -371,7 +376,7 @@ export default function Prompts() {
{/* 2-Column Grid for Image Prompts */} {/* 2-Column Grid for Image Prompts */}
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
{PROMPT_TYPES.filter(t => ['image_prompt_extraction', 'image_prompt_template', 'negative_prompt'].includes(t.key)).map((type) => { {PROMPT_TYPES.filter(t => ['image_prompt_extraction', 'image_prompt_template', 'negative_prompt'].includes(t.key) && activePromptTypes.includes(t.key)).map((type) => {
const prompt = prompts[type.key] || { const prompt = prompts[type.key] || {
prompt_type: type.key, prompt_type: type.key,
prompt_type_display: type.label, prompt_type_display: type.label,
@@ -429,8 +434,10 @@ export default function Prompts() {
})} })}
</div> </div>
</div> </div>
)}
{/* Site Builder Prompts Section */} {/* Site Builder Prompts Section */}
{PROMPT_TYPES.filter(t => t.key === 'site_structure_generation' && activePromptTypes.includes(t.key)).length > 0 && (
<div className="mb-8"> <div className="mb-8">
<div className="mb-4"> <div className="mb-4">
<h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1"> <h2 className="text-xl font-semibold text-gray-800 dark:text-white mb-1">
@@ -443,7 +450,7 @@ export default function Prompts() {
{/* Site Structure Generation Prompt */} {/* Site Structure Generation Prompt */}
<div className="rounded-2xl border border-gray-200 bg-white dark:border-gray-800 dark:bg-gray-900"> <div className="rounded-2xl border border-gray-200 bg-white dark:border-gray-800 dark:bg-gray-900">
{PROMPT_TYPES.filter(t => t.key === 'site_structure_generation').map((type) => { {PROMPT_TYPES.filter(t => t.key === 'site_structure_generation' && activePromptTypes.includes(t.key)).map((type) => {
const prompt = prompts[type.key] || { const prompt = prompts[type.key] || {
prompt_type: type.key, prompt_type: type.key,
prompt_type_display: type.label, prompt_type_display: type.label,
@@ -508,6 +515,7 @@ export default function Prompts() {
})} })}
</div> </div>
</div> </div>
)}
</div> </div>
</> </>
); );

View File

@@ -1377,6 +1377,7 @@ export interface ImageRecord {
image_url?: string | null; image_url?: string | null;
image_path?: string | null; image_path?: string | null;
prompt?: string | null; prompt?: string | null;
caption?: string | null;
status: string; status: string;
position: number; position: number;
created_at: string; created_at: string;

View File

@@ -21,6 +21,9 @@ import {
AccountSettingsError, AccountSettingsError,
} from '../services/api'; } from '../services/api';
// Version for cache busting - increment when structure changes
const SETTINGS_STORE_VERSION = 2;
const getAccountSettingsErrorMessage = (error: AccountSettingsError): string => { const getAccountSettingsErrorMessage = (error: AccountSettingsError): string => {
switch (error.type) { switch (error.type) {
case 'ACCOUNT_SETTINGS_NOT_FOUND': case 'ACCOUNT_SETTINGS_NOT_FOUND':
@@ -241,11 +244,23 @@ export const useSettingsStore = create<SettingsState>()(
}), }),
{ {
name: 'settings-storage', name: 'settings-storage',
version: SETTINGS_STORE_VERSION, // Add version for cache busting
partialize: (state) => ({ partialize: (state) => ({
accountSettings: state.accountSettings, accountSettings: state.accountSettings,
moduleSettings: state.moduleSettings, moduleSettings: state.moduleSettings,
moduleEnableSettings: state.moduleEnableSettings, moduleEnableSettings: state.moduleEnableSettings,
}), }),
// Migrate function to handle version changes
migrate: (persistedState: any, version: number) => {
if (version < SETTINGS_STORE_VERSION) {
// Clear module enable settings on version upgrade
return {
...persistedState,
moduleEnableSettings: null,
};
}
return persistedState;
},
} }
) )
); );

View File

@@ -275,15 +275,15 @@ const FeaturedImageBlock = ({
) : ( ) : (
<PromptPlaceholder prompt={image?.prompt} minHeight={420} label="Featured Image Prompt" /> <PromptPlaceholder prompt={image?.prompt} minHeight={420} label="Featured Image Prompt" />
)} )}
{image?.prompt && imageSrc && ( {image?.caption && imageSrc && (
<div className="absolute bottom-5 left-5 rounded-full bg-white/80 px-4 py-2 text-xs font-medium text-slate-600 backdrop-blur-sm dark:bg-gray-950/70 dark:text-slate-300"> <div className="absolute bottom-5 left-5 rounded-full bg-white/80 px-4 py-2 text-xs font-medium text-slate-600 backdrop-blur-sm dark:bg-gray-950/70 dark:text-slate-300">
Prompt aligned to hero section Caption aligned to hero section
</div> </div>
)} )}
</div> </div>
{image?.prompt && ( {image?.caption && (
<div className="border-t border-slate-200/70 bg-white/70 px-8 py-6 text-sm leading-relaxed text-slate-600 backdrop-blur-sm dark:border-gray-800/60 dark:bg-gray-900/70 dark:text-slate-300"> <div className="border-t border-slate-200/70 bg-white/70 px-8 py-6 text-sm leading-relaxed text-slate-600 backdrop-blur-sm dark:border-gray-800/60 dark:bg-gray-900/70 dark:text-slate-300">
{image.prompt} {image.caption}
</div> </div>
)} )}
</div> </div>
@@ -322,12 +322,12 @@ const SectionImageBlock = ({
<ImageStatusPill status={image?.status} /> <ImageStatusPill status={image?.status} />
</div> </div>
</div> </div>
{image?.prompt && ( {image?.caption && (
<figcaption className="space-y-3 px-6 py-5 text-sm leading-relaxed text-slate-600 dark:text-slate-300"> <figcaption className="space-y-3 px-6 py-5 text-sm leading-relaxed text-slate-600 dark:text-slate-300">
<p className="font-semibold uppercase tracking-[0.25em] text-slate-400 dark:text-slate-500"> <p className="font-semibold uppercase tracking-[0.25em] text-slate-400 dark:text-slate-500">
Visual Direction Image Caption
</p> </p>
<p className="font-medium whitespace-pre-wrap">{image.prompt}</p> <p className="font-medium whitespace-pre-wrap">{image.caption}</p>
</figcaption> </figcaption>
)} )}
</figure> </figure>