AI Automation, Scheduling and Publishing front-end and back-end refactor

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-17 15:52:46 +00:00
parent 0435a5cf70
commit d3b3e1c0d4
34 changed files with 4715 additions and 375 deletions

View File

@@ -0,0 +1,357 @@
"""
Unified Site Settings API
Consolidates AI & Automation settings into a single endpoint.
Per SETTINGS-CONSOLIDATION-PLAN.md:
GET/PUT /api/v1/sites/{site_id}/unified-settings/
"""
import logging
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from django.shortcuts import get_object_or_404
from drf_spectacular.utils import extend_schema, extend_schema_view, OpenApiParameter
from igny8_core.api.permissions import IsAuthenticatedAndActive, IsEditorOrAbove
from igny8_core.api.response import success_response, error_response
from igny8_core.api.throttles import DebugScopedRateThrottle
from igny8_core.auth.models import Site
from igny8_core.business.automation.models import AutomationConfig
from igny8_core.business.integration.models import PublishingSettings
from igny8_core.business.billing.models import AIModelConfig
logger = logging.getLogger(__name__)
# Default stage configuration
DEFAULT_STAGE_CONFIG = {
'1': {'enabled': True, 'batch_size': 50, 'per_run_limit': 0, 'use_testing': False, 'budget_pct': 15},
'2': {'enabled': True, 'batch_size': 1, 'per_run_limit': 10, 'use_testing': False, 'budget_pct': 10},
'3': {'enabled': True, 'batch_size': 20, 'per_run_limit': 0}, # No AI
'4': {'enabled': True, 'batch_size': 1, 'per_run_limit': 5, 'use_testing': False, 'budget_pct': 40},
'5': {'enabled': True, 'batch_size': 1, 'per_run_limit': 5, 'use_testing': False, 'budget_pct': 5},
'6': {'enabled': True, 'batch_size': 1, 'per_run_limit': 20, 'use_testing': False, 'budget_pct': 30},
'7': {'enabled': True, 'per_run_limit': 10}, # No AI
}
STAGE_INFO = [
{'number': 1, 'name': 'Keywords → Clusters', 'has_ai': True},
{'number': 2, 'name': 'Clusters → Ideas', 'has_ai': True},
{'number': 3, 'name': 'Ideas → Tasks', 'has_ai': False},
{'number': 4, 'name': 'Tasks → Content', 'has_ai': True},
{'number': 5, 'name': 'Content → Prompts', 'has_ai': True},
{'number': 6, 'name': 'Prompts → Images', 'has_ai': True},
{'number': 7, 'name': 'Review → Approved', 'has_ai': False},
]
@extend_schema_view(
    retrieve=extend_schema(
        tags=['Site Settings'],
        summary='Get unified site settings',
        description='Get all AI & Automation settings for a site in one response',
        parameters=[
            OpenApiParameter(name='site_id', location='path', type=int, required=True),
        ]
    ),
    update=extend_schema(
        tags=['Site Settings'],
        summary='Update unified site settings',
        description='Update all AI & Automation settings for a site atomically',
        parameters=[
            OpenApiParameter(name='site_id', location='path', type=int, required=True),
        ]
    ),
)
class UnifiedSiteSettingsViewSet(viewsets.ViewSet):
    """
    Unified API for all site AI & automation settings.

    GET /api/v1/sites/{site_id}/unified-settings/
    PUT /api/v1/sites/{site_id}/unified-settings/

    Both endpoints operate on the union of AutomationConfig (scheduling and
    per-stage batching/limits) and PublishingSettings (approval/publishing
    cadence), plus a read-only catalogue of Testing vs Live AI models.
    """
    permission_classes = [IsAuthenticatedAndActive, IsEditorOrAbove]
    throttle_scope = 'settings'
    throttle_classes = [DebugScopedRateThrottle]

    # stage number -> (enabled field, batch-size field, per-run-limit field)
    # on AutomationConfig. Stage 7 has no batch size. Keeping the mapping in
    # one table keeps the read path (_build_stage_config_from_automation)
    # and the write path (_update_stage_config) consistent.
    STAGE_FIELDS = {
        1: ('stage_1_enabled', 'stage_1_batch_size', 'max_keywords_per_run'),
        2: ('stage_2_enabled', 'stage_2_batch_size', 'max_clusters_per_run'),
        3: ('stage_3_enabled', 'stage_3_batch_size', 'max_ideas_per_run'),
        4: ('stage_4_enabled', 'stage_4_batch_size', 'max_tasks_per_run'),
        5: ('stage_5_enabled', 'stage_5_batch_size', 'max_content_per_run'),
        6: ('stage_6_enabled', 'stage_6_batch_size', 'max_images_per_run'),
        7: ('stage_7_enabled', None, 'max_approvals_per_run'),
    }
    # Stages whose stored config carries the use_testing flag; stages 3 and 7
    # are mechanical (no AI) and never expose AI-related fields.
    AI_STAGES = frozenset({1, 2, 4, 5, 6})

    def retrieve(self, request, site_id=None):
        """
        Get all settings for a site in one response.

        Creates the AutomationConfig / PublishingSettings rows on first
        access so the frontend always receives a complete payload.
        """
        site = get_object_or_404(Site, id=site_id, account=request.user.account)
        # Get or create AutomationConfig with conservative defaults.
        automation_config, _ = AutomationConfig.objects.get_or_create(
            site=site,
            defaults={
                'account': site.account,
                'is_enabled': False,
                'frequency': 'daily',
                'scheduled_time': '02:00',
            }
        )
        # Get or create PublishingSettings
        publishing_settings, _ = PublishingSettings.get_or_create_for_site(site)
        # Build stage configuration from AutomationConfig
        stage_config = self._build_stage_config_from_automation(automation_config)
        response_data = {
            'site_id': site.id,
            'site_name': site.name,
            'automation': {
                'enabled': automation_config.is_enabled,
                'frequency': automation_config.frequency,
                'time': self._format_scheduled_time(automation_config.scheduled_time),
                'last_run_at': automation_config.last_run_at.isoformat() if automation_config.last_run_at else None,
                'next_run_at': automation_config.next_run_at.isoformat() if automation_config.next_run_at else None,
            },
            'stages': self._build_stage_matrix(stage_config),
            'delays': {
                'within_stage': automation_config.within_stage_delay,
                'between_stage': automation_config.between_stage_delay,
            },
            'publishing': {
                'auto_approval_enabled': publishing_settings.auto_approval_enabled,
                'auto_publish_enabled': publishing_settings.auto_publish_enabled,
                'publish_days': publishing_settings.publish_days,
                'time_slots': publishing_settings.publish_time_slots,
                # Calculated capacity (read-only)
                'daily_capacity': publishing_settings.daily_capacity,
                'weekly_capacity': publishing_settings.weekly_capacity,
                'monthly_capacity': publishing_settings.monthly_capacity,
            },
            'available_models': {
                'text': {
                    'testing': self._model_summary(AIModelConfig.get_testing_model('text')),
                    'live': self._model_summary(AIModelConfig.get_live_model('text')),
                },
                'image': {
                    'testing': self._model_summary(AIModelConfig.get_testing_model('image')),
                    'live': self._model_summary(AIModelConfig.get_live_model('image')),
                },
            },
        }
        return success_response(response_data, request=request)

    def update(self, request, site_id=None):
        """
        Update all settings for a site atomically (partial payloads allowed).

        Accepts the same top-level sections that ``retrieve`` returns
        ('automation', 'stages', 'delays', 'publishing'); only keys present
        in the payload are written. Returns the full refreshed settings.
        """
        site = get_object_or_404(Site, id=site_id, account=request.user.account)
        data = request.data
        try:
            # Get or create AutomationConfig
            automation_config, _ = AutomationConfig.objects.get_or_create(
                site=site,
                defaults={'account': site.account}
            )
            # Get or create PublishingSettings
            publishing_settings, _ = PublishingSettings.get_or_create_for_site(site)
            if 'automation' in data:
                self._apply_automation_payload(automation_config, data['automation'])
            if 'stages' in data:
                self._update_stage_config(automation_config, data['stages'])
            if 'delays' in data:
                delays = data['delays']
                if 'within_stage' in delays:
                    automation_config.within_stage_delay = delays['within_stage']
                if 'between_stage' in delays:
                    automation_config.between_stage_delay = delays['between_stage']
            automation_config.save()
            if 'publishing' in data:
                self._apply_publishing_payload(publishing_settings, data['publishing'])
                publishing_settings.save()
            # Echo back the authoritative, refreshed settings.
            return self.retrieve(request, site_id)
        except Exception as e:
            # Boundary handler: any write failure becomes a 400 payload rather
            # than a 500; the traceback is preserved via logger.exception.
            logger.exception(f"Error updating unified settings for site {site_id}")
            return error_response(
                f"Failed to update settings: {str(e)}",
                None,
                status.HTTP_400_BAD_REQUEST,
                request
            )

    @staticmethod
    def _format_scheduled_time(scheduled_time):
        """Normalize scheduled_time (time object or 'HH:MM[:SS]' string) to 'HH:MM'."""
        if not scheduled_time:
            return '02:00'
        if hasattr(scheduled_time, 'strftime'):
            return scheduled_time.strftime('%H:%M')
        return str(scheduled_time)[:5]  # Get HH:MM from string

    @staticmethod
    def _model_summary(model_config):
        """Serialize an AIModelConfig to {'id', 'name', 'model_name'}, or None."""
        if model_config is None:
            return None
        return {
            'id': model_config.id,
            'name': model_config.display_name,
            'model_name': model_config.model_name,
        }

    def _apply_automation_payload(self, automation_config, auto):
        """Copy any 'automation' payload keys present onto AutomationConfig."""
        if 'enabled' in auto:
            automation_config.is_enabled = auto['enabled']
        if 'frequency' in auto:
            automation_config.frequency = auto['frequency']
        if 'time' in auto:
            from datetime import datetime
            # Raises ValueError on a malformed time; surfaced as a 400 by update().
            automation_config.scheduled_time = datetime.strptime(auto['time'], '%H:%M').time()

    def _apply_publishing_payload(self, publishing_settings, pub):
        """Copy any 'publishing' payload keys present onto PublishingSettings."""
        # payload key -> PublishingSettings field (note time_slots is renamed).
        field_map = {
            'auto_approval_enabled': 'auto_approval_enabled',
            'auto_publish_enabled': 'auto_publish_enabled',
            'publish_days': 'publish_days',
            'time_slots': 'publish_time_slots',
        }
        for payload_key, model_field in field_map.items():
            if payload_key in pub:
                setattr(publishing_settings, model_field, pub[payload_key])

    def _build_stage_config_from_automation(self, automation_config):
        """Build the stage-config dict (keys are stage numbers as strings)
        from the flat per-stage fields on AutomationConfig."""
        stage_config = {}
        for num, (enabled_f, batch_f, limit_f) in self.STAGE_FIELDS.items():
            entry = {
                'enabled': getattr(automation_config, enabled_f),
                'per_run_limit': getattr(automation_config, limit_f),
            }
            if batch_f is not None:
                entry['batch_size'] = getattr(automation_config, batch_f)
            if num in self.AI_STAGES:
                # use_testing is not persisted yet; default False (can be
                # stored in AutomationConfig metadata later).
                entry['use_testing'] = False
            stage_config[str(num)] = entry
        return stage_config

    def _build_stage_matrix(self, stage_config):
        """Build the ordered stage list for the frontend, merging stored
        config with DEFAULT_STAGE_CONFIG fallbacks."""
        result = []
        for stage in STAGE_INFO:
            num = str(stage['number'])
            config = stage_config.get(num, DEFAULT_STAGE_CONFIG.get(num, {}))
            stage_data = {
                'number': stage['number'],
                'name': stage['name'],
                'has_ai': stage['has_ai'],
                'enabled': config.get('enabled', True),
                'batch_size': config.get('batch_size', 1),
                'per_run_limit': config.get('per_run_limit', 0),
            }
            # Only include AI-related fields for stages that use AI
            if stage['has_ai']:
                stage_data['use_testing'] = config.get('use_testing', False)
                stage_data['budget_pct'] = config.get('budget_pct', 20)
            result.append(stage_data)
        return result

    def _update_stage_config(self, automation_config, stages):
        """Apply a 'stages' payload (list of per-stage dicts) onto the
        corresponding AutomationConfig fields via STAGE_FIELDS."""
        for stage in stages:
            # Tolerate numeric strings from loosely-typed clients; skip
            # anything that is not a known stage number.
            try:
                num = int(stage.get('number'))
            except (TypeError, ValueError):
                continue
            fields = self.STAGE_FIELDS.get(num)
            if fields is None:
                continue
            enabled_field, batch_field, limit_field = fields
            if 'enabled' in stage:
                setattr(automation_config, enabled_field, stage['enabled'])
            if batch_field is not None and 'batch_size' in stage:
                setattr(automation_config, batch_field, stage['batch_size'])
            if 'per_run_limit' in stage:
                setattr(automation_config, limit_field, stage['per_run_limit'])

View File

@@ -0,0 +1,53 @@
# Generated by Django 5.2.10 on 2026-01-17 14:37
from django.db import migrations, models
# (field name, help text) pairs for the new per-run limit fields; each one
# becomes an IntegerField(default=0), where 0 means "unlimited". Declared at
# module scope so the class-body comprehension below can see it.
_LIMIT_FIELDS = [
    ('max_approvals_per_run', 'Max content pieces to auto-approve in stage 7 (0=unlimited)'),
    ('max_clusters_per_run', 'Max clusters to process in stage 2 (0=unlimited)'),
    ('max_content_per_run', 'Max content pieces for image prompts in stage 5 (0=unlimited)'),
    ('max_credits_per_run', 'Max credits to use per run (0=unlimited)'),
    ('max_ideas_per_run', 'Max ideas to process in stage 3 (0=unlimited)'),
    ('max_images_per_run', 'Max images to generate in stage 6 (0=unlimited)'),
    ('max_keywords_per_run', 'Max keywords to process in stage 1 (0=unlimited)'),
    ('max_tasks_per_run', 'Max tasks to process in stage 4 (0=unlimited)'),
]


class Migration(migrations.Migration):
    """Add per-run item caps and a per-run credit budget to AutomationConfig."""

    dependencies = [
        ('automation', '0007_add_stage_enabled_toggles'),
    ]

    operations = [
        migrations.AddField(
            model_name='automationconfig',
            name=field_name,
            field=models.IntegerField(default=0, help_text=help_text),
        )
        for field_name, help_text in _LIMIT_FIELDS
    ]

View File

@@ -44,6 +44,19 @@ class AutomationConfig(models.Model):
within_stage_delay = models.IntegerField(default=3, help_text="Delay between batches within a stage (seconds)")
between_stage_delay = models.IntegerField(default=5, help_text="Delay between stage transitions (seconds)")
# Per-run item limits (0 = unlimited, processes all available)
# These prevent runaway automation and control resource usage
max_keywords_per_run = models.IntegerField(default=0, help_text="Max keywords to process in stage 1 (0=unlimited)")
max_clusters_per_run = models.IntegerField(default=0, help_text="Max clusters to process in stage 2 (0=unlimited)")
max_ideas_per_run = models.IntegerField(default=0, help_text="Max ideas to process in stage 3 (0=unlimited)")
max_tasks_per_run = models.IntegerField(default=0, help_text="Max tasks to process in stage 4 (0=unlimited)")
max_content_per_run = models.IntegerField(default=0, help_text="Max content pieces for image prompts in stage 5 (0=unlimited)")
max_images_per_run = models.IntegerField(default=0, help_text="Max images to generate in stage 6 (0=unlimited)")
max_approvals_per_run = models.IntegerField(default=0, help_text="Max content pieces to auto-approve in stage 7 (0=unlimited)")
# Credit budget limit per run (0 = use site's full credit balance)
max_credits_per_run = models.IntegerField(default=0, help_text="Max credits to use per run (0=unlimited)")
last_run_at = models.DateTimeField(null=True, blank=True)
next_run_at = models.DateTimeField(null=True, blank=True, help_text="Calculated based on frequency")

View File

@@ -63,7 +63,7 @@ class AutomationService:
def _check_should_stop(self) -> tuple[bool, str]:
"""
Check if automation should stop (paused or cancelled)
Check if automation should stop (paused, cancelled, or credit budget exceeded)
Returns:
(should_stop, reason)
@@ -79,6 +79,83 @@ class AutomationService:
elif self.run.status == 'cancelled':
return True, "cancelled"
# Check credit budget
budget_exceeded, budget_reason = self._check_credit_budget()
if budget_exceeded:
return True, f"credit_budget_exceeded: {budget_reason}"
return False, ""
def _get_per_run_limit(self, stage: int) -> int:
"""
Get the per-run item limit for a stage from config.
Args:
stage: Stage number (1-7)
Returns:
Max items to process (0 = unlimited)
"""
limit_map = {
1: self.config.max_keywords_per_run,
2: self.config.max_clusters_per_run,
3: self.config.max_ideas_per_run,
4: self.config.max_tasks_per_run,
5: self.config.max_content_per_run,
6: self.config.max_images_per_run,
7: self.config.max_approvals_per_run,
}
return limit_map.get(stage, 0)
def _apply_per_run_limit(self, queryset, stage: int, log_prefix: str = ""):
"""
Apply per-run limit to queryset if configured.
Args:
queryset: Django queryset to limit
stage: Stage number (1-7)
log_prefix: Prefix for log messages
Returns:
Limited queryset (or list if limit applied)
"""
limit = self._get_per_run_limit(stage)
if limit > 0:
total = queryset.count()
if total > limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage, f"{log_prefix}Applying per-run limit: {limit} of {total} items (limit set in automation config)"
)
return list(queryset[:limit])
return queryset
def _check_credit_budget(self) -> tuple[bool, str]:
"""
Check if credit budget for this run has been exceeded.
Returns:
(exceeded, reason) - If exceeded is True, automation should stop
"""
if not self.run or not self.config:
return False, ""
max_credits = self.config.max_credits_per_run
if max_credits <= 0: # 0 = unlimited
return False, ""
credits_used = self._get_credits_used()
if credits_used >= max_credits:
reason = f"Credit budget exhausted: {credits_used}/{max_credits} credits used"
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
self.run.current_stage, reason
)
return True, reason
return False, ""
def start_automation(self, trigger_type: str = 'manual') -> str:
@@ -170,6 +247,19 @@ class AutomationService:
disabled=False
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_keywords.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} keywords"
)
# Get limited keyword IDs first, then filter queryset
limited_ids = list(pending_keywords.values_list('id', flat=True)[:per_run_limit])
pending_keywords = pending_keywords.filter(id__in=limited_ids)
total_count = pending_keywords.count()
# IMPORTANT: Group keywords by sector to avoid mixing sectors in clustering
@@ -480,6 +570,17 @@ class AutomationService:
disabled=False
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_clusters.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} clusters"
)
pending_clusters = pending_clusters[:per_run_limit]
total_count = pending_clusters.count()
# Log stage start
@@ -674,6 +775,17 @@ class AutomationService:
status='new'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_ideas.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} ideas"
)
pending_ideas = pending_ideas[:per_run_limit]
total_count = pending_ideas.count()
# Log stage start
@@ -837,6 +949,17 @@ class AutomationService:
status='queued'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_tasks.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} tasks"
)
pending_tasks = pending_tasks[:per_run_limit]
total_count = pending_tasks.count()
# Log stage start
@@ -1078,6 +1201,17 @@ class AutomationService:
images_count=0
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = content_without_images.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} content items"
)
content_without_images = content_without_images[:per_run_limit]
total_count = content_without_images.count()
# ADDED: Enhanced logging
@@ -1291,6 +1425,17 @@ class AutomationService:
status='pending'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_images.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} images"
)
pending_images = pending_images[:per_run_limit]
total_count = pending_images.count()
# Log stage start
@@ -1538,6 +1683,17 @@ class AutomationService:
status='review'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = ready_for_review.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Approving {per_run_limit} of {total_available} content items"
)
ready_for_review = ready_for_review[:per_run_limit]
total_count = ready_for_review.count()
# Log stage start

View File

@@ -49,9 +49,9 @@ def check_scheduled_automations():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - already ran today")
continue
# Check if already running
if AutomationRun.objects.filter(site=config.site, status='running').exists():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - already running")
# Check if already running OR paused (don't start new if existing in progress)
if AutomationRun.objects.filter(site=config.site, status__in=['running', 'paused']).exists():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - automation in progress (running/paused)")
continue
logger.info(f"[AutomationTask] Starting scheduled automation for site {config.site.id}")
@@ -162,13 +162,50 @@ def run_automation_task(self, run_id: str):
@shared_task(name='automation.resume_automation_task', bind=True, max_retries=0)
def resume_automation_task(self, run_id: str):
"""
Resume paused automation run from current stage
Resume paused automation run from current stage.
CRITICAL FIXES:
- Verifies run status is 'running' before processing
- Reacquires lock in case it expired during long pause
- Checks pause/cancel status after each stage
- Releases lock on failure
"""
logger.info(f"[AutomationTask] Resuming automation run: {run_id}")
try:
from django.core.cache import cache
# Load run and verify status
run = AutomationRun.objects.get(run_id=run_id)
# CRITICAL FIX: Verify run is actually in 'running' status
# (status is set to 'running' by views.resume before calling this task)
if run.status != 'running':
logger.warning(f"[AutomationTask] Run {run_id} status is '{run.status}', not 'running'. Aborting resume.")
return
# CRITICAL FIX: Reacquire lock in case it expired during long pause (6hr timeout)
lock_key = f'automation_lock_{run.site.id}'
lock_acquired = cache.add(lock_key, run_id, timeout=21600) # 6 hours
if not lock_acquired:
# Lock exists - check if it's ours (from original run start)
existing_lock = cache.get(lock_key)
# If lock exists but isn't our run_id, another run may have started
if existing_lock and existing_lock != run_id and existing_lock != 'locked':
logger.warning(f"[AutomationTask] Lock held by different run ({existing_lock}). Aborting resume for {run_id}")
run.status = 'failed'
run.error_message = f'Lock acquired by another run ({existing_lock}) during pause'
run.completed_at = timezone.now()
run.save()
return
# Lock exists and is either 'locked' (our old format) or our run_id - proceed
logger.info(f"[AutomationTask] Existing lock found, proceeding with resume")
else:
# We acquired a new lock (old one expired)
logger.info(f"[AutomationTask] Reacquired lock after expiry for run {run_id}")
service = AutomationService.from_run_id(run_id)
run = service.run
config = service.config
# Continue from current stage
@@ -196,20 +233,35 @@ def resume_automation_task(self, run_id: str):
for stage in range(run.current_stage - 1, 7):
if stage_enabled[stage]:
stage_methods[stage]()
# CRITICAL FIX: Check for pause/cancel AFTER each stage (same as run_automation_task)
service.run.refresh_from_db()
if service.run.status in ['paused', 'cancelled']:
logger.info(f"[AutomationTask] Resumed automation {service.run.status} after stage {stage + 1}")
return
else:
logger.info(f"[AutomationTask] Stage {stage + 1} is disabled, skipping")
logger.info(f"[AutomationTask] Resumed automation run: {run_id}")
logger.info(f"[AutomationTask] Resumed automation completed: {run_id}")
except Exception as e:
logger.error(f"[AutomationTask] Failed to resume automation run {run_id}: {e}")
# Mark as failed
run = AutomationRun.objects.get(run_id=run_id)
run.status = 'failed'
run.error_message = str(e)
run.completed_at = timezone.now()
run.save()
# Mark as failed and release lock
try:
run = AutomationRun.objects.get(run_id=run_id)
run.status = 'failed'
run.error_message = str(e)
run.completed_at = timezone.now()
run.save()
# Release lock on failure
from django.core.cache import cache
cache.delete(f'automation_lock_{run.site.id}')
except Exception as cleanup_err:
logger.error(f"[AutomationTask] Failed to cleanup after resume failure: {cleanup_err}")
raise
# Alias for continue_automation_task (same as resume)

View File

@@ -77,6 +77,15 @@ class AutomationViewSet(viewsets.ViewSet):
'stage_6_batch_size': config.stage_6_batch_size,
'within_stage_delay': config.within_stage_delay,
'between_stage_delay': config.between_stage_delay,
# Per-run limits (0 = unlimited)
'max_keywords_per_run': config.max_keywords_per_run,
'max_clusters_per_run': config.max_clusters_per_run,
'max_ideas_per_run': config.max_ideas_per_run,
'max_tasks_per_run': config.max_tasks_per_run,
'max_content_per_run': config.max_content_per_run,
'max_images_per_run': config.max_images_per_run,
'max_approvals_per_run': config.max_approvals_per_run,
'max_credits_per_run': config.max_credits_per_run,
'last_run_at': config.last_run_at,
'next_run_at': config.next_run_at,
})
@@ -153,6 +162,18 @@ class AutomationViewSet(viewsets.ViewSet):
except (TypeError, ValueError):
pass
# Per-run limits (0 = unlimited)
for field in ['max_keywords_per_run', 'max_clusters_per_run', 'max_ideas_per_run',
'max_tasks_per_run', 'max_content_per_run', 'max_images_per_run',
'max_approvals_per_run', 'max_credits_per_run']:
if field in request.data:
try:
value = int(request.data[field])
if value >= 0: # Allow 0 (unlimited) or positive numbers
setattr(config, field, value)
except (TypeError, ValueError):
pass
config.save()
return Response({
@@ -175,6 +196,15 @@ class AutomationViewSet(viewsets.ViewSet):
'stage_6_batch_size': config.stage_6_batch_size,
'within_stage_delay': config.within_stage_delay,
'between_stage_delay': config.between_stage_delay,
# Per-run limits (0 = unlimited)
'max_keywords_per_run': config.max_keywords_per_run,
'max_clusters_per_run': config.max_clusters_per_run,
'max_ideas_per_run': config.max_ideas_per_run,
'max_tasks_per_run': config.max_tasks_per_run,
'max_content_per_run': config.max_content_per_run,
'max_images_per_run': config.max_images_per_run,
'max_approvals_per_run': config.max_approvals_per_run,
'max_credits_per_run': config.max_credits_per_run,
'last_run_at': config.last_run_at,
'next_run_at': config.next_run_at,
})
@@ -267,6 +297,17 @@ class AutomationViewSet(viewsets.ViewSet):
try:
service = AutomationService.from_run_id(run_id)
service.pause_automation()
# CRITICAL FIX: Log pause to automation log files
try:
service.logger.log_stage_progress(
service.run.run_id, service.account.id, service.site.id,
service.run.current_stage, f"Automation paused by user at stage {service.run.current_stage}"
)
except Exception as log_err:
# Don't fail the pause if logging fails
pass
return Response({'message': 'Automation paused'})
except AutomationRun.DoesNotExist:
return Response(
@@ -1613,6 +1654,22 @@ class AutomationViewSet(viewsets.ViewSet):
run.completed_at = timezone.now()
run.save(update_fields=['status', 'cancelled_at', 'completed_at'])
# CRITICAL FIX: Release the lock so user can start new automation
from django.core.cache import cache
cache.delete(f'automation_lock_{run.site.id}')
# Log the cancellation to automation log files
try:
from igny8_core.business.automation.services.automation_logger import AutomationLogger
logger = AutomationLogger()
logger.log_stage_progress(
run.run_id, run.account.id, run.site.id, run.current_stage,
f"Automation cancelled by user at stage {run.current_stage}"
)
except Exception as log_err:
# Don't fail the cancellation if logging fails
pass
return Response({
'message': 'Automation cancelled',
'status': run.status,

View File

@@ -0,0 +1,24 @@
# Generated migration for settings consolidation
# Add is_testing field to AIModelConfig
from django.db import migrations, models
# Field definition hoisted out of the operation for readability; behavior is
# identical to declaring it inline in AddField.
_IS_TESTING_FIELD = models.BooleanField(
    default=False,
    db_index=True,
    help_text='Testing model (cheap, for testing only). Only one per model_type can be is_testing=True.',
)


class Migration(migrations.Migration):
    """Add the is_testing flag to AIModelConfig (settings consolidation)."""

    dependencies = [
        ('billing', '0009_seed_ai_model_configs'),
    ]

    operations = [
        migrations.AddField(
            model_name='aimodelconfig',
            name='is_testing',
            field=_IS_TESTING_FIELD,
        ),
    ]

View File

@@ -828,6 +828,13 @@ class AIModelConfig(models.Model):
help_text="basic / quality / premium - for image models"
)
# Testing vs Live model designation
is_testing = models.BooleanField(
default=False,
db_index=True,
help_text="Testing model (cheap, for testing only). Only one per model_type can be is_testing=True."
)
# Image Size Configuration (for image models)
landscape_size = models.CharField(
max_length=20,
@@ -892,12 +899,18 @@ class AIModelConfig(models.Model):
return self.display_name
def save(self, *args, **kwargs):
    """Ensure only one is_default and one is_testing per model_type.

    Saving a row with either flag set clears that flag on every other row
    of the same model_type before persisting.
    """
    if self.is_default:
        AIModelConfig.objects.filter(
            model_type=self.model_type,
            is_default=True
        ).exclude(pk=self.pk).update(is_default=False)
    if self.is_testing:
        # Match the is_default branch above: clear the flag on ALL other
        # rows, not only active ones. Previously inactive rows kept
        # is_testing=True, so reactivating one could leave two testing
        # models for the same model_type despite the documented invariant.
        AIModelConfig.objects.filter(
            model_type=self.model_type,
            is_testing=True
        ).exclude(pk=self.pk).update(is_testing=False)
    super().save(*args, **kwargs)
@classmethod
@@ -910,6 +923,25 @@ class AIModelConfig(models.Model):
"""Get the default image generation model"""
return cls.objects.filter(model_type='image', is_default=True, is_active=True).first()
@classmethod
def get_testing_model(cls, model_type: str):
    """Return the active testing (cheap) model for *model_type*, or None."""
    candidates = cls.objects.filter(
        model_type=model_type,
        is_active=True,
        is_testing=True,
    )
    return candidates.first()
@classmethod
def get_live_model(cls, model_type: str):
    """Return the live (production) model for *model_type*: the active
    default that is not flagged as a testing model, or None."""
    candidates = cls.objects.filter(
        model_type=model_type,
        is_active=True,
        is_default=True,
        is_testing=False,
    )
    return candidates.first()
@classmethod
def get_image_models_by_tier(cls):
"""Get all active image models grouped by quality tier"""
@@ -1044,3 +1076,121 @@ class WebhookEvent(models.Model):
self.error_message = error_message
self.retry_count += 1
self.save(update_fields=['error_message', 'retry_count'])
class SiteAIBudgetAllocation(AccountBaseModel):
    """
    Site-level AI budget allocation by function.

    Allows configuring what percentage of the site's credit budget
    can be used for each AI function. This provides fine-grained
    control over credit consumption during automation runs.

    Example: 40% content, 30% images, 15% clustering, 10% ideas, 5% prompts.

    When max_credits_per_run is set in AutomationConfig:
    - Each function can only use up to its allocated % of that budget
    - Prevents any single function from consuming all credits
    """
    AI_FUNCTION_CHOICES = [
        ('clustering', 'Keyword Clustering (Stage 1)'),
        ('idea_generation', 'Ideas Generation (Stage 2)'),
        ('content_generation', 'Content Generation (Stage 4)'),
        ('image_prompt', 'Image Prompt Extraction (Stage 5)'),
        ('image_generation', 'Image Generation (Stage 6)'),
    ]

    # Single source of truth for default percentages (sums to 100).
    # Used both when seeding a new site and as the fallback when no
    # allocation row exists yet (previously duplicated in two methods).
    DEFAULT_ALLOCATIONS = [
        ('clustering', 15),
        ('idea_generation', 10),
        ('content_generation', 40),
        ('image_prompt', 5),
        ('image_generation', 30),
    ]

    # Owning site; deleting the site cascades to its allocations.
    site = models.ForeignKey(
        'igny8_core_auth.Site',
        on_delete=models.CASCADE,
        related_name='ai_budget_allocations',
        help_text="Site this allocation belongs to"
    )
    ai_function = models.CharField(
        max_length=50,
        choices=AI_FUNCTION_CHOICES,
        help_text="AI function to allocate budget for"
    )
    # NOTE(review): help text promises 0-100 but only the lower bound is
    # validated here; the 0-100 range is enforced by the API layer. Adding
    # MaxValueValidator(100) would require a new migration — confirm.
    allocation_percentage = models.PositiveIntegerField(
        default=20,
        validators=[MinValueValidator(0)],
        help_text="Percentage of credit budget allocated to this function (0-100)"
    )
    is_enabled = models.BooleanField(
        default=True,
        help_text="Whether this function is enabled for automation"
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_site_ai_budget_allocations'
        verbose_name = 'Site AI Budget Allocation'
        verbose_name_plural = 'Site AI Budget Allocations'
        unique_together = [['site', 'ai_function']]
        ordering = ['site', 'ai_function']
        indexes = [
            models.Index(fields=['site', 'is_enabled']),
            models.Index(fields=['account', 'site']),
        ]

    def __str__(self):
        return f"{self.site.name} - {self.get_ai_function_display()}: {self.allocation_percentage}%"

    @classmethod
    def get_or_create_defaults_for_site(cls, site, account):
        """
        Get or create default allocations for a site.

        Seeds one row per AI function using DEFAULT_ALLOCATIONS; existing
        rows are left untouched. Returns the list of allocation instances
        in DEFAULT_ALLOCATIONS order.
        """
        allocations = []
        for ai_function, percentage in cls.DEFAULT_ALLOCATIONS:
            allocation, _ = cls.objects.get_or_create(
                account=account,
                site=site,
                ai_function=ai_function,
                defaults={
                    'allocation_percentage': percentage,
                    'is_enabled': True,
                }
            )
            allocations.append(allocation)
        return allocations

    @classmethod
    def get_allocation_for_function(cls, site, ai_function) -> int:
        """
        Get allocation percentage for a specific AI function.

        Returns 0 if the stored allocation is disabled; falls back to the
        DEFAULT_ALLOCATIONS value (or 20 for unknown functions) when no
        row exists for this site/function pair.
        """
        try:
            allocation = cls.objects.get(site=site, ai_function=ai_function)
            if allocation.is_enabled:
                return allocation.allocation_percentage
            return 0
        except cls.DoesNotExist:
            # No explicit allocation yet — use the shared defaults.
            return dict(cls.DEFAULT_ALLOCATIONS).get(ai_function, 20)

View File

@@ -0,0 +1,53 @@
# Generated by Django 5.2.10 on 2026-01-17 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema shim for the publishing-settings simplification.

    Adds the legacy scheduling fields back as DEPRECATED columns (defaults
    only, no validators) so existing rows keep working while code migrates
    to the time_slots-only model; the limit fields are relaxed to plain
    defaults because their values are now derived from days x slots.
    """

    dependencies = [
        ('integration', '0003_add_publishing_settings'),
    ]

    operations = [
        # Deprecated compatibility columns — not read by the new scheduler.
        migrations.AddField(
            model_name='publishingsettings',
            name='queue_limit',
            field=models.PositiveIntegerField(default=100, help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='scheduling_mode',
            field=models.CharField(default='time_slots', help_text='DEPRECATED - always uses time_slots mode', max_length=20),
        ),
        # NOTE(review): TimeField defaults given as 'HH:MM' strings rather
        # than datetime.time objects — confirm this serializes as intended.
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_end_time',
            field=models.TimeField(default='18:00', help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_interval_minutes',
            field=models.PositiveIntegerField(default=30, help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_start_time',
            field=models.TimeField(default='09:00', help_text='DEPRECATED - not used'),
        ),
        # Limits demoted to deprecated: capacity now derives from time_slots.
        migrations.AlterField(
            model_name='publishingsettings',
            name='daily_publish_limit',
            field=models.PositiveIntegerField(default=3, help_text='DEPRECATED - derived from time_slots'),
        ),
        migrations.AlterField(
            model_name='publishingsettings',
            name='monthly_publish_limit',
            field=models.PositiveIntegerField(default=50, help_text='DEPRECATED - not used'),
        ),
        migrations.AlterField(
            model_name='publishingsettings',
            name='weekly_publish_limit',
            field=models.PositiveIntegerField(default=15, help_text='DEPRECATED - derived from days × slots'),
        ),
    ]

View File

@@ -247,8 +247,16 @@ class SyncEvent(AccountBaseModel):
class PublishingSettings(AccountBaseModel):
"""
Site-level publishing configuration settings.
Controls automatic approval, publishing limits, and scheduling.
Site-level publishing SCHEDULE configuration (SIMPLIFIED).
Controls automatic approval, publishing, and time-slot based scheduling.
REMOVED (per settings consolidation plan):
- scheduling_mode (only time_slots needed)
- daily_publish_limit (derived: len(time_slots))
- weekly_publish_limit (derived: len(time_slots) × len(publish_days))
- monthly_publish_limit (not needed)
- stagger_* fields (not needed)
- queue_limit (not needed)
"""
DEFAULT_PUBLISH_DAYS = ['mon', 'tue', 'wed', 'thu', 'fri']
@@ -273,26 +281,7 @@ class PublishingSettings(AccountBaseModel):
help_text="Automatically publish approved content to the external site"
)
# Publishing limits
daily_publish_limit = models.PositiveIntegerField(
default=3,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per day"
)
weekly_publish_limit = models.PositiveIntegerField(
default=15,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per week"
)
monthly_publish_limit = models.PositiveIntegerField(
default=50,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per month"
)
# Publishing schedule
# Publishing schedule - Days + Time Slots only (SIMPLIFIED)
publish_days = models.JSONField(
default=list,
help_text="Days of the week to publish (mon, tue, wed, thu, fri, sat, sun)"
@@ -303,6 +292,21 @@ class PublishingSettings(AccountBaseModel):
help_text="Times of day to publish (HH:MM format, e.g., ['09:00', '14:00', '18:00'])"
)
# DEPRECATED FIELDS - kept for backwards compatibility during migration
# These will be removed in a future migration
scheduling_mode = models.CharField(
max_length=20,
default='time_slots',
help_text="DEPRECATED - always uses time_slots mode"
)
daily_publish_limit = models.PositiveIntegerField(default=3, help_text="DEPRECATED - derived from time_slots")
weekly_publish_limit = models.PositiveIntegerField(default=15, help_text="DEPRECATED - derived from days × slots")
monthly_publish_limit = models.PositiveIntegerField(default=50, help_text="DEPRECATED - not used")
stagger_start_time = models.TimeField(default='09:00', help_text="DEPRECATED - not used")
stagger_end_time = models.TimeField(default='18:00', help_text="DEPRECATED - not used")
stagger_interval_minutes = models.PositiveIntegerField(default=30, help_text="DEPRECATED - not used")
queue_limit = models.PositiveIntegerField(default=100, help_text="DEPRECATED - not used")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@@ -323,6 +327,22 @@ class PublishingSettings(AccountBaseModel):
self.publish_time_slots = self.DEFAULT_TIME_SLOTS
super().save(*args, **kwargs)
# Calculated capacity properties (read-only, derived from days × slots)
@property
def daily_capacity(self) -> int:
    """Daily publishing capacity: one article per configured time slot."""
    slots = self.publish_time_slots
    if not slots:
        return 0
    return len(slots)
@property
def weekly_capacity(self) -> int:
    """Weekly publishing capacity: daily capacity times active publish days."""
    days = self.publish_days
    if not days:
        return 0
    return self.daily_capacity * len(days)
@property
def monthly_capacity(self) -> int:
    """Approximate monthly capacity (weekly capacity x ~4.3 weeks/month)."""
    per_week = self.weekly_capacity
    return int(per_week * 4.3)
@classmethod
def get_or_create_for_site(cls, site):
"""Get or create publishing settings for a site with defaults"""
@@ -332,9 +352,6 @@ class PublishingSettings(AccountBaseModel):
'account': site.account,
'auto_approval_enabled': True,
'auto_publish_enabled': True,
'daily_publish_limit': 3,
'weekly_publish_limit': 15,
'monthly_publish_limit': 50,
'publish_days': cls.DEFAULT_PUBLISH_DAYS,
'publish_time_slots': cls.DEFAULT_TIME_SLOTS,
}

View File

@@ -839,6 +839,7 @@ class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
'provider_badge',
'credit_display',
'quality_tier',
'is_testing_icon',
'is_active_icon',
'is_default_icon',
'updated_at',
@@ -848,6 +849,7 @@ class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
'model_type',
'provider',
'quality_tier',
'is_testing',
'is_active',
'is_default',
]
@@ -884,7 +886,8 @@ class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
'classes': ('collapse',)
}),
('Status', {
'fields': ('is_active', 'is_default'),
'fields': ('is_active', 'is_default', 'is_testing'),
'description': 'is_testing: Mark as cheap testing model (one per model_type)'
}),
('Timestamps', {
'fields': ('created_at', 'updated_at'),
@@ -969,8 +972,19 @@ class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
)
is_default_icon.short_description = 'Default'
def is_testing_icon(self, obj):
    """Admin list column: lightning bolt for testing models, green dot for live."""
    if not obj.is_testing:
        return format_html(
            '<span style="color: #2ecc71; font-size: 14px;" title="Live Model">●</span>'
        )
    return format_html(
        '<span style="color: #f39c12; font-size: 18px;" title="Testing Model (cheap, for testing)">⚡</span>'
    )
is_testing_icon.short_description = 'Testing/Live'
# Admin actions
actions = ['bulk_activate', 'bulk_deactivate', 'set_as_default']
actions = ['bulk_activate', 'bulk_deactivate', 'set_as_default', 'set_as_testing', 'unset_testing']
def bulk_activate(self, request, queryset):
"""Enable selected models"""
@@ -1005,3 +1019,34 @@ class AIModelConfigAdmin(SimpleHistoryAdmin, Igny8ModelAdmin):
messages.SUCCESS
)
set_as_default.short_description = 'Set as default model'
def set_as_testing(self, request, queryset):
    """Admin action: mark exactly one selected model as the testing model.

    Demotes any other active testing model of the same model_type, then
    flags the selection. Errors out unless exactly one row is selected.
    """
    if queryset.count() != 1:
        self.message_user(request, 'Select exactly one model.', messages.ERROR)
        return
    model = queryset.first()
    # Unset any existing testing model for this type
    # NOTE(review): model.save() appears to perform the same demotion when
    # is_testing is True — confirm whether this explicit update is needed.
    AIModelConfig.objects.filter(
        model_type=model.model_type,
        is_testing=True,
        is_active=True
    ).exclude(pk=model.pk).update(is_testing=False)
    model.is_testing = True
    model.save()
    self.message_user(
        request,
        f'{model.model_name} is now the TESTING {model.get_model_type_display()} model.',
        messages.SUCCESS
    )
set_as_testing.short_description = 'Set as testing model (cheap, for testing)'
def unset_testing(self, request, queryset):
    """Admin action: clear the testing flag on every selected model."""
    cleared = queryset.update(is_testing=False)
    self.message_user(request, f'{cleared} model(s) unmarked as testing.', messages.SUCCESS)
unset_testing.short_description = 'Unset testing flag'

View File

@@ -0,0 +1,47 @@
# Generated by Django 5.2.10 on 2026-01-17 14:37
import django.core.validators
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('billing', '0034_backfill_credit_usage_log_site'),
('igny8_core_auth', '0031_drop_all_blueprint_tables'),
]
operations = [
migrations.AddField(
model_name='aimodelconfig',
name='is_testing',
field=models.BooleanField(db_index=True, default=False, help_text='Testing model (cheap, for testing only). Only one per model_type can be is_testing=True.'),
),
migrations.AddField(
model_name='historicalaimodelconfig',
name='is_testing',
field=models.BooleanField(db_index=True, default=False, help_text='Testing model (cheap, for testing only). Only one per model_type can be is_testing=True.'),
),
migrations.CreateModel(
name='SiteAIBudgetAllocation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ai_function', models.CharField(choices=[('clustering', 'Keyword Clustering (Stage 1)'), ('idea_generation', 'Ideas Generation (Stage 2)'), ('content_generation', 'Content Generation (Stage 4)'), ('image_prompt', 'Image Prompt Extraction (Stage 5)'), ('image_generation', 'Image Generation (Stage 6)')], help_text='AI function to allocate budget for', max_length=50)),
('allocation_percentage', models.PositiveIntegerField(default=20, help_text='Percentage of credit budget allocated to this function (0-100)', validators=[django.core.validators.MinValueValidator(0)])),
('is_enabled', models.BooleanField(default=True, help_text='Whether this function is enabled for automation')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('account', models.ForeignKey(db_column='tenant_id', on_delete=django.db.models.deletion.CASCADE, related_name='%(class)s_set', to='igny8_core_auth.account')),
('site', models.ForeignKey(help_text='Site this allocation belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='ai_budget_allocations', to='igny8_core_auth.site')),
],
options={
'verbose_name': 'Site AI Budget Allocation',
'verbose_name_plural': 'Site AI Budget Allocations',
'db_table': 'igny8_site_ai_budget_allocations',
'ordering': ['site', 'ai_function'],
'indexes': [models.Index(fields=['site', 'is_enabled'], name='igny8_site__site_id_36b0d0_idx'), models.Index(fields=['account', 'site'], name='igny8_site__tenant__853b16_idx')],
'unique_together': {('site', 'ai_function')},
},
),
]

View File

@@ -8,7 +8,8 @@ from .views import (
CreditUsageViewSet,
CreditTransactionViewSet,
BillingOverviewViewSet,
AdminBillingViewSet
AdminBillingViewSet,
SiteAIBudgetAllocationViewSet
)
router = DefaultRouter()
@@ -31,5 +32,7 @@ urlpatterns = [
path('admin/billing/stats/', AdminBillingViewSet.as_view({'get': 'stats'}), name='admin-billing-stats'),
path('admin/users/', AdminBillingViewSet.as_view({'get': 'list_users'}), name='admin-users-list'),
path('admin/credit-costs/', AdminBillingViewSet.as_view({'get': 'credit_costs'}), name='admin-credit-costs'),
# Site AI budget allocation
path('sites/<int:site_id>/ai-budget/', SiteAIBudgetAllocationViewSet.as_view({'get': 'list', 'post': 'create'}), name='site-ai-budget'),
]

View File

@@ -840,3 +840,177 @@ class AIModelConfigViewSet(viewsets.ReadOnlyModelViewSet):
status_code=status.HTTP_404_NOT_FOUND
)
# ==============================================================================
# Site AI Budget Allocation ViewSet
# ==============================================================================
from rest_framework import serializers as drf_serializers
class SiteAIBudgetAllocationSerializer(drf_serializers.Serializer):
    """Serializes one AI budget allocation entry (model instance or dict)."""
    id = drf_serializers.IntegerField(read_only=True)
    ai_function = drf_serializers.CharField()
    ai_function_display = drf_serializers.SerializerMethodField()
    allocation_percentage = drf_serializers.IntegerField(min_value=0, max_value=100)
    is_enabled = drf_serializers.BooleanField()

    def get_ai_function_display(self, obj):
        """Resolve the human-readable label for the allocation's function."""
        labels = {
            'clustering': 'Keyword Clustering (Stage 1)',
            'idea_generation': 'Ideas Generation (Stage 2)',
            'content_generation': 'Content Generation (Stage 4)',
            'image_prompt': 'Image Prompt Extraction (Stage 5)',
            'image_generation': 'Image Generation (Stage 6)',
        }
        if not hasattr(obj, 'ai_function'):
            # Plain-dict input: unknown functions render as an empty string.
            return labels.get(obj.get('ai_function', ''), '')
        # Model instance: unknown functions fall back to the raw slug.
        return labels.get(obj.ai_function, obj.ai_function)
@extend_schema_view(
    list=extend_schema(tags=['Billing'], summary='Get AI budget allocations for a site'),
    create=extend_schema(tags=['Billing'], summary='Update AI budget allocations for a site'),
)
class SiteAIBudgetAllocationViewSet(viewsets.ViewSet):
    """
    ViewSet for managing Site AI Budget Allocations.

    GET  /api/v1/billing/sites/{site_id}/ai-budget/
    POST /api/v1/billing/sites/{site_id}/ai-budget/

    Allows configuring what percentage of the site's credit budget
    can be used for each AI function during automation runs.
    """
    permission_classes = [IsAuthenticatedAndActive, HasTenantAccess]
    authentication_classes = [JWTAuthentication]
    throttle_scope = 'billing'
    throttle_classes = [DebugScopedRateThrottle]

    # Allowed function slugs — unknown entries in a POST body are skipped.
    VALID_FUNCTIONS = ('clustering', 'idea_generation', 'content_generation', 'image_prompt', 'image_generation')

    def _get_site(self, site_id, request):
        """Resolve the site by id, enforcing tenant ownership.

        Returns None (never raises) on missing site, bad id, or when the
        request's account does not own the site.
        """
        from igny8_core.auth.models import Site
        try:
            site = Site.objects.get(id=int(site_id))
            account = getattr(request, 'account', None)
            if account and site.account != account:
                return None
            return site
        except (Site.DoesNotExist, ValueError, TypeError):
            return None

    def list(self, request, site_id=None):
        """
        Get AI budget allocations for a site.

        Creates default allocations if they don't exist, and reports the
        enabled total so the client can show whether the split is valid.
        """
        from igny8_core.business.billing.models import SiteAIBudgetAllocation
        site = self._get_site(site_id, request)
        if not site:
            return error_response(
                message='Site not found or access denied',
                errors=None,
                status_code=status.HTTP_404_NOT_FOUND
            )
        account = getattr(request, 'account', None) or site.account
        # Get or create default allocations
        allocations = SiteAIBudgetAllocation.get_or_create_defaults_for_site(site, account)
        # Only enabled functions count toward the budget total.
        total_percentage = sum(a.allocation_percentage for a in allocations if a.is_enabled)
        serializer = SiteAIBudgetAllocationSerializer(allocations, many=True)
        return success_response(
            data={
                'allocations': serializer.data,
                'total_percentage': total_percentage,
                'is_valid': total_percentage <= 100,
            },
            message='AI budget allocations retrieved'
        )

    def create(self, request, site_id=None):
        """
        Update AI budget allocations for a site.

        Body:
            {
                "allocations": [
                    {"ai_function": "clustering", "allocation_percentage": 15, "is_enabled": true},
                    {"ai_function": "idea_generation", "allocation_percentage": 10, "is_enabled": true},
                    {"ai_function": "content_generation", "allocation_percentage": 40, "is_enabled": true},
                    {"ai_function": "image_prompt", "allocation_percentage": 5, "is_enabled": true},
                    {"ai_function": "image_generation", "allocation_percentage": 30, "is_enabled": true}
                ]
            }

        Returns 400 when no allocations are given, when any percentage is
        not an integer in [0, 100], or when the enabled total exceeds 100%.
        """
        from igny8_core.business.billing.models import SiteAIBudgetAllocation
        site = self._get_site(site_id, request)
        if not site:
            return error_response(
                message='Site not found or access denied',
                errors=None,
                status_code=status.HTTP_404_NOT_FOUND
            )
        account = getattr(request, 'account', None) or site.account
        allocations_data = request.data.get('allocations', [])
        if not allocations_data:
            return error_response(
                message='No allocations provided',
                errors={'allocations': ['This field is required']},
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # FIX: validate each percentage up front. The model only enforces a
        # lower bound, and a non-numeric value would previously raise a
        # TypeError (HTTP 500) in the sum() below.
        for alloc_data in allocations_data:
            pct = alloc_data.get('allocation_percentage', 20)
            if isinstance(pct, bool) or not isinstance(pct, int) or not (0 <= pct <= 100):
                return error_response(
                    message='Invalid allocation percentage',
                    errors={'allocation_percentage': ['Must be an integer between 0 and 100']},
                    status_code=status.HTTP_400_BAD_REQUEST
                )
        # Validate total percentage (disabled entries are excluded).
        total_percentage = sum(
            a.get('allocation_percentage', 0)
            for a in allocations_data
            if a.get('is_enabled', True)
        )
        if total_percentage > 100:
            return error_response(
                message='Total allocation exceeds 100%',
                errors={'total_percentage': [f'Total is {total_percentage}%, must be <= 100%']},
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # Update or create allocations; unknown function slugs are skipped.
        updated = []
        for alloc_data in allocations_data:
            ai_function = alloc_data.get('ai_function')
            if ai_function not in self.VALID_FUNCTIONS:
                continue
            allocation, _ = SiteAIBudgetAllocation.objects.update_or_create(
                account=account,
                site=site,
                ai_function=ai_function,
                defaults={
                    'allocation_percentage': alloc_data.get('allocation_percentage', 20),
                    'is_enabled': alloc_data.get('is_enabled', True),
                }
            )
            updated.append(allocation)
        serializer = SiteAIBudgetAllocationSerializer(updated, many=True)
        return success_response(
            data={
                'allocations': serializer.data,
                'total_percentage': total_percentage,
            },
            message='AI budget allocations updated successfully'
        )

View File

@@ -10,6 +10,7 @@ from igny8_core.modules.integration.webhooks import (
wordpress_status_webhook,
wordpress_metadata_webhook,
)
from igny8_core.api.unified_settings import UnifiedSiteSettingsViewSet
router = DefaultRouter()
router.register(r'integrations', IntegrationViewSet, basename='integration')
@@ -21,12 +22,21 @@ publishing_settings_viewset = PublishingSettingsViewSet.as_view({
'patch': 'partial_update',
})
# Create Unified Settings ViewSet instance
unified_settings_viewset = UnifiedSiteSettingsViewSet.as_view({
'get': 'retrieve',
'put': 'update',
})
urlpatterns = [
path('', include(router.urls)),
# Site-level publishing settings
path('sites/<int:site_id>/publishing-settings/', publishing_settings_viewset, name='publishing-settings'),
# Unified site settings (AI & Automation consolidated)
path('sites/<int:site_id>/unified-settings/', unified_settings_viewset, name='unified-settings'),
# Webhook endpoints
path('webhooks/wordpress/status/', wordpress_status_webhook, name='wordpress-status-webhook'),
path('webhooks/wordpress/metadata/', wordpress_metadata_webhook, name='wordpress-metadata-webhook'),

View File

@@ -966,11 +966,16 @@ class PublishingSettingsSerializer(serializers.ModelSerializer):
'site',
'auto_approval_enabled',
'auto_publish_enabled',
'scheduling_mode',
'daily_publish_limit',
'weekly_publish_limit',
'monthly_publish_limit',
'publish_days',
'publish_time_slots',
'stagger_start_time',
'stagger_end_time',
'stagger_interval_minutes',
'queue_limit',
'created_at',
'updated_at',
]

View File

@@ -706,34 +706,27 @@ UNFOLD = {
{"title": "Account Payment Methods", "icon": "account_balance_wallet", "link": lambda request: "/admin/billing/accountpaymentmethod/"},
],
},
# Credits
# Credits & AI Usage (CONSOLIDATED)
{
"title": "Credits",
"title": "Credits & AI Usage",
"icon": "toll",
"collapsible": True,
"items": [
{"title": "Transactions", "icon": "swap_horiz", "link": lambda request: "/admin/billing/credittransaction/"},
{"title": "Usage Log", "icon": "history", "link": lambda request: "/admin/billing/creditusagelog/"},
{"title": "Credit Transactions", "icon": "swap_horiz", "link": lambda request: "/admin/billing/credittransaction/"},
{"title": "Credit Usage Log", "icon": "history", "link": lambda request: "/admin/billing/creditusagelog/"},
{"title": "AI Task Logs", "icon": "smart_toy", "link": lambda request: "/admin/ai/aitasklog/"},
{"title": "Plan Limits", "icon": "speed", "link": lambda request: "/admin/billing/planlimitusage/"},
],
},
# Planning
# Content Pipeline (RENAMED from Planning + Writing)
{
"title": "Planning",
"icon": "map",
"title": "Content Pipeline",
"icon": "edit_note",
"collapsible": True,
"items": [
{"title": "Keywords", "icon": "key", "link": lambda request: "/admin/planner/keywords/"},
{"title": "Clusters", "icon": "hub", "link": lambda request: "/admin/planner/clusters/"},
{"title": "Content Ideas", "icon": "lightbulb", "link": lambda request: "/admin/planner/contentideas/"},
],
},
# Writing
{
"title": "Writing",
"icon": "edit_note",
"collapsible": True,
"items": [
{"title": "Tasks", "icon": "task_alt", "link": lambda request: "/admin/writer/tasks/"},
{"title": "Content", "icon": "description", "link": lambda request: "/admin/writer/content/"},
{"title": "Images", "icon": "image", "link": lambda request: "/admin/writer/images/"},
@@ -758,10 +751,31 @@ UNFOLD = {
"icon": "publish",
"collapsible": True,
"items": [
{"title": "Integrations", "icon": "extension", "link": lambda request: "/admin/integration/siteintegration/"},
{"title": "Publishing Records", "icon": "cloud_upload", "link": lambda request: "/admin/publishing/publishingrecord/"},
{"title": "Deployments", "icon": "rocket", "link": lambda request: "/admin/publishing/deploymentrecord/"},
{"title": "Sync Events", "icon": "sync", "link": lambda request: "/admin/integration/syncevent/"},
{"title": "Publishing Settings", "icon": "schedule", "link": lambda request: "/admin/integration/publishingsettings/"},
],
},
# Automation (NEW SECTION)
{
"title": "Automation",
"icon": "settings_suggest",
"collapsible": True,
"items": [
{"title": "Automation Configs", "icon": "tune", "link": lambda request: "/admin/automation/automationconfig/"},
{"title": "Automation Runs", "icon": "play_circle", "link": lambda request: "/admin/automation/automationrun/"},
],
},
# AI Configuration (SIMPLIFIED)
{
"title": "AI Configuration",
"icon": "psychology",
"collapsible": True,
"items": [
{"title": "AI Models (Testing/Live)", "icon": "model_training", "link": lambda request: "/admin/billing/aimodelconfig/"},
{"title": "System AI Settings", "icon": "tune", "link": lambda request: "/admin/system/systemaisettings/"},
{"title": "Integration Providers", "icon": "key", "link": lambda request: "/admin/system/integrationprovider/"},
],
},
# Plugin Management
@@ -776,20 +790,7 @@ UNFOLD = {
{"title": "Downloads", "icon": "download", "link": lambda request: "/admin/plugins/plugindownload/"},
],
},
# AI Configuration
{
"title": "AI Configuration",
"icon": "psychology",
"collapsible": True,
"items": [
{"title": "System AI Settings", "icon": "tune", "link": lambda request: "/admin/system/systemaisettings/"},
{"title": "AI Models", "icon": "model_training", "link": lambda request: "/admin/billing/aimodelconfig/"},
{"title": "Credit Costs by Function", "icon": "calculate", "link": lambda request: "/admin/billing/creditcostconfig/"},
{"title": "Billing Configuration", "icon": "payments", "link": lambda request: "/admin/billing/billingconfiguration/"},
{"title": "AI Task Logs", "icon": "history", "link": lambda request: "/admin/ai/aitasklog/"},
],
},
# Email Settings (NEW)
# Email Settings
{
"title": "Email Settings",
"icon": "email",
@@ -798,33 +799,29 @@ UNFOLD = {
{"title": "Email Configuration", "icon": "settings", "link": lambda request: "/admin/system/emailsettings/"},
{"title": "Email Templates", "icon": "article", "link": lambda request: "/admin/system/emailtemplate/"},
{"title": "Email Logs", "icon": "history", "link": lambda request: "/admin/system/emaillog/"},
{"title": "Resend Provider", "icon": "key", "link": lambda request: "/admin/system/integrationprovider/resend/change/"},
],
},
# Global Settings
# Global Settings (SIMPLIFIED)
{
"title": "Global Settings",
"icon": "settings",
"collapsible": True,
"items": [
{"title": "Integration Providers", "icon": "key", "link": lambda request: "/admin/system/integrationprovider/"},
{"title": "Global AI Prompts", "icon": "chat", "link": lambda request: "/admin/system/globalaiprompt/"},
{"title": "Automation Configs", "icon": "settings_suggest", "link": lambda request: "/admin/automation/automationconfig/"},
{"title": "Automation Runs", "icon": "play_circle", "link": lambda request: "/admin/automation/automationrun/"},
{"title": "Module Settings", "icon": "view_module", "link": lambda request: "/admin/system/globalmodulesettings/"},
{"title": "Author Profiles", "icon": "person_outline", "link": lambda request: "/admin/system/globalauthorprofile/"},
{"title": "Strategies", "icon": "strategy", "link": lambda request: "/admin/system/globalstrategy/"},
],
},
# System Configuration
# Account & User Settings (CONSOLIDATED)
{
"title": "System Configuration",
"title": "Account & User Settings",
"icon": "tune",
"collapsible": True,
"items": [
{"title": "Account Settings (All Settings)", "icon": "account_circle", "link": lambda request: "/admin/system/accountsettings/"},
{"title": "Account Settings", "icon": "account_circle", "link": lambda request: "/admin/system/accountsettings/"},
{"title": "User Settings", "icon": "person_search", "link": lambda request: "/admin/system/usersettings/"},
{"title": "Module Settings", "icon": "view_module", "link": lambda request: "/admin/system/modulesettings/"},
{"title": "Module Enable Settings", "icon": "view_module", "link": lambda request: "/admin/system/modulesettings/"},
],
},
# Resources

View File

@@ -68,7 +68,23 @@ def schedule_approved_content() -> Dict[str, Any]:
results['sites_processed'] += 1
continue
# Calculate available slots
# Handle immediate mode - schedule for now (will be picked up by process_scheduled_publications)
if settings.scheduling_mode == 'immediate':
for content in pending_content:
content.scheduled_publish_at = timezone.now()
content.site_status = 'scheduled'
content.site_status_updated_at = timezone.now()
content.save(update_fields=['scheduled_publish_at', 'site_status', 'site_status_updated_at'])
site_result['scheduled_count'] += 1
results['content_scheduled'] += 1
logger.info(f"Scheduled content {content.id} for immediate publishing")
results['details'].append(site_result)
results['sites_processed'] += 1
continue
# Calculate available slots for time_slots and stagger modes
available_slots = _calculate_available_slots(settings, site)
# Assign slots to content
@@ -110,6 +126,11 @@ def _calculate_available_slots(settings: 'PublishingSettings', site: 'Site') ->
"""
Calculate available publishing time slots based on settings and limits.
Supports three scheduling modes:
- time_slots: Publish at specific configured times each day
- stagger: Spread evenly throughout publish hours
- immediate: No scheduling - return immediately (handled separately)
Args:
settings: PublishingSettings instance
site: Site instance
@@ -120,13 +141,13 @@ def _calculate_available_slots(settings: 'PublishingSettings', site: 'Site') ->
from igny8_core.business.content.models import Content
now = timezone.now()
slots = []
# Get configured days and times
# Immediate mode - return empty list (content published immediately in process_scheduled_publications)
if settings.scheduling_mode == 'immediate':
return []
# Common setup
publish_days = settings.publish_days or ['mon', 'tue', 'wed', 'thu', 'fri']
publish_times = settings.publish_time_slots or ['09:00', '14:00', '18:00']
# Day name mapping
day_map = {
'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3,
'fri': 4, 'sat': 5, 'sun': 6
@@ -134,22 +155,11 @@ def _calculate_available_slots(settings: 'PublishingSettings', site: 'Site') ->
allowed_days = [day_map.get(d.lower(), -1) for d in publish_days]
allowed_days = [d for d in allowed_days if d >= 0]
# Parse time slots
time_slots = []
for time_str in publish_times:
try:
hour, minute = map(int, time_str.split(':'))
time_slots.append((hour, minute))
except (ValueError, AttributeError):
continue
if not time_slots:
time_slots = [(9, 0), (14, 0), (18, 0)]
# Calculate limits
daily_limit = settings.daily_publish_limit
weekly_limit = settings.weekly_publish_limit
monthly_limit = settings.monthly_publish_limit
queue_limit = getattr(settings, 'queue_limit', 100) or 100
# Count existing scheduled/published content
today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
@@ -174,14 +184,53 @@ def _calculate_available_slots(settings: 'PublishingSettings', site: 'Site') ->
scheduled_publish_at__gte=month_start
).count()
# Generate slots for next 30 days
current_date = now.date()
slots_per_day = {} # Track slots used per day
# Route to appropriate slot generator
if settings.scheduling_mode == 'stagger':
return _generate_stagger_slots(
settings, site, now, allowed_days,
daily_limit, weekly_limit, monthly_limit, queue_limit,
daily_count, weekly_count, monthly_count
)
else:
# Default to time_slots mode
return _generate_time_slot_slots(
settings, site, now, allowed_days,
daily_limit, weekly_limit, monthly_limit, queue_limit,
daily_count, weekly_count, monthly_count
)
def _generate_time_slot_slots(
    settings, site, now, allowed_days,
    daily_limit, weekly_limit, monthly_limit, queue_limit,
    daily_count, weekly_count, monthly_count
) -> list:
    """
    Generate publish slots at fixed times of day (original mode).

    For each allowed weekday over the next 90 days, one candidate slot is
    produced per configured "HH:MM" entry in ``settings.publish_time_slots``
    (defaulting to 09:00 / 14:00 / 18:00). Slots in the past, slots that
    collide with already-scheduled content, and slots that would exceed the
    daily/weekly/monthly limits are skipped. At most ``queue_limit`` slots
    are returned.

    NOTE(review): the interior of this function was mangled by a bad merge
    (leftover diff hunk markers); the per-slot limit/conflict checks below
    are reconstructed to mirror _generate_stagger_slots — confirm against
    the intended time-slot semantics.
    """
    from igny8_core.business.content.models import Content

    slots = []

    # Parse configured "HH:MM" strings; malformed entries are ignored.
    publish_times = settings.publish_time_slots or ['09:00', '14:00', '18:00']
    time_slots = []
    for time_str in publish_times:
        try:
            hour, minute = map(int, time_str.split(':'))
            time_slots.append((hour, minute))
        except (ValueError, AttributeError):
            continue
    if not time_slots:
        # All entries were malformed — fall back to the defaults.
        time_slots = [(9, 0), (14, 0), (18, 0)]

    current_date = now.date()
    slots_per_day = {}  # day ISO string -> slots generated for that day
    today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
    week_start = today_start - timedelta(days=now.weekday())

    for day_offset in range(90):  # Look 90 days ahead
        check_date = current_date + timedelta(days=day_offset)

        # Check if day is allowed
        if check_date.weekday() not in allowed_days:
            continue

        # Times already taken by scheduled content on this day.
        existing_times = set(
            Content.objects.filter(
                site=site,
                site_status='scheduled',
                scheduled_publish_at__date=check_date,
            ).values_list('scheduled_publish_at', flat=True)
        )

        day_key = check_date.isoformat()
        for hour, minute in time_slots:
            slot_time = timezone.make_aware(
                datetime.combine(
                    check_date,
                    datetime.min.time().replace(hour=hour, minute=minute),
                )
            )

            # Skip slots that are already in the past.
            if slot_time <= now:
                continue

            # Daily limit: no point scanning the rest of this day.
            slots_this_day = slots_per_day.get(day_key, 0)
            if daily_limit and (daily_count + slots_this_day) >= daily_limit:
                break

            # Weekly limit only applies to the current week.
            slot_week_start = slot_time - timedelta(days=slot_time.weekday())
            if slot_week_start.date() == week_start.date():
                scheduled_in_week = weekly_count + len([s for s in slots if s >= week_start])
                if weekly_limit and scheduled_in_week >= weekly_limit:
                    continue

            # Monthly limit only applies to the current month.
            if slot_time.month == now.month and slot_time.year == now.year:
                scheduled_in_month = monthly_count + len([s for s in slots if s.month == now.month])
                if monthly_limit and scheduled_in_month >= monthly_limit:
                    continue

            # Avoid exact collisions with already-scheduled content.
            if slot_time in existing_times:
                continue

            slots.append(slot_time)
            slots_per_day[day_key] = slots_per_day.get(day_key, 0) + 1

            # Respect queue limit
            if len(slots) >= queue_limit:
                return slots

    return slots
def _generate_stagger_slots(
    settings, site, now, allowed_days,
    daily_limit, weekly_limit, monthly_limit, queue_limit,
    daily_count, weekly_count, monthly_count
) -> list:
    """
    Generate slots spread evenly across each day's publishing window.

    Candidate times run from ``stagger_start_time`` to ``stagger_end_time``
    in ``stagger_interval_minutes`` steps (defaults 09:00–18:00 every 30
    minutes), skipping disallowed weekdays, times already taken by scheduled
    content, and any slot that would exceed the daily/weekly/monthly limits.
    At most ``queue_limit`` slots are returned, covering up to 90 days ahead.
    """
    from igny8_core.business.content.models import Content

    # Window bounds as (hour, minute), overridable via settings.
    window_start = (9, 0)
    window_end = (18, 0)
    if hasattr(settings, 'stagger_start_time') and settings.stagger_start_time:
        window_start = (settings.stagger_start_time.hour, settings.stagger_start_time.minute)
    if hasattr(settings, 'stagger_end_time') and settings.stagger_end_time:
        window_end = (settings.stagger_end_time.hour, settings.stagger_end_time.minute)

    step_minutes = getattr(settings, 'stagger_interval_minutes', 30) or 30
    step = timedelta(minutes=step_minutes)

    today = now.date()
    per_day_counts = {}  # day ISO string -> slots generated for that day
    midnight_today = now.replace(hour=0, minute=0, second=0, microsecond=0)
    week_start = midnight_today - timedelta(days=now.weekday())

    slots = []
    for offset in range(90):  # Look 90 days ahead
        day = today + timedelta(days=offset)
        if day.weekday() not in allowed_days:
            continue

        # Bounds of this day's publishing window.
        window_open = timezone.make_aware(
            datetime.combine(day, datetime.min.time().replace(hour=window_start[0], minute=window_start[1]))
        )
        window_close = timezone.make_aware(
            datetime.combine(day, datetime.min.time().replace(hour=window_end[0], minute=window_end[1]))
        )

        # Times already taken by scheduled content on this day.
        taken = set(
            Content.objects.filter(
                site=site,
                site_status='scheduled',
                scheduled_publish_at__date=day,
            ).values_list('scheduled_publish_at', flat=True)
        )

        candidate = window_open
        if day == today and now > window_open:
            # Jump to the first interval boundary strictly after "now".
            elapsed_minutes = (now - window_open).total_seconds() / 60
            steps_passed = int(elapsed_minutes / step_minutes) + 1
            candidate = window_open + timedelta(minutes=steps_passed * step_minutes)

        day_key = day.isoformat()
        while candidate <= window_close:
            # Daily cap reached: no point scanning the rest of this day.
            if daily_limit and (daily_count + per_day_counts.get(day_key, 0)) >= daily_limit:
                break

            # Weekly cap only applies to the current week.
            candidate_week = candidate - timedelta(days=candidate.weekday())
            if candidate_week.date() == week_start.date():
                in_week = weekly_count + sum(1 for s in slots if s >= week_start)
                if weekly_limit and in_week >= weekly_limit:
                    candidate += step
                    continue

            # Monthly cap only applies to the current month.
            if candidate.month == now.month and candidate.year == now.year:
                in_month = monthly_count + sum(1 for s in slots if s.month == now.month)
                if monthly_limit and in_month >= monthly_limit:
                    candidate += step
                    continue

            # Skip exact collisions with already-scheduled content.
            if candidate not in taken:
                slots.append(candidate)
                per_day_counts[day_key] = per_day_counts.get(day_key, 0) + 1

            candidate += step

            # Never hand back more than the queue allows.
            if len(slots) >= queue_limit:
                return slots

    return slots