AI Automation, scheduling and publishing front-end and back-end refactor

This commit is contained in:
IGNY8 VPS (Salman)
2026-01-17 15:52:46 +00:00
parent 0435a5cf70
commit d3b3e1c0d4
34 changed files with 4715 additions and 375 deletions

View File

@@ -0,0 +1,53 @@
# Generated by Django 5.2.10 on 2026-01-17 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-run item limits to AutomationConfig.

    One IntegerField per pipeline stage (keywords, clusters, ideas, tasks,
    content, images, approvals) plus a credit budget; all default to 0,
    which the application treats as "unlimited".
    """

    dependencies = [
        ('automation', '0007_add_stage_enabled_toggles'),
    ]

    operations = [
        migrations.AddField(
            model_name='automationconfig',
            name='max_approvals_per_run',
            field=models.IntegerField(default=0, help_text='Max content pieces to auto-approve in stage 7 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_clusters_per_run',
            field=models.IntegerField(default=0, help_text='Max clusters to process in stage 2 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_content_per_run',
            field=models.IntegerField(default=0, help_text='Max content pieces for image prompts in stage 5 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_credits_per_run',
            field=models.IntegerField(default=0, help_text='Max credits to use per run (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_ideas_per_run',
            field=models.IntegerField(default=0, help_text='Max ideas to process in stage 3 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_images_per_run',
            field=models.IntegerField(default=0, help_text='Max images to generate in stage 6 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_keywords_per_run',
            field=models.IntegerField(default=0, help_text='Max keywords to process in stage 1 (0=unlimited)'),
        ),
        migrations.AddField(
            model_name='automationconfig',
            name='max_tasks_per_run',
            field=models.IntegerField(default=0, help_text='Max tasks to process in stage 4 (0=unlimited)'),
        ),
    ]

View File

@@ -44,6 +44,19 @@ class AutomationConfig(models.Model):
within_stage_delay = models.IntegerField(default=3, help_text="Delay between batches within a stage (seconds)")
between_stage_delay = models.IntegerField(default=5, help_text="Delay between stage transitions (seconds)")
# Per-run item limits (0 = unlimited, processes all available)
# These prevent runaway automation and control resource usage
max_keywords_per_run = models.IntegerField(default=0, help_text="Max keywords to process in stage 1 (0=unlimited)")
max_clusters_per_run = models.IntegerField(default=0, help_text="Max clusters to process in stage 2 (0=unlimited)")
max_ideas_per_run = models.IntegerField(default=0, help_text="Max ideas to process in stage 3 (0=unlimited)")
max_tasks_per_run = models.IntegerField(default=0, help_text="Max tasks to process in stage 4 (0=unlimited)")
max_content_per_run = models.IntegerField(default=0, help_text="Max content pieces for image prompts in stage 5 (0=unlimited)")
max_images_per_run = models.IntegerField(default=0, help_text="Max images to generate in stage 6 (0=unlimited)")
max_approvals_per_run = models.IntegerField(default=0, help_text="Max content pieces to auto-approve in stage 7 (0=unlimited)")
# Credit budget limit per run (0 = use site's full credit balance)
max_credits_per_run = models.IntegerField(default=0, help_text="Max credits to use per run (0=unlimited)")
last_run_at = models.DateTimeField(null=True, blank=True)
next_run_at = models.DateTimeField(null=True, blank=True, help_text="Calculated based on frequency")

View File

@@ -63,7 +63,7 @@ class AutomationService:
def _check_should_stop(self) -> tuple[bool, str]:
"""
Check if automation should stop (paused or cancelled)
Check if automation should stop (paused, cancelled, or credit budget exceeded)
Returns:
(should_stop, reason)
@@ -79,6 +79,83 @@ class AutomationService:
elif self.run.status == 'cancelled':
return True, "cancelled"
# Check credit budget
budget_exceeded, budget_reason = self._check_credit_budget()
if budget_exceeded:
return True, f"credit_budget_exceeded: {budget_reason}"
return False, ""
def _get_per_run_limit(self, stage: int) -> int:
    """
    Get the per-run item limit for a stage from config.

    Args:
        stage: Stage number (1-7)

    Returns:
        Max items to process (0 = unlimited)
    """
    # Maps each pipeline stage to its AutomationConfig limit field.
    # Unknown stage numbers fall through to 0 (unlimited).
    limit_map = {
        1: self.config.max_keywords_per_run,
        2: self.config.max_clusters_per_run,
        3: self.config.max_ideas_per_run,
        4: self.config.max_tasks_per_run,
        5: self.config.max_content_per_run,
        6: self.config.max_images_per_run,
        7: self.config.max_approvals_per_run,
    }
    return limit_map.get(stage, 0)
def _apply_per_run_limit(self, queryset, stage: int, log_prefix: str = ""):
    """
    Apply per-run limit to queryset if configured.

    Args:
        queryset: Django queryset to limit
        stage: Stage number (1-7)
        log_prefix: Prefix for log messages

    Returns:
        Limited queryset (or list if limit applied)

    NOTE(review): the return type differs by branch — a plain list when the
    limit trims the set, otherwise the original queryset. Callers that rely
    on queryset-only APIs (e.g. .count(), further .filter()) must handle
    both cases — confirm all call sites do.
    """
    limit = self._get_per_run_limit(stage)
    if limit > 0:
        total = queryset.count()
        if total > limit:
            # Log the trim so operators can see why fewer items were
            # processed than were available.
            self.logger.log_stage_progress(
                self.run.run_id, self.account.id, self.site.id,
                stage, f"{log_prefix}Applying per-run limit: {limit} of {total} items (limit set in automation config)"
            )
            # Slicing + list() evaluates the queryset eagerly here.
            return list(queryset[:limit])
    return queryset
def _check_credit_budget(self) -> tuple[bool, str]:
    """
    Check if credit budget for this run has been exceeded.

    Returns:
        (exceeded, reason) - If exceeded is True, automation should stop
    """
    # Defensive: the service may be consulted before run/config are loaded.
    if not self.run or not self.config:
        return False, ""

    max_credits = self.config.max_credits_per_run
    if max_credits <= 0:  # 0 = unlimited
        return False, ""

    # NOTE(review): _get_credits_used is defined elsewhere in this class;
    # assumed to return credits consumed so far in this run — confirm.
    credits_used = self._get_credits_used()
    if credits_used >= max_credits:
        reason = f"Credit budget exhausted: {credits_used}/{max_credits} credits used"
        # Record the stop reason in the stage log before the run is halted.
        self.logger.log_stage_progress(
            self.run.run_id, self.account.id, self.site.id,
            self.run.current_stage, reason
        )
        return True, reason
    return False, ""
def start_automation(self, trigger_type: str = 'manual') -> str:
@@ -170,6 +247,19 @@ class AutomationService:
disabled=False
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_keywords.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} keywords"
)
# Get limited keyword IDs first, then filter queryset
limited_ids = list(pending_keywords.values_list('id', flat=True)[:per_run_limit])
pending_keywords = pending_keywords.filter(id__in=limited_ids)
total_count = pending_keywords.count()
# IMPORTANT: Group keywords by sector to avoid mixing sectors in clustering
@@ -480,6 +570,17 @@ class AutomationService:
disabled=False
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_clusters.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} clusters"
)
pending_clusters = pending_clusters[:per_run_limit]
total_count = pending_clusters.count()
# Log stage start
@@ -674,6 +775,17 @@ class AutomationService:
status='new'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_ideas.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} ideas"
)
pending_ideas = pending_ideas[:per_run_limit]
total_count = pending_ideas.count()
# Log stage start
@@ -837,6 +949,17 @@ class AutomationService:
status='queued'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_tasks.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} tasks"
)
pending_tasks = pending_tasks[:per_run_limit]
total_count = pending_tasks.count()
# Log stage start
@@ -1078,6 +1201,17 @@ class AutomationService:
images_count=0
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = content_without_images.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} content items"
)
content_without_images = content_without_images[:per_run_limit]
total_count = content_without_images.count()
# ADDED: Enhanced logging
@@ -1291,6 +1425,17 @@ class AutomationService:
status='pending'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = pending_images.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Processing {per_run_limit} of {total_available} images"
)
pending_images = pending_images[:per_run_limit]
total_count = pending_images.count()
# Log stage start
@@ -1538,6 +1683,17 @@ class AutomationService:
status='review'
)
# Apply per-run limit (0 = unlimited)
per_run_limit = self._get_per_run_limit(stage_number)
total_available = ready_for_review.count()
if per_run_limit > 0 and total_available > per_run_limit:
self.logger.log_stage_progress(
self.run.run_id, self.account.id, self.site.id,
stage_number, f"Per-run limit: Approving {per_run_limit} of {total_available} content items"
)
ready_for_review = ready_for_review[:per_run_limit]
total_count = ready_for_review.count()
# Log stage start

View File

@@ -49,9 +49,9 @@ def check_scheduled_automations():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - already ran today")
continue
# Check if already running
if AutomationRun.objects.filter(site=config.site, status='running').exists():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - already running")
# Check if already running OR paused (don't start new if existing in progress)
if AutomationRun.objects.filter(site=config.site, status__in=['running', 'paused']).exists():
logger.info(f"[AutomationTask] Skipping site {config.site.id} - automation in progress (running/paused)")
continue
logger.info(f"[AutomationTask] Starting scheduled automation for site {config.site.id}")
@@ -162,13 +162,50 @@ def run_automation_task(self, run_id: str):
@shared_task(name='automation.resume_automation_task', bind=True, max_retries=0)
def resume_automation_task(self, run_id: str):
"""
Resume paused automation run from current stage
Resume paused automation run from current stage.
CRITICAL FIXES:
- Verifies run status is 'running' before processing
- Reacquires lock in case it expired during long pause
- Checks pause/cancel status after each stage
- Releases lock on failure
"""
logger.info(f"[AutomationTask] Resuming automation run: {run_id}")
try:
from django.core.cache import cache
# Load run and verify status
run = AutomationRun.objects.get(run_id=run_id)
# CRITICAL FIX: Verify run is actually in 'running' status
# (status is set to 'running' by views.resume before calling this task)
if run.status != 'running':
logger.warning(f"[AutomationTask] Run {run_id} status is '{run.status}', not 'running'. Aborting resume.")
return
# CRITICAL FIX: Reacquire lock in case it expired during long pause (6hr timeout)
lock_key = f'automation_lock_{run.site.id}'
lock_acquired = cache.add(lock_key, run_id, timeout=21600) # 6 hours
if not lock_acquired:
# Lock exists - check if it's ours (from original run start)
existing_lock = cache.get(lock_key)
# If lock exists but isn't our run_id, another run may have started
if existing_lock and existing_lock != run_id and existing_lock != 'locked':
logger.warning(f"[AutomationTask] Lock held by different run ({existing_lock}). Aborting resume for {run_id}")
run.status = 'failed'
run.error_message = f'Lock acquired by another run ({existing_lock}) during pause'
run.completed_at = timezone.now()
run.save()
return
# Lock exists and is either 'locked' (our old format) or our run_id - proceed
logger.info(f"[AutomationTask] Existing lock found, proceeding with resume")
else:
# We acquired a new lock (old one expired)
logger.info(f"[AutomationTask] Reacquired lock after expiry for run {run_id}")
service = AutomationService.from_run_id(run_id)
run = service.run
config = service.config
# Continue from current stage
@@ -196,20 +233,35 @@ def resume_automation_task(self, run_id: str):
for stage in range(run.current_stage - 1, 7):
if stage_enabled[stage]:
stage_methods[stage]()
# CRITICAL FIX: Check for pause/cancel AFTER each stage (same as run_automation_task)
service.run.refresh_from_db()
if service.run.status in ['paused', 'cancelled']:
logger.info(f"[AutomationTask] Resumed automation {service.run.status} after stage {stage + 1}")
return
else:
logger.info(f"[AutomationTask] Stage {stage + 1} is disabled, skipping")
logger.info(f"[AutomationTask] Resumed automation run: {run_id}")
logger.info(f"[AutomationTask] Resumed automation completed: {run_id}")
except Exception as e:
logger.error(f"[AutomationTask] Failed to resume automation run {run_id}: {e}")
# Mark as failed
run = AutomationRun.objects.get(run_id=run_id)
run.status = 'failed'
run.error_message = str(e)
run.completed_at = timezone.now()
run.save()
# Mark as failed and release lock
try:
run = AutomationRun.objects.get(run_id=run_id)
run.status = 'failed'
run.error_message = str(e)
run.completed_at = timezone.now()
run.save()
# Release lock on failure
from django.core.cache import cache
cache.delete(f'automation_lock_{run.site.id}')
except Exception as cleanup_err:
logger.error(f"[AutomationTask] Failed to cleanup after resume failure: {cleanup_err}")
raise
# Alias for continue_automation_task (same as resume)

View File

@@ -77,6 +77,15 @@ class AutomationViewSet(viewsets.ViewSet):
'stage_6_batch_size': config.stage_6_batch_size,
'within_stage_delay': config.within_stage_delay,
'between_stage_delay': config.between_stage_delay,
# Per-run limits (0 = unlimited)
'max_keywords_per_run': config.max_keywords_per_run,
'max_clusters_per_run': config.max_clusters_per_run,
'max_ideas_per_run': config.max_ideas_per_run,
'max_tasks_per_run': config.max_tasks_per_run,
'max_content_per_run': config.max_content_per_run,
'max_images_per_run': config.max_images_per_run,
'max_approvals_per_run': config.max_approvals_per_run,
'max_credits_per_run': config.max_credits_per_run,
'last_run_at': config.last_run_at,
'next_run_at': config.next_run_at,
})
@@ -153,6 +162,18 @@ class AutomationViewSet(viewsets.ViewSet):
except (TypeError, ValueError):
pass
# Per-run limits (0 = unlimited)
for field in ['max_keywords_per_run', 'max_clusters_per_run', 'max_ideas_per_run',
'max_tasks_per_run', 'max_content_per_run', 'max_images_per_run',
'max_approvals_per_run', 'max_credits_per_run']:
if field in request.data:
try:
value = int(request.data[field])
if value >= 0: # Allow 0 (unlimited) or positive numbers
setattr(config, field, value)
except (TypeError, ValueError):
pass
config.save()
return Response({
@@ -175,6 +196,15 @@ class AutomationViewSet(viewsets.ViewSet):
'stage_6_batch_size': config.stage_6_batch_size,
'within_stage_delay': config.within_stage_delay,
'between_stage_delay': config.between_stage_delay,
# Per-run limits (0 = unlimited)
'max_keywords_per_run': config.max_keywords_per_run,
'max_clusters_per_run': config.max_clusters_per_run,
'max_ideas_per_run': config.max_ideas_per_run,
'max_tasks_per_run': config.max_tasks_per_run,
'max_content_per_run': config.max_content_per_run,
'max_images_per_run': config.max_images_per_run,
'max_approvals_per_run': config.max_approvals_per_run,
'max_credits_per_run': config.max_credits_per_run,
'last_run_at': config.last_run_at,
'next_run_at': config.next_run_at,
})
@@ -267,6 +297,17 @@ class AutomationViewSet(viewsets.ViewSet):
try:
service = AutomationService.from_run_id(run_id)
service.pause_automation()
# CRITICAL FIX: Log pause to automation log files
try:
service.logger.log_stage_progress(
service.run.run_id, service.account.id, service.site.id,
service.run.current_stage, f"Automation paused by user at stage {service.run.current_stage}"
)
except Exception as log_err:
# Don't fail the pause if logging fails
pass
return Response({'message': 'Automation paused'})
except AutomationRun.DoesNotExist:
return Response(
@@ -1613,6 +1654,22 @@ class AutomationViewSet(viewsets.ViewSet):
run.completed_at = timezone.now()
run.save(update_fields=['status', 'cancelled_at', 'completed_at'])
# CRITICAL FIX: Release the lock so user can start new automation
from django.core.cache import cache
cache.delete(f'automation_lock_{run.site.id}')
# Log the cancellation to automation log files
try:
from igny8_core.business.automation.services.automation_logger import AutomationLogger
logger = AutomationLogger()
logger.log_stage_progress(
run.run_id, run.account.id, run.site.id, run.current_stage,
f"Automation cancelled by user at stage {run.current_stage}"
)
except Exception as log_err:
# Don't fail the cancellation if logging fails
pass
return Response({
'message': 'Automation cancelled',
'status': run.status,

View File

@@ -0,0 +1,24 @@
# Generated migration for settings consolidation
# Add is_testing field to AIModelConfig
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the is_testing flag to AIModelConfig.

    Part of the settings consolidation: lets each model_type designate one
    cheap model for testing alongside the default (live) model. Uniqueness
    per model_type is enforced in AIModelConfig.save(), not at the DB level.
    """

    dependencies = [
        ('billing', '0009_seed_ai_model_configs'),
    ]

    operations = [
        # Add is_testing field to AIModelConfig
        migrations.AddField(
            model_name='aimodelconfig',
            name='is_testing',
            field=models.BooleanField(
                default=False,
                db_index=True,
                help_text='Testing model (cheap, for testing only). Only one per model_type can be is_testing=True.',
            ),
        ),
    ]

View File

@@ -828,6 +828,13 @@ class AIModelConfig(models.Model):
help_text="basic / quality / premium - for image models"
)
# Testing vs Live model designation
is_testing = models.BooleanField(
default=False,
db_index=True,
help_text="Testing model (cheap, for testing only). Only one per model_type can be is_testing=True."
)
# Image Size Configuration (for image models)
landscape_size = models.CharField(
max_length=20,
@@ -892,12 +899,18 @@ class AIModelConfig(models.Model):
return self.display_name
def save(self, *args, **kwargs):
    """Ensure only one is_default and one is_testing per model_type"""
    # Clear the flag on every other config of the same model_type before
    # saving, so at most one default / one testing model exists per type.
    if self.is_default:
        AIModelConfig.objects.filter(
            model_type=self.model_type,
            is_default=True
        ).exclude(pk=self.pk).update(is_default=False)
    if self.is_testing:
        # NOTE(review): the is_active=True filter means an inactive model can
        # keep is_testing=True; if it is later reactivated, two testing
        # models may coexist for one model_type — confirm this is intended.
        AIModelConfig.objects.filter(
            model_type=self.model_type,
            is_testing=True,
            is_active=True
        ).exclude(pk=self.pk).update(is_testing=False)
    super().save(*args, **kwargs)
@classmethod
@@ -910,6 +923,25 @@ class AIModelConfig(models.Model):
"""Get the default image generation model"""
return cls.objects.filter(model_type='image', is_default=True, is_active=True).first()
@classmethod
def get_testing_model(cls, model_type: str):
    """Get the testing model for text or image.

    Returns the first active config with is_testing=True for the given
    model_type, or None if none exists.
    """
    return cls.objects.filter(
        model_type=model_type,
        is_testing=True,
        is_active=True
    ).first()
@classmethod
def get_live_model(cls, model_type: str):
    """Get the live (default production) model for text or image.

    Live = active, is_default=True and explicitly not a testing model.
    Returns None when no such config exists.
    """
    return cls.objects.filter(
        model_type=model_type,
        is_testing=False,
        is_default=True,
        is_active=True
    ).first()
@classmethod
def get_image_models_by_tier(cls):
"""Get all active image models grouped by quality tier"""
@@ -1044,3 +1076,121 @@ class WebhookEvent(models.Model):
self.error_message = error_message
self.retry_count += 1
self.save(update_fields=['error_message', 'retry_count'])
class SiteAIBudgetAllocation(AccountBaseModel):
    """
    Site-level AI budget allocation by function.

    Allows configuring what percentage of the site's credit budget
    can be used for each AI function. This provides fine-grained
    control over credit consumption during automation runs.

    Example: 40% content, 30% images, 20% clustering, 10% ideas

    When max_credits_per_run is set in AutomationConfig:
    - Each function can only use up to its allocated % of that budget
    - Prevents any single function from consuming all credits
    """

    # NOTE(review): the stage numbers in these labels (clustering=1,
    # ideas=2) do not match AutomationService's limits, where clusters are
    # stage 2 and ideas stage 3 — confirm which numbering is authoritative.
    AI_FUNCTION_CHOICES = [
        ('clustering', 'Keyword Clustering (Stage 1)'),
        ('idea_generation', 'Ideas Generation (Stage 2)'),
        ('content_generation', 'Content Generation (Stage 4)'),
        ('image_prompt', 'Image Prompt Extraction (Stage 5)'),
        ('image_generation', 'Image Generation (Stage 6)'),
    ]

    # Site that owns this allocation row (one row per site + function).
    site = models.ForeignKey(
        'igny8_core_auth.Site',
        on_delete=models.CASCADE,
        related_name='ai_budget_allocations',
        help_text="Site this allocation belongs to"
    )
    ai_function = models.CharField(
        max_length=50,
        choices=AI_FUNCTION_CHOICES,
        help_text="AI function to allocate budget for"
    )
    # NOTE(review): help_text says 0-100 but only MinValueValidator(0) is
    # applied — values above 100 pass validation; consider adding
    # MaxValueValidator(100).
    allocation_percentage = models.PositiveIntegerField(
        default=20,
        validators=[MinValueValidator(0)],
        help_text="Percentage of credit budget allocated to this function (0-100)"
    )
    is_enabled = models.BooleanField(
        default=True,
        help_text="Whether this function is enabled for automation"
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_site_ai_budget_allocations'
        verbose_name = 'Site AI Budget Allocation'
        verbose_name_plural = 'Site AI Budget Allocations'
        unique_together = [['site', 'ai_function']]
        ordering = ['site', 'ai_function']
        indexes = [
            models.Index(fields=['site', 'is_enabled']),
            models.Index(fields=['account', 'site']),
        ]

    def __str__(self):
        return f"{self.site.name} - {self.get_ai_function_display()}: {self.allocation_percentage}%"

    @classmethod
    def get_or_create_defaults_for_site(cls, site, account):
        """
        Get or create default allocations for a site.

        Default weights: clustering 15%, ideas 10%, content 40%,
        image prompts 5%, image generation 30% (sums to 100%).
        """
        defaults = [
            ('clustering', 15),
            ('idea_generation', 10),
            ('content_generation', 40),
            ('image_prompt', 5),
            ('image_generation', 30),
        ]
        allocations = []
        for ai_function, percentage in defaults:
            # get_or_create leaves existing rows untouched; the defaults
            # only apply when a row is first created.
            allocation, _ = cls.objects.get_or_create(
                account=account,
                site=site,
                ai_function=ai_function,
                defaults={
                    'allocation_percentage': percentage,
                    'is_enabled': True,
                }
            )
            allocations.append(allocation)
        return allocations

    @classmethod
    def get_allocation_for_function(cls, site, ai_function) -> int:
        """
        Get allocation percentage for a specific AI function.
        Returns 0 if not found or disabled.
        """
        try:
            allocation = cls.objects.get(site=site, ai_function=ai_function)
            if allocation.is_enabled:
                return allocation.allocation_percentage
            return 0
        except cls.DoesNotExist:
            # Return default percentage if no allocation exists
            # (same weights as in get_or_create_defaults_for_site).
            default_map = {
                'clustering': 15,
                'idea_generation': 10,
                'content_generation': 40,
                'image_prompt': 5,
                'image_generation': 30,
            }
            return default_map.get(ai_function, 20)

View File

@@ -0,0 +1,53 @@
# Generated by Django 5.2.10 on 2026-01-17 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add/alter PublishingSettings scheduling fields.

    Every field touched here is marked DEPRECATED in its help_text: they are
    kept only for backwards compatibility while scheduling moves to the
    days + time-slots model (limits are derived from those instead).
    """

    dependencies = [
        ('integration', '0003_add_publishing_settings'),
    ]

    operations = [
        migrations.AddField(
            model_name='publishingsettings',
            name='queue_limit',
            field=models.PositiveIntegerField(default=100, help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='scheduling_mode',
            field=models.CharField(default='time_slots', help_text='DEPRECATED - always uses time_slots mode', max_length=20),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_end_time',
            field=models.TimeField(default='18:00', help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_interval_minutes',
            field=models.PositiveIntegerField(default=30, help_text='DEPRECATED - not used'),
        ),
        migrations.AddField(
            model_name='publishingsettings',
            name='stagger_start_time',
            field=models.TimeField(default='09:00', help_text='DEPRECATED - not used'),
        ),
        # The three publish-limit fields already exist; these AlterFields
        # only relax them (drop validators, update help_text).
        migrations.AlterField(
            model_name='publishingsettings',
            name='daily_publish_limit',
            field=models.PositiveIntegerField(default=3, help_text='DEPRECATED - derived from time_slots'),
        ),
        migrations.AlterField(
            model_name='publishingsettings',
            name='monthly_publish_limit',
            field=models.PositiveIntegerField(default=50, help_text='DEPRECATED - not used'),
        ),
        migrations.AlterField(
            model_name='publishingsettings',
            name='weekly_publish_limit',
            field=models.PositiveIntegerField(default=15, help_text='DEPRECATED - derived from days × slots'),
        ),
    ]

View File

@@ -247,8 +247,16 @@ class SyncEvent(AccountBaseModel):
class PublishingSettings(AccountBaseModel):
"""
Site-level publishing configuration settings.
Controls automatic approval, publishing limits, and scheduling.
Site-level publishing SCHEDULE configuration (SIMPLIFIED).
Controls automatic approval, publishing, and time-slot based scheduling.
REMOVED (per settings consolidation plan):
- scheduling_mode (only time_slots needed)
- daily_publish_limit (derived: len(time_slots))
- weekly_publish_limit (derived: len(time_slots) × len(publish_days))
- monthly_publish_limit (not needed)
- stagger_* fields (not needed)
- queue_limit (not needed)
"""
DEFAULT_PUBLISH_DAYS = ['mon', 'tue', 'wed', 'thu', 'fri']
@@ -273,26 +281,7 @@ class PublishingSettings(AccountBaseModel):
help_text="Automatically publish approved content to the external site"
)
# Publishing limits
daily_publish_limit = models.PositiveIntegerField(
default=3,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per day"
)
weekly_publish_limit = models.PositiveIntegerField(
default=15,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per week"
)
monthly_publish_limit = models.PositiveIntegerField(
default=50,
validators=[MinValueValidator(1)],
help_text="Maximum number of articles to publish per month"
)
# Publishing schedule
# Publishing schedule - Days + Time Slots only (SIMPLIFIED)
publish_days = models.JSONField(
default=list,
help_text="Days of the week to publish (mon, tue, wed, thu, fri, sat, sun)"
@@ -303,6 +292,21 @@ class PublishingSettings(AccountBaseModel):
help_text="Times of day to publish (HH:MM format, e.g., ['09:00', '14:00', '18:00'])"
)
# DEPRECATED FIELDS - kept for backwards compatibility during migration
# These will be removed in a future migration
scheduling_mode = models.CharField(
max_length=20,
default='time_slots',
help_text="DEPRECATED - always uses time_slots mode"
)
daily_publish_limit = models.PositiveIntegerField(default=3, help_text="DEPRECATED - derived from time_slots")
weekly_publish_limit = models.PositiveIntegerField(default=15, help_text="DEPRECATED - derived from days × slots")
monthly_publish_limit = models.PositiveIntegerField(default=50, help_text="DEPRECATED - not used")
stagger_start_time = models.TimeField(default='09:00', help_text="DEPRECATED - not used")
stagger_end_time = models.TimeField(default='18:00', help_text="DEPRECATED - not used")
stagger_interval_minutes = models.PositiveIntegerField(default=30, help_text="DEPRECATED - not used")
queue_limit = models.PositiveIntegerField(default=100, help_text="DEPRECATED - not used")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@@ -323,6 +327,22 @@ class PublishingSettings(AccountBaseModel):
self.publish_time_slots = self.DEFAULT_TIME_SLOTS
super().save(*args, **kwargs)
# Calculated capacity properties (read-only, derived from days × slots)
@property
def daily_capacity(self) -> int:
    """Daily publishing capacity = number of time slots"""
    # Guards against publish_time_slots being None/empty (falsy -> 0).
    return len(self.publish_time_slots) if self.publish_time_slots else 0
@property
def weekly_capacity(self) -> int:
    """Weekly publishing capacity = time slots × publish days"""
    # The conditional guards publish_days only; daily_capacity already
    # handles empty time slots.
    return self.daily_capacity * len(self.publish_days) if self.publish_days else 0
@property
def monthly_capacity(self) -> int:
    """Monthly publishing capacity (approximate: weekly × 4.3)"""
    # 4.3 ≈ average weeks per month; truncated to an int.
    return int(self.weekly_capacity * 4.3)
@classmethod
def get_or_create_for_site(cls, site):
"""Get or create publishing settings for a site with defaults"""
@@ -332,9 +352,6 @@ class PublishingSettings(AccountBaseModel):
'account': site.account,
'auto_approval_enabled': True,
'auto_publish_enabled': True,
'daily_publish_limit': 3,
'weekly_publish_limit': 15,
'monthly_publish_limit': 50,
'publish_days': cls.DEFAULT_PUBLISH_DAYS,
'publish_time_slots': cls.DEFAULT_TIME_SLOTS,
}