fix
This commit is contained in:
49
tools/automation_debug_commands.sh
Normal file
49
tools/automation_debug_commands.sh
Normal file
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
# Quick command reference for automation debugging.
#
# This script only PRINTS a cheat-sheet of diagnostic commands to stdout;
# it does not execute any of them.
#
# A single quoted here-doc replaces the original run of echo statements.
# The delimiter is CMDS rather than EOF because the card itself contains
# literal "EOF" lines (terminators of the embedded Django shell here-docs),
# which would otherwise end the here-doc early. The quoted delimiter also
# prevents any $-expansion inside the card text.
cat <<'CMDS'
=== AUTOMATION DIAGNOSTICS COMMANDS ===

1. CHECK RUNNING AUTOMATION RUNS:
docker exec igny8_backend python manage.py shell << 'EOF'
from igny8_core.business.automation.models import AutomationRun
runs = AutomationRun.objects.filter(status__in=['running', 'paused'])
for r in runs:
    print(f'{r.run_id} | Site:{r.site_id} | Stage:{r.current_stage} | Status:{r.status}')
EOF

2. FORCE CANCEL STUCK RUNS:
docker exec igny8_backend python manage.py shell << 'EOF'
from igny8_core.business.automation.models import AutomationRun
from django.core.cache import cache
runs = AutomationRun.objects.filter(status__in=['running', 'paused'])
for r in runs:
    r.status = 'cancelled'
    r.save()
    cache.delete(f'automation_lock_{r.site_id}')
    print(f'Cancelled {r.run_id}')
EOF

3. CHECK CACHE LOCKS:
docker exec igny8_backend python manage.py shell << 'EOF'
from django.core.cache import cache
for site_id in [5, 16]:
    val = cache.get(f'automation_lock_{site_id}')
    print(f'Site {site_id}: {val or "UNLOCKED"}')
EOF

4. VIEW AUTOMATION LOGS:
ls -lt /data/app/logs/automation/5/*/run_* | head -n 5
tail -f /data/app/logs/automation/5/16/run_XXXXX_manual/automation_run.log

5. CHECK CELERY WORKERS:
docker exec igny8_celery_worker celery -A igny8_core inspect active

6. RESTART BACKEND (after code changes):
docker restart igny8_backend

CMDS
|
||||
65
tools/automation_logger_test.py
Normal file
65
tools/automation_logger_test.py
Normal file
@@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env python3
"""
Quick test harness for AutomationLogger diagnostic verification.

This script loads the AutomationLogger module by path and runs a few methods to
create a test run and write logs. It prints the activity log and diagnostic file.

Fixes vs. original:
- Guard against ``spec_from_file_location`` returning None (missing file
  previously surfaced as a confusing AttributeError on ``spec.loader``).
- Open the diagnostic log with an explicit encoding and replacement error
  handler so a stray byte cannot crash the harness.
- Tail the diagnostic log with ``deque(maxlen=50)`` instead of reading the
  whole file into memory.
"""
import importlib.util
import sys
import os
from collections import deque
from pathlib import Path

# Absolute path of the module under test; it is loaded by file path so the
# harness can run without the Django app package on sys.path.
MODULE_PATH = '/data/app/igny8/backend/igny8_core/business/automation/services/automation_logger.py'

spec = importlib.util.spec_from_file_location('automation_logger', MODULE_PATH)
if spec is None or spec.loader is None:
    # Fail with a clear message rather than an AttributeError below.
    sys.exit(f'ERROR: cannot load AutomationLogger module from {MODULE_PATH}')
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
AutomationLogger = mod.AutomationLogger

BASE_LOG_DIR = '/data/app/logs/automation'
SHARED_DIR = '/data/app/logs/automation/all_runs_test'

logger = AutomationLogger(base_log_dir=BASE_LOG_DIR, shared_log_dir=SHARED_DIR)

print('Using base_log_dir =', logger.base_log_dir)
print('Using shared_log_dir =', logger.shared_log_dir)

# Run a test flow: create a run, then exercise the three stage-logging paths.
# 999/999 are sentinel account/site ids used only by this harness.
run_id = logger.start_run(999, 999, 'test')
print('Created run_id:', run_id)

logger.log_stage_progress(run_id, 999, 999, 0, 'Diagnostic: stage progress test')
logger.log_stage_error(run_id, 999, 999, 0, 'Diagnostic: simulated error')
logger.log_stage_complete(run_id, 999, 999, 0, 3, '0m 1s', 0)

# Print activity log via get_activity_log
activity = logger.get_activity_log(999, 999, run_id, last_n=50)
print('\nActivity log (last lines):')
for line in activity:
    print(line)

# Print diagnostic file tail
diag_file = os.path.join(BASE_LOG_DIR, 'automation_diagnostic.log')
print('\nDiagnostic file path:', diag_file)
if os.path.exists(diag_file):
    print('\nDiagnostic log tail:')
    # deque(f, maxlen=50) keeps only the last 50 lines while streaming the
    # file, instead of loading it entirely into memory.
    with open(diag_file, 'r', encoding='utf-8', errors='replace') as f:
        for line in deque(f, maxlen=50):
            print(line.rstrip())
else:
    print('Diagnostic file not found')

# List created directories for quick verification
print('\nListing created run dirs under base:')
for p in sorted(Path(BASE_LOG_DIR).rglob(run_id)):
    print(p)

print('\nShared run dir listing:')
shared_run = os.path.join(SHARED_DIR, run_id)
if os.path.exists(shared_run):
    for root, dirs, files in os.walk(shared_run):
        for f in files:
            print(os.path.join(root, f))
else:
    print('Shared run dir not found')
|
||||
136
tools/verify_automation_fix.py
Normal file
136
tools/verify_automation_fix.py
Normal file
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python3
"""
Quick verification script for automation progress bar fix
Tests that the AutomationService methods return correct data structures
"""
import sys
import os

# Add backend to path
# NOTE(review): hardcoded deployment path — this script is expected to run
# inside the backend container where /data/app/igny8/backend exists.
sys.path.insert(0, '/data/app/igny8/backend')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')

import django
# django.setup() must run before importing any Django models below,
# otherwise the model imports raise AppRegistryNotReady.
django.setup()

from igny8_core.business.automation.models import AutomationRun
from igny8_core.business.automation.services import AutomationService
|
||||
|
||||
def test_processing_state():
    """Test that get_current_processing_state returns correct structure.

    Prints a human-readable verification report to stdout:
    - inspects up to 5 most recently started running/paused AutomationRuns,
    - checks the state dict returned by get_current_processing_state() for
      required fields and a consistent percentage value,
    - then scans the filesystem for run_trace.jsonl trace files.

    Returns None; all results are reported via print().
    """
    print("=" * 80)
    print("AUTOMATION PROGRESS BAR FIX - VERIFICATION TEST")
    print("=" * 80)

    # Find a recent running or paused run
    runs = AutomationRun.objects.filter(status__in=['running', 'paused']).order_by('-started_at')[:5]

    if not runs.exists():
        print("\n❌ No running or paused automation runs found")
        print(" To test: Start an automation run from the UI")
        return

    print(f"\n✓ Found {runs.count()} active run(s)")

    for run in runs:
        print(f"\n{'='*80}")
        print(f"Run ID: {run.run_id}")
        print(f"Status: {run.status}")
        print(f"Current Stage: {run.current_stage}")
        print(f"Started: {run.started_at}")

        # Each run is checked independently; a failure on one run is reported
        # and the loop continues with the next run.
        try:
            service = AutomationService.from_run_id(run.run_id)
            state = service.get_current_processing_state()

            if state is None:
                # The fix under verification: paused runs must also return a
                # state object, not None.
                print("❌ get_current_processing_state() returned None")
                print(f" This should not happen for status='{run.status}'")
                continue

            # Verify required fields
            required_fields = [
                'stage_number', 'stage_name', 'stage_type',
                'total_items', 'processed_items', 'percentage',
                'currently_processing', 'up_next', 'remaining_count'
            ]

            print("\n✓ State object returned successfully")
            print("\nField values:")

            missing_fields = []
            for field in required_fields:
                if field in state:
                    value = state[field]
                    if isinstance(value, list):
                        # Lists (e.g. currently_processing / up_next) are
                        # summarized by length rather than dumped in full.
                        print(f" • {field}: [{len(value)} items]")
                    else:
                        print(f" • {field}: {value}")
                else:
                    missing_fields.append(field)
                    print(f" ❌ {field}: MISSING")

            if missing_fields:
                print(f"\n❌ Missing fields: {', '.join(missing_fields)}")
            else:
                print("\n✓ All required fields present")

            # Verify progress calculation
            # NOTE(review): uses Python round() (banker's rounding) — assumed
            # to match the service's own percentage computation; confirm.
            if state['total_items'] > 0:
                expected_pct = round((state['processed_items'] / state['total_items']) * 100)
                if state['percentage'] == expected_pct:
                    print(f"✓ Progress calculation correct: {state['processed_items']}/{state['total_items']} = {state['percentage']}%")
                else:
                    print(f"❌ Progress mismatch: expected {expected_pct}%, got {state['percentage']}%")

            # Check if paused state works
            if run.status == 'paused':
                print("\n✓ PAUSED RUN FIX VERIFIED: State returned for paused run!")
                print(" (Previously this would have returned None and caused blank card)")

        except Exception as e:
            print(f"❌ Error getting state: {e}")
            import traceback
            traceback.print_exc()

    print("\n" + "="*80)
    print("VERIFICATION COMPLETE")
    print("="*80)

    # Check for trace files
    print("\nChecking for JSONL trace files...")
    import glob
    # Glob pattern mirrors the log layout: <account>/<site>/<run_id>/
    trace_files = glob.glob('/data/app/logs/automation/*/*/run_*/run_trace.jsonl')
    if trace_files:
        print(f"✓ Found {len(trace_files)} trace file(s)")
        # Newest first by mtime; only the 3 most recent are inspected.
        latest = sorted(trace_files, key=os.path.getmtime, reverse=True)[:3]
        print("\nMost recent trace files:")
        for f in latest:
            size = os.path.getsize(f)
            print(f" • {f} ({size} bytes)")

            # Check for stage_item_processed events
            # Best-effort: unreadable trace files are silently skipped.
            try:
                with open(f, 'r') as tf:
                    content = tf.read()
                    if 'stage_item_processed' in content:
                        count = content.count('stage_item_processed')
                        print(f" ✓ Contains {count} stage_item_processed event(s)")
                    else:
                        print(f" ℹ No stage_item_processed events (may be older run)")
            except Exception:
                pass
    else:
        print("ℹ No trace files found yet (will appear for new runs)")

    print("\n" + "="*80)
    print("NEXT STEPS:")
    print("1. Start a new automation run from the UI")
    print("2. Watch the progress bar - it should animate smoothly")
    print("3. Try pausing - card should stay visible with yellow theme")
    print("4. Check logs in: /data/app/logs/automation/<account>/<site>/<run_id>/")
    print("5. Verify run_trace.jsonl contains 'stage_item_processed' events")
    print("="*80)
|
||||
|
||||
# Entry point: run the verification report when executed as a script.
if __name__ == '__main__':
    test_processing_state()
|
||||
Reference in New Issue
Block a user