This commit is contained in:
alorig
2025-11-09 19:07:06 +05:00
parent 6b738d1ab7
commit 8cd036d8ce
13 changed files with 929 additions and 0 deletions

128
docs/AI-ELEMENTS-TABLE.md Normal file
View File

@@ -0,0 +1,128 @@
# AI-Related Elements Across Codebase
Complete table of all AI-related elements across the codebase: core logic, configuration, prompts, validation, retry handling, logging, and response parsing.
| Type | Name / Key | File | Function / Class | Line No | Notes |
|------|------------|------|------------------|---------|-------|
| **🧠 AI Core Functions** | | | | | |
| AI Core | `_auto_cluster_keywords_core` | `backend/igny8_core/modules/planner/tasks.py` | `_auto_cluster_keywords_core()` | 26 | Core clustering logic (legacy, used by old tasks) |
| AI Core | `_generate_single_idea_core` | `backend/igny8_core/modules/planner/tasks.py` | `_generate_single_idea_core()` | 1047 | Core idea generation logic (legacy) |
| AI Core | `auto_generate_content_task` | `backend/igny8_core/modules/writer/tasks.py` | `auto_generate_content_task()` | 27 | Celery task for content generation (legacy) |
| AI Core | `AutoClusterFunction` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction` class | 14 | New framework function for clustering |
| AI Core | `cluster_keywords` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.cluster_keywords()` | 1080 | Legacy clustering method |
| AI Core | `generate_ideas` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.generate_ideas()` | 1280 | Legacy idea generation method |
| AI Core | `generate_content` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.generate_content()` | 446 | Legacy content generation method |
| AI Core | `generate_image` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.generate_image()` | 656 | Image generation (OpenAI DALL-E / Runware) |
| AI Core | `generate_image` | `backend/igny8_core/ai/processor.py` | `AIProcessor.generate_image()` | 61 | Framework wrapper for image generation |
| AI Core | `run_ai_task` | `backend/igny8_core/ai/tasks.py` | `run_ai_task()` | 13 | Unified Celery entrypoint for all AI functions |
| AI Core | `AIEngine.execute` | `backend/igny8_core/ai/engine.py` | `AIEngine.execute()` | 26 | Central orchestrator for all AI functions |
| **🔁 Retry / Model Fallback Logic** | | | | | |
| Retry | `_call_openai` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._call_openai()` | 125 | Main OpenAI API call method with error handling |
| Retry | `_get_api_key` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_api_key()` | 73 | Gets API key from IntegrationSettings or Django settings (fallback) |
| Retry | `_get_model` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_model()` | 98 | Gets model from IntegrationSettings or Django settings (fallback) |
| Retry | `max_retries=3` | `backend/igny8_core/ai/tasks.py` | `@shared_task(bind=True, max_retries=3)` | 12 | Celery task retry configuration |
| Retry | `max_retries=3` | `backend/igny8_core/modules/writer/tasks.py` | `@shared_task(bind=True, max_retries=3)` | 26 | Content generation task retry configuration |
| Retry | Error handling | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 191-305 | HTTP error handling, JSON parsing errors, timeout handling |
| Retry | Fallback to Django settings | `backend/igny8_core/utils/ai_processor.py` | `_get_api_key()` | 91-95 | Falls back to `OPENAI_API_KEY` or `RUNWARE_API_KEY` from settings |
| Retry | Fallback to default model | `backend/igny8_core/utils/ai_processor.py` | `_get_model()` | 120-123 | Falls back to `DEFAULT_AI_MODEL` from settings (default: 'gpt-4.1') |
| **🧱 Prompt Sources** | | | | | |
| Prompt | `clustering` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('clustering')` | 10-30 | Hardcoded default clustering prompt template |
| Prompt | `ideas` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('ideas')` | 32-55 | Hardcoded default ideas generation prompt template |
| Prompt | `content_generation` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('content_generation')` | 57-75 | Hardcoded default content generation prompt template |
| Prompt | `image_prompt_extraction` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('image_prompt_extraction')` | 77-98 | Hardcoded default image prompt extraction template |
| Prompt | `image_prompt_template` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('image_prompt_template')` | 100 | Hardcoded default image prompt template |
| Prompt | `negative_prompt` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('negative_prompt')` | 102 | Hardcoded default negative prompt |
| Prompt | `get_prompt_value` | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` | 108 | Retrieves prompt from DB (AIPrompt model) or returns default |
| Prompt | `AIPrompt` model | `backend/igny8_core/modules/system/models.py` | `AIPrompt` class | 13 | Database model storing account-specific prompts |
| Prompt | `prompt_type` | `backend/igny8_core/modules/system/models.py` | `AIPrompt.prompt_type` | 25 | Choices: clustering, ideas, content_generation, image_prompt_extraction, image_prompt_template, negative_prompt |
| Prompt | `build_prompt` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.build_prompt()` | 117 | Builds clustering prompt using `get_prompt_value()` |
| Prompt | `[IGNY8_KEYWORDS]` | `backend/igny8_core/ai/functions/auto_cluster.py` | `build_prompt()` | 131 | Placeholder replaced with keyword list |
| Prompt | `[IGNY8_CLUSTERS]` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('ideas')` | 34 | Placeholder for ideas prompt |
| Prompt | `[IGNY8_IDEA]` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt('content_generation')` | 60 | Placeholder for content generation prompt |
| **⚠️ Validation / Limits** | | | | | |
| Validation | `validate` | `backend/igny8_core/ai/base.py` | `BaseAIFunction.validate()` | 34 | Base validation: checks for 'ids' array, max_items limit |
| Validation | `validate` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.validate()` | 37 | Custom validation: checks keywords exist, plan limits |
| Validation | `get_max_items` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.get_max_items()` | 34 | Returns 20 (max keywords per cluster operation) |
| Validation | `check_credits` | `backend/igny8_core/modules/billing/services.py` | `CreditService.check_credits()` | 16 | Checks if account has enough credits |
| Validation | Daily cluster limit | `backend/igny8_core/ai/functions/auto_cluster.py` | `validate()` | 59-71 | Checks `plan.daily_cluster_limit` against clusters created today |
| Validation | Max clusters limit | `backend/igny8_core/ai/functions/auto_cluster.py` | `validate()` | 73-79 | Checks `plan.max_clusters` against total clusters |
| Validation | `VALID_OPENAI_IMAGE_MODELS` | `backend/igny8_core/utils/ai_processor.py` | Constant | 34 | Set: {'dall-e-3', 'dall-e-2'} |
| Validation | `VALID_SIZES_BY_MODEL` | `backend/igny8_core/utils/ai_processor.py` | Constant | 41 | Dict mapping models to valid sizes |
| Validation | Model validation | `backend/igny8_core/utils/ai_processor.py` | `generate_image()` | 704-708 | Validates model is in VALID_OPENAI_IMAGE_MODELS |
| Validation | Size validation | `backend/igny8_core/utils/ai_processor.py` | `generate_image()` | 719-724 | Validates size is valid for selected model |
| Validation | Model rate validation | `backend/igny8_core/utils/ai_processor.py` | `_get_model()` | 112 | Validates model is in MODEL_RATES before using |
| **🪵 AI Debug Steps** | | | | | |
| Debug Step | `addRequestStep` | `backend/igny8_core/ai/tracker.py` | `StepTracker.add_request_step()` | 21 | Adds request phase step (INIT, PREP, SAVE, DONE) |
| Debug Step | `addResponseStep` | `backend/igny8_core/ai/tracker.py` | `StepTracker.add_response_step()` | 45 | Adds response phase step (AI_CALL, PARSE) |
| Debug Step | `request_steps` | `backend/igny8_core/ai/tracker.py` | `StepTracker.request_steps` | 17 | List of request steps |
| Debug Step | `response_steps` | `backend/igny8_core/ai/tracker.py` | `StepTracker.response_steps` | 18 | List of response steps |
| Debug Step | `request_steps` | `backend/igny8_core/modules/planner/tasks.py` | `_auto_cluster_keywords_core()` | 37 | Legacy request steps tracking |
| Debug Step | `response_steps` | `backend/igny8_core/modules/planner/tasks.py` | `_auto_cluster_keywords_core()` | 38 | Legacy response steps tracking |
| Debug Step | `meta['request_steps']` | `backend/igny8_core/modules/system/integration_views.py` | `task_progress()` | 936-937 | Extracts request_steps from Celery task meta |
| Debug Step | `meta['response_steps']` | `backend/igny8_core/modules/system/integration_views.py` | `task_progress()` | 938-939 | Extracts response_steps from Celery task meta |
| Debug Step | `request_steps` | `backend/igny8_core/ai/models.py` | `AITaskLog.request_steps` | 31 | JSONField storing request steps in database |
| Debug Step | `response_steps` | `backend/igny8_core/ai/models.py` | `AITaskLog.response_steps` | 32 | JSONField storing response steps in database |
| Debug Step | `get_meta()` | `backend/igny8_core/ai/tracker.py` | `StepTracker.get_meta()` | 69 | Returns dict with request_steps and response_steps |
| Debug Step | Step injection | `backend/igny8_core/ai/engine.py` | `execute()` | 47, 60, 68, 125, 138, 174 | Adds steps at each phase (INIT, PREP, AI_CALL, PARSE, SAVE, DONE) |
| Debug Step | Step counter | `backend/igny8_core/ai/tracker.py` | `StepTracker.step_counter` | 19 | Auto-increments for sequential step numbers |
| **🧾 Model Config / Selection** | | | | | |
| Model Config | `default_model` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.__init__()` | 67 | Set from `_get_model()` or defaults to 'gpt-4.1' |
| Model Config | `_get_model` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_model()` | 98 | Gets model from IntegrationSettings.config['model'] or Django settings |
| Model Config | `MODEL_RATES` | `backend/igny8_core/utils/ai_processor.py` | Constant | 19 | Dict: {'gpt-4.1': {...}, 'gpt-4o-mini': {...}, 'gpt-4o': {...}} |
| Model Config | `IMAGE_MODEL_RATES` | `backend/igny8_core/utils/ai_processor.py` | Constant | 26 | Dict: {'dall-e-3': 0.040, 'dall-e-2': 0.020, ...} |
| Model Config | `DEFAULT_AI_MODEL` | `backend/igny8_core/utils/ai_processor.py` | Django setting | 121 | Default: 'gpt-4.1' if not set |
| Model Config | `get_model` | `backend/igny8_core/ai/base.py` | `BaseAIFunction.get_model()` | 70 | Override to specify model (defaults to account's default) |
| Model Config | `model` parameter | `backend/igny8_core/ai/processor.py` | `call()` | 22 | Model can be passed to AIProcessor.call() |
| Model Config | `json_models` | `backend/igny8_core/ai/processor.py` | `call()` | 40 | List: ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview'] (supports JSON mode) |
| Model Config | `IntegrationSettings.config['model']` | `backend/igny8_core/modules/system/models.py` | `IntegrationSettings.config` | 56 | JSONField storing model name per account |
| Model Config | Model selection | `backend/igny8_core/ai/engine.py` | `execute()` | 65 | Gets model via `fn.get_model(self.account)` |
| **📦 Request/Response Structuring** | | | | | |
| Request/Response | `response_format` | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 131 | Optional dict for JSON mode: {'type': 'json_object'} |
| Request/Response | `response_format` | `backend/igny8_core/ai/processor.py` | `call()` | 25 | Optional dict for JSON mode |
| Request/Response | JSON mode auto-enable | `backend/igny8_core/ai/processor.py` | `call()` | 41-42 | Auto-enables JSON mode for json_models if not specified |
| Request/Response | `_extract_json_from_response` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._extract_json_from_response()` | 334 | Extracts JSON from response (handles markdown code blocks, multiline) |
| Request/Response | `extract_json` | `backend/igny8_core/ai/processor.py` | `AIProcessor.extract_json()` | 57 | Wrapper for `_extract_json_from_response()` |
| Request/Response | `parse_response` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.parse_response()` | 158 | Parses AI response into cluster data structure |
| Request/Response | `parse_response` | `backend/igny8_core/ai/base.py` | `BaseAIFunction.parse_response()` | 75 | Abstract method for parsing AI response |
| Request/Response | Request body format | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 171-183 | Format: {'model': str, 'messages': [...], 'temperature': float, 'max_tokens': int, 'response_format': dict} |
| Request/Response | Response format | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 215-329 | Returns: {'content': str, 'input_tokens': int, 'output_tokens': int, 'total_tokens': int, 'model': str, 'cost': float, 'error': str, 'api_id': str} |
| Request/Response | JSON parsing fallback | `backend/igny8_core/ai/functions/auto_cluster.py` | `parse_response()` | 173-176 | Falls back to `extract_json()` if direct JSON parse fails |
| **📍 Paths / Constants** | | | | | |
| Path/Constant | `OPENAI_API_KEY` | `backend/igny8_core/utils/ai_processor.py` | Django setting | 93 | Fallback API key from Django settings |
| Path/Constant | `RUNWARE_API_KEY` | `backend/igny8_core/utils/ai_processor.py` | Django setting | 95 | Fallback API key from Django settings |
| Path/Constant | OpenAI API URL | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 163 | Hardcoded: 'https://api.openai.com/v1/chat/completions' |
| Path/Constant | OpenAI Images URL | `backend/igny8_core/utils/ai_processor.py` | `generate_image()` | 735 | Hardcoded: 'https://api.openai.com/v1/images/generations' |
| Path/Constant | Runware API URL | `backend/igny8_core/utils/ai_processor.py` | `generate_image()` | 844 | Hardcoded: 'https://api.runware.ai/v1' |
| Path/Constant | `IntegrationSettings.config['apiKey']` | `backend/igny8_core/modules/system/models.py` | `IntegrationSettings.config` | 56 | JSONField storing API key per account |
| Path/Constant | Prompt type choices | `backend/igny8_core/modules/system/models.py` | `AIPrompt.PROMPT_TYPE_CHOICES` | 16 | ['clustering', 'ideas', 'content_generation', 'image_prompt_extraction', 'image_prompt_template', 'negative_prompt'] |
| Path/Constant | Integration type choices | `backend/igny8_core/modules/system/models.py` | `IntegrationSettings.INTEGRATION_TYPE_CHOICES` | 48 | ['openai', 'runware', 'gsc', 'image_generation'] |
| Path/Constant | `get_function_instance` | `backend/igny8_core/ai/registry.py` | `get_function_instance()` | - | Gets AI function instance from registry |
| Path/Constant | `auto_cluster` | `backend/igny8_core/ai/registry.py` | `register_lazy_function()` | 69 | Registered function name for clustering |
| **💰 Cost Tracking** | | | | | |
| Cost | `CostTracker` | `backend/igny8_core/ai/tracker.py` | `CostTracker` class | 193 | Tracks API costs and token usage |
| Cost | `record` | `backend/igny8_core/ai/tracker.py` | `CostTracker.record()` | 201 | Records cost, tokens, model for an operation |
| Cost | Cost calculation | `backend/igny8_core/utils/ai_processor.py` | `_call_openai()` | 277-295 | Calculates cost from MODEL_RATES based on input/output tokens |
| Cost | Image cost calculation | `backend/igny8_core/utils/ai_processor.py` | `generate_image()` | 790 | Calculates cost from IMAGE_MODEL_RATES * n images |
| Cost | Credit logging | `backend/igny8_core/ai/engine.py` | `execute()` | 142-171 | Logs credit usage to CreditUsageLog after successful save |
| Cost | `_calculate_credits_for_clustering` | `backend/igny8_core/ai/engine.py` | `_calculate_credits_for_clustering()` | 257 | Calculates credits used (from plan config or fallback formula) |
| **📊 Progress Tracking** | | | | | |
| Progress | `ProgressTracker` | `backend/igny8_core/ai/tracker.py` | `ProgressTracker` class | 77 | Tracks progress updates for AI tasks |
| Progress | `update` | `backend/igny8_core/ai/tracker.py` | `ProgressTracker.update()` | 89 | Updates Celery task state with progress |
| Progress | Phase percentages | `backend/igny8_core/ai/engine.py` | `execute()` | 30-36 | INIT (0-10%), PREP (10-25%), AI_CALL (25-70%), PARSE (70-85%), SAVE (85-98%), DONE (98-100%) |
| Progress | `update_ai_progress` | `backend/igny8_core/ai/tracker.py` | `ProgressTracker.update_ai_progress()` | 184 | Callback for AI processor progress updates |
| **🗄️ Database Logging** | | | | | |
| Database | `AITaskLog` | `backend/igny8_core/ai/models.py` | `AITaskLog` model | 8 | Unified logging table for all AI tasks |
| Database | `_log_to_database` | `backend/igny8_core/ai/engine.py` | `AIEngine._log_to_database()` | 220 | Logs task execution to AITaskLog table |
| Database | Task logging | `backend/igny8_core/ai/engine.py` | `execute()` | 178 | Called after successful execution to log to database |
| **🔄 Celery Integration** | | | | | |
| Celery | `run_ai_task` | `backend/igny8_core/ai/tasks.py` | `run_ai_task()` | 13 | Unified Celery task entrypoint |
| Celery | Task state updates | `backend/igny8_core/ai/tracker.py` | `ProgressTracker.update()` | 124-131 | Updates Celery task state via `task.update_state()` |
| Celery | Meta injection | `backend/igny8_core/modules/system/integration_views.py` | `task_progress()` | 936-991 | Extracts request_steps/response_steps from task meta and returns to frontend |
| Celery | Error state | `backend/igny8_core/ai/tasks.py` | `run_ai_task()` | 68-84 | Updates task state to FAILURE on error |
| **🔧 Utility Functions** | | | | | |
| Utility | `get_default_prompt` | `backend/igny8_core/modules/system/utils.py` | `get_default_prompt()` | 7 | Returns hardcoded default prompt by type |
| Utility | `get_prompt_value` | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` | 108 | Gets prompt from DB or returns default |
| Utility | `check_moderation` | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor.check_moderation()` | 594 | Checks content against OpenAI moderation API |
| Utility | `prepare` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.prepare()` | 85 | Loads keywords with relationships |
| Utility | `save_output` | `backend/igny8_core/ai/functions/auto_cluster.py` | `AutoClusterFunction.save_output()` | 205 | Saves clusters to database with transaction |

View File

@@ -0,0 +1,300 @@
# IGNY8 AI Elements Reference Table
Generated by extract_ai_elements.py analysis
---
## 🧠 AI Core Functions
| Function Name | Category | Type | File | Line | Uses AIProcessor | Celery | Progress | Steps | Prompt Source | Model Source |
|---------------|----------|------|------|------|------------------|--------|----------|-------|---------------|--------------|
| `_auto_cluster_keywords_core` | cluster | core_function | `backend/igny8_core/modules/planner/tasks.py` | 26 | ✅ | ❌ | ✅ | ✅ | Database (get_prompt_value) | AIProcessor.default_model |
| `_generate_single_idea_core` | ideas | core_function | `backend/igny8_core/modules/planner/tasks.py` | 1047 | ✅ | ❌ | ✅ | ✅ | Database (get_prompt_value) | AIProcessor.default_model |
| `auto_generate_content_task` | content | celery_task | `backend/igny8_core/modules/writer/tasks.py` | 27 | ✅ | ✅ | ✅ | ❌ | Database (get_prompt_value) | AIProcessor.default_model |
| `auto_generate_images_task` | image | celery_task | `backend/igny8_core/modules/writer/tasks.py` | 741 | ✅ | ✅ | ✅ | ❌ | Database (get_prompt_value) | AIProcessor.default_model |
| `AutoClusterFunction` | cluster | class | `backend/igny8_core/ai/functions/auto_cluster.py` | 14 | ✅ | ❌ | ✅ | ✅ | Database (get_prompt_value) | Function.get_model() |
| `cluster_keywords` | cluster | method | `backend/igny8_core/utils/ai_processor.py` | 1080 | ✅ | ❌ | ✅ | ✅ | Inline/Hardcoded | AIProcessor.default_model |
| `generate_ideas` | ideas | method | `backend/igny8_core/utils/ai_processor.py` | 1280 | ✅ | ❌ | ✅ | ✅ | Inline/Hardcoded | AIProcessor.default_model |
| `generate_content` | content | method | `backend/igny8_core/utils/ai_processor.py` | 446 | ✅ | ❌ | ❌ | ❌ | Inline/Hardcoded | AIProcessor.default_model |
| `generate_image` | image | method | `backend/igny8_core/utils/ai_processor.py` | 656 | ✅ | ❌ | ❌ | ❌ | Inline/Hardcoded | Parameter or default |
| `run_ai_task` | unified | celery_task | `backend/igny8_core/ai/tasks.py` | 13 | ❌ | ✅ | ✅ | ✅ | Via function | Via function |
| `execute` | unified | method | `backend/igny8_core/ai/engine.py` | 26 | ✅ | ❌ | ✅ | ✅ | Via function | Via function |
---
## 🧱 Prompt Sources
| Prompt Type | Source | File | Retrieval Method |
|-------------|--------|------|------------------|
| `clustering` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
| `ideas` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
| `content_generation` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
| `image_prompt_extraction` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
| `image_prompt_template` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
| `negative_prompt` | Hardcoded in get_default_prompt() | `backend/igny8_core/modules/system/utils.py` | `get_prompt_value()` -> AIPrompt model or default |
**Prompt Storage:**
- **Database Model**: `AIPrompt` in `backend/igny8_core/modules/system/models.py`
- **Table**: `igny8_ai_prompts`
- **Fields**: `prompt_type`, `prompt_value`, `default_prompt`, `account` (FK)
- **Unique Constraint**: `(account, prompt_type)`
**Prompt Retrieval Flow:**
1. `get_prompt_value(account, prompt_type)` in `modules/system/utils.py:108`
2. Tries: `AIPrompt.objects.get(account=account, prompt_type=prompt_type, is_active=True)`
3. Falls back to: `get_default_prompt(prompt_type)` if not found
---
## 🧾 Model Configuration
| Model Name | Source | File | Selection Method |
|------------|--------|------|------------------|
| `gpt-4.1` | MODEL_RATES constant | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_model()` -> IntegrationSettings or Django settings |
| `gpt-4o-mini` | MODEL_RATES constant | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_model()` -> IntegrationSettings or Django settings |
| `gpt-4o` | MODEL_RATES constant | `backend/igny8_core/utils/ai_processor.py` | `AIProcessor._get_model()` -> IntegrationSettings or Django settings |
| `dall-e-3` | IMAGE_MODEL_RATES constant | `backend/igny8_core/utils/ai_processor.py` | Parameter or default in `generate_image()` |
| `dall-e-2` | IMAGE_MODEL_RATES constant | `backend/igny8_core/utils/ai_processor.py` | Parameter or default in `generate_image()` |
**Model Selection Flow:**
1. `AIProcessor.__init__(account)` in `utils/ai_processor.py:54`
2. Calls `_get_model('openai', account)` in `utils/ai_processor.py:98`
3. Tries: `IntegrationSettings.objects.filter(integration_type='openai', account=account, is_active=True).first().config.get('model')`
4. Validates model is in `MODEL_RATES` dict
5. Falls back to: `settings.DEFAULT_AI_MODEL` (default: 'gpt-4.1')
**Model Storage:**
- **Database Model**: `IntegrationSettings` in `backend/igny8_core/modules/system/models.py`
- **Table**: `igny8_integration_settings`
- **Fields**: `integration_type`, `config` (JSONField), `account` (FK)
- **Config Structure**: `{"apiKey": "...", "model": "gpt-4.1", "enabled": true}`
---
## ⚠️ Validation & Limits
| Function | Validation Checks | Limit Checks |
|----------|-------------------|--------------|
| `_auto_cluster_keywords_core` | Has validate() call, Keywords exist check | Credit check, Plan limits (daily_cluster_limit, max_clusters) |
| `AutoClusterFunction.validate()` | Base validation (ids array, max_items), Keywords exist | Plan limits (daily_cluster_limit, max_clusters) |
| `auto_generate_content_task` | Task existence, Account validation | Credit check (via CreditService) |
| `auto_generate_images_task` | Task existence, Account validation | Credit check (via CreditService) |
| `generate_image` | Model validation (VALID_OPENAI_IMAGE_MODELS), Size validation (VALID_SIZES_BY_MODEL) | None |
| `AIProcessor._get_model()` | Model in MODEL_RATES validation | None |
**Validation Details:**
1. **Plan Limits** (in `AutoClusterFunction.validate()`):
- `plan.daily_cluster_limit` - Max clusters per day
- `plan.max_clusters` - Total max clusters
- Checked in `backend/igny8_core/ai/functions/auto_cluster.py:59-79`
2. **Credit Checks**:
- `CreditService.check_credits(account, required_credits)` in `modules/billing/services.py:16`
- Used before AI operations
3. **Model Validation**:
   - OpenAI images: Only `dall-e-3` and `dall-e-2` valid (lines 704-708 in `ai_processor.py`)
   - Size validation per model (lines 719-724 in `ai_processor.py`)
4. **Input Validation**:
- Base validation in `BaseAIFunction.validate()` checks for 'ids' array and max_items limit
- `AutoClusterFunction.get_max_items()` returns 20 (max keywords per cluster)
---
## 🔁 Retry & Error Handling
| Component | Retry Logic | Error Handling | Fallback |
|-----------|-------------|----------------|----------|
| `run_ai_task` | `max_retries=3` (Celery decorator) | Exception caught, task state updated to FAILURE | None |
| `auto_generate_content_task` | `max_retries=3` (Celery decorator) | Try/except blocks, error logging | None |
| `_call_openai` | None (single attempt) | HTTP error handling, JSON parse errors, timeout (60s) | Returns error dict |
| `_get_api_key` | None | Exception caught, logs warning | Falls back to Django settings (`OPENAI_API_KEY`, `RUNWARE_API_KEY`) |
| `_get_model` | None | Exception caught, logs warning | Falls back to Django settings (`DEFAULT_AI_MODEL`) |
---
## 🪵 AI Debug Steps
| Function | Request Steps | Response Steps | Step Tracking Method |
|----------|---------------|----------------|---------------------|
| `_auto_cluster_keywords_core` | ✅ (manual list) | ✅ (manual list) | Manual `request_steps.append()` and `response_steps.append()` |
| `AutoClusterFunction` | ✅ (via StepTracker) | ✅ (via StepTracker) | `StepTracker.add_request_step()` and `add_response_step()` |
| `run_ai_task` | ✅ (via engine) | ✅ (via engine) | Extracted from `engine.execute()` result |
| `AIEngine.execute` | ✅ (via StepTracker) | ✅ (via StepTracker) | `StepTracker` instance tracks all steps |
| `auto_generate_content_task` | ❌ | ❌ | No step tracking (legacy) |
| `auto_generate_images_task` | ❌ | ❌ | No step tracking (legacy) |
**Step Tracking Implementation:**
1. **New Framework** (AIEngine):
- Uses `StepTracker` class in `backend/igny8_core/ai/tracker.py`
- Steps added at each phase: INIT, PREP, AI_CALL, PARSE, SAVE, DONE
- Steps stored in `request_steps` and `response_steps` lists
- Returned in result dict and logged to `AITaskLog` model
2. **Legacy Functions**:
- Manual step tracking with lists
- Steps added to `meta` dict for Celery task progress
- Extracted in `integration_views.py:task_progress()` for frontend
3. **Step Structure**:
```python
{
    'stepNumber': int,
    'stepName': str,      # INIT, PREP, AI_CALL, PARSE, SAVE, DONE
    'functionName': str,
    'status': str,        # 'success' | 'error' | 'pending'
    'message': str,
    'error': str,         # optional
    'duration': int,      # milliseconds, optional
}
```
---
## 📦 Request/Response Structuring
| Function | Request Format | Response Format | JSON Mode | Parsing Method |
|----------|----------------|-----------------|-----------|----------------|
| `_call_openai` | OpenAI API format: `{'model': str, 'messages': [...], 'temperature': float, 'max_tokens': int, 'response_format': dict}` | `{'content': str, 'input_tokens': int, 'output_tokens': int, 'total_tokens': int, 'model': str, 'cost': float, 'error': str, 'api_id': str}` | ✅ (if `response_format={'type': 'json_object'}`) | `_extract_json_from_response()` |
| `cluster_keywords` | Prompt string with keywords | JSON with 'clusters' array | ✅ (auto-enabled for json_models) | `_extract_json_from_response()` then extract 'clusters' |
| `generate_ideas` | Prompt string with clusters | JSON with 'ideas' array | ✅ (auto-enabled for json_models) | `_extract_json_from_response()` then extract 'ideas' |
| `generate_image` (OpenAI) | `{'prompt': str, 'model': str, 'n': int, 'size': str}` | `{'url': str, 'revised_prompt': str, 'cost': float}` | N/A | Direct JSON response |
| `generate_image` (Runware) | Array format with `imageInference` tasks | `{'url': str, 'cost': float}` | N/A | Extract from nested response structure |
**JSON Mode Auto-Enable:**
- Models: `['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']`
- Auto-enabled in `AIProcessor.call()` if `response_format` not specified
- Location: `backend/igny8_core/ai/processor.py:40-42`
**JSON Extraction:**
- Primary: Direct `json.loads()` on response
- Fallback: `_extract_json_from_response()` handles:
- Markdown code blocks (```json ... ```)
- Multiline JSON
- Partial JSON extraction
- Location: `backend/igny8_core/utils/ai_processor.py:334-440`
---
## 📍 Paths & Constants
| Constant | Value | File | Usage |
|----------|-------|------|-------|
| `OPENAI_API_KEY` | Django setting | `backend/igny8_core/utils/ai_processor.py:93` | Fallback API key |
| `RUNWARE_API_KEY` | Django setting | `backend/igny8_core/utils/ai_processor.py:95` | Fallback API key |
| `DEFAULT_AI_MODEL` | Django setting (default: 'gpt-4.1') | `backend/igny8_core/utils/ai_processor.py:121` | Fallback model |
| OpenAI API URL | `'https://api.openai.com/v1/chat/completions'` | `backend/igny8_core/utils/ai_processor.py:163` | Text generation endpoint |
| OpenAI Images URL | `'https://api.openai.com/v1/images/generations'` | `backend/igny8_core/utils/ai_processor.py:735` | Image generation endpoint |
| Runware API URL | `'https://api.runware.ai/v1'` | `backend/igny8_core/utils/ai_processor.py:844` | Runware image generation |
| `MODEL_RATES` | Dict with pricing per 1M tokens | `backend/igny8_core/utils/ai_processor.py:19` | Cost calculation |
| `IMAGE_MODEL_RATES` | Dict with pricing per image | `backend/igny8_core/utils/ai_processor.py:26` | Image cost calculation |
| `VALID_OPENAI_IMAGE_MODELS` | `{'dall-e-3', 'dall-e-2'}` | `backend/igny8_core/utils/ai_processor.py:34` | Model validation |
| `VALID_SIZES_BY_MODEL` | Dict mapping models to valid sizes | `backend/igny8_core/utils/ai_processor.py:41` | Size validation |
---
## 💰 Cost Tracking
| Component | Cost Calculation | Token Tracking | Storage |
|-----------|------------------|----------------|---------|
| `_call_openai` | Calculated from `MODEL_RATES` based on input/output tokens | ✅ (input_tokens, output_tokens, total_tokens) | Returned in result dict |
| `generate_image` (OpenAI) | `IMAGE_MODEL_RATES[model] * n` | N/A | Returned in result dict |
| `generate_image` (Runware) | `0.036 * n` (hardcoded) | N/A | Returned in result dict |
| `CostTracker` | Aggregates costs from multiple operations | ✅ (total_tokens) | In-memory during execution |
| `AITaskLog` | Stored in `cost` field (DecimalField) | ✅ (stored in `tokens` field) | Database table `igny8_ai_task_logs` |
| `CreditUsageLog` | Stored in `cost_usd` field | ✅ (tokens_input, tokens_output) | Database table (billing module) |
**Cost Calculation Formula:**
```python
# Text generation
input_cost = (input_tokens / 1_000_000) * MODEL_RATES[model]['input']
output_cost = (output_tokens / 1_000_000) * MODEL_RATES[model]['output']
total_cost = input_cost + output_cost
# Image generation
cost = IMAGE_MODEL_RATES[model] * n_images
```
---
## 📊 Progress Tracking
| Function | Progress Method | Phase Tracking | Percentage Mapping |
|----------|-----------------|----------------|-------------------|
| `_auto_cluster_keywords_core` | `progress_callback()` function | Manual phase strings | Manual percentage |
| `auto_generate_content_task` | `self.update_state()` (Celery) | Manual phase strings | Manual percentage |
| `AIEngine.execute` | `ProgressTracker.update()` | Automatic (INIT, PREP, AI_CALL, PARSE, SAVE, DONE) | Automatic: INIT (0-10%), PREP (10-25%), AI_CALL (25-70%), PARSE (70-85%), SAVE (85-98%), DONE (98-100%) |
| `run_ai_task` | Via `AIEngine` | Via `AIEngine` | Via `AIEngine` |
**Progress Tracker:**
- Class: `ProgressTracker` in `backend/igny8_core/ai/tracker.py:77`
- Updates Celery task state via `task.update_state()`
- Tracks: phase, percentage, message, current, total, meta
---
## 🗄️ Database Logging
| Component | Log Table | Fields Logged | When Logged |
|-----------|-----------|---------------|-------------|
| `AIEngine.execute` | `AITaskLog` | task_id, function_name, phase, message, status, duration, cost, tokens, request_steps, response_steps, error, payload, result | After execution (success or error) |
| Credit usage | `CreditUsageLog` | account, operation_type, credits_used, cost_usd, model_used, tokens_input, tokens_output | After successful save operation |
**AITaskLog Model:**
- Table: `igny8_ai_task_logs`
- Location: `backend/igny8_core/ai/models.py:8`
- Fields: All execution details including steps, costs, tokens, errors
---
## 🔄 Celery Integration
| Task | Entrypoint | Task ID | State Updates | Error Handling |
|------|------------|---------|---------------|----------------|
| `run_ai_task` | `backend/igny8_core/ai/tasks.py:13` | `self.request.id` | Via `ProgressTracker` | Updates state to FAILURE, raises exception |
| `auto_generate_content_task` | `backend/igny8_core/modules/writer/tasks.py:27` | `self.request.id` | Manual `self.update_state()` | Try/except, logs error |
| `auto_generate_images_task` | `backend/igny8_core/modules/writer/tasks.py:741` | `self.request.id` | Manual `self.update_state()` | Try/except, logs error |
**Task Progress Endpoint:**
- Route: `/api/v1/system/settings/task_progress/{task_id}/`
- Handler: `IntegrationSettingsViewSet.task_progress()` in `modules/system/integration_views.py:936`
- Extracts: `request_steps` and `response_steps` from task meta
- Returns: Progress data to frontend for debug panel
---
## Summary
**Key Findings:**
1. **Two AI Systems Coexist:**
- **Legacy**: Direct functions in `modules/planner/tasks.py` and `modules/writer/tasks.py`
- **New Framework**: `AIEngine` + `BaseAIFunction` classes in `ai/` directory
2. **Unified Entrypoint:**
- `run_ai_task()` in `ai/tasks.py` is the unified Celery entrypoint
- Uses `AIEngine` to execute any registered AI function
3. **Prompt Management:**
- All prompts stored in `AIPrompt` model (database)
- Fallback to hardcoded defaults in `get_default_prompt()`
- Retrieved via `get_prompt_value(account, prompt_type)`
4. **Model Selection:**
- Per-account via `IntegrationSettings.config['model']`
- Falls back to Django `DEFAULT_AI_MODEL` setting
- Validated against `MODEL_RATES` dict
5. **Step Tracking:**
- New framework uses `StepTracker` class
- Legacy functions use manual lists
- Both stored in Celery task meta and `AITaskLog` model
6. **Cost Tracking:**
- Calculated from `MODEL_RATES` and `IMAGE_MODEL_RATES`
- Logged to `AITaskLog` and `CreditUsageLog`
- Tracked via `CostTracker` during execution

View File

@@ -0,0 +1,121 @@
# Color Tokens Comparison
Side-by-side comparison of provided tokens vs. global color palette
| Color Token | Provided Value | Our Global Palette | Match Status | Notes |
|-------------|----------------|-------------------|--------------|-------|
| **RED** |
| `--color-red-50` | `oklch(.971 .013 17.38)` | `oklch(0.971 0.013 17.38)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:10` |
| `--color-red-100` | `oklch(.936 .032 17.717)` | `oklch(0.936 0.032 17.717)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:11` |
| `--color-red-500` | `oklch(.637 .237 25.331)` | `oklch(0.637 0.237 25.331)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:15` |
| `--color-red-600` | `oklch(.577 .245 27.325)` | `oklch(0.577 0.245 27.325)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:16` |
| `--color-red-700` | `oklch(.505 .213 27.518)` | `oklch(0.505 0.213 27.518)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:17` |
| **ORANGE** |
| `--color-orange-50` | `#fff6ed` | `oklch(0.98 0.016 73.684)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:22`) |
| `--color-orange-100` | `#ffead5` | `oklch(0.954 0.038 75.164)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:23`) |
| `--color-orange-400` | `#fd853a` | `oklch(0.75 0.183 55.934)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:26`) |
| `--color-orange-500` | `#fb6514` | `oklch(0.705 0.213 47.604)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:27`) |
| `--color-orange-600` | `#ec4a0a` | `oklch(0.646 0.222 41.116)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:28`) |
| `--color-orange-700` | `#c4320a` | `oklch(0.553 0.195 38.402)` ❌ | **DIFFERENT** | Provided: hex, Ours: oklch (in `tailwindcss/theme.css:29`) |
| **YELLOW** |
| `--color-yellow-100` | `oklch(.973 .071 103.193)` | `oklch(0.973 0.071 103.193)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:47` |
| `--color-yellow-600` | `oklch(.681 .162 75.834)` | `oklch(0.681 0.162 75.834)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:52` |
| **GREEN** |
| `--color-green-50` | `oklch(.982 .018 155.826)` | `oklch(0.982 0.018 155.826)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:70` |
| `--color-green-100` | `oklch(.962 .044 156.743)` | `oklch(0.962 0.044 156.743)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:71` |
| `--color-green-500` | `oklch(.723 .219 149.579)` | `oklch(0.723 0.219 149.579)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:75` |
| `--color-green-600` | `oklch(.627 .194 149.214)` | `oklch(0.627 0.194 149.214)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:76` |
| `--color-green-700` | `oklch(.527 .154 150.069)` | `oklch(0.527 0.154 150.069)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:77` |
| **CYAN** |
| `--color-cyan-100` | `oklch(.956 .045 203.388)` | `oklch(0.956 0.045 203.388)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:107` |
| `--color-cyan-600` | `oklch(.609 .126 221.723)` | `oklch(0.609 0.126 221.723)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:112` |
| **BLUE** |
| `--color-blue-50` | `oklch(.97 .014 254.604)` | `oklch(0.97 0.014 254.604)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:130` |
| `--color-blue-100` | `oklch(.932 .032 255.585)` | `oklch(0.932 0.032 255.585)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:131` |
| `--color-blue-500` | `oklch(.623 .214 259.815)` | `oklch(0.623 0.214 259.815)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:135` |
| `--color-blue-700` | `oklch(.488 .243 264.376)` | `oklch(0.488 0.243 264.376)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:137` |
| **PURPLE** |
| `--color-purple-50` | `oklch(.977 .014 308.299)` | `oklch(0.977 0.014 308.299)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:166` |
| `--color-purple-100` | `oklch(.946 .033 307.174)` | `oklch(0.946 0.033 307.174)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:167` |
| `--color-purple-400` | `oklch(.714 .203 305.504)` | `oklch(0.714 0.203 305.504)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:170` |
| `--color-purple-500` | `oklch(.627 .265 303.9)` | `oklch(0.627 0.265 303.9)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:171` |
| `--color-purple-600` | `oklch(.558 .288 302.321)` | `oklch(0.558 0.288 302.321)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:172` |
| `--color-purple-700` | `oklch(.496 .265 301.924)` | `oklch(0.496 0.265 301.924)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:173` |
| **PINK** |
| `--color-pink-100` | `oklch(.948 .028 342.258)` | `oklch(0.948 0.028 342.258)` ✅ | **EXACT MATCH** | In `tailwindcss/theme.css:191` |
| `--color-pink-600` | `oklch(.592 .249 .584)` | `oklch(0.592 0.249 0.584)` ⚠️ | **FORMAT DIFFERENCE** | Provided: `.584` (missing hue), Ours: full oklch (in `tailwindcss/theme.css:196`) |
| **GRAY** |
| `--color-gray-50` | `#f9fafb` | `#f9fafb` ✅ | **EXACT MATCH** | In `index.css:87` |
| `--color-gray-100` | `#f2f4f7` | `#f2f4f7` ✅ | **EXACT MATCH** | In `index.css:88` |
| `--color-gray-200` | `#e4e7ec` | `#e4e7ec` ✅ | **EXACT MATCH** | In `index.css:89` |
| `--color-gray-300` | `#d0d5dd` | `#d0d5dd` ✅ | **EXACT MATCH** | In `index.css:90` |
| `--color-gray-400` | `#98a2b3` | `#98a2b3` ✅ | **EXACT MATCH** | In `index.css:91` |
| `--color-gray-500` | `#667085` | `#667085` ✅ | **EXACT MATCH** | In `index.css:92` |
| `--color-gray-600` | `#475467` | `#475467` ✅ | **EXACT MATCH** | In `index.css:93` |
| `--color-gray-700` | `#344054` | `#344054` ✅ | **EXACT MATCH** | In `index.css:94` |
| `--color-gray-800` | `#1d2939` | `#1d2939` ✅ | **EXACT MATCH** | In `index.css:95` |
| `--color-gray-900` | `#101828` | `#101828` ✅ | **EXACT MATCH** | In `index.css:96` |
| `--color-gray-950` | `#0c111d` | `#0c111d` ✅ | **EXACT MATCH** | In `index.css:97` |
| **BLACK & WHITE** |
| `--color-black` | `#101828` | `#101828` ✅ | **EXACT MATCH** | In `index.css:58` |
| `--color-white` | `#fff` | `#ffffff` ✅ | **EQUIVALENT** | In `index.css:57` (same color, different format) |
---
## Summary
### ✅ **Matches (38 tokens)**
- **Red**: All 5 tokens match exactly (oklch format)
- **Yellow**: Both tokens match exactly (oklch format)
- **Green**: All 5 tokens match exactly (oklch format)
- **Cyan**: Both tokens match exactly (oklch format)
- **Blue**: All 4 tokens match exactly (oklch format)
- **Purple**: All 6 tokens match exactly (oklch format)
- **Pink**: pink-100 matches exactly (oklch format)
- **Gray**: All 11 tokens match exactly (hex format)
- **Black/White**: Both match (hex format)
### ❌ **Differences (6 tokens)**
- **Orange**: All 6 tokens differ
- Provided: Hex format (`#fff6ed`, `#ffead5`, etc.)
- Ours: oklch format in Tailwind theme
- **Note**: These are likely the same colors, just different formats. The hex values you provided match our custom orange palette in `index.css:100-111` (which uses hex), but Tailwind's theme uses oklch.
### ⚠️ **Format Issue (1 token)**
- **Pink-600**: `oklch(.592 .249 .584)` - Missing hue value (should be 3 values: lightness, chroma, hue)
---
## Location of Colors in Codebase
### Tailwind Default Colors (oklch format)
- **File**: `frontend/node_modules/tailwindcss/theme.css`
- Contains: red, orange, yellow, green, cyan, blue, purple, pink (all in oklch)
### Custom IGNY8 Colors (hex format)
- **File**: `frontend/src/index.css` (lines 55-154)
- Contains:
- Gray scale (hex)
- Orange scale (hex) - **Matches your provided orange values!**
- Brand colors (hex)
- Success/Error/Warning colors (hex)
### IGNY8 Brand Colors
- **File**: `frontend/src/styles/igny8-colors.css`
- Contains: Custom brand colors with `--igny8-` prefix
---
## Recommendation
**Your provided orange tokens match our custom orange palette in `index.css`!**
The orange colors you provided are already in our codebase:
- `--color-orange-50: #fff6ed` ✅ (line 101)
- `--color-orange-100: #ffead5` ✅ (line 102)
- `--color-orange-400: #fd853a` ✅ (line 105)
- `--color-orange-500: #fb6514` ✅ (line 106)
- `--color-orange-600: #ec4a0a` ✅ (line 107)
- `--color-orange-700: #c4320a` ✅ (line 108)
All other colors (red, yellow, green, cyan, blue, purple, gray, black, white) also match either in Tailwind's theme or our custom palette.

View File

@@ -0,0 +1,380 @@
#!/usr/bin/env python3
"""
IGNY8 AI Data Mapping Script
Extracts complete reference table for all AI-related elements (functions, models, prompts, limits, calls)
to eliminate assumptions during restructuring.
Output: Markdown table with all AI Elements for cluster, idea, content, image
"""
import os
import re
import json
import ast
from pathlib import Path
from typing import Dict, List, Any, Optional
# Repository root, derived from this file's location (assumes the script
# lives one directory below the root, e.g. scripts/extract_ai_elements.py —
# TODO confirm once the script's final location is fixed).
PROJECT_ROOT = Path(__file__).parent.parent
# Django backend package containing all AI-related modules scanned below.
BACKEND_ROOT = PROJECT_ROOT / "backend" / "igny8_core"
def _decorator_names(node: ast.AST) -> List[str]:
    """Return the plain names of every decorator applied to *node*.

    Handles bare decorators (``@shared_task``), call decorators
    (``@shared_task(bind=True)``) and attribute decorators
    (``@app.task`` -> ``"task"``).
    """
    names: List[str] = []
    for decorator in getattr(node, "decorator_list", []):
        target = decorator.func if isinstance(decorator, ast.Call) else decorator
        if isinstance(target, ast.Name):
            names.append(target.id)
        elif isinstance(target, ast.Attribute):
            names.append(target.attr)
    return names


def extract_function_info(file_path: Path, function_name: str) -> Dict[str, Any]:
    """Extract information about a function from a Python file.

    Parses *file_path* with ``ast`` and, for the first (sync or async)
    function named *function_name*, reports heuristics about its AI usage:
    AIProcessor references, Celery binding, progress/step tracking, prompt
    and model sources, and validation/limit checks.

    Returns a dict of findings; ``{"error": ...}`` if the file cannot be
    read or parsed.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        return {"error": f"Could not read file: {e}"}
    # Try to parse AST
    try:
        tree = ast.parse(content)
    except SyntaxError:
        return {"error": "Syntax error in file"}
    info = {
        "file": str(file_path.relative_to(PROJECT_ROOT)),
        "function_name": function_name,
        "found": False,
        "line_number": None,
        "uses_ai_processor": False,
        "uses_celery": False,
        "has_progress_callback": False,
        "has_request_steps": False,
        "has_response_steps": False,
        "prompt_source": "Unknown",
        "model_source": "Unknown",
        "validation_checks": [],
        "limit_checks": [],
    }
    # Search for the function definition (async defs included, which the
    # previous FunctionDef-only check missed).
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) and node.name == function_name:
            info["found"] = True
            info["line_number"] = node.lineno
            # Check function body for patterns. Note: get_source_segment
            # starts at the `def` line, so decorators are NOT part of it.
            func_content = ast.get_source_segment(content, node) or ""
            # Check for AIProcessor usage
            if "AIProcessor" in func_content or "ai_processor" in func_content:
                info["uses_ai_processor"] = True
            # Celery: bound-task access to self.request, or a shared_task
            # decorator on this function. (The old heuristic searched an
            # arbitrary `lineno * 100`-character prefix of the file, which
            # could both miss the decorator and falsely match one belonging
            # to an unrelated function.)
            if "self.request" in func_content or "shared_task" in _decorator_names(node):
                info["uses_celery"] = True
            # Check for progress tracking
            if "progress_callback" in func_content or "progress_tracker" in func_content:
                info["has_progress_callback"] = True
            # Check for step tracking
            if "request_steps" in func_content:
                info["has_request_steps"] = True
            if "response_steps" in func_content:
                info["has_response_steps"] = True
            # Check for prompt sources (first matching source wins)
            if "get_prompt_value" in func_content:
                info["prompt_source"] = "Database (get_prompt_value)"
            elif "get_default_prompt" in func_content:
                info["prompt_source"] = "Default (get_default_prompt)"
            elif "prompt_template" in func_content.lower():
                info["prompt_source"] = "Inline/Hardcoded"
            # Check for model selection (first matching source wins)
            if "default_model" in func_content or "self.default_model" in func_content:
                info["model_source"] = "AIProcessor.default_model"
            elif "get_model" in func_content:
                info["model_source"] = "Function.get_model()"
            elif "IntegrationSettings" in func_content:
                info["model_source"] = "IntegrationSettings.config['model']"
            # Check for validation
            if "validate" in func_content.lower():
                info["validation_checks"].append("Has validate() call")
            if "check_credits" in func_content:
                info["limit_checks"].append("Credit check")
            if "daily_cluster_limit" in func_content or "max_clusters" in func_content:
                info["limit_checks"].append("Plan limits")
            break
    return info
def extract_class_info(file_path: Path, class_name: str) -> Dict[str, Any]:
    """Extract information about a class from a Python file.

    Reports the class's location, base-class names and the names of its
    (sync and async) methods. Returns ``{"error": ...}`` if the file cannot
    be read or parsed.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        return {"error": f"Could not read file: {e}"}
    try:
        tree = ast.parse(content)
    except SyntaxError:
        return {"error": "Syntax error in file"}
    info = {
        "file": str(file_path.relative_to(PROJECT_ROOT)),
        "class_name": class_name,
        "found": False,
        "line_number": None,
        "methods": [],
        "inherits_from": [],
    }
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef) and node.name == class_name:
            info["found"] = True
            info["line_number"] = node.lineno
            # Get base classes — both plain names and dotted bases such as
            # ``abc.ABC`` (the old version silently skipped Attribute nodes).
            for base in node.bases:
                if isinstance(base, ast.Name):
                    info["inherits_from"].append(base.id)
                elif isinstance(base, ast.Attribute):
                    info["inherits_from"].append(base.attr)
            # Get methods, including async ones (previously omitted).
            for item in node.body:
                if isinstance(item, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    info["methods"].append(item.name)
            break
    return info
def find_ai_functions() -> List[Dict[str, Any]]:
    """Locate every tracked AI-related function/class and collect its metadata.

    Skips targets whose source file does not exist; each returned dict is
    the extractor's output augmented with the element's definition type and
    AI category.
    """
    # One row per tracked element: (name, source file, definition type, category).
    targets = [
        ("_auto_cluster_keywords_core", BACKEND_ROOT / "modules" / "planner" / "tasks.py", "core_function", "cluster"),
        ("_generate_single_idea_core", BACKEND_ROOT / "modules" / "planner" / "tasks.py", "core_function", "ideas"),
        ("auto_generate_content_task", BACKEND_ROOT / "modules" / "writer" / "tasks.py", "celery_task", "content"),
        ("AutoClusterFunction", BACKEND_ROOT / "ai" / "functions" / "auto_cluster.py", "class", "cluster"),
        ("cluster_keywords", BACKEND_ROOT / "utils" / "ai_processor.py", "method", "cluster"),
        ("generate_ideas", BACKEND_ROOT / "utils" / "ai_processor.py", "method", "ideas"),
        ("generate_content", BACKEND_ROOT / "utils" / "ai_processor.py", "method", "content"),
        ("generate_image", BACKEND_ROOT / "utils" / "ai_processor.py", "method", "image"),
        ("run_ai_task", BACKEND_ROOT / "ai" / "tasks.py", "celery_task", "unified"),
        ("execute", BACKEND_ROOT / "ai" / "engine.py", "method", "unified"),
    ]
    results: List[Dict[str, Any]] = []
    for name, source_path, def_type, category in targets:
        if not source_path.exists():
            continue
        # Classes and functions need different AST extractors.
        extractor = extract_class_info if def_type == "class" else extract_function_info
        info = extractor(source_path, name)
        info.update({"type": def_type, "category": category})
        results.append(info)
    return results
def extract_prompt_info() -> List[Dict[str, Any]]:
    """Collect the prompt types hardcoded in ``get_default_prompt()``.

    Scans ``modules/system/utils.py`` for triple-quoted prompt entries and
    returns one descriptor dict per prompt type found (empty list if the
    file is missing).
    """
    utils_file = BACKEND_ROOT / "modules" / "system" / "utils.py"
    if not utils_file.exists():
        return []
    content = utils_file.read_text(encoding='utf-8')
    # A default prompt entry looks like:  'prompt_name': """...
    return [
        {
            "prompt_type": prompt_type,
            "source": "Hardcoded in get_default_prompt()",
            "file": "modules/system/utils.py",
            "retrieval": "get_prompt_value() -> AIPrompt model or default"
        }
        for prompt_type in re.findall(r"'(\w+)':\s*\"\"\"", content)
    ]
def extract_model_info() -> List[Dict[str, Any]]:
    """Extract model configuration information from ``MODEL_RATES``.

    Scans ``utils/ai_processor.py`` for the ``MODEL_RATES`` dict and returns
    one descriptor per top-level key (i.e. per model name). Returns an empty
    list if the file or the constant is missing.
    """
    models: List[Dict[str, Any]] = []
    processor_file = BACKEND_ROOT / "utils" / "ai_processor.py"
    if not processor_file.exists():
        return models
    with open(processor_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Locate the opening brace of `MODEL_RATES = {`.
    opener = re.search(r'MODEL_RATES\s*=\s*\{', content)
    if not opener:
        return models
    # Walk the braces to find the matching close. The previous
    # `\{([^}]+)\}` regex stopped at the FIRST '}', truncating the dict as
    # soon as it contained a nested per-model rate dict.
    start = opener.end() - 1
    depth = 0
    end = None
    for index in range(start, len(content)):
        char = content[index]
        if char == '{':
            depth += 1
        elif char == '}':
            depth -= 1
            if depth == 0:
                end = index
                break
    if end is None:
        return models
    body = content[start + 1:end]
    # Collect only depth-0 quoted keys (the model names), skipping keys of
    # nested rate dicts, which the old blanket `'([^']+)'` findall also
    # swept up.
    # NOTE(review): brace characters inside string literals would skew the
    # depth tracking; assumed not to occur inside MODEL_RATES — verify.
    nesting = 0
    for token in re.finditer(r"'([^']+)'\s*:|[{}]", body):
        text = token.group(0)
        if text == '{':
            nesting += 1
        elif text == '}':
            nesting -= 1
        elif nesting == 0:
            models.append({
                "model_name": token.group(1),
                "source": "MODEL_RATES constant",
                "file": "utils/ai_processor.py",
                "selection": "AIProcessor._get_model() -> IntegrationSettings or Django settings"
            })
    return models
def generate_markdown_table(functions: List[Dict], prompts: List[Dict], models: List[Dict]) -> str:
    """Generate markdown table from extracted data.

    Renders four sections — core functions, prompt sources, model
    configuration, and validation/limit checks — and returns the complete
    markdown document as one string. Function entries carrying an "error"
    key are skipped.
    """
    output = []
    output.append("# IGNY8 AI Elements Reference Table\n")
    output.append("Generated by extract_ai_elements.py\n")
    output.append("---\n\n")
    # Functions table
    output.append("## 🧠 AI Core Functions\n\n")
    output.append("| Function Name | Category | Type | File | Line | Uses AIProcessor | Celery | Progress | Steps | Prompt Source | Model Source |\n")
    output.append("|---------------|----------|------|------|------|------------------|--------|----------|-------|---------------|--------------|\n")
    for func in functions:
        if func.get("error"):
            continue
        name = func.get("function_name") or func.get("class_name", "N/A")
        category = func.get("category", "N/A")
        func_type = func.get("type", "N/A")
        file = func.get("file", "N/A")
        # `line_number` is present-but-None for functions that were not
        # found, so `or` (rather than a .get default) is needed to render
        # "N/A" instead of "None".
        line = str(func.get("line_number") or "N/A")
        # Boolean flags rendered as check marks. (The previous version used
        # `"" if cond else ""` — both branches empty — leaving the columns
        # blank regardless of the flag's value.)
        uses_ai = "✅" if func.get("uses_ai_processor") else "❌"
        celery = "✅" if func.get("uses_celery") else "❌"
        progress = "✅" if func.get("has_progress_callback") else "❌"
        steps = "✅" if (func.get("has_request_steps") or func.get("has_response_steps")) else "❌"
        prompt = func.get("prompt_source", "Unknown")
        model = func.get("model_source", "Unknown")
        output.append(f"| {name} | {category} | {func_type} | `{file}` | {line} | {uses_ai} | {celery} | {progress} | {steps} | {prompt} | {model} |\n")
    # Prompts table
    output.append("\n## 🧱 Prompt Sources\n\n")
    output.append("| Prompt Type | Source | File | Retrieval Method |\n")
    output.append("|-------------|--------|------|------------------|\n")
    for prompt in prompts:
        output.append(f"| {prompt['prompt_type']} | {prompt['source']} | `{prompt['file']}` | {prompt['retrieval']} |\n")
    # Models table
    output.append("\n## 🧾 Model Configuration\n\n")
    output.append("| Model Name | Source | File | Selection Method |\n")
    output.append("|------------|--------|------|------------------|\n")
    for model in models:
        output.append(f"| {model['model_name']} | {model['source']} | `{model['file']}` | {model['selection']} |\n")
    # Validation and Limits (only functions that were actually found)
    output.append("\n## ⚠️ Validation & Limits\n\n")
    output.append("| Function | Validation Checks | Limit Checks |\n")
    output.append("|----------|-------------------|--------------|\n")
    for func in functions:
        if func.get("error") or not func.get("found"):
            continue
        name = func.get("function_name") or func.get("class_name", "N/A")
        validations = ", ".join(func.get("validation_checks", [])) or "None"
        limits = ", ".join(func.get("limit_checks", [])) or "None"
        output.append(f"| {name} | {validations} | {limits} |\n")
    return "".join(output)
def main():
    """Scan the codebase, build the AI-elements report, save and echo it."""
    print("🔍 Extracting AI elements from codebase...")

    function_rows = find_ai_functions()
    prompt_rows = extract_prompt_info()
    model_rows = extract_model_info()

    print(f"✅ Found {len(function_rows)} functions")
    print(f"✅ Found {len(prompt_rows)} prompt types")
    print(f"✅ Found {len(model_rows)} models")

    # Render the markdown report from the three extracted datasets.
    report = generate_markdown_table(function_rows, prompt_rows, model_rows)

    # Persist under docs/ActiveDocs, creating the directory tree if needed.
    destination = PROJECT_ROOT / "docs" / "ActiveDocs" / "AI-ELEMENTS-EXTRACTED.md"
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(report, encoding='utf-8')
    print(f"\n✅ Table saved to: {destination.relative_to(PROJECT_ROOT)}")

    # Echo the full table so it can be reviewed straight from the console.
    separator = "=" * 80
    print("\n" + separator)
    print(report)
    print(separator)


if __name__ == "__main__":
    main()