Implement V2 AI functions and enhance progress handling
- Added support for the new V2 functions `auto_cluster_v2` and `generate_ideas_v2`, including backend logic and API endpoints.
- Updated model configuration so that V2 functions validate that a model is configured before execution.
- Enhanced the progress modal to provide better feedback during asynchronous tasks, including task IDs for debugging.
- Updated frontend components to integrate the new V2 functionality and improve the user experience for clustering and idea generation.
This commit is contained in:
@@ -82,10 +82,8 @@ class AIEngine:
|
||||
ai_core = AICore(account=self.account)
|
||||
function_name = fn.get_name()
|
||||
|
||||
# Generate function_id for tracking (ai-{function_name}-01)
|
||||
# Normalize underscores to hyphens to match frontend tracking IDs
|
||||
function_id_base = function_name.replace('_', '-')
|
||||
function_id = f"ai-{function_id_base}-01-desktop"
|
||||
# Generate function_id for tracking (ai_{function_name})
|
||||
function_id = f"ai_{function_name}"
|
||||
|
||||
# Get model config from settings (Stage 4 requirement)
|
||||
# Pass account to read model from IntegrationSettings
|
||||
@@ -111,6 +109,18 @@ class AIEngine:
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# For V2 functions: Validate model exists - no default, only execute if model is present
|
||||
if function_name.endswith('_v2'):
|
||||
if not model_from_integration or not model:
|
||||
error_msg = "AI model not configured. Please configure OpenAI model in Integration settings."
|
||||
self.console_tracker.error('ModelError', error_msg)
|
||||
self.step_tracker.add_request_step("PREP", "error", error_msg)
|
||||
self.tracker.error(error_msg, meta=self.step_tracker.get_meta())
|
||||
return {
|
||||
'success': False,
|
||||
'error': error_msg
|
||||
}
|
||||
|
||||
# Track configured model information so it shows in the progress modal
|
||||
self.step_tracker.add_request_step(
|
||||
"PREP",
|
||||
|
||||
@@ -0,0 +1,188 @@
|
||||
"""
|
||||
Auto Cluster Keywords V2 - Workflow Function
|
||||
Uses helpers folder imports and dynamic model loading
|
||||
Max 50 keywords for bulk actions
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.helpers.base import BaseAIFunction
|
||||
from igny8_core.modules.planner.models import Keywords, Clusters
|
||||
from igny8_core.ai.helpers.ai_core import AICore
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.helpers.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AutoClusterV2Function(BaseAIFunction):
    """Auto-cluster keywords into semantic groups using AI (V2, dynamic model).

    V2 workflow function: the model is read from IntegrationSettings (no
    hard-coded default — validated upstream by the engine) and bulk input
    is capped at 50 keywords.
    """

    def get_name(self) -> str:
        """Return the registry name of this function."""
        return 'auto_cluster_v2'

    def get_metadata(self) -> Dict:
        """Return display metadata and per-phase progress messages."""
        return {
            'display_name': 'Keywords Clustering',
            'description': 'Group related keywords into semantic clusters',
            'phases': {
                'INIT': 'Validating keywords...',
                'PREP': 'Loading keyword data...',
                'AI_CALL': 'Analyzing relationships with AI...',
                'PARSE': 'Processing cluster results...',
                'SAVE': 'Creating clusters...',
                'DONE': 'Clustering completed successfully'
            }
        }

    def get_max_items(self) -> int:
        """Maximum number of keywords accepted per run."""
        return 50  # Max 50 keywords

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate input: 1-50 existing keywords, account-scoped when given.

        Returns {'valid': True} or {'valid': False, 'error': <message>}.
        """
        ids = payload.get('ids', [])
        if not ids:
            return {'valid': False, 'error': 'No keywords selected'}

        if len(ids) > 50:
            return {'valid': False, 'error': 'Maximum 50 keywords allowed for clustering'}

        # Check keywords exist (scoped to the account when one is supplied)
        queryset = Keywords.objects.filter(id__in=ids)
        if account:
            queryset = queryset.filter(account=account)

        if queryset.count() != len(ids):
            return {'valid': False, 'error': 'Some selected keywords not found'}

        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load keywords with their relationships and build a plain-data view.

        Returns the original ORM objects (needed by save_output) alongside a
        serializable 'keyword_data' list used for prompt building.
        Raises ValueError when no matching keywords are found.
        """
        ids = payload.get('ids', [])
        sector_id = payload.get('sector_id')

        queryset = Keywords.objects.filter(id__in=ids)
        if account:
            queryset = queryset.filter(account=account)
        if sector_id:
            queryset = queryset.filter(sector_id=sector_id)

        keywords = list(queryset.select_related('seed_keyword', 'cluster', 'account', 'site', 'sector'))

        if not keywords:
            raise ValueError("No keywords found")

        keyword_data = [
            {
                'id': kw.id,
                'keyword': kw.keyword,
                'volume': kw.volume,
                'difficulty': kw.difficulty,
                # Intent lives on the seed keyword; may be absent.
                'intent': kw.seed_keyword.intent if kw.seed_keyword else None,
            }
            for kw in keywords
        ]

        return {
            'keywords': keywords,  # Store original objects for save_output
            'keyword_data': keyword_data,
            'sector_id': sector_id
        }

    def build_prompt(self, data: Dict, account=None) -> str:
        """Build the clustering prompt, appending a JSON-format instruction
        when the registry prompt does not already request JSON output."""
        keyword_data = data.get('keyword_data', [])
        sector_id = data.get('sector_id')

        # Format keywords as one bullet per line
        keywords_text = '\n'.join([
            f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']}, Intent: {kw.get('intent', 'N/A')})"
            for kw in keyword_data
        ])

        context = {'KEYWORDS': keywords_text}

        # Add sector context if available (best-effort: missing sector is fine)
        if sector_id:
            try:
                from igny8_core.auth.models import Sector
                sector = Sector.objects.get(id=sector_id)
                if sector:
                    context['SECTOR'] = sector.name
            except Exception:
                pass

        prompt = PromptRegistry.get_prompt(
            function_name='auto_cluster',
            account=account,
            context=context
        )

        # Heuristic: prompt already asks for JSON if it mentions "json"
        # alongside a formatting/response verb.
        prompt_lower = prompt.lower()
        has_json_request = (
            'json' in prompt_lower and
            ('format' in prompt_lower or 'respond' in prompt_lower or 'return' in prompt_lower or 'output' in prompt_lower)
        )

        if not has_json_request:
            prompt += "\n\nIMPORTANT: You must respond with valid JSON only. The response must be a JSON object with a 'clusters' array."

        return prompt

    def parse_response(self, response: str, step_tracker=None) -> List[Dict]:
        """Parse the AI response into cluster dicts.

        Raises ValueError when the response is empty or does not contain a
        'clusters' array.
        """
        if not response or not response.strip():
            raise ValueError("Empty response from AI")

        ai_core = AICore(account=getattr(self, 'account', None))
        json_data = ai_core.extract_json(response)

        if not json_data or 'clusters' not in json_data:
            raise ValueError("Invalid response format: missing 'clusters' array")

        return json_data['clusters']

    def save_output(self, parsed: List[Dict], original_data: Any, account=None, step_tracker=None) -> Dict:
        """Create clusters and attach matched keywords to them atomically.

        Keyword matching is case-insensitive on the keyword text. Returns
        counts of clusters created and keywords updated.
        """
        keywords = original_data.get('keywords', [])

        # Case-insensitive text -> keyword lookup built once (replaces the
        # previous per-name linear scan). setdefault keeps the FIRST match,
        # mirroring the old first-match behavior on duplicates.
        keyword_by_text = {}
        for kw in keywords:
            keyword_by_text.setdefault(kw.keyword.lower(), kw)

        clusters_created = 0
        keywords_updated = 0

        with transaction.atomic():
            for cluster_data in parsed:
                cluster_name = cluster_data.get('name', 'Unnamed Cluster')
                cluster_keywords = cluster_data.get('keywords', [])

                # Skip empty clusters the model may have emitted
                if not cluster_keywords:
                    continue

                cluster = Clusters.objects.create(
                    name=cluster_name,
                    description=f"Auto-clustered from {len(cluster_keywords)} keywords",
                    account=account,
                    status='active'
                )
                clusters_created += 1

                # Attach each matched keyword to the new cluster
                for keyword_text in cluster_keywords:
                    kw = keyword_by_text.get(keyword_text.lower())
                    if kw is not None:
                        kw.cluster = cluster
                        kw.save()
                        keywords_updated += 1

        return {
            'clusters_created': clusters_created,
            'keywords_updated': keywords_updated,
            'count': clusters_created
        }
|
||||
|
||||
@@ -0,0 +1,152 @@
|
||||
"""
|
||||
Generate Ideas V2 - Workflow Function
|
||||
Single cluster only, uses helpers folder imports
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List, Any
|
||||
from django.db import transaction
|
||||
from igny8_core.ai.helpers.base import BaseAIFunction
|
||||
from igny8_core.modules.planner.models import Clusters, ContentIdeas, Keywords
|
||||
from igny8_core.ai.helpers.ai_core import AICore
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.ai.helpers.settings import get_model_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GenerateIdeasV2Function(BaseAIFunction):
    """Generate content ideas from a keyword cluster (V2, dynamic model).

    V2 workflow function: the model is read from IntegrationSettings (no
    hard-coded default — validated upstream by the engine) and exactly one
    cluster is processed per run.
    """

    def get_name(self) -> str:
        """Return the registry name of this function."""
        return 'generate_ideas_v2'

    def get_metadata(self) -> Dict:
        """Return display metadata and per-phase progress messages."""
        return {
            'display_name': 'Generate Ideas',
            'description': 'Generate SEO-optimized content ideas from keyword cluster',
            'phases': {
                'INIT': 'Validating cluster...',
                'PREP': 'Loading cluster data...',
                'AI_CALL': 'Generating ideas with AI...',
                'PARSE': 'Processing idea results...',
                'SAVE': 'Saving ideas...',
                'DONE': 'Ideas generated successfully'
            }
        }

    def get_max_items(self) -> int:
        """Only a single cluster may be processed per run."""
        return 1  # Single cluster only

    def validate(self, payload: dict, account=None) -> Dict:
        """Validate that exactly one existing (account-scoped) cluster was selected.

        Returns {'valid': True} or {'valid': False, 'error': <message>}.
        """
        ids = payload.get('ids', [])
        if not ids:
            return {'valid': False, 'error': 'No cluster selected'}

        if len(ids) > 1:
            return {'valid': False, 'error': 'Only one cluster can be processed at a time'}

        queryset = Clusters.objects.filter(id=ids[0])
        if account:
            queryset = queryset.filter(account=account)

        if not queryset.first():
            return {'valid': False, 'error': 'Cluster not found'}

        return {'valid': True}

    def prepare(self, payload: dict, account=None) -> Dict:
        """Load the cluster plus a serializable snapshot of its keywords.

        Returns the original ORM cluster (needed by save_output) alongside
        plain-data 'cluster_data' used for prompt building.
        Raises ValueError when the cluster does not exist.
        """
        cluster_id = payload.get('ids', [])[0]
        queryset = Clusters.objects.filter(id=cluster_id)
        if account:
            queryset = queryset.filter(account=account)

        # Keywords are fetched with their own query below, so no prefetch is
        # needed on the cluster query itself.
        cluster = queryset.first()

        if not cluster:
            raise ValueError("Cluster not found")

        keyword_objects = Keywords.objects.filter(cluster=cluster).select_related('seed_keyword')
        keywords = [
            {
                # Prefer the seed keyword's text when one is linked.
                'keyword': kw.seed_keyword.keyword if kw.seed_keyword else kw.keyword,
                'volume': kw.volume,
                'difficulty': kw.difficulty,
            }
            for kw in keyword_objects
        ]

        return {
            'cluster': cluster,  # Store original object for save_output
            'cluster_data': {
                'id': cluster.id,
                'name': cluster.name,
                'description': cluster.description or '',
                'keywords': keywords,
            }
        }

    def build_prompt(self, data: Dict, account=None) -> str:
        """Build the idea-generation prompt from the prepared cluster data."""
        cluster_data = data.get('cluster_data', {})
        keywords = cluster_data.get('keywords', [])
        keyword_list = [kw['keyword'] for kw in keywords]

        # Format cluster summary line
        clusters_text = f"Cluster ID: {cluster_data.get('id', '')} | Name: {cluster_data.get('name', '')} | Description: {cluster_data.get('description', '')}"

        # Format cluster keywords line
        cluster_keywords_text = f"Cluster ID: {cluster_data.get('id', '')} | Name: {cluster_data.get('name', '')} | Keywords: {', '.join(keyword_list)}"

        # V2 reuses the original generate_ideas prompt template.
        return PromptRegistry.get_prompt(
            function_name='generate_ideas',
            account=account,
            context={
                'CLUSTERS': clusters_text,
                'CLUSTER_KEYWORDS': cluster_keywords_text,
            }
        )

    def parse_response(self, response: str, step_tracker=None) -> List[Dict]:
        """Parse the AI response into idea dicts.

        Raises ValueError when the response is empty or does not contain an
        'ideas' array.
        """
        if not response or not response.strip():
            raise ValueError("Empty response from AI")

        ai_core = AICore(account=getattr(self, 'account', None))
        json_data = ai_core.extract_json(response)

        if not json_data or 'ideas' not in json_data:
            raise ValueError("Invalid response format: missing 'ideas' array")

        # Membership was checked above, so direct indexing is safe.
        return json_data['ideas']

    def save_output(self, parsed: List[Dict], original_data: Any, account=None, step_tracker=None) -> Dict:
        """Persist generated ideas against the cluster atomically.

        Returns the number of ideas created (under both 'ideas_created' and
        the generic 'count' key). Raises ValueError if the prepared data is
        missing the cluster object.
        """
        cluster = original_data.get('cluster')
        if not cluster:
            raise ValueError("Cluster not found in original data")

        ideas_created = 0

        with transaction.atomic():
            for idea_data in parsed:
                ContentIdeas.objects.create(
                    cluster=cluster,
                    title=idea_data.get('title', 'Untitled Idea'),
                    description=idea_data.get('description', ''),
                    structure=idea_data.get('structure', 'article'),
                    account=account,
                    status='new'
                )
                ideas_created += 1

        return {
            'ideas_created': ideas_created,
            'count': ideas_created
        }
|
||||
|
||||
@@ -34,6 +34,16 @@ MODEL_CONFIG = {
|
||||
"temperature": 0.7,
|
||||
"response_format": {"type": "json_object"},
|
||||
},
|
||||
"auto_cluster_v2": {
|
||||
"max_tokens": 3000,
|
||||
"temperature": 0.7,
|
||||
"response_format": {"type": "json_object"},
|
||||
},
|
||||
"generate_ideas_v2": {
|
||||
"max_tokens": 4000,
|
||||
"temperature": 0.7,
|
||||
"response_format": {"type": "json_object"},
|
||||
},
|
||||
}
|
||||
|
||||
# Function name aliases (for backward compatibility)
|
||||
@@ -86,7 +96,26 @@ def get_model_config(function_name: str, account=None) -> Dict[str, Any]:
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.warning(f"Could not load model from IntegrationSettings: {e}", exc_info=True)
|
||||
|
||||
# Merge with defaults
|
||||
# For V2 functions: Don't use defaults - only return config if model is present
|
||||
if function_name.endswith('_v2'):
|
||||
# V2 functions require model from IntegrationSettings - no defaults
|
||||
if not model_from_settings:
|
||||
# Return config without model (will be validated in engine)
|
||||
return {
|
||||
"model": None,
|
||||
"max_tokens": config.get('max_tokens', 4000),
|
||||
"temperature": config.get('temperature', 0.7),
|
||||
"response_format": config.get('response_format'),
|
||||
}
|
||||
# Model exists, return config with model
|
||||
return {
|
||||
"model": model_from_settings,
|
||||
"max_tokens": config.get('max_tokens', 4000),
|
||||
"temperature": config.get('temperature', 0.7),
|
||||
"response_format": config.get('response_format'),
|
||||
}
|
||||
|
||||
# For non-V2 functions: Merge with defaults (backward compatibility)
|
||||
default_config = {
|
||||
"model": "gpt-4.1",
|
||||
"max_tokens": 4000,
|
||||
|
||||
@@ -89,8 +89,20 @@ def _load_generate_images():
|
||||
from igny8_core.ai.functions.generate_images import GenerateImagesFunction
|
||||
return GenerateImagesFunction
|
||||
|
||||
def _load_auto_cluster_v2():
    """Lazily import and return the auto_cluster_v2 function class."""
    from igny8_core.ai.functions.workflow_functions.auto_cluster_v2 import (
        AutoClusterV2Function,
    )
    return AutoClusterV2Function
|
||||
|
||||
def _load_generate_ideas_v2():
    """Lazily import and return the generate_ideas_v2 function class."""
    from igny8_core.ai.functions.workflow_functions.generate_ideas_v2 import (
        GenerateIdeasV2Function,
    )
    return GenerateIdeasV2Function
|
||||
|
||||
# Register every workflow function with its lazy loader, data-driven so new
# entries are a one-line addition.
for _fn_name, _fn_loader in (
    ('auto_cluster', _load_auto_cluster),
    ('generate_ideas', _load_generate_ideas),
    ('generate_content', _load_generate_content),
    ('generate_images', _load_generate_images),
    ('auto_cluster_v2', _load_auto_cluster_v2),
    ('generate_ideas_v2', _load_generate_ideas_v2),
):
    register_lazy_function(_fn_name, _fn_loader)
|
||||
|
||||
|
||||
@@ -571,6 +571,118 @@ class KeywordViewSet(SiteSectorModelViewSet):
|
||||
'error': f'Unexpected error: {str(e)}'
|
||||
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
@action(detail=False, methods=['post'], url_path='auto_cluster_v2', url_name='auto_cluster_v2')
def auto_cluster_v2(self, request):
    """Auto-cluster keywords V2 - new workflow function with max 50 keywords.

    Queues the `auto_cluster_v2` AI task on Celery, falling back to
    synchronous execution when Celery or its broker is unavailable.
    Requires an active OpenAI model in IntegrationSettings (no default).

    Request body: {'ids': [<keyword ids>], 'sector_id': <optional>}.
    Returns 200 with a task_id (async) or the task result (sync),
    400 on validation/configuration errors, 500 on failures.
    """
    import logging
    from igny8_core.ai.tasks import run_ai_task
    from kombu.exceptions import OperationalError as KombuOperationalError

    logger = logging.getLogger(__name__)

    def _sync_response(result):
        # Shared shaping of a synchronous run_ai_task result into a Response
        # (previously duplicated in the no-Celery and broker-down branches).
        if result.get('success'):
            return Response({
                'success': True,
                **result
            }, status=status.HTTP_200_OK)
        return Response({
            'success': False,
            'error': result.get('error', 'Clustering failed')
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        # Get account
        account = getattr(request, 'account', None)
        account_id = account.id if account else None

        # V2 contract: no default model - refuse to run unless one is configured.
        if account:
            from igny8_core.modules.system.models import IntegrationSettings
            openai_settings = IntegrationSettings.objects.filter(
                integration_type='openai',
                account=account,
                is_active=True
            ).first()
            if not openai_settings or not openai_settings.config or not openai_settings.config.get('model'):
                return Response({
                    'success': False,
                    'error': 'AI model not configured. Please configure OpenAI model in Integration settings.'
                }, status=status.HTTP_400_BAD_REQUEST)

        # Prepare payload
        payload = {
            'ids': request.data.get('ids', []),
            'sector_id': request.data.get('sector_id')
        }

        logger.info(f"auto_cluster_v2 called with ids={payload['ids']}, sector_id={payload.get('sector_id')}")

        # Validate basic input
        if not payload['ids']:
            return Response({
                'success': False,
                'error': 'No keywords selected'
            }, status=status.HTTP_400_BAD_REQUEST)

        if len(payload['ids']) > 50:
            return Response({
                'success': False,
                'error': 'Maximum 50 keywords allowed for clustering'
            }, status=status.HTTP_400_BAD_REQUEST)

        # Try to queue Celery task
        try:
            if hasattr(run_ai_task, 'delay'):
                task = run_ai_task.delay(
                    function_name='auto_cluster_v2',
                    payload=payload,
                    account_id=account_id
                )
                logger.info(f"Task queued: {task.id}")
                return Response({
                    'success': True,
                    'task_id': str(task.id),
                    'message': 'Clustering started'
                }, status=status.HTTP_200_OK)

            # Celery not available - execute synchronously
            logger.warning("Celery not available, executing synchronously")
            return _sync_response(run_ai_task(
                function_name='auto_cluster_v2',
                payload=payload,
                account_id=account_id
            ))
        except (KombuOperationalError, ConnectionError) as e:
            # Broker connection failed - fall back to synchronous execution
            logger.warning(f"Celery broker unavailable, falling back to synchronous execution: {str(e)}")
            return _sync_response(run_ai_task(
                function_name='auto_cluster_v2',
                payload=payload,
                account_id=account_id
            ))
        except Exception as e:
            logger.error(f"Error in auto_cluster_v2: {str(e)}", exc_info=True)
            return Response({
                'success': False,
                'error': str(e)
            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        logger.error(f"Unexpected error in auto_cluster_v2: {str(e)}", exc_info=True)
        return Response({
            'success': False,
            'error': f'Unexpected error: {str(e)}'
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class ClusterViewSet(SiteSectorModelViewSet):
|
||||
"""
|
||||
@@ -811,6 +923,117 @@ class ClusterViewSet(SiteSectorModelViewSet):
|
||||
'error': f'Unexpected error: {str(e)}'
|
||||
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
@action(detail=False, methods=['post'], url_path='generate_ideas_v2', url_name='generate_ideas_v2')
def generate_ideas_v2(self, request):
    """Generate ideas V2 - single cluster only.

    Queues the `generate_ideas_v2` AI task on Celery, falling back to
    synchronous execution when Celery or its broker is unavailable.
    Requires an active OpenAI model in IntegrationSettings (no default).

    Request body: {'ids': [<exactly one cluster id>]}.
    Returns 200 with a task_id (async) or the task result (sync),
    400 on validation/configuration errors, 500 on failures.
    """
    import logging
    from igny8_core.ai.tasks import run_ai_task
    from kombu.exceptions import OperationalError as KombuOperationalError

    logger = logging.getLogger(__name__)

    def _sync_response(result):
        # Shared shaping of a synchronous run_ai_task result into a Response
        # (previously duplicated in the no-Celery and broker-down branches).
        if result.get('success'):
            return Response({
                'success': True,
                **result
            }, status=status.HTTP_200_OK)
        return Response({
            'success': False,
            'error': result.get('error', 'Idea generation failed')
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        # Get account
        account = getattr(request, 'account', None)
        account_id = account.id if account else None

        # V2 contract: no default model - refuse to run unless one is configured.
        if account:
            from igny8_core.modules.system.models import IntegrationSettings
            openai_settings = IntegrationSettings.objects.filter(
                integration_type='openai',
                account=account,
                is_active=True
            ).first()
            if not openai_settings or not openai_settings.config or not openai_settings.config.get('model'):
                return Response({
                    'success': False,
                    'error': 'AI model not configured. Please configure OpenAI model in Integration settings.'
                }, status=status.HTTP_400_BAD_REQUEST)

        # Prepare payload
        payload = {
            'ids': request.data.get('ids', []),
        }

        logger.info(f"generate_ideas_v2 called with ids={payload['ids']}")

        # Validate basic input - exactly one cluster
        if not payload['ids']:
            return Response({
                'success': False,
                'error': 'No cluster selected'
            }, status=status.HTTP_400_BAD_REQUEST)

        if len(payload['ids']) > 1:
            return Response({
                'success': False,
                'error': 'Only one cluster can be processed at a time'
            }, status=status.HTTP_400_BAD_REQUEST)

        # Try to queue Celery task
        try:
            if hasattr(run_ai_task, 'delay'):
                task = run_ai_task.delay(
                    function_name='generate_ideas_v2',
                    payload=payload,
                    account_id=account_id
                )
                logger.info(f"Task queued: {task.id}")
                return Response({
                    'success': True,
                    'task_id': str(task.id),
                    'message': 'Idea generation started'
                }, status=status.HTTP_200_OK)

            # Celery not available - execute synchronously
            logger.warning("Celery not available, executing synchronously")
            return _sync_response(run_ai_task(
                function_name='generate_ideas_v2',
                payload=payload,
                account_id=account_id
            ))
        except (KombuOperationalError, ConnectionError) as e:
            # Broker connection failed - fall back to synchronous execution
            logger.warning(f"Celery broker unavailable, falling back to synchronous execution: {str(e)}")
            return _sync_response(run_ai_task(
                function_name='generate_ideas_v2',
                payload=payload,
                account_id=account_id
            ))
        except Exception as e:
            logger.error(f"Error in generate_ideas_v2: {str(e)}", exc_info=True)
            return Response({
                'success': False,
                'error': str(e)
            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        logger.error(f"Unexpected error in generate_ideas_v2: {str(e)}", exc_info=True)
        return Response({
            'success': False,
            'error': f'Unexpected error: {str(e)}'
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
def list(self, request, *args, **kwargs):
|
||||
"""
|
||||
Override list to optimize keyword stats calculation using bulk aggregation
|
||||
|
||||
@@ -66,7 +66,7 @@ export default function AIProgressModal({
|
||||
|
||||
const modalInstanceId = modalInstanceIdRef.current || 'modal-01';
|
||||
|
||||
// Build full function ID with modal instance
|
||||
// Build full function ID with modal instance (only for debugging, not shown in UI)
|
||||
const fullFunctionId = functionId ? `${functionId}-${modalInstanceId}` : null;
|
||||
|
||||
// Determine color based on status
|
||||
@@ -201,11 +201,10 @@ export default function AIProgressModal({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Function ID and Task ID (for debugging) */}
|
||||
{(fullFunctionId || taskId) && (
|
||||
{/* Task ID (for debugging - Function ID not shown per requirements) */}
|
||||
{taskId && (
|
||||
<div className="mb-6 space-y-1 text-xs text-gray-400 dark:text-gray-600">
|
||||
{fullFunctionId && <div>Function ID: {fullFunctionId}</div>}
|
||||
{taskId && <div>Task ID: {taskId}</div>}
|
||||
<div>Task ID: {taskId}</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -282,11 +281,10 @@ export default function AIProgressModal({
|
||||
{config.errorMessage || message}
|
||||
</p>
|
||||
|
||||
{/* Function ID and Task ID (for debugging) */}
|
||||
{(fullFunctionId || taskId) && (
|
||||
{/* Task ID (for debugging - Function ID not shown per requirements) */}
|
||||
{taskId && (
|
||||
<div className="mb-6 space-y-1 text-xs text-gray-400 dark:text-gray-600">
|
||||
{fullFunctionId && <div>Function ID: {fullFunctionId}</div>}
|
||||
{taskId && <div>Task ID: {taskId}</div>}
|
||||
<div>Task ID: {taskId}</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -390,15 +388,10 @@ export default function AIProgressModal({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Function ID and Task ID (for debugging) */}
|
||||
{(fullFunctionId || taskId) && (
|
||||
<div className="mb-4 space-y-1 text-xs text-gray-400 dark:text-gray-600">
|
||||
{fullFunctionId && (
|
||||
<div>Function ID: {fullFunctionId}</div>
|
||||
)}
|
||||
{/* Task ID (for debugging - Function ID not shown per requirements) */}
|
||||
{taskId && (
|
||||
<div className="mb-4 space-y-1 text-xs text-gray-400 dark:text-gray-600">
|
||||
<div>Task ID: {taskId}</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
|
||||
@@ -144,6 +144,12 @@ const tableActionsConfigs: Record<string, TableActionsConfig> = {
|
||||
icon: <BoltIcon className="w-4 h-4 text-warning-500" />,
|
||||
variant: 'secondary',
|
||||
},
|
||||
{
|
||||
key: 'keywords_clustering',
|
||||
label: 'Keywords Clustering',
|
||||
icon: <BoltIcon className="w-4 h-4 text-brand-500" />,
|
||||
variant: 'secondary',
|
||||
},
|
||||
],
|
||||
},
|
||||
'/planner/clusters': {
|
||||
@@ -160,6 +166,12 @@ const tableActionsConfigs: Record<string, TableActionsConfig> = {
|
||||
icon: <BoltIcon className="w-5 h-5" />,
|
||||
variant: 'primary',
|
||||
},
|
||||
{
|
||||
key: 'generate_ideas_v2',
|
||||
label: 'Generate Ideas V2',
|
||||
icon: <BoltIcon className="w-5 h-5" />,
|
||||
variant: 'primary',
|
||||
},
|
||||
],
|
||||
bulkActions: [
|
||||
{
|
||||
|
||||
@@ -232,8 +232,10 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
`/v1/system/settings/task_progress/${taskId}/`
|
||||
);
|
||||
|
||||
// Helper function to start auto-increment progress (1% every 350ms until 80%)
|
||||
// Only runs when no backend updates are coming (smooth fill-in animation)
|
||||
// Helper function to start auto-increment progress
|
||||
// 0-50%: 300ms per 1%
|
||||
// 50-80%: 500ms per 1%
|
||||
// If stuck at 80%: 1% per 500ms
|
||||
const startAutoIncrement = () => {
|
||||
// Clear any existing auto-increment interval
|
||||
if (autoIncrementIntervalRef.current) {
|
||||
@@ -241,11 +243,10 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
autoIncrementIntervalRef.current = null;
|
||||
}
|
||||
|
||||
// Only start if we're below 80% and status is processing
|
||||
// Only start if we're below 100% and status is processing
|
||||
const current = displayedPercentageRef.current;
|
||||
if (current < 80) {
|
||||
// Use a slightly longer interval to avoid conflicts with backend updates
|
||||
autoIncrementIntervalRef.current = setInterval(() => {
|
||||
if (current < 100) {
|
||||
const doIncrement = () => {
|
||||
setProgress(prev => {
|
||||
// Check current status - stop if not processing
|
||||
if (prev.status !== 'processing') {
|
||||
@@ -257,17 +258,30 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
}
|
||||
|
||||
const currentPercent = displayedPercentageRef.current;
|
||||
// Only increment if still below 80%
|
||||
if (currentPercent < 80) {
|
||||
const newPercentage = Math.min(currentPercent + 1, 80);
|
||||
let newPercentage = currentPercent;
|
||||
let nextInterval = 300; // Default 300ms
|
||||
|
||||
if (currentPercent < 50) {
|
||||
// 0-50%: 300ms per 1%
|
||||
newPercentage = Math.min(currentPercent + 1, 50);
|
||||
nextInterval = 300;
|
||||
} else if (currentPercent < 80) {
|
||||
// 50-80%: 500ms per 1%
|
||||
newPercentage = Math.min(currentPercent + 1, 80);
|
||||
nextInterval = 500;
|
||||
} else if (currentPercent < 100) {
|
||||
// Stuck at 80%+: 1% per 500ms
|
||||
newPercentage = Math.min(currentPercent + 1, 99);
|
||||
nextInterval = 500;
|
||||
}
|
||||
|
||||
if (newPercentage > currentPercent && newPercentage < 100) {
|
||||
displayedPercentageRef.current = newPercentage;
|
||||
|
||||
// Stop if we've reached 80%
|
||||
if (newPercentage >= 80) {
|
||||
// Restart interval with new speed if needed
|
||||
if (autoIncrementIntervalRef.current) {
|
||||
clearInterval(autoIncrementIntervalRef.current);
|
||||
autoIncrementIntervalRef.current = null;
|
||||
}
|
||||
autoIncrementIntervalRef.current = setInterval(doIncrement, nextInterval);
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -275,7 +289,7 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
percentage: newPercentage,
|
||||
};
|
||||
} else {
|
||||
// Stop if we've reached 80%
|
||||
// Stop if we've reached 100% or can't increment
|
||||
if (autoIncrementIntervalRef.current) {
|
||||
clearInterval(autoIncrementIntervalRef.current);
|
||||
autoIncrementIntervalRef.current = null;
|
||||
@@ -283,7 +297,11 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
return prev;
|
||||
}
|
||||
});
|
||||
}, 350); // Slightly longer interval to reduce conflicts
|
||||
};
|
||||
|
||||
// Start with appropriate interval based on current percentage
|
||||
const initialInterval = current < 50 ? 300 : 500;
|
||||
autoIncrementIntervalRef.current = setInterval(doIncrement, initialInterval);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -387,19 +405,20 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
const safeTargetPercentage = Math.max(targetPercentage, currentDisplayedPercentage);
|
||||
|
||||
// Smooth progress animation: increment gradually until reaching target
|
||||
// Use smaller increments and faster updates for smoother animation
|
||||
// Speed: 300ms per 1% until 50%, then 500ms per 1%
|
||||
if (safeTargetPercentage > currentDisplayedPercentage) {
|
||||
// Start smooth animation
|
||||
let animatedPercentage = currentDisplayedPercentage;
|
||||
const animateProgress = () => {
|
||||
if (animatedPercentage < safeTargetPercentage) {
|
||||
// Calculate increment based on distance for smooth animation
|
||||
const diff = safeTargetPercentage - animatedPercentage;
|
||||
// Use smaller increments for smoother feel
|
||||
// If close (< 5%), increment by 1, otherwise by 2
|
||||
const increment = diff <= 5 ? 1 : Math.min(2, Math.ceil(diff / 10));
|
||||
// Always increment by 1%
|
||||
const increment = 1;
|
||||
animatedPercentage = Math.min(animatedPercentage + increment, safeTargetPercentage);
|
||||
displayedPercentageRef.current = animatedPercentage;
|
||||
|
||||
// Determine speed based on current percentage
|
||||
const speed = animatedPercentage < 50 ? 300 : 500; // 300ms until 50%, then 500ms
|
||||
|
||||
setProgress({
|
||||
percentage: animatedPercentage,
|
||||
message: friendlyMessage,
|
||||
@@ -414,13 +433,17 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
});
|
||||
|
||||
if (animatedPercentage < safeTargetPercentage) {
|
||||
// Smooth updates: 150ms for better UX
|
||||
stepTransitionTimeoutRef.current = setTimeout(animateProgress, 150);
|
||||
// Use appropriate speed based on current percentage
|
||||
const nextSpeed = animatedPercentage < 50 ? 300 : 500;
|
||||
stepTransitionTimeoutRef.current = setTimeout(animateProgress, nextSpeed);
|
||||
} else {
|
||||
stepTransitionTimeoutRef.current = null;
|
||||
// After reaching target, start auto-increment if below 80% and no backend update pending
|
||||
if (safeTargetPercentage < 80) {
|
||||
startAutoIncrement();
|
||||
} else if (safeTargetPercentage >= 80 && safeTargetPercentage < 100) {
|
||||
// If at 80%+, start slow auto-increment (1% per 500ms)
|
||||
startAutoIncrement();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -435,7 +458,8 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
} else {
|
||||
// Same step or first step - start animation immediately
|
||||
currentStepRef.current = currentStep;
|
||||
animateProgress();
|
||||
const initialSpeed = currentDisplayedPercentage < 50 ? 300 : 500;
|
||||
stepTransitionTimeoutRef.current = setTimeout(animateProgress, initialSpeed);
|
||||
}
|
||||
} else {
|
||||
// Target is same or less than current - just update message and details
|
||||
@@ -455,6 +479,9 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
// Start auto-increment if below 80% and no backend update
|
||||
if (currentDisplayedPercentage < 80 && safeTargetPercentage === currentDisplayedPercentage) {
|
||||
startAutoIncrement();
|
||||
} else if (currentDisplayedPercentage >= 80 && currentDisplayedPercentage < 100) {
|
||||
// If at 80%+, start slow auto-increment (1% per 500ms)
|
||||
startAutoIncrement();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -527,18 +554,52 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
} else if (response.state === 'SUCCESS') {
|
||||
const meta = response.meta || {};
|
||||
|
||||
// Clear any existing transition timeout
|
||||
// Clear any existing transition timeout and auto-increment
|
||||
if (stepTransitionTimeoutRef.current) {
|
||||
clearTimeout(stepTransitionTimeoutRef.current);
|
||||
stepTransitionTimeoutRef.current = null;
|
||||
}
|
||||
if (autoIncrementIntervalRef.current) {
|
||||
clearInterval(autoIncrementIntervalRef.current);
|
||||
autoIncrementIntervalRef.current = null;
|
||||
}
|
||||
|
||||
// Get completion message with extracted values
|
||||
const completionMessage = meta.message || '';
|
||||
const allSteps = [...(meta.request_steps || []), ...(meta.response_steps || [])];
|
||||
const stepInfo = getStepInfo('DONE', completionMessage, allSteps);
|
||||
|
||||
// Update to 100% with user-friendly completion message
|
||||
// Smooth completion animation: 5% per 500ms until 100%
|
||||
const currentPercent = displayedPercentageRef.current;
|
||||
if (currentPercent < 100) {
|
||||
const animateToCompletion = () => {
|
||||
const current = displayedPercentageRef.current;
|
||||
if (current < 100) {
|
||||
const increment = Math.min(5, 100 - current); // 5% per step, or remaining if less
|
||||
const newPercentage = current + increment;
|
||||
displayedPercentageRef.current = newPercentage;
|
||||
|
||||
setProgress({
|
||||
percentage: newPercentage,
|
||||
message: stepInfo.friendlyMessage,
|
||||
status: 'completed',
|
||||
details: meta.details,
|
||||
});
|
||||
|
||||
if (newPercentage < 100) {
|
||||
stepTransitionTimeoutRef.current = setTimeout(animateToCompletion, 500);
|
||||
} else {
|
||||
currentStepRef.current = 'DONE';
|
||||
}
|
||||
} else {
|
||||
currentStepRef.current = 'DONE';
|
||||
}
|
||||
};
|
||||
|
||||
currentStepRef.current = 'DONE';
|
||||
animateToCompletion();
|
||||
} else {
|
||||
// Already at 100%, just update message and status
|
||||
currentStepRef.current = 'DONE';
|
||||
displayedPercentageRef.current = 100;
|
||||
setProgress({
|
||||
@@ -547,6 +608,7 @@ export function useProgressModal(): UseProgressModalReturn {
|
||||
status: 'completed',
|
||||
details: meta.details,
|
||||
});
|
||||
}
|
||||
|
||||
// Update final step logs
|
||||
if (meta.request_steps || meta.response_steps) {
|
||||
|
||||
@@ -13,6 +13,7 @@ import {
|
||||
bulkDeleteClusters,
|
||||
bulkUpdateClustersStatus,
|
||||
autoGenerateIdeas,
|
||||
generateIdeasV2,
|
||||
Cluster,
|
||||
ClusterFilters,
|
||||
ClusterCreateData,
|
||||
@@ -218,6 +219,22 @@ export default function Clusters() {
|
||||
} catch (error: any) {
|
||||
toast.error(`Failed to generate ideas: ${error.message}`);
|
||||
}
|
||||
} else if (action === 'generate_ideas_v2') {
|
||||
try {
|
||||
const result = await generateIdeasV2([row.id]);
|
||||
|
||||
if (result.success && result.task_id) {
|
||||
// Async task - show progress modal
|
||||
progressModal.openModal(result.task_id, 'Generate Ideas', 'ai_generate_ideas_v2');
|
||||
} else if (result.success) {
|
||||
toast.success(result.message || 'Ideas generated successfully');
|
||||
await loadClusters();
|
||||
} else {
|
||||
toast.error(result.error || 'Failed to generate ideas');
|
||||
}
|
||||
} catch (error: any) {
|
||||
toast.error(`Failed to generate ideas: ${error.message}`);
|
||||
}
|
||||
}
|
||||
}, [toast, progressModal, loadClusters]);
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ import {
|
||||
Cluster,
|
||||
API_BASE_URL,
|
||||
autoClusterKeywords,
|
||||
autoClusterKeywordsV2,
|
||||
fetchSeedKeywords,
|
||||
SeedKeyword,
|
||||
} from '../../services/api';
|
||||
@@ -448,6 +449,35 @@ export default function Keywords() {
|
||||
}]);
|
||||
toast.error(errorMsg);
|
||||
}
|
||||
} else if (action === 'keywords_clustering') {
|
||||
if (ids.length === 0) {
|
||||
toast.error('Please select at least one keyword');
|
||||
return;
|
||||
}
|
||||
if (ids.length > 50) {
|
||||
toast.error('Maximum 50 keywords allowed for clustering');
|
||||
return;
|
||||
}
|
||||
|
||||
const numIds = ids.map(id => parseInt(id));
|
||||
const sectorId = activeSector?.id;
|
||||
|
||||
try {
|
||||
const result = await autoClusterKeywordsV2(numIds, sectorId);
|
||||
|
||||
if (result.success && result.task_id) {
|
||||
// Async task - open progress modal
|
||||
hasReloadedRef.current = false;
|
||||
progressModal.openModal(result.task_id, 'Keywords Clustering', 'ai_auto_cluster_v2');
|
||||
} else if (result.success) {
|
||||
toast.success(result.message || 'Clustering completed');
|
||||
await loadKeywords();
|
||||
} else {
|
||||
toast.error(result.error || 'Clustering failed');
|
||||
}
|
||||
} catch (error: any) {
|
||||
toast.error(`Clustering failed: ${error.message}`);
|
||||
}
|
||||
} else {
|
||||
toast.info(`Bulk action "${action}" for ${ids.length} items`);
|
||||
}
|
||||
|
||||
@@ -807,6 +807,140 @@ export async function autoGenerateIdeas(clusterIds: number[]): Promise<{ success
|
||||
}
|
||||
}
|
||||
|
||||
export async function autoClusterKeywordsV2(keywordIds: number[], sectorId?: number): Promise<{ success: boolean; task_id?: string; clusters_created?: number; keywords_updated?: number; message?: string; error?: string }> {
|
||||
const startTime = Date.now();
|
||||
const addLog = useAIRequestLogsStore.getState().addLog;
|
||||
|
||||
const endpoint = `/v1/planner/keywords/auto_cluster_v2/`;
|
||||
const requestBody = { ids: keywordIds, sector_id: sectorId };
|
||||
|
||||
const pendingLogId = addLog({
|
||||
function: 'autoClusterKeywordsV2',
|
||||
endpoint,
|
||||
request: {
|
||||
method: 'POST',
|
||||
body: requestBody,
|
||||
},
|
||||
status: 'pending',
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await fetchAPI(endpoint, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(requestBody),
|
||||
});
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
const updateLog = useAIRequestLogsStore.getState().updateLog;
|
||||
|
||||
if (pendingLogId && response) {
|
||||
updateLog(pendingLogId, {
|
||||
response: {
|
||||
status: 200,
|
||||
data: response,
|
||||
},
|
||||
status: response.success === false ? 'error' : 'success',
|
||||
duration,
|
||||
});
|
||||
}
|
||||
|
||||
if (response && response.success === false) {
|
||||
return response;
|
||||
}
|
||||
|
||||
return response;
|
||||
} catch (error: any) {
|
||||
const duration = Date.now() - startTime;
|
||||
const updateLog = useAIRequestLogsStore.getState().updateLog;
|
||||
|
||||
let errorMessage = error.message || 'Unknown error';
|
||||
|
||||
if (pendingLogId) {
|
||||
updateLog(pendingLogId, {
|
||||
response: {
|
||||
status: 500,
|
||||
error: errorMessage,
|
||||
},
|
||||
status: 'error',
|
||||
duration,
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: errorMessage,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export async function generateIdeasV2(clusterIds: number[]): Promise<{ success: boolean; task_id?: string; ideas_created?: number; message?: string; error?: string }> {
|
||||
const startTime = Date.now();
|
||||
const { useAIRequestLogsStore } = await import('../store/aiRequestLogsStore').catch(() => ({ useAIRequestLogsStore: null }));
|
||||
const addLog = useAIRequestLogsStore?.getState().addLog;
|
||||
|
||||
const endpoint = `/v1/planner/clusters/generate_ideas_v2/`;
|
||||
const requestBody = { ids: clusterIds };
|
||||
|
||||
addLog?.({
|
||||
function: 'generateIdeasV2',
|
||||
endpoint,
|
||||
request: {
|
||||
method: 'POST',
|
||||
body: requestBody,
|
||||
},
|
||||
status: 'pending',
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await fetchAPI(endpoint, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(requestBody),
|
||||
});
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
addLog?.({
|
||||
function: 'generateIdeasV2',
|
||||
endpoint,
|
||||
request: {
|
||||
method: 'POST',
|
||||
body: requestBody,
|
||||
},
|
||||
response: {
|
||||
status: 200,
|
||||
data: response,
|
||||
},
|
||||
status: 'success',
|
||||
duration,
|
||||
});
|
||||
|
||||
return response;
|
||||
} catch (error: any) {
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
let errorMessage = error.message || 'Unknown error';
|
||||
|
||||
addLog?.({
|
||||
function: 'generateIdeasV2',
|
||||
endpoint,
|
||||
request: {
|
||||
method: 'POST',
|
||||
body: requestBody,
|
||||
},
|
||||
response: {
|
||||
status: 500,
|
||||
error: errorMessage,
|
||||
},
|
||||
status: 'error',
|
||||
duration,
|
||||
});
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: errorMessage,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export async function generateSingleIdea(ideaId: string | number, clusterId: number): Promise<{ success: boolean; task_id?: string; idea_created?: number; message?: string; error?: string }> {
|
||||
const startTime = Date.now();
|
||||
const { useAIRequestLogsStore } = await import('../store/aiRequestLogsStore').catch(() => ({ useAIRequestLogsStore: null }));
|
||||
|
||||
Reference in New Issue
Block a user