commit bee6a68718 (parent 98e900da73)
Author: alorig
Date:   2025-11-09 23:50:19 +05:00

5 changed files with 96 additions and 79 deletions


@@ -269,6 +269,7 @@ class AICore:
                     'cost': cost,
                     'error': None,
                     'api_id': api_id,
+                    'duration': request_duration,  # Add duration tracking
                 }
             else:
                 error_msg = 'No content in OpenAI response'

@@ -315,8 +316,9 @@ class AICore:
             }
         except Exception as e:
             error_msg = f'Unexpected error: {str(e)}'
-            print(f"[AI][{function_name}][Error] {error_msg}")
-            logger.error(error_msg, exc_info=True)
+            logger.error(f"[AI][{function_name}][Error] {error_msg}", exc_info=True)
             if tracker:
                 tracker.error('UnexpectedError', error_msg, e)
             return {
                 'content': None,
                 'error': error_msg,
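The new 'duration' key implies the OpenAI call is timed somewhere above this hunk; the timing code itself is not part of the diff. A minimal sketch of how that measurement could look, assuming a monotonic clock wrapped around the provider call (the helper name and variables here are illustrative, not the file's actual code):

    import time

    def timed_completion(client, **kwargs):
        """Call the provider and report how long the request took."""
        start = time.monotonic()  # monotonic clock is immune to wall-clock jumps
        response = client.chat.completions.create(**kwargs)
        request_duration = time.monotonic() - start
        return response, request_duration

The second hunk folds the old print-plus-log pair into a single logger.error call that carries the "[AI][{function_name}][Error]" prefix, so the message and traceback appear exactly once in whatever handlers are configured.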


@@ -238,12 +238,15 @@ class ConsoleStepTracker:
        self.current_phase = None
        # Debug: Verify DEBUG_MODE is enabled
+       import sys
        if DEBUG_MODE:
-           print(f"[DEBUG] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is ENABLED", flush=True)
-           logger.info(f"ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is ENABLED")
+           init_msg = f"[DEBUG] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is ENABLED"
+           logger.info(init_msg)
+           print(init_msg, flush=True, file=sys.stdout)
        else:
-           print(f"[WARNING] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is DISABLED", flush=True)
-           logger.warning(f"ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is DISABLED")
+           init_msg = f"[WARNING] ConsoleStepTracker initialized for '{function_name}' - DEBUG_MODE is DISABLED"
+           logger.warning(init_msg)
+           print(init_msg, flush=True, file=sys.stdout)

    def _log(self, phase: str, message: str, status: str = 'info'):
        """Internal logging method that checks DEBUG_MODE"""

@@ -256,16 +259,18 @@ class ConsoleStepTracker:
        if status == 'error':
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] [ERROR] {message}"
+           # Use logger.error for errors so they're always visible
+           logger.error(log_msg)
        elif status == 'success':
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] ✅ {message}"
+           logger.info(log_msg)
        else:
            log_msg = f"[{timestamp}] [{self.function_name}] [{phase_label}] {message}"
-       # Print and flush immediately to ensure console output
-       print(log_msg, flush=True)
-       # Also log to Python logger for better visibility
        logger.info(log_msg)
+       # Also print to stdout for immediate visibility (works in Celery worker logs)
+       print(log_msg, flush=True, file=sys.stdout)

        self.steps.append({
            'timestamp': timestamp,
            'phase': phase,

@@ -300,8 +305,9 @@ class ConsoleStepTracker:
        self._log('DONE', f"{message} (Duration: {duration:.2f}s)", status='success')
        if DEBUG_MODE:
            import sys
-           print(f"[{self.function_name}] === AI Task Complete ===", flush=True)
-           logger.info(f"[{self.function_name}] === AI Task Complete ===")
+           complete_msg = f"[{self.function_name}] === AI Task Complete ==="
+           logger.info(complete_msg)
+           print(complete_msg, flush=True, file=sys.stdout)

    def error(self, error_type: str, message: str, exception: Exception = None):
        """Log error with standardized format"""

@@ -312,9 +318,10 @@ class ConsoleStepTracker:
        if DEBUG_MODE and exception:
            import sys
            import traceback
-           print(f"[{self.function_name}] [ERROR] Stack trace:", flush=True)
-           traceback.print_exc()
-           logger.error(f"[{self.function_name}] [ERROR] Stack trace:", exc_info=exception)
+           error_trace_msg = f"[{self.function_name}] [ERROR] Stack trace:"
+           logger.error(error_trace_msg, exc_info=exception)
+           print(error_trace_msg, flush=True, file=sys.stdout)
+           traceback.print_exc(file=sys.stdout)

    def retry(self, attempt: int, max_attempts: int, reason: str = ""):
        """Log retry attempt"""


@@ -961,15 +961,30 @@ class IntegrationSettingsViewSet(viewsets.ViewSet):
                'meta': response_meta
            })
        elif task_state == 'FAILURE':
+           # Try to get error from task.info meta first (this is where run_ai_task sets it)
+           if not error_message and isinstance(task_info, dict):
+               error_message = task_info.get('error') or task_info.get('message', '')
+               error_type = task_info.get('error_type', 'UnknownError')
+               # Also check if message contains error info
+               if not error_message and 'message' in task_info:
+                   msg = task_info.get('message', '')
+                   if msg and 'Error:' in msg:
+                       error_message = msg.replace('Error: ', '')
+           # Use extracted error_message if available, otherwise try to get from error_info
+           if not error_message:
                error_info = task_info
                if isinstance(error_info, Exception):
                    error_message = str(error_info)
                elif isinstance(error_info, dict):
-                   error_message = error_info.get('error', str(error_info))
-               else:
-                   error_message = str(error_info) if error_info else 'Task failed'
+                   error_message = error_info.get('error') or error_info.get('message', '') or str(error_info)
+               elif error_info:
+                   error_message = str(error_info)
+           # Final fallback - ensure we always have an error message
+           if not error_message or error_message.strip() == '':
+               error_message = f'Task execution failed - check Celery worker logs for task {task_id}'
+               error_type = 'ExecutionError'
            response_meta = {
                'error': error_message,
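The extraction order matters because Celery's AsyncResult.info holds different things depending on how the task failed: a dict when the task reported failure via update_state() with custom meta, or the raised exception object when it crashed outright. A standalone sketch of the same fallback chain (the meta keys match the hunk above; the function name is illustrative):

    from celery.result import AsyncResult

    def extract_failure_message(task_id: str) -> str:
        """Walk the view's fallbacks: meta dict, exception, then a last-resort hint."""
        info = AsyncResult(task_id).info  # dict from update_state(), or the raised exception
        if isinstance(info, dict):
            message = info.get('error') or info.get('message', '')
        elif isinstance(info, Exception):
            message = str(info)
        else:
            message = str(info) if info else ''
        if not message.strip():
            message = f'Task execution failed - check Celery worker logs for task {task_id}'
        return message

The final fallback guarantees the frontend never renders an empty error string, at the cost of pointing the user at the worker logs.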


@@ -128,6 +128,8 @@ export default function ResourceDebugOverlay({ enabled }: ResourceDebugOverlayProps) {
      headers['Authorization'] = `Bearer ${token}`;
    }

+   // Silently handle 404s and other errors - metrics might not exist for all requests
+   try {
      const response = await nativeFetch.call(window, `${API_BASE_URL}/v1/system/request-metrics/${requestId}/`, {
        method: 'GET',
        headers,

@@ -165,32 +167,20 @@ export default function ResourceDebugOverlay({ enabled }: ResourceDebugOverlayProps) {
          }
        }
      } catch (refreshError) {
-       // Refresh failed - user needs to re-login
-       console.warn('Token refresh failed, user may need to re-authenticate');
+       // Refresh failed - silently ignore
      }
      // Silently ignore 401 errors - user might not be authenticated
    } else if (response.status === 404) {
-     // Metrics not found - could be race condition, retry once after short delay
-     if (retryCount === 0) {
-       // First attempt failed, retry once after 200ms (middleware might still be storing)
-       setTimeout(() => fetchRequestMetrics(requestId, 1), 200);
-       return;
-     }
-     // Second attempt also failed - metrics truly not available
-     // This is expected: metrics expired (5min TTL), request wasn't tracked, or middleware error
-     // Silently ignore - no need to log or show error
+     // Metrics not found - silently ignore (metrics might not exist for all requests)
      return;
    } else {
-     // Only log non-404/401 errors (500, 403, etc.)
-     console.warn('Failed to fetch metrics:', response.status, response.statusText, 'for request:', requestId);
    }
- } catch (error) {
-   // Only log non-network errors
-   if (error instanceof TypeError && error.message.includes('fetch')) {
-     // Network error - silently ignore
-     // Other errors - silently ignore
-     return;
-   }
-   console.error('Failed to fetch request metrics:', error);
+ } catch (error) {
+   // Silently ignore all fetch errors (network errors, etc.)
+   // Metrics are optional and not critical for functionality
+   return;
  }
};
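The deleted comments explain why 404s are expected here: metrics are kept only briefly (a 5-minute TTL) and not every request is tracked. The storage side is not part of this commit; a plausible Python sketch of it, assuming the middleware keeps metrics in Django's cache keyed by request ID (the key format and function names are guesses, not the project's actual code):

    from django.core.cache import cache

    METRICS_TTL_SECONDS = 300  # matches the 5min TTL the removed comment mentions

    def store_request_metrics(request_id: str, metrics: dict) -> None:
        """Keep metrics just long enough for the debug overlay to fetch them."""
        cache.set(f"request-metrics:{request_id}", metrics, timeout=METRICS_TTL_SECONDS)

    def load_request_metrics(request_id: str) -> dict | None:
        """Returns None once the TTL lapses - the overlay treats that as a silent 404."""
        return cache.get(f"request-metrics:{request_id}")

Under that model the overlay's new behavior is reasonable: a miss is routine expiry, not an error worth logging, so the retry-once logic and console noise were dropped.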


@@ -346,11 +346,14 @@ export function useProgressModal(): UseProgressModalReturn {
      }
    } else if (response.state === 'FAILURE') {
      const meta = response.meta || {};
-     const errorMsg = meta.error || 'Task failed';
+     // Try multiple error message sources
+     const errorMsg = meta.error || meta.message || response.error || 'Task failed - exception details unavailable';
+     const errorType = meta.error_type || 'Error';
      setProgress({
        percentage: 0,
-       message: `Error: ${errorMsg}`,
+       message: errorMsg.includes('exception details unavailable') ? errorMsg : `Error: ${errorMsg}`,
        status: 'error',
+       details: meta.error_type ? `${errorType}: ${errorMsg}` : errorMsg,
      });
      // Update step logs from failure response
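The extra frontend fallbacks line up with what the backend can put into a failed task's meta. For reference, a minimal sketch of the producer side, assuming run_ai_task reports failure via Celery's update_state() (the task body is illustrative; only the meta keys error, message, and error_type come from this commit):

    from celery import shared_task
    from celery.exceptions import Ignore

    @shared_task(bind=True)
    def run_ai_task(self, payload: dict):
        try:
            ...  # the actual AI work
        except Exception as exc:
            # These keys become response.meta on the frontend poll.
            self.update_state(state='FAILURE', meta={
                'error': str(exc),
                'message': f'Error: {exc}',
                'error_type': type(exc).__name__,
            })
            raise Ignore()  # keep the custom meta instead of Celery's default exception payload

If the task crashes before update_state() runs, meta.error and meta.message are absent, which is exactly the case the new 'exception details unavailable' fallback covers.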