Implement V2 AI functions and enhance progress handling

- Added support for new V2 functions: `auto_cluster_v2` and `generate_ideas_v2`, including backend logic and API endpoints.
- Updated model configuration so that V2 functions validate that a model is configured before execution.
- Enhanced progress modal to provide better feedback during asynchronous tasks, including exposing task IDs for easier debugging.
- Updated frontend components to integrate new V2 functionalities and improve user experience with clustering and idea generation.
This commit is contained in:
Desktop
2025-11-10 22:16:02 +05:00
parent 46f5bb4d62
commit e2f2d79d4c
12 changed files with 920 additions and 58 deletions

View File

@@ -34,6 +34,16 @@ MODEL_CONFIG = {
"temperature": 0.7,
"response_format": {"type": "json_object"},
},
"auto_cluster_v2": {
"max_tokens": 3000,
"temperature": 0.7,
"response_format": {"type": "json_object"},
},
"generate_ideas_v2": {
"max_tokens": 4000,
"temperature": 0.7,
"response_format": {"type": "json_object"},
},
}
# Function name aliases (for backward compatibility)
@@ -86,7 +96,26 @@ def get_model_config(function_name: str, account=None) -> Dict[str, Any]:
logger = logging.getLogger(__name__)
logger.warning(f"Could not load model from IntegrationSettings: {e}", exc_info=True)
# Merge with defaults
# For V2 functions: Don't use defaults - only return config if model is present
if function_name.endswith('_v2'):
# V2 functions require model from IntegrationSettings - no defaults
if not model_from_settings:
# Return config without model (will be validated in engine)
return {
"model": None,
"max_tokens": config.get('max_tokens', 4000),
"temperature": config.get('temperature', 0.7),
"response_format": config.get('response_format'),
}
# Model exists, return config with model
return {
"model": model_from_settings,
"max_tokens": config.get('max_tokens', 4000),
"temperature": config.get('temperature', 0.7),
"response_format": config.get('response_format'),
}
# For non-V2 functions: Merge with defaults (backward compatibility)
default_config = {
"model": "gpt-4.1",
"max_tokens": 4000,