Improvements part 4: add Bria AI image generation and Anthropic (Claude) text generation integrations

This commit is contained in:
IGNY8 VPS (Salman)
2025-12-30 13:14:21 +00:00
parent 51950c7ce1
commit 1632ee62b6
8 changed files with 1200 additions and 3 deletions

View File

@@ -40,6 +40,8 @@ class AICore:
self.account = account
self._openai_api_key = None
self._runware_api_key = None
self._bria_api_key = None
self._anthropic_api_key = None
self._load_account_settings()
def _load_account_settings(self):
@@ -53,11 +55,15 @@ class AICore:
# Load API keys from global settings (platform-wide)
self._openai_api_key = global_settings.openai_api_key
self._runware_api_key = global_settings.runware_api_key
self._bria_api_key = getattr(global_settings, 'bria_api_key', None)
self._anthropic_api_key = getattr(global_settings, 'anthropic_api_key', None)
except Exception as e:
logger.error(f"Could not load GlobalIntegrationSettings: {e}", exc_info=True)
self._openai_api_key = None
self._runware_api_key = None
self._bria_api_key = None
self._anthropic_api_key = None
def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
"""Get API key for integration type"""
@@ -65,6 +71,10 @@ class AICore:
return self._openai_api_key
elif integration_type == 'runware':
return self._runware_api_key
elif integration_type == 'bria':
return self._bria_api_key
elif integration_type == 'anthropic':
return self._anthropic_api_key
return None
def get_model(self, integration_type: str = 'openai') -> str:
@@ -380,6 +390,289 @@ class AICore:
'api_id': None,
}
def run_anthropic_request(
    self,
    prompt: str,
    model: str,
    max_tokens: int = 8192,
    temperature: float = 0.7,
    api_key: Optional[str] = None,
    function_name: str = 'anthropic_request',
    prompt_prefix: Optional[str] = None,
    tracker: Optional[ConsoleStepTracker] = None,
    system_prompt: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Anthropic (Claude) AI request handler with console logging.

    Alternative to OpenAI for text generation.

    Args:
        prompt: Prompt text.
        model: Claude model name (required - must be provided from IntegrationSettings).
        max_tokens: Maximum output tokens for the completion.
        temperature: Temperature (0-1); only sent when < 1.0 (Claude's own default).
        api_key: Optional API key override; falls back to the platform key.
        function_name: Function name for logging (e.g., 'cluster_keywords').
        prompt_prefix: Optional prefix prepended to the prompt (for tracking).
        tracker: Optional ConsoleStepTracker instance for logging.
        system_prompt: Optional system prompt for Claude.

    Returns:
        Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
        'model', 'cost', 'error', 'api_id' ('duration' is added on success only).
    """

    def _error(message: str, used_model: Optional[str],
               api_id: Optional[str] = None) -> Dict[str, Any]:
        # Uniform zero-usage error payload shared by every failure path.
        return {
            'content': None,
            'error': message,
            'input_tokens': 0,
            'output_tokens': 0,
            'total_tokens': 0,
            'model': used_model,
            'cost': 0.0,
            'api_id': api_id,
        }

    # Use provided tracker or create a new one
    if tracker is None:
        tracker = ConsoleStepTracker(function_name)
    tracker.ai_call("Preparing Anthropic request...")

    # Step 1: Validate model is provided
    if not model:
        error_msg = "Model is required. Ensure IntegrationSettings is configured for the account."
        tracker.error('ConfigurationError', error_msg)
        logger.error(f"[AICore][Anthropic] {error_msg}")
        return _error(error_msg, None)

    # Step 2: Validate API key (explicit override wins over the platform key)
    api_key = api_key or self._anthropic_api_key
    if not api_key:
        error_msg = 'Anthropic API key not configured'
        tracker.error('ConfigurationError', error_msg)
        return _error(error_msg, model)

    active_model = model
    # Debug logging: show which model is used
    logger.info("[AICore][Anthropic] Model Configuration:")
    logger.info(f" - Model parameter passed: {model}")
    logger.info(f" - Model used in request: {active_model}")
    tracker.ai_call(f"Using Anthropic model: {active_model}")

    # Add prompt_prefix to prompt if provided (for tracking)
    final_prompt = prompt
    if prompt_prefix:
        final_prompt = f'{prompt_prefix}\n\n{prompt}'
        tracker.ai_call(f"Added prompt prefix: {prompt_prefix}")

    # Step 5: Build request payload using the Anthropic Messages API
    url = 'https://api.anthropic.com/v1/messages'
    headers = {
        'x-api-key': api_key,
        'anthropic-version': '2023-06-01',
        'Content-Type': 'application/json',
    }
    body_data = {
        'model': active_model,
        'max_tokens': max_tokens,
        'messages': [{'role': 'user', 'content': final_prompt}],
    }
    # Only add temperature if it's less than 1.0 (Claude's default)
    if temperature < 1.0:
        body_data['temperature'] = temperature
    # Add system prompt if provided
    if system_prompt:
        body_data['system'] = system_prompt
    tracker.ai_call(f"Request payload prepared (model={active_model}, max_tokens={max_tokens}, temp={temperature})")

    # Step 6: Send request
    tracker.ai_call("Sending request to Anthropic API...")
    request_start = time.time()
    try:
        response = requests.post(url, headers=headers, json=body_data, timeout=180)
        request_duration = time.time() - request_start
        tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")

        # Step 7: Validate HTTP response
        if response.status_code != 200:
            # Guard the parse: a server can claim a JSON content-type and
            # still return a malformed body; don't let that become an
            # "Unexpected error" from the outer handler.
            error_data = {}
            if response.headers.get('content-type', '').startswith('application/json'):
                try:
                    error_data = response.json()
                except ValueError:
                    error_data = {}
            error_message = f"HTTP {response.status_code} error"
            if isinstance(error_data, dict) and 'error' in error_data:
                if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                    error_message += f": {error_data['error']['message']}"
            # Check for rate limit
            if response.status_code == 429:
                retry_after = response.headers.get('retry-after', '60')
                tracker.rate_limit(retry_after)
                error_message += f" (Rate limit - retry after {retry_after}s)"
            else:
                tracker.error('HTTPError', error_message)
            logger.error(f"Anthropic API HTTP error {response.status_code}: {error_message}")
            return _error(error_message, active_model)

        # Step 8: Parse response JSON
        try:
            data = response.json()
        except json.JSONDecodeError as e:
            error_msg = f'Failed to parse JSON response: {str(e)}'
            tracker.malformed_json(str(e))
            logger.error(error_msg)
            return _error(error_msg, active_model)

        api_id = data.get('id')

        # Step 9: Extract content (Anthropic format).
        # Claude returns content as an array: [{"type": "text", "text": "..."}]
        if 'content' in data and len(data['content']) > 0:
            # Concatenate the text of every text-type content block
            content = ''.join(
                block.get('text', '')
                for block in data['content']
                if block.get('type') == 'text'
            )
            usage = data.get('usage', {})
            input_tokens = usage.get('input_tokens', 0)
            output_tokens = usage.get('output_tokens', 0)
            total_tokens = input_tokens + output_tokens
            tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
            tracker.parse(f"Content length: {len(content)} characters")

            # Step 10: Calculate cost using ModelRegistry (with fallback)
            from igny8_core.ai.model_registry import ModelRegistry
            cost = float(ModelRegistry.calculate_cost(
                active_model,
                input_tokens=input_tokens,
                output_tokens=output_tokens
            ))
            # Fallback to hardcoded rates if ModelRegistry returns 0.
            # Claude pricing as of 2024, USD per 1M tokens (input/output):
            #   claude-3-5-sonnet: $3 / $15, claude-3-opus: $15 / $75,
            #   claude-3-haiku: $0.25 / $1.25
            if cost == 0:
                anthropic_rates = {
                    'claude-3-5-sonnet-20241022': {'input': 3.00, 'output': 15.00},
                    'claude-3-5-haiku-20241022': {'input': 1.00, 'output': 5.00},
                    'claude-3-opus-20240229': {'input': 15.00, 'output': 75.00},
                    'claude-3-sonnet-20240229': {'input': 3.00, 'output': 15.00},
                    'claude-3-haiku-20240307': {'input': 0.25, 'output': 1.25},
                }
                rates = anthropic_rates.get(active_model, {'input': 3.00, 'output': 15.00})
                cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
            tracker.parse(f"Cost calculated: ${cost:.6f}")
            tracker.done("Anthropic request completed successfully")
            return {
                'content': content,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'total_tokens': total_tokens,
                'model': active_model,
                'cost': cost,
                'error': None,
                'api_id': api_id,
                'duration': request_duration,
            }
        else:
            error_msg = 'No content in Anthropic response'
            tracker.error('EmptyResponse', error_msg)
            logger.error(error_msg)
            return _error(error_msg, active_model, api_id)
    except requests.exceptions.Timeout:
        error_msg = 'Request timeout (180s exceeded)'
        tracker.timeout(180)
        logger.error(error_msg)
        return _error(error_msg, active_model)
    except requests.exceptions.RequestException as e:
        error_msg = f'Request exception: {str(e)}'
        tracker.error('RequestException', error_msg, e)
        logger.error(f"Anthropic API error: {error_msg}", exc_info=True)
        return _error(error_msg, active_model)
    except Exception as e:
        error_msg = f'Unexpected error: {str(e)}'
        logger.error(f"[AI][{function_name}][Anthropic][Error] {error_msg}", exc_info=True)
        if tracker:
            tracker.error('UnexpectedError', error_msg, e)
        return _error(error_msg, active_model)
def extract_json(self, response_text: str) -> Optional[Dict]:
"""
Extract JSON from response text.
@@ -453,6 +746,8 @@ class AICore:
return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name)
elif provider == 'runware':
return self._generate_image_runware(prompt, model, size, n, api_key, negative_prompt, function_name)
elif provider == 'bria':
return self._generate_image_bria(prompt, model, size, n, api_key, negative_prompt, function_name)
else:
error_msg = f'Unknown provider: {provider}'
print(f"[AI][{function_name}][Error] {error_msg}")
@@ -830,6 +1125,170 @@ class AICore:
'error': error_msg,
}
def _generate_image_bria(
self,
prompt: str,
model: Optional[str],
size: str,
n: int,
api_key: Optional[str],
negative_prompt: Optional[str],
function_name: str
) -> Dict[str, Any]:
"""
Generate image using Bria AI.
Bria API Reference: https://docs.bria.ai/reference/text-to-image
"""
print(f"[AI][{function_name}] Provider: Bria AI")
api_key = api_key or self._bria_api_key
if not api_key:
error_msg = 'Bria API key not configured'
print(f"[AI][{function_name}][Error] {error_msg}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
bria_model = model or 'bria-2.3'
print(f"[AI][{function_name}] Step 2: Using model: {bria_model}, size: {size}")
# Parse size
try:
width, height = map(int, size.split('x'))
except ValueError:
error_msg = f"Invalid size format: {size}. Expected format: WIDTHxHEIGHT"
print(f"[AI][{function_name}][Error] {error_msg}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
# Bria API endpoint
url = 'https://engine.prod.bria-api.com/v1/text-to-image/base'
headers = {
'api_token': api_key,
'Content-Type': 'application/json'
}
payload = {
'prompt': prompt,
'num_results': n,
'sync': True, # Wait for result
'model_version': bria_model.replace('bria-', ''), # e.g., '2.3'
}
# Add negative prompt if provided
if negative_prompt:
payload['negative_prompt'] = negative_prompt
# Add size constraints if not default
if width and height:
# Bria uses aspect ratio or fixed sizes
payload['width'] = width
payload['height'] = height
print(f"[AI][{function_name}] Step 3: Sending request to Bria API...")
request_start = time.time()
try:
response = requests.post(url, json=payload, headers=headers, timeout=150)
request_duration = time.time() - request_start
print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
if response.status_code != 200:
error_msg = f"HTTP {response.status_code} error: {response.text[:200]}"
print(f"[AI][{function_name}][Error] {error_msg}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
body = response.json()
print(f"[AI][{function_name}] Bria response keys: {list(body.keys()) if isinstance(body, dict) else type(body)}")
# Bria returns { "result": [ { "urls": ["..."] } ] }
image_url = None
error_msg = None
if isinstance(body, dict):
if 'result' in body and isinstance(body['result'], list) and len(body['result']) > 0:
first_result = body['result'][0]
if 'urls' in first_result and isinstance(first_result['urls'], list) and len(first_result['urls']) > 0:
image_url = first_result['urls'][0]
elif 'url' in first_result:
image_url = first_result['url']
elif 'error' in body:
error_msg = body['error']
elif 'message' in body:
error_msg = body['message']
if error_msg:
print(f"[AI][{function_name}][Error] Bria API error: {error_msg}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
if image_url:
# Cost based on model
cost_per_image = {
'bria-2.3': 0.015,
'bria-2.3-fast': 0.010,
'bria-2.2': 0.012,
}.get(bria_model, 0.015)
cost = cost_per_image * n
print(f"[AI][{function_name}] Step 5: Image generated successfully")
print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
print(f"[AI][{function_name}][Success] Image generation completed")
return {
'url': image_url,
'provider': 'bria',
'cost': cost,
'error': None,
}
else:
error_msg = f'No image data in Bria response'
print(f"[AI][{function_name}][Error] {error_msg}")
logger.error(f"[AI][{function_name}] Full Bria response: {json.dumps(body, indent=2) if isinstance(body, dict) else str(body)}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
except requests.exceptions.Timeout:
error_msg = 'Request timeout (150s exceeded)'
print(f"[AI][{function_name}][Error] {error_msg}")
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
except Exception as e:
error_msg = f'Unexpected error: {str(e)}'
print(f"[AI][{function_name}][Error] {error_msg}")
logger.error(error_msg, exc_info=True)
return {
'url': None,
'provider': 'bria',
'cost': 0.0,
'error': error_msg,
}
def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
"""Calculate cost for API call using ModelRegistry with fallback to constants"""
from igny8_core.ai.model_registry import ModelRegistry

View File

@@ -186,8 +186,146 @@ def seed_ai_models(apps, schema_editor):
},
]
# Bria AI Image Models
bria_models = [
{
'model_name': 'bria-2.3',
'display_name': 'Bria 2.3 High Quality',
'model_type': 'image',
'provider': 'bria',
'cost_per_image': Decimal('0.015'),
'valid_sizes': ['512x512', '768x768', '1024x1024', '1024x1792', '1792x1024'],
'supports_json_mode': False,
'supports_vision': False,
'supports_function_calling': False,
'is_active': True,
'is_default': False,
'sort_order': 11,
'description': 'Bria 2.3 - High quality image generation',
},
{
'model_name': 'bria-2.3-fast',
'display_name': 'Bria 2.3 Fast',
'model_type': 'image',
'provider': 'bria',
'cost_per_image': Decimal('0.010'),
'valid_sizes': ['512x512', '768x768', '1024x1024'],
'supports_json_mode': False,
'supports_vision': False,
'supports_function_calling': False,
'is_active': True,
'is_default': False,
'sort_order': 12,
'description': 'Bria 2.3 Fast - Quick generation, lower cost',
},
{
'model_name': 'bria-2.2',
'display_name': 'Bria 2.2 Standard',
'model_type': 'image',
'provider': 'bria',
'cost_per_image': Decimal('0.012'),
'valid_sizes': ['512x512', '768x768', '1024x1024'],
'supports_json_mode': False,
'supports_vision': False,
'supports_function_calling': False,
'is_active': True,
'is_default': False,
'sort_order': 13,
'description': 'Bria 2.2 - Standard image generation',
},
]
# Anthropic Claude Text Models
anthropic_models = [
{
'model_name': 'claude-3-5-sonnet-20241022',
'display_name': 'Claude 3.5 Sonnet (Latest)',
'model_type': 'text',
'provider': 'anthropic',
'input_cost_per_1m': Decimal('3.00'),
'output_cost_per_1m': Decimal('15.00'),
'context_window': 200000,
'max_output_tokens': 8192,
'supports_json_mode': True,
'supports_vision': True,
'supports_function_calling': True,
'is_active': True,
'is_default': False,
'sort_order': 20,
'description': 'Claude 3.5 Sonnet - Best for most tasks, excellent reasoning',
},
{
'model_name': 'claude-3-5-haiku-20241022',
'display_name': 'Claude 3.5 Haiku (Fast)',
'model_type': 'text',
'provider': 'anthropic',
'input_cost_per_1m': Decimal('1.00'),
'output_cost_per_1m': Decimal('5.00'),
'context_window': 200000,
'max_output_tokens': 8192,
'supports_json_mode': True,
'supports_vision': True,
'supports_function_calling': True,
'is_active': True,
'is_default': False,
'sort_order': 21,
'description': 'Claude 3.5 Haiku - Fast and affordable',
},
{
'model_name': 'claude-3-opus-20240229',
'display_name': 'Claude 3 Opus',
'model_type': 'text',
'provider': 'anthropic',
'input_cost_per_1m': Decimal('15.00'),
'output_cost_per_1m': Decimal('75.00'),
'context_window': 200000,
'max_output_tokens': 4096,
'supports_json_mode': True,
'supports_vision': True,
'supports_function_calling': True,
'is_active': True,
'is_default': False,
'sort_order': 22,
'description': 'Claude 3 Opus - Most capable Claude model',
},
{
'model_name': 'claude-3-sonnet-20240229',
'display_name': 'Claude 3 Sonnet',
'model_type': 'text',
'provider': 'anthropic',
'input_cost_per_1m': Decimal('3.00'),
'output_cost_per_1m': Decimal('15.00'),
'context_window': 200000,
'max_output_tokens': 4096,
'supports_json_mode': True,
'supports_vision': True,
'supports_function_calling': True,
'is_active': True,
'is_default': False,
'sort_order': 23,
'description': 'Claude 3 Sonnet - Balanced performance and cost',
},
{
'model_name': 'claude-3-haiku-20240307',
'display_name': 'Claude 3 Haiku',
'model_type': 'text',
'provider': 'anthropic',
'input_cost_per_1m': Decimal('0.25'),
'output_cost_per_1m': Decimal('1.25'),
'context_window': 200000,
'max_output_tokens': 4096,
'supports_json_mode': True,
'supports_vision': True,
'supports_function_calling': True,
'is_active': True,
'is_default': False,
'sort_order': 24,
'description': 'Claude 3 Haiku - Most affordable Claude model',
},
]
# Create all models
all_models = text_models + image_models + runware_models
all_models = text_models + image_models + runware_models + bria_models + anthropic_models
for model_data in all_models:
AIModelConfig.objects.update_or_create(
@@ -202,7 +340,10 @@ def reverse_migration(apps, schema_editor):
seeded_models = [
'gpt-4.1', 'gpt-4o-mini', 'gpt-4o', 'gpt-5.1', 'gpt-5.2',
'dall-e-3', 'dall-e-2', 'gpt-image-1', 'gpt-image-1-mini',
'runware:100@1'
'runware:100@1',
'bria-2.3', 'bria-2.3-fast', 'bria-2.2',
'claude-3-5-sonnet-20241022', 'claude-3-5-haiku-20241022',
'claude-3-opus-20240229', 'claude-3-sonnet-20240229', 'claude-3-haiku-20240307'
]
AIModelConfig.objects.filter(model_name__in=seeded_models).delete()

View File

@@ -57,6 +57,12 @@ class GlobalIntegrationSettings(models.Model):
('runware:101@1', 'Runware 101@1 - Fast Generation'),
]
BRIA_MODEL_CHOICES = [
('bria-2.3', 'Bria 2.3 - High Quality ($0.015/image)'),
('bria-2.3-fast', 'Bria 2.3 Fast - Quick Generation ($0.010/image)'),
('bria-2.2', 'Bria 2.2 - Standard ($0.012/image)'),
]
IMAGE_QUALITY_CHOICES = [
('standard', 'Standard'),
('hd', 'HD'),
@@ -73,6 +79,20 @@ class GlobalIntegrationSettings(models.Model):
IMAGE_SERVICE_CHOICES = [
('openai', 'OpenAI DALL-E'),
('runware', 'Runware'),
('bria', 'Bria AI'),
]
ANTHROPIC_MODEL_CHOICES = [
('claude-3-5-sonnet-20241022', 'Claude 3.5 Sonnet - $3.00 / $15.00 per 1M tokens'),
('claude-3-5-haiku-20241022', 'Claude 3.5 Haiku - $1.00 / $5.00 per 1M tokens'),
('claude-3-opus-20240229', 'Claude 3 Opus - $15.00 / $75.00 per 1M tokens'),
('claude-3-sonnet-20240229', 'Claude 3 Sonnet - $3.00 / $15.00 per 1M tokens'),
('claude-3-haiku-20240307', 'Claude 3 Haiku - $0.25 / $1.25 per 1M tokens'),
]
TEXT_PROVIDER_CHOICES = [
('openai', 'OpenAI (GPT)'),
('anthropic', 'Anthropic (Claude)'),
]
# OpenAI Settings (for text generation)
@@ -96,6 +116,35 @@ class GlobalIntegrationSettings(models.Model):
help_text="Default max tokens for responses (accounts can override if plan allows)"
)
# Anthropic Settings (for text generation - alternative to OpenAI)
anthropic_api_key = models.CharField(
max_length=500,
blank=True,
help_text="Platform Anthropic API key - used by ALL accounts"
)
anthropic_model = models.CharField(
max_length=100,
default='claude-3-5-sonnet-20241022',
choices=ANTHROPIC_MODEL_CHOICES,
help_text="Default Claude model (accounts can override if plan allows)"
)
anthropic_temperature = models.FloatField(
default=0.7,
help_text="Default temperature for Claude 0.0-1.0 (accounts can override if plan allows)"
)
anthropic_max_tokens = models.IntegerField(
default=8192,
help_text="Default max tokens for Claude responses (accounts can override if plan allows)"
)
# Default Text Generation Provider
default_text_provider = models.CharField(
max_length=20,
default='openai',
choices=TEXT_PROVIDER_CHOICES,
help_text="Default text generation provider for all accounts (openai=GPT, anthropic=Claude)"
)
# Image Generation Settings (OpenAI/DALL-E)
dalle_api_key = models.CharField(
max_length=500,
@@ -128,12 +177,25 @@ class GlobalIntegrationSettings(models.Model):
help_text="Default Runware model (accounts can override if plan allows)"
)
# Image Generation Settings (Bria AI)
bria_api_key = models.CharField(
max_length=500,
blank=True,
help_text="Platform Bria API key - used by ALL accounts"
)
bria_model = models.CharField(
max_length=100,
default='bria-2.3',
choices=BRIA_MODEL_CHOICES,
help_text="Default Bria model (accounts can override if plan allows)"
)
# Default Image Generation Service
default_image_service = models.CharField(
max_length=20,
default='openai',
choices=IMAGE_SERVICE_CHOICES,
help_text="Default image generation service for all accounts (openai=DALL-E, runware=Runware)"
help_text="Default image generation service for all accounts (openai=DALL-E, runware=Runware, bria=Bria)"
)
# Universal Image Generation Settings (applies to ALL providers)

View File

@@ -0,0 +1,53 @@
# Generated migration for Bria AI integration
from django.db import migrations, models
class Migration(migrations.Migration):
# Schema migration: adds the Bria AI image-generation settings to
# GlobalIntegrationSettings and widens the image-service choices.
dependencies = [
('system', '0011_disable_phase2_modules'),
]
# NOTE(review): the choices/help_text literals below are frozen snapshots
# and must stay in sync with BRIA_MODEL_CHOICES / IMAGE_SERVICE_CHOICES on
# the GlobalIntegrationSettings model.
operations = [
# Add Bria API key field
migrations.AddField(
model_name='globalintegrationsettings',
name='bria_api_key',
field=models.CharField(
blank=True,
help_text='Platform Bria API key - used by ALL accounts',
max_length=500
),
),
# Add Bria model selection field
migrations.AddField(
model_name='globalintegrationsettings',
name='bria_model',
field=models.CharField(
choices=[
('bria-2.3', 'Bria 2.3 - High Quality ($0.015/image)'),
('bria-2.3-fast', 'Bria 2.3 Fast - Quick Generation ($0.010/image)'),
('bria-2.2', 'Bria 2.2 - Standard ($0.012/image)'),
],
default='bria-2.3',
help_text='Default Bria model (accounts can override if plan allows)',
max_length=100
),
),
# Update default_image_service choices to include bria
migrations.AlterField(
model_name='globalintegrationsettings',
name='default_image_service',
field=models.CharField(
choices=[
('openai', 'OpenAI DALL-E'),
('runware', 'Runware'),
('bria', 'Bria AI'),
],
default='openai',
help_text='Default image generation service for all accounts (openai=DALL-E, runware=Runware, bria=Bria)',
max_length=20
),
),
]

View File

@@ -0,0 +1,64 @@
# Generated migration for Anthropic (Claude) integration
from django.db import migrations, models
class Migration(migrations.Migration):
# Schema migration: adds the Anthropic (Claude) text-generation settings
# and the default_text_provider switch to GlobalIntegrationSettings.
dependencies = [
('system', '0012_add_bria_integration'),
]
# NOTE(review): choices/defaults below are frozen snapshots and must stay
# in sync with ANTHROPIC_MODEL_CHOICES / TEXT_PROVIDER_CHOICES on the
# GlobalIntegrationSettings model.
operations = [
# Platform-wide Anthropic API key
migrations.AddField(
model_name='globalintegrationsettings',
name='anthropic_api_key',
field=models.CharField(
blank=True,
help_text='Platform Anthropic API key - used by ALL accounts',
max_length=500
),
),
# Default Claude model selection
migrations.AddField(
model_name='globalintegrationsettings',
name='anthropic_model',
field=models.CharField(
choices=[
('claude-3-5-sonnet-20241022', 'Claude 3.5 Sonnet (Latest)'),
('claude-3-5-haiku-20241022', 'Claude 3.5 Haiku (Fast)'),
('claude-3-opus-20240229', 'Claude 3 Opus'),
('claude-3-sonnet-20240229', 'Claude 3 Sonnet'),
('claude-3-haiku-20240307', 'Claude 3 Haiku'),
],
default='claude-3-5-sonnet-20241022',
help_text='Default Claude model (accounts can override if plan allows)',
max_length=100
),
),
# Default sampling temperature for Claude requests
migrations.AddField(
model_name='globalintegrationsettings',
name='anthropic_temperature',
field=models.FloatField(
default=0.7,
help_text='Default temperature for Claude 0.0-1.0 (accounts can override if plan allows)'
),
),
# Default max output tokens for Claude requests
migrations.AddField(
model_name='globalintegrationsettings',
name='anthropic_max_tokens',
field=models.IntegerField(
default=8192,
help_text='Default max tokens for Claude responses (accounts can override if plan allows)'
),
),
# Platform-wide switch between GPT and Claude for text generation
migrations.AddField(
model_name='globalintegrationsettings',
name='default_text_provider',
field=models.CharField(
choices=[('openai', 'OpenAI (GPT)'), ('anthropic', 'Anthropic (Claude)')],
default='openai',
help_text='Default text generation provider for all accounts (openai=GPT, anthropic=Claude)',
max_length=20
),
),
]