imp part 4
This commit is contained in:
@@ -40,6 +40,8 @@ class AICore:
|
||||
self.account = account
|
||||
self._openai_api_key = None
|
||||
self._runware_api_key = None
|
||||
self._bria_api_key = None
|
||||
self._anthropic_api_key = None
|
||||
self._load_account_settings()
|
||||
|
||||
def _load_account_settings(self):
|
||||
@@ -53,11 +55,15 @@ class AICore:
|
||||
# Load API keys from global settings (platform-wide)
|
||||
self._openai_api_key = global_settings.openai_api_key
|
||||
self._runware_api_key = global_settings.runware_api_key
|
||||
self._bria_api_key = getattr(global_settings, 'bria_api_key', None)
|
||||
self._anthropic_api_key = getattr(global_settings, 'anthropic_api_key', None)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Could not load GlobalIntegrationSettings: {e}", exc_info=True)
|
||||
self._openai_api_key = None
|
||||
self._runware_api_key = None
|
||||
self._bria_api_key = None
|
||||
self._anthropic_api_key = None
|
||||
|
||||
def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
|
||||
"""Get API key for integration type"""
|
||||
@@ -65,6 +71,10 @@ class AICore:
|
||||
return self._openai_api_key
|
||||
elif integration_type == 'runware':
|
||||
return self._runware_api_key
|
||||
elif integration_type == 'bria':
|
||||
return self._bria_api_key
|
||||
elif integration_type == 'anthropic':
|
||||
return self._anthropic_api_key
|
||||
return None
|
||||
|
||||
def get_model(self, integration_type: str = 'openai') -> str:
|
||||
@@ -380,6 +390,289 @@ class AICore:
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
def run_anthropic_request(
    self,
    prompt: str,
    model: str,
    max_tokens: int = 8192,
    temperature: float = 0.7,
    api_key: Optional[str] = None,
    function_name: str = 'anthropic_request',
    prompt_prefix: Optional[str] = None,
    tracker: Optional["ConsoleStepTracker"] = None,
    system_prompt: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Anthropic (Claude) AI request handler with console logging.
    Alternative to OpenAI for text generation.

    Args:
        prompt: Prompt text
        model: Claude model name (required - must be provided from IntegrationSettings)
        max_tokens: Maximum tokens
        temperature: Temperature (0-1)
        api_key: Optional API key override
        function_name: Function name for logging (e.g., 'cluster_keywords')
        prompt_prefix: Optional prefix to add before prompt
        tracker: Optional ConsoleStepTracker instance for logging
        system_prompt: Optional system prompt for Claude

    Returns:
        Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
        'model', 'cost', 'error', 'api_id' (plus 'duration' on success).
        This method never raises for missing configuration: a missing model or
        API key is reported through the 'error' key of the returned dict.
    """
    def _error_result(message: str,
                      used_model: Optional[str],
                      request_id: Optional[str] = None) -> Dict[str, Any]:
        # Every failure path returns this same zeroed-out shape.
        return {
            'content': None,
            'error': message,
            'input_tokens': 0,
            'output_tokens': 0,
            'total_tokens': 0,
            'model': used_model,
            'cost': 0.0,
            'api_id': request_id,
        }

    # Use provided tracker or create a new one
    if tracker is None:
        tracker = ConsoleStepTracker(function_name)

    tracker.ai_call("Preparing Anthropic request...")

    # Step 1: Validate model is provided (there is no built-in default).
    if not model:
        error_msg = "Model is required. Ensure IntegrationSettings is configured for the account."
        tracker.error('ConfigurationError', error_msg)
        logger.error(f"[AICore][Anthropic] {error_msg}")
        return _error_result(error_msg, None)

    # Step 2: Validate API key (explicit override wins over the account key).
    api_key = api_key or self._anthropic_api_key
    if not api_key:
        error_msg = 'Anthropic API key not configured'
        tracker.error('ConfigurationError', error_msg)
        return _error_result(error_msg, model)

    active_model = model

    # Debug logging: Show model used
    logger.info("[AICore][Anthropic] Model Configuration:")
    logger.info(f" - Model parameter passed: {model}")
    logger.info(f" - Model used in request: {active_model}")
    tracker.ai_call(f"Using Anthropic model: {active_model}")

    # Add prompt_prefix to prompt if provided (for tracking)
    final_prompt = prompt
    if prompt_prefix:
        final_prompt = f'{prompt_prefix}\n\n{prompt}'
        tracker.ai_call(f"Added prompt prefix: {prompt_prefix}")

    # Step 5: Build request payload using the Anthropic Messages API.
    url = 'https://api.anthropic.com/v1/messages'
    headers = {
        'x-api-key': api_key,
        'anthropic-version': '2023-06-01',
        'Content-Type': 'application/json',
    }

    body_data = {
        'model': active_model,
        'max_tokens': max_tokens,
        'messages': [{'role': 'user', 'content': final_prompt}],
    }

    # Only add temperature if it's less than 1.0 (Claude's default)
    if temperature < 1.0:
        body_data['temperature'] = temperature

    # Add system prompt if provided
    if system_prompt:
        body_data['system'] = system_prompt

    tracker.ai_call(f"Request payload prepared (model={active_model}, max_tokens={max_tokens}, temp={temperature})")

    # Step 6: Send request
    tracker.ai_call("Sending request to Anthropic API...")
    request_start = time.time()

    try:
        response = requests.post(url, headers=headers, json=body_data, timeout=180)
        request_duration = time.time() - request_start
        tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")

        # Step 7: Validate HTTP response
        if response.status_code != 200:
            # FIX: a truncated/invalid body can make response.json() raise even
            # when the content-type claims JSON — never let error reporting crash.
            error_data = {}
            if response.headers.get('content-type', '').startswith('application/json'):
                try:
                    error_data = response.json()
                except ValueError:
                    error_data = {}
            error_message = f"HTTP {response.status_code} error"

            if isinstance(error_data, dict) and 'error' in error_data:
                if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
                    error_message += f": {error_data['error']['message']}"

            # Check for rate limit
            if response.status_code == 429:
                retry_after = response.headers.get('retry-after', '60')
                tracker.rate_limit(retry_after)
                error_message += f" (Rate limit - retry after {retry_after}s)"
            else:
                tracker.error('HTTPError', error_message)

            logger.error(f"Anthropic API HTTP error {response.status_code}: {error_message}")
            return _error_result(error_message, active_model)

        # Step 8: Parse response JSON
        try:
            data = response.json()
        except json.JSONDecodeError as e:
            error_msg = f'Failed to parse JSON response: {str(e)}'
            tracker.malformed_json(str(e))
            logger.error(error_msg)
            return _error_result(error_msg, active_model)

        api_id = data.get('id')

        # Step 9: Extract content (Anthropic format)
        # Claude returns content as array: [{"type": "text", "text": "..."}]
        if 'content' in data and len(data['content']) > 0:
            # Concatenate every text block, skipping non-text blocks.
            content = ''.join(
                block.get('text', '')
                for block in data['content']
                if block.get('type') == 'text'
            )

            usage = data.get('usage', {})
            input_tokens = usage.get('input_tokens', 0)
            output_tokens = usage.get('output_tokens', 0)
            total_tokens = input_tokens + output_tokens

            tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
            tracker.parse(f"Content length: {len(content)} characters")

            # Step 10: Calculate cost using ModelRegistry (with fallback)
            from igny8_core.ai.model_registry import ModelRegistry
            cost = float(ModelRegistry.calculate_cost(
                active_model,
                input_tokens=input_tokens,
                output_tokens=output_tokens
            ))
            # Fallback to hardcoded per-million-token rates if ModelRegistry
            # does not know the model. Claude pricing as of 2024:
            #   claude-3-5-sonnet: $3/1M input, $15/1M output
            #   claude-3-opus:     $15/1M input, $75/1M output
            #   claude-3-haiku:    $0.25/1M input, $1.25/1M output
            if cost == 0:
                anthropic_rates = {
                    'claude-3-5-sonnet-20241022': {'input': 3.00, 'output': 15.00},
                    'claude-3-5-haiku-20241022': {'input': 1.00, 'output': 5.00},
                    'claude-3-opus-20240229': {'input': 15.00, 'output': 75.00},
                    'claude-3-sonnet-20240229': {'input': 3.00, 'output': 15.00},
                    'claude-3-haiku-20240307': {'input': 0.25, 'output': 1.25},
                }
                rates = anthropic_rates.get(active_model, {'input': 3.00, 'output': 15.00})
                cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
            tracker.parse(f"Cost calculated: ${cost:.6f}")

            tracker.done("Anthropic request completed successfully")

            return {
                'content': content,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'total_tokens': total_tokens,
                'model': active_model,
                'cost': cost,
                'error': None,
                'api_id': api_id,
                'duration': request_duration,
            }
        else:
            error_msg = 'No content in Anthropic response'
            tracker.error('EmptyResponse', error_msg)
            logger.error(error_msg)
            return _error_result(error_msg, active_model, api_id)

    except requests.exceptions.Timeout:
        error_msg = 'Request timeout (180s exceeded)'
        tracker.timeout(180)
        logger.error(error_msg)
        return _error_result(error_msg, active_model)
    except requests.exceptions.RequestException as e:
        error_msg = f'Request exception: {str(e)}'
        tracker.error('RequestException', error_msg, e)
        logger.error(f"Anthropic API error: {error_msg}", exc_info=True)
        return _error_result(error_msg, active_model)
    except Exception as e:
        error_msg = f'Unexpected error: {str(e)}'
        logger.error(f"[AI][{function_name}][Anthropic][Error] {error_msg}", exc_info=True)
        if tracker:
            tracker.error('UnexpectedError', error_msg, e)
        return _error_result(error_msg, active_model)
|
||||
|
||||
def extract_json(self, response_text: str) -> Optional[Dict]:
|
||||
"""
|
||||
Extract JSON from response text.
|
||||
@@ -453,6 +746,8 @@ class AICore:
|
||||
return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
elif provider == 'runware':
|
||||
return self._generate_image_runware(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
elif provider == 'bria':
|
||||
return self._generate_image_bria(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
else:
|
||||
error_msg = f'Unknown provider: {provider}'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
@@ -830,6 +1125,170 @@ class AICore:
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
def _generate_image_bria(
|
||||
self,
|
||||
prompt: str,
|
||||
model: Optional[str],
|
||||
size: str,
|
||||
n: int,
|
||||
api_key: Optional[str],
|
||||
negative_prompt: Optional[str],
|
||||
function_name: str
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Generate image using Bria AI.
|
||||
|
||||
Bria API Reference: https://docs.bria.ai/reference/text-to-image
|
||||
"""
|
||||
print(f"[AI][{function_name}] Provider: Bria AI")
|
||||
|
||||
api_key = api_key or self._bria_api_key
|
||||
if not api_key:
|
||||
error_msg = 'Bria API key not configured'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
bria_model = model or 'bria-2.3'
|
||||
print(f"[AI][{function_name}] Step 2: Using model: {bria_model}, size: {size}")
|
||||
|
||||
# Parse size
|
||||
try:
|
||||
width, height = map(int, size.split('x'))
|
||||
except ValueError:
|
||||
error_msg = f"Invalid size format: {size}. Expected format: WIDTHxHEIGHT"
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
# Bria API endpoint
|
||||
url = 'https://engine.prod.bria-api.com/v1/text-to-image/base'
|
||||
headers = {
|
||||
'api_token': api_key,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
payload = {
|
||||
'prompt': prompt,
|
||||
'num_results': n,
|
||||
'sync': True, # Wait for result
|
||||
'model_version': bria_model.replace('bria-', ''), # e.g., '2.3'
|
||||
}
|
||||
|
||||
# Add negative prompt if provided
|
||||
if negative_prompt:
|
||||
payload['negative_prompt'] = negative_prompt
|
||||
|
||||
# Add size constraints if not default
|
||||
if width and height:
|
||||
# Bria uses aspect ratio or fixed sizes
|
||||
payload['width'] = width
|
||||
payload['height'] = height
|
||||
|
||||
print(f"[AI][{function_name}] Step 3: Sending request to Bria API...")
|
||||
|
||||
request_start = time.time()
|
||||
try:
|
||||
response = requests.post(url, json=payload, headers=headers, timeout=150)
|
||||
request_duration = time.time() - request_start
|
||||
print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
|
||||
|
||||
if response.status_code != 200:
|
||||
error_msg = f"HTTP {response.status_code} error: {response.text[:200]}"
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
body = response.json()
|
||||
print(f"[AI][{function_name}] Bria response keys: {list(body.keys()) if isinstance(body, dict) else type(body)}")
|
||||
|
||||
# Bria returns { "result": [ { "urls": ["..."] } ] }
|
||||
image_url = None
|
||||
error_msg = None
|
||||
|
||||
if isinstance(body, dict):
|
||||
if 'result' in body and isinstance(body['result'], list) and len(body['result']) > 0:
|
||||
first_result = body['result'][0]
|
||||
if 'urls' in first_result and isinstance(first_result['urls'], list) and len(first_result['urls']) > 0:
|
||||
image_url = first_result['urls'][0]
|
||||
elif 'url' in first_result:
|
||||
image_url = first_result['url']
|
||||
elif 'error' in body:
|
||||
error_msg = body['error']
|
||||
elif 'message' in body:
|
||||
error_msg = body['message']
|
||||
|
||||
if error_msg:
|
||||
print(f"[AI][{function_name}][Error] Bria API error: {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
if image_url:
|
||||
# Cost based on model
|
||||
cost_per_image = {
|
||||
'bria-2.3': 0.015,
|
||||
'bria-2.3-fast': 0.010,
|
||||
'bria-2.2': 0.012,
|
||||
}.get(bria_model, 0.015)
|
||||
cost = cost_per_image * n
|
||||
|
||||
print(f"[AI][{function_name}] Step 5: Image generated successfully")
|
||||
print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
|
||||
print(f"[AI][{function_name}][Success] Image generation completed")
|
||||
|
||||
return {
|
||||
'url': image_url,
|
||||
'provider': 'bria',
|
||||
'cost': cost,
|
||||
'error': None,
|
||||
}
|
||||
else:
|
||||
error_msg = f'No image data in Bria response'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
logger.error(f"[AI][{function_name}] Full Bria response: {json.dumps(body, indent=2) if isinstance(body, dict) else str(body)}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
error_msg = 'Request timeout (150s exceeded)'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
except Exception as e:
|
||||
error_msg = f'Unexpected error: {str(e)}'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
logger.error(error_msg, exc_info=True)
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
|
||||
"""Calculate cost for API call using ModelRegistry with fallback to constants"""
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
Reference in New Issue
Block a user