credits and tokens final correct setup
This commit is contained in:
@@ -133,7 +133,10 @@ class OptimizerService:
|
||||
scores_after = self.analyzer.analyze(optimized_content)
|
||||
|
||||
# Calculate credits used
|
||||
credits_used = self.credit_service.get_credit_cost('optimization', word_count)
|
||||
estimated_tokens = len(content.html_content or '') // 4
|
||||
credits_used = self.credit_service.calculate_credits_from_tokens(
|
||||
'optimization', estimated_tokens, 0
|
||||
)
|
||||
|
||||
# Update optimization task
|
||||
task.scores_after = scores_after
|
||||
@@ -148,18 +151,22 @@ class OptimizerService:
|
||||
content.optimization_scores = scores_after
|
||||
content.save(update_fields=['html_content', 'optimizer_version', 'optimization_scores'])
|
||||
|
||||
# Deduct credits
|
||||
# Deduct credits (non-AI operation - use fixed token estimate based on content size)
|
||||
# Estimate: 1 token per 4 characters of HTML content
|
||||
estimated_tokens = len(content.html_content or '') // 4
|
||||
self.credit_service.deduct_credits_for_operation(
|
||||
account=account,
|
||||
operation_type='optimization',
|
||||
amount=word_count,
|
||||
tokens_input=estimated_tokens,
|
||||
tokens_output=0,
|
||||
description=f"Content optimization: {content.title or 'Untitled'}",
|
||||
related_object_type='content',
|
||||
related_object_id=content.id,
|
||||
metadata={
|
||||
'scores_before': scores_before,
|
||||
'scores_after': scores_after,
|
||||
'improvement': scores_after.get('overall_score', 0) - scores_before.get('overall_score', 0)
|
||||
'improvement': scores_after.get('overall_score', 0) - scores_before.get('overall_score', 0),
|
||||
'word_count': word_count
|
||||
}
|
||||
)
|
||||
|
||||
@@ -279,7 +286,10 @@ class OptimizerService:
|
||||
scores_after = self._enhance_product_scores(scores_after, optimized_content)
|
||||
|
||||
# Calculate credits used
|
||||
credits_used = self.credit_service.get_credit_cost('optimization', word_count)
|
||||
estimated_tokens = len(content.html_content or '') // 4
|
||||
credits_used = self.credit_service.calculate_credits_from_tokens(
|
||||
'optimization', estimated_tokens, 0
|
||||
)
|
||||
|
||||
# Update optimization task
|
||||
task.scores_after = scores_after
|
||||
@@ -294,11 +304,14 @@ class OptimizerService:
|
||||
content.optimization_scores = scores_after
|
||||
content.save(update_fields=['html_content', 'optimizer_version', 'optimization_scores'])
|
||||
|
||||
# Deduct credits
|
||||
# Deduct credits (non-AI operation - use fixed token estimate based on content size)
|
||||
# Estimate: 1 token per 4 characters of HTML content
|
||||
estimated_tokens = len(content.html_content or '') // 4
|
||||
self.credit_service.deduct_credits_for_operation(
|
||||
account=account,
|
||||
operation_type='optimization',
|
||||
amount=word_count,
|
||||
tokens_input=estimated_tokens,
|
||||
tokens_output=0,
|
||||
description=f"Product optimization: {content.title or 'Untitled'}",
|
||||
related_object_type='content',
|
||||
related_object_id=content.id,
|
||||
@@ -306,6 +319,7 @@ class OptimizerService:
|
||||
'scores_before': scores_before,
|
||||
'scores_after': scores_after,
|
||||
'improvement': scores_after.get('overall_score', 0) - scores_before.get('overall_score', 0),
|
||||
'word_count': word_count,
|
||||
'entity_type': 'product'
|
||||
}
|
||||
)
|
||||
@@ -372,7 +386,11 @@ class OptimizerService:
|
||||
scores_after = self._enhance_taxonomy_scores(scores_after, optimized_content)
|
||||
|
||||
# Calculate credits used
|
||||
credits_used = self.credit_service.get_credit_cost('optimization', word_count)
|
||||
# Calculate estimated credits for task tracking
|
||||
estimated_tokens = len(content.html_content or '') // 4
|
||||
credits_used = self.credit_service.calculate_credits_from_tokens(
|
||||
'optimization', estimated_tokens, 0
|
||||
)
|
||||
|
||||
# Update optimization task
|
||||
task.scores_after = scores_after
|
||||
@@ -387,17 +405,20 @@ class OptimizerService:
|
||||
content.optimization_scores = scores_after
|
||||
content.save(update_fields=['html_content', 'optimizer_version', 'optimization_scores'])
|
||||
|
||||
# Deduct credits
|
||||
# Deduct credits (non-AI operation - use fixed token estimate based on content size)
|
||||
# Estimate: 1 token per 4 characters of HTML content
|
||||
self.credit_service.deduct_credits_for_operation(
|
||||
account=account,
|
||||
operation_type='optimization',
|
||||
amount=word_count,
|
||||
tokens_input=estimated_tokens,
|
||||
tokens_output=0,
|
||||
description=f"Taxonomy optimization: {content.title or 'Untitled'}",
|
||||
related_object_type='content',
|
||||
related_object_id=content.id,
|
||||
metadata={
|
||||
'scores_before': scores_before,
|
||||
'scores_after': scores_after,
|
||||
'word_count': word_count,
|
||||
'improvement': scores_after.get('overall_score', 0) - scores_before.get('overall_score', 0),
|
||||
'entity_type': 'taxonomy'
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user