Compare commits
275 Commits
31c06d032c
...
cleanup/ph
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e317e1de26 | ||
|
|
f04eb0a900 | ||
|
|
264c720e3e | ||
|
|
0921adbabb | ||
|
|
82d6a9e879 | ||
|
|
0526553c9b | ||
|
|
7bb9d813f2 | ||
|
|
59f7455521 | ||
|
|
34c8cc410a | ||
|
|
4f99fc1451 | ||
|
|
84ed711f6d | ||
|
|
7c79bdcc6c | ||
|
|
74370685f4 | ||
|
|
e2a1c15183 | ||
|
|
51512d6c91 | ||
|
|
4e9f2d9dbc | ||
|
|
d4ecddba22 | ||
|
|
3651ee9ed4 | ||
|
|
7da3334c03 | ||
|
|
3028db5197 | ||
|
|
7ad1f6bdff | ||
|
|
ad75fa031e | ||
|
|
ad1756c349 | ||
|
|
0386d4bf33 | ||
|
|
87d1662a18 | ||
|
|
909ed1cb17 | ||
|
|
4b6a03a898 | ||
|
|
6c8e5fdd57 | ||
|
|
52603f2deb | ||
|
|
9ca048fb9d | ||
|
|
cb8e747387 | ||
|
|
abc6c011ea | ||
|
|
de0e42cca8 | ||
|
|
ff44827b35 | ||
|
|
e93ea77c2b | ||
|
|
1f2e734ea2 | ||
|
|
6947819742 | ||
|
|
dc7a459ebb | ||
|
|
6e30d2d4e8 | ||
|
|
b2922ebec5 | ||
|
|
c4de8994dd | ||
|
|
f518e1751b | ||
|
|
a70f8cdd01 | ||
|
|
a1016ec1c2 | ||
|
|
52600c9dca | ||
|
|
f10916bfab | ||
|
|
f1ba0aa531 | ||
|
|
4d6ee21408 | ||
|
|
935c7234b1 | ||
|
|
94d37a0d84 | ||
|
|
e2d462d8b6 | ||
|
|
16dfc56ba0 | ||
|
|
bc371e5482 | ||
|
|
f28f641fd5 | ||
|
|
a4691ad2da | ||
|
|
c880e24fc0 | ||
|
|
e96069775c | ||
|
|
0e57c50e56 | ||
|
|
c44d520a7f | ||
|
|
815c7b5129 | ||
|
|
d389576634 | ||
|
|
41e124d8e8 | ||
|
|
0340016932 | ||
|
|
f81fffc9a6 | ||
|
|
dd63403e94 | ||
|
|
d16e5e1a4b | ||
|
|
6caeed14cb | ||
|
|
af408d0747 | ||
|
|
0d3e25e50f | ||
|
|
a02e485f7d | ||
|
|
89b64cd737 | ||
|
|
b61bd6e64d | ||
|
|
6953343026 | ||
|
|
1632ee62b6 | ||
|
|
51950c7ce1 | ||
|
|
885158e152 | ||
|
|
2af7bb725f | ||
|
|
96aaa4151a | ||
|
|
6c1cf99488 | ||
|
|
b23cb07f41 | ||
|
|
4f7ab9c606 | ||
|
|
c91175fdcb | ||
|
|
0ffd21b9bf | ||
|
|
53fdebf733 | ||
|
|
748de099dd | ||
|
|
7f82ef4551 | ||
|
|
f92b3fba6e | ||
|
|
d4b9c8693a | ||
|
|
ea9125b805 | ||
|
|
0605f650b1 | ||
|
|
28a60f8141 | ||
|
|
e0f3060df9 | ||
|
|
d0f98d35d6 | ||
|
|
5f9a4b8dca | ||
|
|
627938aa95 | ||
|
|
a145e6742e | ||
|
|
24cdb4fdf9 | ||
|
|
a1ec3100fd | ||
|
|
c44bee7fa7 | ||
|
|
9d54bbff48 | ||
|
|
c227d9ee03 | ||
|
|
efd7193951 | ||
|
|
034c640601 | ||
|
|
4482d2f4c4 | ||
|
|
d5bda678fd | ||
|
|
302af6337e | ||
|
|
726d945bda | ||
|
|
fd6e7eb2dd | ||
|
|
e5959c3e72 | ||
|
|
4e9bf0ba56 | ||
|
|
74a3441ee4 | ||
|
|
178b7c23ce | ||
|
|
add04e2ad5 | ||
|
|
890e138829 | ||
|
|
7af4190e6d | ||
|
|
7a9fa8fd8f | ||
|
|
277ef5c81d | ||
|
|
544a397e3d | ||
|
|
33b4454f96 | ||
|
|
444d53dc7b | ||
|
|
91525b8999 | ||
|
|
4bffede052 | ||
|
|
90e6e96b2b | ||
|
|
4248fd0969 | ||
|
|
e736697d6d | ||
|
|
d21b5b1363 | ||
|
|
34e8017770 | ||
|
|
65bf65bb6b | ||
|
|
d9346e6f16 | ||
|
|
f559bd44a1 | ||
|
|
62fc47cfe8 | ||
|
|
9e48d728fd | ||
|
|
272a3e3d83 | ||
|
|
ebf6a9f27a | ||
|
|
2d4767530d | ||
|
|
b0c14ccc32 | ||
|
|
826ad89a3e | ||
|
|
504d0174f7 | ||
|
|
5299fd82eb | ||
|
|
abeede5f04 | ||
|
|
64e76f5436 | ||
|
|
02d4f1fa46 | ||
|
|
355b0ac897 | ||
|
|
0a12123c85 | ||
|
|
646095da65 | ||
|
|
5c9ef81aba | ||
|
|
7a1e952a57 | ||
|
|
9e8ff4fbb1 | ||
|
|
3283a83b42 | ||
|
|
eb6cba7920 | ||
|
|
ab0d6469d4 | ||
|
|
c17b22e927 | ||
|
|
e041cb8e65 | ||
|
|
98e68f6bd8 | ||
|
|
71fe687681 | ||
|
|
1993d45f32 | ||
|
|
8c1d933647 | ||
|
|
62e55389f9 | ||
|
|
e43f8553b6 | ||
|
|
7ad06c6227 | ||
|
|
9f826c92f8 | ||
|
|
4bba5a9a1f | ||
|
|
45d9dfa0f5 | ||
|
|
9656643f0f | ||
|
|
69c0fd8b69 | ||
|
|
8f97666522 | ||
|
|
84fd4bc11a | ||
|
|
1887f2a665 | ||
|
|
5366cc1805 | ||
|
|
25f1c32366 | ||
|
|
4fb3a144d7 | ||
|
|
06e5f252a4 | ||
|
|
7fb2a9309e | ||
|
|
1ef4bb7db6 | ||
|
|
558ce9250a | ||
|
|
f8c6dfe889 | ||
|
|
41551f2edc | ||
|
|
1924c8fdbe | ||
|
|
68942410ae | ||
|
|
9ec87ed932 | ||
|
|
c61cf7c39f | ||
|
|
cff00f87ff | ||
|
|
c23698f7f8 | ||
|
|
8162b6ae92 | ||
|
|
d9dbb1e4b8 | ||
|
|
125489df0f | ||
|
|
cda56f15ba | ||
|
|
aa48a55504 | ||
|
|
78f71558ed | ||
|
|
f637f700eb | ||
|
|
9150b60c2d | ||
|
|
93ecb5ceb8 | ||
|
|
9149281c1c | ||
|
|
293c1e9c0d | ||
|
|
985d7bc3e1 | ||
|
|
4b81ac07f5 | ||
|
|
a518997467 | ||
|
|
94b1ce8d8f | ||
|
|
f7f6a12e7b | ||
|
|
a6fab8784d | ||
|
|
cd2c84116b | ||
|
|
ade055c971 | ||
|
|
90aa99b2c1 | ||
|
|
eb88a0e12d | ||
|
|
d161378bd9 | ||
|
|
1acecd8639 | ||
|
|
60263b4682 | ||
|
|
0b24fe8c77 | ||
|
|
c51270a3be | ||
|
|
75706e8b05 | ||
|
|
410d2b33ec | ||
|
|
db1fd2fff8 | ||
|
|
ad895fcb3a | ||
|
|
33ac4be8df | ||
|
|
44ecd3fa7d | ||
|
|
9824e9a4dc | ||
|
|
a3f817a292 | ||
|
|
9cb0e05618 | ||
|
|
f163a2e07d | ||
|
|
6e2101d019 | ||
|
|
12956ec64a | ||
|
|
b2e8732a82 | ||
|
|
a736bc3d34 | ||
|
|
20fdd3b295 | ||
|
|
50aafd9ce3 | ||
|
|
6997702b12 | ||
|
|
87d1392b4c | ||
|
|
aba2c7da01 | ||
|
|
c665c44aba | ||
|
|
3f49a2599e | ||
|
|
6a056e3589 | ||
|
|
69363b9b31 | ||
|
|
c812da6742 | ||
|
|
7a35981038 | ||
|
|
5fb3687854 | ||
|
|
4dd129b863 | ||
|
|
6a4f95c35a | ||
|
|
4d13a57068 | ||
|
|
72d0b6b0fd | ||
|
|
92211f065b | ||
|
|
bfbade7624 | ||
|
|
c54db6c2d9 | ||
|
|
74e29380fe | ||
|
|
92d16c76a7 | ||
|
|
9f85ce4f52 | ||
|
|
33ad6768ec | ||
|
|
73d7a6953b | ||
|
|
7d3ecd7cc2 | ||
|
|
c09c6cf7eb | ||
|
|
144e955b92 | ||
|
|
da3b45d1c7 | ||
|
|
affa783a4f | ||
|
|
8231c499c2 | ||
|
|
3f2879d269 | ||
|
|
40b7aced14 | ||
|
|
42d04fb7f2 | ||
|
|
d144f5d19a | ||
|
|
7483de6aba | ||
|
|
9764a09a25 | ||
|
|
4e9d8af768 | ||
|
|
156742d679 | ||
|
|
191287829f | ||
|
|
69e88432c5 | ||
|
|
6dcbc651dd | ||
|
|
f0066b6e7d | ||
|
|
65fea95d33 | ||
|
|
3cbed65601 | ||
|
|
1dd2d53a8e | ||
|
|
c87bc7266c | ||
|
|
8aef9c7727 | ||
|
|
2420f1678d | ||
|
|
508b6b4220 | ||
|
|
46fc6dcf04 | ||
|
|
6c4415ab16 | ||
|
|
4e764e208d |
362
.rules
Normal file
362
.rules
Normal file
@@ -0,0 +1,362 @@
|
||||
# IGNY8 AI Agent Rules
|
||||
|
||||
**Version:** 1.2.0 | **Updated:** January 2, 2026
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Quick Start for AI Agents
|
||||
|
||||
**BEFORE any change, read these docs in order:**
|
||||
1. [docs/INDEX.md](docs/INDEX.md) - Quick navigation to any module/feature
|
||||
2. [docs/30-FRONTEND/COMPONENT-SYSTEM.md](docs/30-FRONTEND/COMPONENT-SYSTEM.md) - **REQUIRED** for any frontend work
|
||||
3. [docs/30-FRONTEND/DESIGN-TOKENS.md](docs/30-FRONTEND/DESIGN-TOKENS.md) - Color tokens and styling rules
|
||||
4. Module doc for the feature you're modifying (see INDEX.md for paths)
|
||||
5. [CHANGELOG.md](CHANGELOG.md) - Recent changes and version history
|
||||
|
||||
---
|
||||
|
||||
## 📁 Project Structure
|
||||
|
||||
| Layer | Path | Purpose |
|
||||
|-------|------|---------|
|
||||
| Backend | `backend/igny8_core/` | Django REST API |
|
||||
| Frontend | `frontend/src/` | React + TypeScript SPA |
|
||||
| Docs | `docs/` | Technical documentation |
|
||||
| AI Engine | `backend/igny8_core/ai/` | AI functions (use this, NOT `utils/ai_processor.py`) |
|
||||
| Design Tokens | `frontend/src/styles/design-system.css` | **Single source** for colors, shadows, typography |
|
||||
| UI Components | `frontend/src/components/ui/` | Button, Badge, Card, Modal, etc. |
|
||||
| Form Components | `frontend/src/components/form/` | InputField, Select, Checkbox, Switch |
|
||||
| Icons | `frontend/src/icons/` | All SVG icons (import from `../../icons`) |
|
||||
|
||||
**Module → File Quick Reference:** See [docs/INDEX.md](docs/INDEX.md#module--file-quick-reference)
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Module Status
|
||||
|
||||
| Module | Status | Notes |
|
||||
|--------|--------|-------|
|
||||
| Planner | ✅ Active | Keywords, Clusters, Ideas |
|
||||
| Writer | ✅ Active | Tasks, Content, Images |
|
||||
| Automation | ✅ Active | 7-stage pipeline |
|
||||
| Billing | ✅ Active | Credits, Plans |
|
||||
| Publisher | ✅ Active | WordPress publishing |
|
||||
| **Linker** | ⏸️ Inactive | Exists but disabled - Phase 2 |
|
||||
| **Optimizer** | ⏸️ Inactive | Exists but disabled - Phase 2 |
|
||||
| **SiteBuilder** | ❌ Removed | Code exists but NOT part of app - mark for removal in TODOS.md |
|
||||
|
||||
**Important:**
|
||||
- Do NOT work on Linker/Optimizer unless specifically requested
|
||||
- SiteBuilder code is deprecated - if found, add to `TODOS.md` for cleanup
|
||||
|
||||
---
|
||||
|
||||
## 🎨 DESIGN SYSTEM RULES (CRITICAL!)
|
||||
|
||||
> **🔒 STYLE LOCKED** - All UI must use the design system. ESLint enforces these rules.
|
||||
|
||||
### Color System (Only 6 Base Colors!)
|
||||
|
||||
All colors in the system derive from 6 primary hex values in `design-system.css`:
|
||||
- `--color-primary` (#0077B6) - Brand Blue
|
||||
- `--color-success` (#2CA18E) - Success Green
|
||||
- `--color-warning` (#D9A12C) - Warning Amber
|
||||
- `--color-danger` (#A12C40) - Danger Red
|
||||
- `--color-purple` (#2C40A1) - Purple accent
|
||||
- `--color-gray-base` (#667085) - Neutral gray
|
||||
|
||||
### Tailwind Color Classes
|
||||
|
||||
**✅ USE ONLY THESE** (Tailwind defaults are DISABLED):
|
||||
```
|
||||
brand-* (50-950) - Primary blue scale
|
||||
gray-* (25-950) - Neutral scale
|
||||
success-* (25-950) - Green scale
|
||||
error-* (25-950) - Red scale
|
||||
warning-* (25-950) - Amber scale
|
||||
purple-* (25-950) - Purple scale
|
||||
```
|
||||
|
||||
**❌ BANNED** (These will NOT work):
|
||||
```
|
||||
blue-*, red-*, green-*, emerald-*, amber-*, indigo-*,
|
||||
pink-*, rose-*, sky-*, teal-*, cyan-*, etc.
|
||||
```
|
||||
|
||||
### Styling Rules
|
||||
|
||||
| ✅ DO | ❌ DON'T |
|
||||
|-------|---------|
|
||||
| `className="bg-brand-500"` | `className="bg-blue-500"` |
|
||||
| `className="text-gray-700"` | `className="text-[#333]"` |
|
||||
| `<Button variant="primary">` | `<button className="...">` |
|
||||
| Import from `../../icons` | Import from `@heroicons/*` |
|
||||
| Use CSS variables `var(--color-primary)` | Hardcode hex values |
|
||||
|
||||
---
|
||||
|
||||
## 🧩 COMPONENT RULES (ESLint Enforced!)
|
||||
|
||||
> **Never use raw HTML elements** - Use design system components.
|
||||
|
||||
### Required Component Mappings
|
||||
|
||||
| HTML Element | Required Component | Import Path |
|
||||
|--------------|-------------------|-------------|
|
||||
| `<button>` | `Button` or `IconButton` | `components/ui/button/Button` |
|
||||
| `<input type="text/email/password">` | `InputField` | `components/form/input/InputField` |
|
||||
| `<input type="checkbox">` | `Checkbox` | `components/form/input/Checkbox` |
|
||||
| `<input type="radio">` | `Radio` | `components/form/input/Radio` |
|
||||
| `<select>` | `Select` or `SelectDropdown` | `components/form/Select` |
|
||||
| `<textarea>` | `TextArea` | `components/form/input/TextArea` |
|
||||
|
||||
### Component Quick Reference
|
||||
|
||||
```tsx
|
||||
// Buttons
|
||||
<Button variant="primary" tone="brand">Save</Button>
|
||||
<Button variant="outline" tone="danger">Delete</Button>
|
||||
<IconButton icon={<CloseIcon />} variant="ghost" title="Close" />
|
||||
|
||||
// Form Inputs
|
||||
<InputField type="text" label="Name" value={val} onChange={setVal} />
|
||||
<Select options={opts} onChange={setVal} />
|
||||
<Checkbox label="Accept" checked={val} onChange={setVal} />
|
||||
<Switch label="Enable" checked={val} onChange={setVal} />
|
||||
|
||||
// Display
|
||||
<Badge tone="success" variant="soft">Active</Badge>
|
||||
<Alert variant="error" title="Error" message="Failed" />
|
||||
<Spinner size="md" />
|
||||
```
|
||||
|
||||
### Icon Rules
|
||||
|
||||
**Always import from central location:**
|
||||
```tsx
|
||||
// ✅ CORRECT
|
||||
import { PlusIcon, CloseIcon, CheckCircleIcon } from '../../icons';
|
||||
|
||||
// ❌ BANNED - External icon libraries
|
||||
import { XIcon } from '@heroicons/react/24/outline';
|
||||
import { Trash } from 'lucide-react';
|
||||
```
|
||||
|
||||
**Icon sizing:**
|
||||
- `className="w-4 h-4"` - In buttons, badges
|
||||
- `className="w-5 h-5"` - Standalone
|
||||
- `className="w-6 h-6"` - Headers, features
|
||||
|
||||
---
|
||||
|
||||
## 🐳 Docker Commands (IMPORTANT!)
|
||||
|
||||
**Container Names:**
|
||||
| Container | Name | Purpose |
|
||||
|-----------|------|---------|
|
||||
| Backend | `igny8_backend` | Django API server |
|
||||
| Frontend | `igny8_frontend` | React dev server |
|
||||
| Celery Worker | `igny8_celery_worker` | Background tasks |
|
||||
| Celery Beat | `igny8_celery_beat` | Scheduled tasks |
|
||||
|
||||
**Run commands INSIDE containers:**
|
||||
```bash
|
||||
# ✅ CORRECT - Run Django management commands
|
||||
docker exec -it igny8_backend python manage.py migrate
|
||||
docker exec -it igny8_backend python manage.py makemigrations
|
||||
docker exec -it igny8_backend python manage.py shell
|
||||
|
||||
# ✅ CORRECT - Run npm commands
|
||||
docker exec -it igny8_frontend npm install
|
||||
docker exec -it igny8_frontend npm run build
|
||||
docker exec -it igny8_frontend npm run lint # Check design system violations
|
||||
|
||||
# ✅ CORRECT - View logs
|
||||
docker logs igny8_backend -f
|
||||
docker logs igny8_celery_worker -f
|
||||
|
||||
# ❌ WRONG - Don't use docker-compose for commands
|
||||
# docker-compose exec backend python manage.py migrate
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📊 Data Scoping (CRITICAL!)
|
||||
|
||||
**Understand which data is scoped where:**
|
||||
|
||||
| Scope | Models | Notes |
|
||||
|-------|--------|-------|
|
||||
| **Global (Platform-wide)** | `GlobalIntegrationSettings`, `GlobalAIPrompt`, `GlobalAuthorProfile`, `GlobalStrategy`, `GlobalModuleSettings`, `Industry`, `SeedKeyword` | Admin-only, shared by ALL accounts |
|
||||
| **Account-scoped** | `Account`, `User`, `Plan`, `IntegrationSettings`, `ModuleEnableSettings`, `AISettings`, `AIPrompt`, `AuthorProfile` | Filter by `account` |
|
||||
| **Site+Sector-scoped** | `Keywords`, `Clusters`, `ContentIdeas`, `Tasks`, `Content`, `Images` | Filter by `site` AND optionally `sector` |
|
||||
|
||||
**Key Rules:**
|
||||
- Global settings: NO account filtering (platform-wide, admin managed)
|
||||
- Account models: Use `AccountBaseModel`, filter by `request.user.account`
|
||||
- Site/Sector models: Use `SiteSectorBaseModel`, filter by `site` and `sector`
|
||||
|
||||
---
|
||||
|
||||
## ✅ Rules (One Line Each)
|
||||
|
||||
### Before Coding
|
||||
1. **Read docs first** - Always read the relevant module doc from `docs/10-MODULES/` before changing code
|
||||
2. **Read COMPONENT-SYSTEM.md** - **REQUIRED** before any frontend changes
|
||||
3. **Check existing patterns** - Search codebase for similar implementations before creating new ones
|
||||
4. **Use existing components** - Never duplicate; reuse components from `frontend/src/components/`
|
||||
5. **Check data scope** - Know if your model is Global, Account, or Site/Sector scoped (see table above)
|
||||
|
||||
### During Coding - Backend
|
||||
6. **Use correct base class** - Global: `models.Model`, Account: `AccountBaseModel`, Site: `SiteSectorBaseModel`
|
||||
7. **Use AI framework** - Use `backend/igny8_core/ai/` for AI operations, NOT legacy `utils/ai_processor.py`
|
||||
8. **Follow service pattern** - Business logic in `backend/igny8_core/business/*/services/`
|
||||
9. **Check permissions** - Use `IsAuthenticatedAndActive`, `HasTenantAccess` in views
|
||||
|
||||
### During Coding - Frontend (DESIGN SYSTEM)
|
||||
10. **Use design system components** - Button, InputField, Select, Badge, Card - never raw HTML
|
||||
11. **Use only design system colors** - `brand-*`, `gray-*`, `success-*`, `error-*`, `warning-*`, `purple-*`
|
||||
12. **Import icons from central location** - `import { Icon } from '../../icons'` - never external libraries
|
||||
13. **No inline styles** - Use Tailwind utilities or CSS variables only
|
||||
14. **No hardcoded colors** - No hex values, no `blue-500`, `red-500` (Tailwind defaults disabled)
|
||||
15. **Use TypeScript types** - All frontend code must be typed
|
||||
|
||||
### After Coding
|
||||
16. **Run ESLint** - `docker exec -it igny8_frontend npm run lint` to check design system violations
|
||||
17. **Update CHANGELOG.md** - Every commit needs a changelog entry with git reference
|
||||
18. **Increment version** - PATCH for fixes, MINOR for features, MAJOR for breaking changes
|
||||
19. **Update docs** - If you changed APIs or architecture, update relevant docs in `docs/`
|
||||
20. **Run migrations** - After model changes: `docker exec -it igny8_backend python manage.py makemigrations`
|
||||
|
||||
---
|
||||
|
||||
## 📝 Changelog Format
|
||||
|
||||
```markdown
|
||||
## v1.1.1 - December 27, 2025
|
||||
|
||||
### Fixed
|
||||
- Description here (git: abc1234)
|
||||
|
||||
### Added
|
||||
- Description here (git: def5678)
|
||||
|
||||
### Changed
|
||||
- Description here (git: ghi9012)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔗 Key Documentation
|
||||
|
||||
| I want to... | Go to |
|
||||
|--------------|-------|
|
||||
| Find any module | [docs/INDEX.md](docs/INDEX.md) |
|
||||
| **Use UI components** | [docs/30-FRONTEND/COMPONENT-SYSTEM.md](docs/30-FRONTEND/COMPONENT-SYSTEM.md) |
|
||||
| **Check design tokens** | [docs/30-FRONTEND/DESIGN-TOKENS.md](docs/30-FRONTEND/DESIGN-TOKENS.md) |
|
||||
| **Design guide** | [docs/30-FRONTEND/DESIGN-GUIDE.md](docs/30-FRONTEND/DESIGN-GUIDE.md) |
|
||||
| Understand architecture | [docs/00-SYSTEM/ARCHITECTURE.md](docs/00-SYSTEM/ARCHITECTURE.md) |
|
||||
| Find an API endpoint | [docs/20-API/ENDPOINTS.md](docs/20-API/ENDPOINTS.md) |
|
||||
| See all models | [docs/90-REFERENCE/MODELS.md](docs/90-REFERENCE/MODELS.md) |
|
||||
| Understand AI functions | [docs/90-REFERENCE/AI-FUNCTIONS.md](docs/90-REFERENCE/AI-FUNCTIONS.md) |
|
||||
| See frontend pages | [docs/30-FRONTEND/PAGES.md](docs/30-FRONTEND/PAGES.md) |
|
||||
| See recent changes | [CHANGELOG.md](CHANGELOG.md) |
|
||||
| View component demos | App route: `/ui-elements` |
|
||||
|
||||
---
|
||||
|
||||
## 🚫 Don't Do
|
||||
|
||||
### General
|
||||
- ❌ Skip reading docs before coding
|
||||
- ❌ Create duplicate components
|
||||
- ❌ Use `docker-compose` for exec commands (use `docker exec`)
|
||||
- ❌ Use legacy `utils/ai_processor.py`
|
||||
- ❌ Add account filtering to Global models (they're platform-wide!)
|
||||
- ❌ Forget site/sector filtering on content models
|
||||
- ❌ Forget to update CHANGELOG
|
||||
- ❌ Hardcode values (use settings/constants)
|
||||
- ❌ Work on Linker/Optimizer (inactive modules - Phase 2)
|
||||
- ❌ Use any SiteBuilder code (deprecated - mark for removal)
|
||||
|
||||
### Frontend - DESIGN SYSTEM VIOLATIONS
|
||||
- ❌ Use raw `<button>` - use `Button` or `IconButton`
|
||||
- ❌ Use raw `<input>` - use `InputField`, `Checkbox`, `Radio`
|
||||
- ❌ Use raw `<select>` - use `Select` or `SelectDropdown`
|
||||
- ❌ Use raw `<textarea>` - use `TextArea`
|
||||
- ❌ Use inline `style={}` attributes
|
||||
- ❌ Hardcode hex colors (`#0693e3`, `#ff0000`)
|
||||
- ❌ Use Tailwind default colors (`blue-500`, `red-500`, `green-500`)
|
||||
- ❌ Import from `@heroicons/*`, `lucide-react`, `@mui/icons-material`
|
||||
- ❌ Create new CSS files (use `design-system.css` only)
|
||||
|
||||
---
|
||||
|
||||
## 📊 API Base URLs
|
||||
|
||||
| Module | Base URL |
|
||||
|--------|----------|
|
||||
| Auth | `/api/v1/auth/` |
|
||||
| Planner | `/api/v1/planner/` |
|
||||
| Writer | `/api/v1/writer/` |
|
||||
| Billing | `/api/v1/billing/` |
|
||||
| Integration | `/api/v1/integration/` |
|
||||
| System | `/api/v1/system/` |
|
||||
|
||||
**API Docs:** https://api.igny8.com/api/docs/
|
||||
**Admin:** https://api.igny8.com/admin/
|
||||
**App:** https://app.igny8.com/
|
||||
|
||||
---
|
||||
|
||||
## 📄 Documentation Rules
|
||||
|
||||
**Root folder MD files allowed (ONLY these):**
|
||||
- `.rules` - AI agent rules (this file)
|
||||
- `CHANGELOG.md` - Version history
|
||||
- `README.md` - Project quickstart
|
||||
|
||||
**All other docs go in `/docs/` folder:**
|
||||
```
|
||||
docs/
|
||||
├── INDEX.md # Master navigation
|
||||
├── 00-SYSTEM/ # Architecture, auth, tenancy, IGNY8-APP.md
|
||||
├── 10-MODULES/ # One file per module
|
||||
├── 20-API/ # API endpoints
|
||||
├── 30-FRONTEND/ # Pages, stores, DESIGN-GUIDE, DESIGN-TOKENS, COMPONENT-SYSTEM
|
||||
├── 40-WORKFLOWS/ # Cross-module flows
|
||||
├── 90-REFERENCE/ # Models, AI functions, FIXES-KB
|
||||
└── plans/ # FINAL-PRELAUNCH, implementation plans
|
||||
```
|
||||
|
||||
**When updating docs:**
|
||||
| Change Type | Update These Files |
|
||||
|-------------|-------------------|
|
||||
| New endpoint | Module doc + `docs/20-API/ENDPOINTS.md` |
|
||||
| New model | Module doc + `docs/90-REFERENCE/MODELS.md` |
|
||||
| New page | Module doc + `docs/30-FRONTEND/PAGES.md` |
|
||||
| New module | Create module doc + update `docs/INDEX.md` |
|
||||
|
||||
**DO NOT** create random MD files - update existing docs instead.
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Quick Checklist Before Commit
|
||||
|
||||
### Backend Changes
|
||||
- [ ] Read relevant module docs
|
||||
- [ ] Correct data scope (Global/Account/Site)
|
||||
- [ ] Ran migrations if model changed
|
||||
|
||||
### Frontend Changes
|
||||
- [ ] Read COMPONENT-SYSTEM.md
|
||||
- [ ] Used design system components (not raw HTML)
|
||||
- [ ] Used design system colors (brand-*, gray-*, success-*, error-*, warning-*, purple-*)
|
||||
- [ ] Icons imported from `../../icons`
|
||||
- [ ] No inline styles or hardcoded hex colors
|
||||
- [ ] Ran `npm run lint` - no design system violations
|
||||
|
||||
### All Changes
|
||||
- [ ] Updated CHANGELOG.md with git reference
|
||||
- [ ] Incremented version number
|
||||
- [ ] Tested locally
|
||||
2121
CHANGELOG.md
Normal file
2121
CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,784 +0,0 @@
|
||||
# IGNY8 Automation System - Detailed Task List for AI Agent
|
||||
|
||||
## CRITICAL ANALYSIS
|
||||
|
||||
Based on the documentation and current implementation status, I've identified significant issues with the automation system and legacy SiteBuilder references that need systematic resolution.
|
||||
|
||||
---
|
||||
## SECTION 1: Update correct status, and associate keywords to clusters properly
|
||||
Auto cluster AI function is currently setting status of clusters to active, and keywords are not mapped to clusters when run with automation. Update the original auto cluster AI function to use status new instead of active, and identify whether the keyword-to-cluster mapping issue is in the AI function or in the automation.
|
||||
|
||||
Actually, the original AI function has both of these issues. Once it is fixed, the AI function will work correctly, and the automation will also run better.
|
||||
|
||||
|
||||
|
||||
## SECTION 2: LEGACY SITEBUILDER/BLUEPRINT REMOVAL
|
||||
|
||||
### Task 2.1: Database Models Cleanup
|
||||
**Files to Remove Completely:**
|
||||
1. `backend/igny8_core/business/site_building/models.py` - Already stubbed, remove entirely
|
||||
2. Migration already exists: `0002_remove_blueprint_models.py` - Verify it ran successfully
|
||||
|
||||
**Database Verification:**
|
||||
1. Connect to production database
|
||||
2. Run SQL: `SELECT tablename FROM pg_tables WHERE tablename LIKE '%blueprint%' OR tablename LIKE '%site_building%';`
|
||||
3. Expected result: No tables (already dropped)
|
||||
4. If tables exist, manually run DROP TABLE commands from migration
|
||||
|
||||
**Foreign Key Cleanup:**
|
||||
1. Check `igny8_deployment_records` table - verify `site_blueprint_id` column removed
|
||||
2. Check `igny8_publishing_records` table - verify `site_blueprint_id` column removed
|
||||
3. Confirm indexes dropped: `igny8_publishing_recor_site_blueprint_id_des_b7c4e5f8_idx`
|
||||
|
||||
---
|
||||
|
||||
### Task 2.2: Backend Code References Removal
|
||||
|
||||
**Phase 2.2.1: Remove Stub Models**
|
||||
- **File:** `backend/igny8_core/business/site_building/models.py`
|
||||
- **Action:** Delete entire file
|
||||
- **Reason:** Contains only stub classes (`SiteBlueprint`, `PageBlueprint`, `SiteBlueprintCluster`, `SiteBlueprintTaxonomy`) with no functionality
|
||||
|
||||
**Phase 2.2.2: Remove Entire site_building App**
|
||||
- **Directory:** `backend/igny8_core/business/site_building/`
|
||||
- **Action:** Delete entire directory
|
||||
- **Reason:** All functionality deprecated, no active code
|
||||
|
||||
**Files to Delete:**
|
||||
1. `services/structure_generation_service.py` - Calls deprecated AI function
|
||||
2. `services/page_generation_service.py` - References PageBlueprint
|
||||
3. `services/taxonomy_service.py` - Uses SiteBlueprintTaxonomy
|
||||
4. `services/file_management_service.py` - SiteBuilder file management
|
||||
5. `tests/` - All test files reference removed models
|
||||
6. `admin.py` - Already commented out
|
||||
7. `migrations/` - Keep for database history, but app removal makes them inert
|
||||
|
||||
**Phase 2.2.3: Remove site_builder Module**
|
||||
- **Directory:** `backend/igny8_core/modules/site_builder.backup/`
|
||||
- **Action:** Delete entire directory (already marked `.backup`)
|
||||
- **Contains:** Deprecated API endpoints, serializers, views for blueprint management
|
||||
|
||||
---
|
||||
|
||||
### Task 2.3: Settings Configuration Cleanup
|
||||
|
||||
**File:** `backend/igny8_core/settings.py`
|
||||
|
||||
**Changes:**
|
||||
1. Line 56: Already commented out - Remove comment entirely
|
||||
2. Line 61: Already commented out - Remove comment entirely
|
||||
3. Verify `INSTALLED_APPS` list is clean
|
||||
|
||||
**Verification:**
|
||||
- Run `python manage.py check` - Should pass
|
||||
- Run `python manage.py migrate --plan` - Should show no pending site_building migrations
|
||||
|
||||
---
|
||||
|
||||
### Task 2.4: URL Routing Cleanup
|
||||
|
||||
**File:** `backend/igny8_core/urls.py`
|
||||
|
||||
**Changes:**
|
||||
1. Line 42: Comment already exists - Remove comment entirely
|
||||
2. Verify no routing to `site-builder/` endpoints exists
|
||||
|
||||
**Verification:**
|
||||
- Run Django server
|
||||
- Attempt to access `/api/v1/site-builder/blueprints/` - Should return 404
|
||||
- Check API root `/api/v1/` - Should not list site-builder endpoints
|
||||
|
||||
---
|
||||
|
||||
### Task 2.5: AI Function Removal
|
||||
|
||||
**File:** `backend/igny8_core/ai/functions/generate_page_content.py`
|
||||
|
||||
**Problem:** This AI function depends on `PageBlueprint` model which no longer exists.
|
||||
|
||||
**Action Required:**
|
||||
1. **DELETE FILE:** `generate_page_content.py` (21 references to PageBlueprint)
|
||||
2. **UPDATE:** `backend/igny8_core/ai/registry.py` - Remove lazy loader registration
|
||||
3. **UPDATE:** `backend/igny8_core/ai/engine.py` - Remove from operation type mappings (line 599)
|
||||
|
||||
**Verification:**
|
||||
- Search codebase for `generate_page_content` function calls
|
||||
- Ensure no active code relies on this function
|
||||
- Confirm AI function registry no longer lists it
|
||||
|
||||
---
|
||||
|
||||
### Task 2.6: Backend Import Statement Cleanup
|
||||
|
||||
**Files with Import Statements to Update:**
|
||||
|
||||
1. **backend/igny8_core/business/integration/services/content_sync_service.py**
|
||||
- Lines 378, 488: `from igny8_core.business.site_building.models import SiteBlueprint`
|
||||
- **Action:** Remove import, remove dependent code blocks (lines 382-388, 491-496)
|
||||
- **Alternative:** Service should use `ContentTaxonomy` directly (post-migration model)
|
||||
|
||||
2. **backend/igny8_core/business/integration/services/sync_health_service.py**
|
||||
- Line 335: `from igny8_core.business.site_building.models import SiteBlueprint, SiteBlueprintTaxonomy`
|
||||
- **Action:** Remove import, refactor taxonomy checks to use `ContentTaxonomy`
|
||||
|
||||
3. **backend/igny8_core/business/publishing/services/adapters/sites_renderer_adapter.py**
|
||||
- Line 15: `from igny8_core.business.site_building.models import SiteBlueprint`
|
||||
- **Action:** Entire adapter is deprecated - DELETE FILE
|
||||
- **Reason:** Designed to deploy SiteBlueprint instances, no longer applicable
|
||||
|
||||
4. **backend/igny8_core/business/publishing/services/deployment_readiness_service.py**
|
||||
- Line 10: `from igny8_core.business.site_building.models import SiteBlueprint`
|
||||
- **Action:** DELETE FILE or refactor to remove blueprint checks
|
||||
- **Reason:** Service checks blueprint readiness for deployment
|
||||
|
||||
5. **backend/igny8_core/business/publishing/services/deployment_service.py**
|
||||
- Line 10: `from igny8_core.business.site_building.models import SiteBlueprint`
|
||||
- **Action:** Remove blueprint-specific deployment methods
|
||||
|
||||
---
|
||||
|
||||
### Task 2.7: Frontend Files Removal
|
||||
|
||||
**Phase 2.7.1: Remove Type Definitions**
|
||||
- **File:** `frontend/src/types/siteBuilder.ts`
|
||||
- **Action:** Delete file entirely
|
||||
- **References:** Used in store and components
|
||||
|
||||
**Phase 2.7.2: Remove API Service**
|
||||
- **File:** `frontend/src/services/siteBuilder.api.ts`
|
||||
- **Action:** Delete file
|
||||
- **Contains:** API methods for blueprint CRUD operations
|
||||
|
||||
**Phase 2.7.3: Remove Pages**
|
||||
- **Directory:** `frontend/src/pages/Sites/`
|
||||
- **Files to Review:**
|
||||
- `Editor.tsx` - Uses PageBlueprint, SiteBlueprint types (lines 15-36)
|
||||
- `PageManager.tsx` - Fetches blueprints (lines 126-137)
|
||||
- `DeploymentPanel.tsx` - Blueprint deployment UI (46 references)
|
||||
|
||||
**Action for Pages:**
|
||||
1. If pages ONLY deal with blueprints - DELETE
|
||||
2. If pages have mixed functionality - REFACTOR to remove blueprint code
|
||||
3. Likely DELETE: `Editor.tsx`, `DeploymentPanel.tsx`
|
||||
4. Likely REFACTOR: `Dashboard.tsx` (remove blueprint widget)
|
||||
|
||||
**Phase 2.7.4: Remove Store**
|
||||
- **File:** `frontend/src/store/siteDefinitionStore.ts`
|
||||
- **Action:** Review dependencies, likely DELETE
|
||||
- **Alternative:** If used for non-blueprint purposes, refactor to remove PageBlueprint types
|
||||
|
||||
**Phase 2.7.5: Remove Components**
|
||||
- **File:** `frontend/src/components/sites/SiteProgressWidget.tsx`
|
||||
- **Action:** DELETE if blueprint-specific
|
||||
- **Uses:** `blueprintId` prop, calls `fetchSiteProgress(blueprintId)`
|
||||
|
||||
---
|
||||
|
||||
### Task 2.8: Frontend Import and Reference Cleanup
|
||||
|
||||
**Files Requiring Updates:**
|
||||
|
||||
1. **frontend/src/services/api.ts**
|
||||
- Lines 2302-2532: Multiple blueprint-related functions
|
||||
- **Action:** Remove these function exports:
|
||||
- `fetchDeploymentReadiness`
|
||||
- `createSiteBlueprint`, `updateSiteBlueprint`
|
||||
- `attachClustersToBlueprint`, `detachClustersFromBlueprint`
|
||||
- `fetchBlueprintsTaxonomies`, `createBlueprintTaxonomy`
|
||||
- `importBlueprintsTaxonomies`
|
||||
- `updatePageBlueprint`, `regeneratePageBlueprint`
|
||||
|
||||
2. **frontend/src/pages/Planner/Dashboard.tsx**
|
||||
- Lines 30-31: Commented imports
|
||||
- **Action:** Remove commented lines entirely
|
||||
|
||||
3. **frontend/src/config/pages/tasks.config.tsx**
|
||||
- Lines 110-111: Logic for `[Site Builder]` task title formatting
|
||||
- **Action:** Remove special handling, update title display logic
|
||||
|
||||
---
|
||||
|
||||
### Task 2.9: Sites Renderer Cleanup
|
||||
|
||||
**File:** `sites/src/loaders/loadSiteDefinition.ts`
|
||||
|
||||
**Current Behavior (Lines 103-159):**
|
||||
- API load fails → Falls back to blueprint endpoint
|
||||
- Transforms blueprint to site definition format
|
||||
|
||||
**Required Changes:**
|
||||
1. Remove fallback to blueprint endpoint (lines 116-127)
|
||||
2. Remove `transformBlueprintToSiteDefinition` function (lines 137-159)
|
||||
3. If API fails, return proper error instead of fallback
|
||||
4. Update error messages to remove blueprint references
|
||||
|
||||
---
|
||||
|
||||
### Task 2.10: Documentation Cleanup
|
||||
|
||||
**Files to Remove:**
|
||||
1. `docs/igny8-pp/TAXONOMY/QUICK-REFERENCE-TAXONOMY.md` - References SiteBuilder removal
|
||||
2. Update `docs/tech-stack/00-SYSTEM-ARCHITECTURE-MASTER-REFERENCE.md`:
|
||||
- Remove "Site Blueprints" from feature list (line 45)
|
||||
- Remove `site_builder/` from architecture diagrams (lines 179-180)
|
||||
- Remove SiteBuilder from system overview (line 1517)
|
||||
|
||||
**Files to Update:**
|
||||
1. `docs/igny8-pp/01-IGNY8-REST-API-COMPLETE-REFERENCE.md`:
|
||||
- Remove entire section: "Site Blueprints" (lines 1201-1230)
|
||||
- Remove "Page Blueprints" section (lines 1230-1247)
|
||||
- Update deployment endpoints to remove blueprint references
|
||||
|
||||
2. `docs/igny8-pp/02-PLANNER-WRITER-WORKFLOW-TECHNICAL-GUIDE.md`:
|
||||
- Remove SiteBlueprintTaxonomy references (lines 114, 151)
|
||||
|
||||
---
|
||||
|
||||
### Task 2.11: Test Files Cleanup
|
||||
|
||||
**Backend Tests:**
|
||||
1. DELETE: `backend/igny8_core/ai/tests/test_generate_site_structure_function.py`
|
||||
2. DELETE: `backend/igny8_core/business/site_building/tests/` (entire directory)
|
||||
3. DELETE: `backend/igny8_core/business/publishing/tests/test_deployment_service.py`
|
||||
4. DELETE: `backend/igny8_core/business/publishing/tests/test_publisher_service.py`
|
||||
5. DELETE: `backend/igny8_core/business/publishing/tests/test_adapters.py`
|
||||
|
||||
**Frontend Tests:**
|
||||
1. DELETE: `frontend/src/__tests__/sites/BulkGeneration.test.tsx`
|
||||
2. UPDATE: `frontend/src/__tests__/sites/PromptManagement.test.tsx`:
|
||||
- Remove site_structure_generation prompt type checks (lines 27-28)
|
||||
3. UPDATE: `frontend/src/__tests__/sites/SiteManagement.test.tsx`:
|
||||
- Remove `[Site Builder]` task filtering logic (lines 50-51)
|
||||
|
||||
---
|
||||
|
||||
### Task 2.12: Database Migration Verification
|
||||
|
||||
**Critical Checks:**
|
||||
1. Verify `0002_remove_blueprint_models.py` migration applied in all environments
|
||||
2. Check for orphaned data:
|
||||
- Query for any `Tasks` with `taxonomy_id` pointing to deleted SiteBlueprintTaxonomy
|
||||
- Query for any `ContentIdeas` with old taxonomy foreign keys
|
||||
3. If orphaned data found, create data migration to:
|
||||
- Set taxonomy foreign keys to NULL
|
||||
- Or migrate to ContentTaxonomy if mapping exists
|
||||
|
||||
**SQL Verification Queries:**
|
||||
```sql
|
||||
-- Check for blueprint tables (should return empty)
|
||||
SELECT tablename FROM pg_tables
|
||||
WHERE tablename LIKE '%blueprint%' OR tablename LIKE '%site_building%';
|
||||
|
||||
-- Check for foreign key constraints (should return empty)
|
||||
SELECT constraint_name FROM information_schema.table_constraints
|
||||
WHERE constraint_name LIKE '%blueprint%';
|
||||
|
||||
-- Check for orphaned taxonomy references
|
||||
SELECT COUNT(*) FROM igny8_tasks WHERE taxonomy_id IS NOT NULL;
|
||||
SELECT COUNT(*) FROM igny8_content_ideas WHERE taxonomy_id IS NOT NULL;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## SECTION 3: AUTOMATION PAGE UI IMPROVEMENTS
|
||||
|
||||
### Task 3.1: Stage Card Visual Redesign
|
||||
|
||||
**Current Issues:**
|
||||
- Icons too large, taking excessive space
|
||||
- Stage names not clearly associated with stage numbers
|
||||
- Inconsistent visual hierarchy
|
||||
|
||||
**Required Changes:**
|
||||
1. **Reduce Icon Size:**
|
||||
- Current: Large colored square icons
|
||||
- New: Smaller icons (32x32px instead of current size)
|
||||
- Position: Top-left of card, not centered
|
||||
|
||||
2. **Restructure Stage Header:**
|
||||
- Move stage name directly below "Stage N" text
|
||||
- Format: "Stage 1" (bold) / "Keywords → Clusters" (regular weight, smaller font)
|
||||
- Remove redundant text repetition
|
||||
|
||||
3. **Status Badge Positioning:**
|
||||
- Move from separate row to same line as stage number
|
||||
- Right-align badge next to stage number
|
||||
|
||||
**Layout Example (No Code):**
|
||||
```
|
||||
┌─────────────────────────────┐
|
||||
│ [Icon] Stage 1 [Ready] │
|
||||
│ Keywords → Clusters │
|
||||
│ │
|
||||
│ Total Queue: 7 │
|
||||
│ Processed: 0 │
|
||||
│ Remaining: 7 │
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3.2: Add Progress Bars to Stage Cards
|
||||
|
||||
**Implementation Requirements:**
|
||||
1. **Individual Stage Progress Bar:**
|
||||
- Display below queue metrics
|
||||
- Calculate: `(Processed / Total Queue) * 100`
|
||||
- Visual: Colored bar matching stage color
|
||||
- Show percentage label
|
||||
|
||||
2. **Overall Pipeline Progress Bar:**
|
||||
- Large bar above all stage cards
|
||||
- Calculate: `(Sum of Processed Items Across All Stages) / (Sum of Total Queue Across All Stages) * 100`
|
||||
- Display current stage indicator: "Stage 4/7"
|
||||
- Show estimated completion time
|
||||
|
||||
3. **Progress Bar States:**
|
||||
- Empty (0%): Gray/outline only
|
||||
- In Progress (1-99%): Animated gradient
|
||||
- Complete (100%): Solid color, checkmark icon
|
||||
|
||||
---
|
||||
|
||||
### Task 3.3: Add Total Metrics Cards Above Pipeline
|
||||
|
||||
**New Component: MetricsSummary Cards**
|
||||
|
||||
**Cards to Display (Row above pipeline overview):**
|
||||
1. **Keywords Card:**
|
||||
- Total: Count from database
|
||||
- Processed: Keywords with `status='mapped'`
|
||||
- Pending: Keywords with `status='new'`
|
||||
|
||||
2. **Clusters Card:**
|
||||
- Total: All clusters for site
|
||||
- Processed: Clusters with ideas generated
|
||||
- Pending: Clusters without ideas
|
||||
|
||||
3. **Ideas Card:**
|
||||
- Total: All ideas for site
|
||||
- Processed: Ideas converted to tasks (`status='in_progress'`)
|
||||
- Pending: Ideas with `status='new'`
|
||||
|
||||
4. **Content Card:**
|
||||
- Total: All content for site
|
||||
- Processed: Content with `status='draft'` + all images generated
|
||||
- Pending: Content without images or in generation
|
||||
|
||||
5. **Images Card:**
|
||||
- Total: All image records for site content
|
||||
- Processed: Images with `status='generated'`
|
||||
- Pending: Images with `status='pending'`
|
||||
|
||||
**Card Layout:**
|
||||
- Width: Equal distribution across row
|
||||
- Display: Icon, Title, Total/Processed/Pending counts
|
||||
- Color: Match stage colors for visual consistency
|
||||
|
||||
---
|
||||
|
||||
### Task 3.4: Pipeline Status Card Redesign
|
||||
|
||||
**Current:** Wide row with text "Pipeline Status - Ready to run | 22 items pending"
|
||||
|
||||
**New Design Requirements:**
|
||||
1. **Convert to Centered Card:**
|
||||
- Position: Above stage cards, below metrics summary
|
||||
- Width: Narrower than full width, centered
|
||||
- Style: Elevated/shadowed for emphasis
|
||||
|
||||
2. **Content Structure:**
|
||||
- Large status indicator (icon + text)
|
||||
- Prominent pending items count
|
||||
- Quick action buttons (Run Now, Pause, Configure)
|
||||
|
||||
3. **Status Visual States:**
|
||||
- Ready: Green pulse animation
|
||||
- Running: Blue animated progress
|
||||
- Paused: Yellow warning icon
|
||||
- Failed: Red alert icon
|
||||
|
||||
---
|
||||
|
||||
### Task 3.5: Remove/Compact Header Elements
|
||||
|
||||
**Changes to Automation Page Header:**
|
||||
|
||||
1. **Remove "Pipeline Overview" Section:**
|
||||
- Delete heading: "📊 Pipeline Overview"
|
||||
- Delete subtitle: "Complete view of automation pipeline status and pending items"
|
||||
- Reason: Redundant with visible pipeline cards
|
||||
|
||||
2. **Compact Schedule Panel:**
|
||||
- Current: Large panel with heading, status row, action buttons
|
||||
- New: Single compact row
|
||||
- Layout: `[Status Badge] | [Schedule Text] | [Last Run] | [Estimated Credits] | [Configure Button] | [Run Now Button]`
|
||||
- Remove empty space and excessive padding
|
||||
|
||||
---
|
||||
|
||||
### Task 3.6: AI Request Delays Implementation
|
||||
|
||||
**Problem:** Rapid sequential AI requests may hit rate limits or overload the AI service.
|
||||
|
||||
**Required Changes:**
|
||||
|
||||
1. **Within-Stage Delay (between batches):**
|
||||
- Location: `AutomationService` class methods
|
||||
- Add delay after each batch completion before processing next batch
|
||||
- Configurable: 3-5 seconds (default 3 seconds)
|
||||
- Implementation point: After each AI function call completes in stage loop
|
||||
|
||||
2. **Between-Stage Delay:**
|
||||
- Add delay after stage completion before triggering next stage
|
||||
- Configurable: 5-10 seconds (default 5 seconds)
|
||||
- Implementation point: After `_execute_stage()` returns before incrementing `current_stage`
|
||||
|
||||
3. **Configuration:**
|
||||
- Add to `AutomationConfig` model: `within_stage_delay` (integer, seconds)
|
||||
- Add to `AutomationConfig` model: `between_stage_delay` (integer, seconds)
|
||||
- Expose in Configure modal for user adjustment
|
||||
|
||||
4. **Logging:**
|
||||
- Log delay start: "Waiting 3 seconds before next batch..."
|
||||
- Log delay end: "Delay complete, resuming processing"
|
||||
|
||||
---
|
||||
|
||||
## SECTION 4: AUTOMATION STAGE PROCESSING FIXES
|
||||
|
||||
### Task 4.1: Verify Stage Sequential Processing Logic
|
||||
|
||||
**Problem:** The pipeline is not enforcing strict sequential stage completion before moving to the next stage.
|
||||
|
||||
**Analysis Required:**
|
||||
1. Review `AutomationService.start_automation()` method
|
||||
2. Verify stage loop waits for 100% completion before `current_stage += 1`
|
||||
3. Check for any parallel execution logic that bypasses sequential flow
|
||||
|
||||
**Verification Steps:**
|
||||
1. Each stage method (`run_stage_1()` to `run_stage_7()`) must return ONLY after ALL batches processed
|
||||
2. Stage N+1 should NOT start if Stage N has `pending > 0`
|
||||
3. Add explicit completion check before stage transition
|
||||
|
||||
**Required Fixes:**
|
||||
- Add validation: Before starting Stage N, verify Stage N-1 has 0 pending items
|
||||
- If pending items found, log warning and halt automation
|
||||
- Return error status with message: "Stage N-1 incomplete, cannot proceed to Stage N"
|
||||
|
||||
---
|
||||
|
||||
### Task 4.2: Fix Batch Size Configuration Reading
|
||||
|
||||
**Problem:** Manual "Run Now" only processes 5 keywords instead of respecting the configured batch size (20).
|
||||
|
||||
**Root Cause Analysis:**
|
||||
1. Check if `run_stage_1()` reads from `AutomationConfig.stage_1_batch_size`
|
||||
2. Verify query limit: `Keywords.objects.filter(...)[:batch_size]` uses correct variable
|
||||
3. Confirm configuration loaded at automation start: `config = AutomationConfig.objects.get(site=self.site)`
|
||||
|
||||
**Expected Behavior:**
|
||||
- If queue has 7 keywords and batch_size = 20: Process all 7 (not limit to 5)
|
||||
- If queue has 47 keywords and batch_size = 20: Process 20, then next batch of 20, then final 7
|
||||
- Batch size should be dynamic based on queue size: `min(queue_count, batch_size)`
|
||||
|
||||
**Fix Implementation:**
|
||||
1. Ensure configuration loaded once at automation start
|
||||
2. Pass batch_size to each stage method
|
||||
3. Update query to use: `[:min(pending_count, batch_size)]`
|
||||
4. Log batch selection: "Processing batch 1/3: 20 keywords"
|
||||
|
||||
---
|
||||
|
||||
### Task 4.3: Fix Stage 4 Processing Not Completing Full Queue
|
||||
|
||||
**Problem:** Stage 4 (Tasks → Content) not processing all tasks before moving to Stage 5.
|
||||
|
||||
**Investigation Steps:**
|
||||
1. Check `run_stage_4()` implementation in `AutomationService`
|
||||
2. Verify loop structure: Does it process tasks one-by-one until queue empty?
|
||||
3. Look for premature loop exit conditions
|
||||
|
||||
**Expected Logic:**
|
||||
```
|
||||
While tasks with status='pending' exist:
|
||||
1. Get next task
|
||||
2. Call generate_content AI function
|
||||
3. Wait for completion
|
||||
4. Verify Content created
|
||||
5. Check if more pending tasks exist
|
||||
6. If yes, continue loop
|
||||
7. If no, return stage complete
|
||||
```
|
||||
|
||||
**Common Issues to Check:**
|
||||
- Loop exits after first task instead of continuing
|
||||
- No loop at all - only processes one batch
|
||||
- Error handling breaks loop prematurely
|
||||
|
||||
---
|
||||
|
||||
### Task 4.4: Fix Stage 5 Not Triggering (Image Prompts Generation)
|
||||
|
||||
**Problem:** The automation exits after Stage 4 without generating image prompts.
|
||||
|
||||
**Analysis Required:**
|
||||
1. Verify Stage 4 completion status set correctly
|
||||
2. Check if Stage 5 start condition is met
|
||||
3. Review database query in `run_stage_5()`:
|
||||
- Query: Content with `status='draft'` AND `images_count=0`
|
||||
- Verify Content records created in Stage 4 have correct status
|
||||
|
||||
**Potential Issues:**
|
||||
1. Content created with status other than 'draft'
|
||||
2. Images count annotation incorrect (should use `annotate(images_count=Count('images'))`)
|
||||
3. Stage handover logic doesn't trigger Stage 5
|
||||
|
||||
**Fix Steps:**
|
||||
1. Verify Content model save in Stage 4 sets `status='draft'`
|
||||
2. Ensure Stage 5 query matches Content records from Stage 4
|
||||
3. Add logging: "Stage 5: Found X content pieces without images"
|
||||
4. If X > 0, process; if X = 0, skip stage gracefully
|
||||
|
||||
---
|
||||
|
||||
### Task 4.5: Add Stage Handover Validation
|
||||
|
||||
**New Logic Required Between Each Stage:**
|
||||
|
||||
1. **Pre-Stage Validation:**
|
||||
- Before starting Stage N, check Stage N-1 completion:
|
||||
- Query pending items for Stage N-1
|
||||
- If pending > 0: Log error, halt automation
|
||||
- If pending = 0: Log success, proceed
|
||||
|
||||
2. **Post-Stage Validation:**
|
||||
- After Stage N completes, verify:
|
||||
- All input items processed
|
||||
- Expected output items created
|
||||
- No errors in stage result
|
||||
- Log validation result before moving to Stage N+1
|
||||
|
||||
3. **Validation Logging:**
|
||||
- Stage 1 → Stage 2: "Verified: 0 keywords pending, 8 clusters created"
|
||||
- Stage 2 → Stage 3: "Verified: 0 clusters pending, 56 ideas created"
|
||||
- Stage 3 → Stage 4: "Verified: 0 ideas pending, 56 tasks created"
|
||||
- Stage 4 → Stage 5: "Verified: 0 tasks pending, 56 content pieces created"
|
||||
- Stage 5 → Stage 6: "Verified: 0 content without images, 224 prompts created"
|
||||
- Stage 6 → Stage 7: "Verified: 0 pending images, 224 images generated"
|
||||
|
||||
---
|
||||
|
||||
### Task 4.6: Implement Dynamic Batch Size Logic
|
||||
|
||||
**Problem:** Fixed batch sizes don't adapt to actual queue sizes.
|
||||
|
||||
**Required Smart Batch Logic:**
|
||||
|
||||
1. **For Stages 1, 2, 3, 5:**
|
||||
- If `queue_count <= batch_size`: Process ALL items in one batch
|
||||
- If `queue_count > batch_size`: Split into batches
|
||||
|
||||
2. **For Stage 4 (Tasks → Content):**
|
||||
- Always process one task at a time (sequential)
|
||||
- Reason: Content generation is expensive, better control
|
||||
- Batch size config for Stage 4 can be deprecated
|
||||
|
||||
3. **For Stage 6 (Images):**
|
||||
- Process one image at a time (current behavior)
|
||||
- Reason: Image generation has rate limits
|
||||
|
||||
**Configuration Update:**
|
||||
- Stage 1-3, 5: Batch size applies
|
||||
- Stage 4, 6: Batch size ignored (always 1)
|
||||
- Update Configure modal to clarify batch size usage per stage
|
||||
|
||||
---
|
||||
|
||||
## SECTION 5: STAGE CARD LAYOUT RESTRUCTURE
|
||||
|
||||
### Task 5.1: Add Missing Stage 5 Card (Content → Image Prompts)
|
||||
|
||||
**Problem:** The current UI combines Stages 3 & 4 into one card; the Stage 5 card is missing.
|
||||
|
||||
**Required Change:**
|
||||
- Create separate card for Stage 5
|
||||
- Display: "Content → Image Prompts"
|
||||
- Queue metrics: Content without images (not total content)
|
||||
- Show progress bar for prompt extraction
|
||||
|
||||
---
|
||||
|
||||
### Task 5.2: Separate Stages 3 & 4 into Individual Cards
|
||||
|
||||
**Current:** One card shows "Ideas → Tasks → Content" with nested metrics.
|
||||
|
||||
**New Structure:**
|
||||
1. **Stage 3 Card:** "Ideas → Tasks"
|
||||
- Total Queue: Ideas with `status='new'`
|
||||
- Processed: Ideas converted to tasks
|
||||
- Progress: Task creation count
|
||||
|
||||
2. **Stage 4 Card:** "Tasks → Content"
|
||||
- Total Queue: Tasks with `status='pending'`
|
||||
- Processed: Tasks with `status='completed'`
|
||||
- Progress: Content generation count
|
||||
|
||||
---
|
||||
|
||||
### Task 5.3: Restructure Stage Card Rows
|
||||
|
||||
**New Layout Requirements:**
|
||||
|
||||
**Row 1 (Stages 1-4):**
|
||||
- Stage 1: Keywords → Clusters
|
||||
- Stage 2: Clusters → Ideas
|
||||
- Stage 3: Ideas → Tasks
|
||||
- Stage 4: Tasks → Content
|
||||
|
||||
**Row 2 (Stages 5-8):**
|
||||
- Stage 5: Content → Image Prompts
|
||||
- Stage 6: Image Prompts → Images
|
||||
- Stage 7: Review Gate (with action buttons)
|
||||
- Stage 8: Status Summary (new informational card)
|
||||
|
||||
**Responsive Behavior:**
|
||||
- Desktop: 4 cards per row
|
||||
- Tablet: 2 cards per row
|
||||
- Mobile: 1 card per row (vertical stack)
|
||||
|
||||
---
|
||||
|
||||
### Task 5.4: Design Stage 7 Card (Review Gate)
|
||||
|
||||
**Unique Requirements:**
|
||||
1. **Visual Distinction:**
|
||||
- Different color scheme (amber/orange warning color)
|
||||
- Icon: Stop sign or review icon
|
||||
- Border: Dashed or highlighted
|
||||
|
||||
2. **Content:**
|
||||
- Title: "Manual Review Gate"
|
||||
- Status: "Automation Stops Here"
|
||||
- Count: Number of content pieces ready for review
|
||||
- Two buttons:
|
||||
- "Go to Review Page" (navigates to Writer Content page filtered by status='review')
|
||||
- "Publish Without Review" (disabled initially, placeholder for future feature)
|
||||
|
||||
3. **Button Behavior:**
|
||||
- Review button: Active when count > 0
|
||||
- Publish button: Disabled with tooltip "Coming soon"
|
||||
|
||||
---
|
||||
|
||||
### Task 5.5: Design Stage 8 Card (Status Summary)
|
||||
|
||||
**New Informational Card:**
|
||||
|
||||
**Purpose:** Display current automation run status without queue processing.
|
||||
|
||||
**Content:**
|
||||
1. **Title:** "Current Status"
|
||||
2. **Large Status Icon:** Based on run status
|
||||
- Running: Animated spinner
|
||||
- Completed: Checkmark
|
||||
- Failed: X icon
|
||||
- Paused: Pause icon
|
||||
|
||||
3. **Metrics Display:**
|
||||
- Run ID
|
||||
- Started at timestamp
|
||||
- Current stage indicator
|
||||
- Total credits used
|
||||
- Completion percentage
|
||||
|
||||
4. **Visual Style:**
|
||||
- No queue metrics
|
||||
- No action buttons
|
||||
- Read-only information display
|
||||
- Distinct styling (different background color, no hover effects)
|
||||
|
||||
---
|
||||
|
||||
### Task 5.6: Adjust Card Width for New Layout
|
||||
|
||||
**Current:** Stage cards are likely using equal width across the full viewport.
|
||||
|
||||
**New Requirements:**
|
||||
- Row 1 (4 cards): Each card 23% width (with 2% gap)
|
||||
- Row 2 (4 cards): Same width distribution
|
||||
- Stage 8 card: Can be wider or styled differently as summary card
|
||||
|
||||
**Implementation Considerations:**
|
||||
- Use CSS Grid or Flexbox for responsive layout
|
||||
- Ensure consistent spacing between cards
|
||||
- Maintain card aspect ratio for visual balance
|
||||
|
||||
---
|
||||
|
||||
## SECTION 6: ADDITIONAL ENHANCEMENTS
|
||||
|
||||
### Task 6.1: Add Credit Usage Tracking per Stage
|
||||
|
||||
**Value Addition:** Real-time visibility into credit consumption.
|
||||
|
||||
**Implementation:**
|
||||
1. Track credits used at end of each stage in `stage_N_result` JSON field
|
||||
2. Display in stage card: "Credits Used: X"
|
||||
3. Add running total in overall pipeline progress bar
|
||||
4. Compare estimated vs actual credits used
|
||||
5. Alert if actual exceeds estimated by >20%
|
||||
|
||||
---
|
||||
|
||||
### Task 6.2: Add Estimated Completion Time per Stage
|
||||
|
||||
**Value Addition:** Predictable automation runtime for planning.
|
||||
|
||||
**Implementation:**
|
||||
1. Calculate average time per item based on historical runs
|
||||
2. Estimate: `Remaining Items * Average Time per Item`
|
||||
3. Display in stage card: "ETA: 45 minutes"
|
||||
4. Update dynamically as items process
|
||||
5. Store metrics in database for accuracy improvement over time
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Task 6.3: Add Error Rate Monitoring
|
||||
|
||||
**Value Addition:** Proactive issue detection.
|
||||
|
||||
**Implementation:**
|
||||
1. Track error count per stage
|
||||
2. Display: "Errors: X (Y%)"
|
||||
3. Highlight stages with >5% error rate
|
||||
4. Add "View Errors" button to navigate to error log
|
||||
5. Set up alerts for error rate spikes
|
||||
|
||||
---
|
||||
|
||||
### Task 6.4: Add Stage Completion Percentage
|
||||
|
||||
**Value Addition:** Clear progress visualization.
|
||||
|
||||
**Implementation:**
|
||||
1. Calculate: `(Completed Items / Total Items) * 100`
|
||||
2. Display as progress bar in stage card
|
||||
3. Color code:
|
||||
- Green: >75%
|
||||
- Yellow: 25-75%
|
||||
- Red: <25%
|
||||
4. Animate progress bar during active stages
|
||||
5. Show exact percentage in text format
|
||||
|
||||
---
|
||||
|
||||
### Task 6.5: Add Stage Start/End Timestamps
|
||||
|
||||
**Value Addition:** Audit trail for automation runs.
|
||||
|
||||
**Implementation:**
|
||||
1. Store start/end timestamps in `stage_N_result`
|
||||
2. Display in stage card: "Started: 10:30 AM | Ended: 11:15 AM"
|
||||
3
|
||||
56
README.md
56
README.md
@@ -1,11 +1,22 @@
|
||||
# IGNY8 - AI-Powered SEO Content Platform
|
||||
|
||||
**Version:** 1.0.0
|
||||
**Version:** 1.0.5
|
||||
**License:** Proprietary
|
||||
**Website:** https://igny8.com
|
||||
|
||||
---
|
||||
|
||||
## Quick Links
|
||||
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [docs/00-SYSTEM/IGNY8-APP.md](docs/00-SYSTEM/IGNY8-APP.md) | Executive summary (non-technical) |
|
||||
| [docs/INDEX.md](docs/INDEX.md) | Full documentation index |
|
||||
| [CHANGELOG.md](CHANGELOG.md) | Version history |
|
||||
| [.rules](.rules) | AI agent rules |
|
||||
|
||||
---
|
||||
|
||||
## What is IGNY8?
|
||||
|
||||
IGNY8 is a full-stack SaaS platform that combines AI-powered content generation with intelligent SEO management. It helps content creators, marketers, and agencies streamline their content workflow from keyword research to published articles.
|
||||
@@ -15,8 +26,8 @@ IGNY8 is a full-stack SaaS platform that combines AI-powered content generation
|
||||
- 🔍 **Smart Keyword Management** - Import, cluster, and organize keywords with AI
|
||||
- ✍️ **AI Content Generation** - Generate SEO-optimized blog posts using GPT-4
|
||||
- 🖼️ **AI Image Creation** - Auto-generate featured and in-article images
|
||||
- 🔗 **Internal Linking** - AI-powered link suggestions for SEO
|
||||
- 📊 **Content Optimization** - Analyze and score content quality
|
||||
- 🔗 **Internal Linking** - AI-powered link suggestions (coming soon)
|
||||
- 📊 **Content Optimization** - Analyze and score content quality (coming soon)
|
||||
- 🔄 **WordPress Integration** - Bidirectional sync with WordPress sites
|
||||
- 📈 **Usage-Based Billing** - Credit system for AI operations
|
||||
- 👥 **Multi-Tenancy** - Manage multiple sites and teams
|
||||
@@ -25,14 +36,24 @@ IGNY8 is a full-stack SaaS platform that combines AI-powered content generation
|
||||
|
||||
## Repository Structure
|
||||
|
||||
This monorepo contains two main applications:
|
||||
|
||||
```
|
||||
igny8/
|
||||
├── README.md # This file
|
||||
├── CHANGELOG.md # Version history
|
||||
├── .rules # AI agent rules
|
||||
├── backend/ # Django REST API + Celery
|
||||
├── frontend/ # React + Vite SPA
|
||||
├── master-docs/ # Architecture documentation
|
||||
└── docker-compose.app.yml # Docker deployment config
|
||||
├── docs/ # Full documentation
|
||||
│ ├── INDEX.md # Documentation navigation
|
||||
│ ├── 00-SYSTEM/ # Architecture, auth, IGNY8-APP
|
||||
│ ├── 10-MODULES/ # Module documentation
|
||||
│ ├── 20-API/ # API endpoints
|
||||
│ ├── 30-FRONTEND/ # Frontend pages, stores, design system
|
||||
│ ├── 40-WORKFLOWS/ # Cross-module workflows
|
||||
│ ├── 50-DEPLOYMENT/ # Deployment guides
|
||||
│ ├── 90-REFERENCE/ # Models, AI functions, fixes
|
||||
│ └── plans/ # Implementation plans
|
||||
└── docker-compose.app.yml
|
||||
```
|
||||
|
||||
**Separate Repository:**
|
||||
@@ -210,14 +231,20 @@ The WordPress bridge plugin (`igny8-wp-integration`) creates a bidirectional con
|
||||
|
||||
## Documentation
|
||||
|
||||
Comprehensive documentation is available in the `master-docs/` directory:
|
||||
Start here: [docs/README.md](./docs/README.md) (index of all topics).
|
||||
|
||||
- **[MASTER_REFERENCE.md](./MASTER_REFERENCE.md)** - Complete system architecture and navigation
|
||||
- **[API-COMPLETE-REFERENCE.md](./master-docs/API-COMPLETE-REFERENCE.md)** - Full API documentation
|
||||
- **[02-APPLICATION-ARCHITECTURE.md](./master-docs/02-APPLICATION-ARCHITECTURE.md)** - System design
|
||||
- **[04-BACKEND-IMPLEMENTATION.md](./master-docs/04-BACKEND-IMPLEMENTATION.md)** - Backend details
|
||||
- **[03-FRONTEND-ARCHITECTURE.md](./master-docs/03-FRONTEND-ARCHITECTURE.md)** - Frontend details
|
||||
- **[WORDPRESS-PLUGIN-INTEGRATION.md](./master-docs/WORDPRESS-PLUGIN-INTEGRATION.md)** - Plugin integration guide
|
||||
Common entry points:
|
||||
- App architecture: `docs/igny8-app/IGNY8-APP-ARCHITECTURE.md`
|
||||
- Backend architecture: `docs/backend/IGNY8-BACKEND-ARCHITECTURE.md`
|
||||
- Planner backend detail: `docs/backend/IGNY8-PLANNER-BACKEND.md`
|
||||
- Writer backend detail: `docs/backend/IGNY8-WRITER-BACKEND.md`
|
||||
- Automation: `docs/automation/AUTOMATION-REFERENCE.md`
|
||||
- Tech stack: `docs/tech-stack/00-SYSTEM-ARCHITECTURE-MASTER-REFERENCE.md`
|
||||
- API: `docs/API/API-COMPLETE-REFERENCE-LATEST.md`
|
||||
- Billing & Credits: `docs/billing/billing-account-final-plan-2025-12-05.md`
|
||||
- App guides: `docs/igny8-app/` (planner/writer workflows, taxonomy, feature modification)
|
||||
- WordPress: `docs/wp/` (plugin integration and sync)
|
||||
- Docs changelog: `docs/CHANGELOG.md`
|
||||
|
||||
---
|
||||
|
||||
@@ -356,3 +383,4 @@ See [CHANGELOG.md](./CHANGELOG.md) for version history and updates.
|
||||
---
|
||||
|
||||
**Built with ❤️ by the IGNY8 team**
|
||||
# Test commit - Mon Dec 15 07:18:54 UTC 2025
|
||||
|
||||
@@ -1,190 +0,0 @@
|
||||
# Backend API Endpoints - Test Results
|
||||
|
||||
**Test Date:** December 5, 2025
|
||||
**Backend URL:** http://localhost:8011
|
||||
|
||||
## ✅ WORKING ENDPOINTS
|
||||
|
||||
### Billing API Endpoints
|
||||
|
||||
| Endpoint | Method | Status | Notes |
|
||||
|----------|--------|--------|-------|
|
||||
| `/api/v1/billing/invoices/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/billing/payments/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/billing/credit-packages/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/billing/transactions/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/billing/transactions/balance/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/billing/admin/stats/` | GET | ✅ 401 | Auth required (correct) |
|
||||
|
||||
### Account Endpoints
|
||||
|
||||
| Endpoint | Method | Status | Notes |
|
||||
|----------|--------|--------|-------|
|
||||
| `/api/v1/account/settings/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/account/settings/` | PATCH | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/account/team/` | GET | ✅ 401 | Auth required (correct) |
|
||||
| `/api/v1/account/usage/analytics/` | GET | ✅ 401 | Auth required (correct) |
|
||||
|
||||
## ❌ ISSUES FIXED
|
||||
|
||||
### Frontend API Path Alignment
|
||||
**Problem:** Frontend must always call the canonical `/api/v1/billing/...` endpoints (no `/v2` alias).
|
||||
|
||||
**Files Fixed:**
|
||||
- `frontend/src/services/billing.api.ts` – ensured all billing calls use `/v1/billing/...`
|
||||
|
||||
**Changes:**
|
||||
```typescript
|
||||
// Before:
|
||||
fetchAPI('/billing/invoices/')
|
||||
|
||||
// After:
|
||||
fetchAPI('/v1/billing/invoices/')
|
||||
```
|
||||
|
||||
### Component Export Issues
|
||||
**Problem:** `PricingPlan` type export conflict
|
||||
|
||||
**File Fixed:**
|
||||
- `frontend/src/components/ui/pricing-table/index.tsx`
|
||||
|
||||
**Change:**
|
||||
```typescript
|
||||
// Before:
|
||||
export { PricingPlan };
|
||||
|
||||
// After:
|
||||
export type { PricingPlan };
|
||||
```
|
||||
|
||||
### Missing Function Issues
|
||||
**Problem:** `submitManualPayment` doesn't exist, should be `createManualPayment`
|
||||
|
||||
**File Fixed:**
|
||||
- `frontend/src/pages/account/PurchaseCreditsPage.tsx`
|
||||
|
||||
**Change:**
|
||||
```typescript
|
||||
// Import changed:
|
||||
import { submitManualPayment } from '...' // ❌
|
||||
import { createManualPayment } from '...' // ✅
|
||||
|
||||
// Usage changed:
|
||||
await submitManualPayment({...}) // ❌
|
||||
await createManualPayment({...}) // ✅
|
||||
```
|
||||
|
||||
## 📝 PAGES STATUS
|
||||
|
||||
### Account Pages
|
||||
| Page | Route | Status | Backend API |
|
||||
|------|-------|--------|-------------|
|
||||
| Account Settings | `/account/settings` | ✅ Ready | `/v1/account/settings/` |
|
||||
| Team Management | `/account/team` | ✅ Ready | `/v1/account/team/` |
|
||||
| Usage Analytics | `/account/usage` | ✅ Ready | `/v1/account/usage/analytics/` |
|
||||
| Purchase Credits | `/account/purchase-credits` | ✅ Ready | `/v1/billing/credit-packages/` |
|
||||
|
||||
### Billing Pages
|
||||
| Page | Route | Status | Backend API |
|
||||
|------|-------|--------|-------------|
|
||||
| Credits Overview | `/billing/credits` | ✅ Ready | `/v1/billing/transactions/balance/` |
|
||||
| Transactions | `/billing/transactions` | ✅ Ready | `/v1/billing/transactions/` |
|
||||
| Usage | `/billing/usage` | ✅ Ready | `/v1/billing/transactions/` |
|
||||
| Plans | `/settings/plans` | ✅ Ready | `/v1/auth/plans/` |
|
||||
|
||||
### Admin Pages
|
||||
| Page | Route | Status | Backend API |
|
||||
|------|-------|--------|-------------|
|
||||
| Admin Dashboard | `/admin/billing` | ⏳ Partial | `/v1/billing/admin/stats/` |
|
||||
| Billing Management | `/admin/billing` | ⏳ Partial | Multiple endpoints |
|
||||
|
||||
## 🔧 URL STRUCTURE
|
||||
|
||||
### Correct URL Pattern
|
||||
```
|
||||
Frontend calls: /v1/billing/invoices/
|
||||
↓
|
||||
API Base URL: https://api.igny8.com/api
|
||||
↓
|
||||
Full URL: https://api.igny8.com/api/v1/billing/invoices/
|
||||
↓
|
||||
Backend route: /api/v1/billing/ → igny8_core.business.billing.urls
|
||||
```
|
||||
|
||||
### API Base URL Detection
|
||||
```typescript
|
||||
// frontend/src/services/api.ts
|
||||
const API_BASE_URL = getApiBaseUrl();
|
||||
|
||||
// Returns:
|
||||
// - localhost:3000 → http://localhost:8011/api
|
||||
// - Production → https://api.igny8.com/api
|
||||
```
|
||||
|
||||
## ✅ BUILD STATUS
|
||||
|
||||
```bash
|
||||
cd /data/app/igny8/frontend
|
||||
npm run build
|
||||
# ✅ built in 10.87s
|
||||
```
|
||||
|
||||
## 🧪 TESTING CHECKLIST
|
||||
|
||||
### Backend Tests
|
||||
- [x] Invoices endpoint exists (401 auth required)
|
||||
- [x] Payments endpoint exists (401 auth required)
|
||||
- [x] Credit packages endpoint exists (401 auth required)
|
||||
- [x] Transactions endpoint exists (401 auth required)
|
||||
- [x] Balance endpoint exists (401 auth required)
|
||||
- [x] Account settings endpoint exists (401 auth required)
|
||||
- [x] Team management endpoint exists (401 auth required)
|
||||
- [x] Usage analytics endpoint exists (401 auth required)
|
||||
|
||||
### Frontend Tests
|
||||
- [x] Build completes without errors
|
||||
- [x] All API imports resolve correctly
|
||||
- [x] Component exports work correctly
|
||||
- [ ] Pages load in browser (requires authentication)
|
||||
- [ ] API calls work with auth token
|
||||
- [ ] Data displays correctly
|
||||
|
||||
## 🚀 NEXT STEPS
|
||||
|
||||
1. **Test with Authentication**
|
||||
- Login to app
|
||||
- Navigate to each page
|
||||
- Verify data loads correctly
|
||||
|
||||
2. **Test User Flows**
|
||||
- Purchase credits flow
|
||||
- View transactions
|
||||
- Manage team members
|
||||
- Update account settings
|
||||
|
||||
3. **Test Admin Features**
|
||||
- View billing stats
|
||||
- Approve/reject payments
|
||||
- Configure credit costs
|
||||
|
||||
4. **Missing Features**
|
||||
- Stripe payment integration (webhook handlers exist, UI integration pending)
|
||||
- PDF invoice generation
|
||||
- Email notifications
|
||||
- Subscription management UI
|
||||
|
||||
## 📚 DOCUMENTATION
|
||||
|
||||
### For Users
|
||||
- All account and billing pages accessible from sidebar
|
||||
- Credit balance visible on Credits page
|
||||
- Purchase credits via credit packages
|
||||
- View transaction history
|
||||
- Manage team members
|
||||
|
||||
### For Developers
|
||||
- Backend: Django REST Framework ViewSets
|
||||
- Frontend: React + TypeScript + Vite
|
||||
- API calls: Centralized in `services/billing.api.ts`
|
||||
- Auth: JWT tokens in localStorage
|
||||
- Multi-tenancy: Account-based access control
|
||||
@@ -22,6 +22,10 @@ RUN pip install --upgrade pip \
|
||||
# Copy full project
|
||||
COPY . /app/
|
||||
|
||||
# Copy startup script
|
||||
COPY container_startup.sh /app/
|
||||
RUN chmod +x /app/container_startup.sh
|
||||
|
||||
# Collect static files for WhiteNoise (skip during build if DB not available)
|
||||
# Will be run during container startup if needed
|
||||
RUN python manage.py collectstatic --noinput || echo "Skipping collectstatic during build"
|
||||
@@ -32,5 +36,7 @@ ENV DJANGO_SETTINGS_MODULE=igny8_core.settings
|
||||
# Expose port for Gunicorn (matches Portainer docker-compose config)
|
||||
EXPOSE 8010
|
||||
|
||||
# Use startup script as entrypoint to log container lifecycle
|
||||
# Start using Gunicorn (matches Portainer docker-compose config)
|
||||
ENTRYPOINT ["/app/container_startup.sh"]
|
||||
CMD ["gunicorn", "igny8_core.wsgi:application", "--bind", "0.0.0.0:8010"]
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,31 +0,0 @@
|
||||
#!/usr/bin/env python
"""Manually invoke the integration content-types summary action and dump its payload."""
import os
import django
import json

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.integration.models import SiteIntegration
from igny8_core.auth.models import Site
from django.test import RequestFactory
from igny8_core.modules.integration.views import IntegrationViewSet

# Build a synthetic GET request aimed at the content-types action URL.
request = RequestFactory().get('/api/v1/integration/integrations/1/content-types/')

# Fetching the integration up front acts as a sanity check: it raises
# DoesNotExist immediately if integration 1 is missing.
integration = SiteIntegration.objects.get(id=1)

# Wire the viewset up by hand (no router/dispatch involved).
viewset = IntegrationViewSet()
viewset.format_kwarg = None
viewset.request = request
viewset.kwargs = {'pk': 1}

# Call the custom action directly and print what it returns.
response = viewset.content_types_summary(request, pk=1)

print("Response Status:", response.status_code)
print("\nResponse Data:")
print(json.dumps(response.data, indent=2, default=str))
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/usr/bin/env python
"""Check recent keyword creation"""
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.planning.models import Keywords
from django.utils import timezone
from datetime import timedelta

# Look back exactly one day from "now" (timezone-aware).
cutoff = timezone.now() - timedelta(hours=24)
recent_keywords = Keywords.objects.filter(created_at__gte=cutoff)

print(f'Keywords created in last 24 hours: {recent_keywords.count()}')
if recent_keywords.exists():
    print('\nRecent keyword statuses:')
    # Cap the listing at ten rows to keep the output readable.
    for keyword in recent_keywords[:10]:
        print(f' ID {keyword.id}: status={keyword.status}, created={keyword.created_at}')
|
||||
@@ -1,38 +0,0 @@
|
||||
#!/usr/bin/env python
"""
Clean up structure-based categories that were incorrectly created
This will remove categories like "Guide", "Article", etc. that match content_structure values
"""
import os
import sys
import django

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from django.db import transaction
from igny8_core.business.content.models import ContentTaxonomy

# Structure values that were mistakenly persisted as taxonomy categories.
STRUCTURE_VALUES = ['Guide', 'Article', 'Listicle', 'How To', 'Tutorial', 'Review', 'Comparison']

banner = "=" * 80
print(banner)
print("CLEANING UP STRUCTURE-BASED CATEGORIES")
print(banner)

for label in STRUCTURE_VALUES:
    # Only rows typed as 'category' are candidates for removal.
    matches = ContentTaxonomy.objects.filter(
        taxonomy_type='category',
        name=label,
    )

    if matches.exists():
        total = matches.count()
        print(f"\nRemoving {total} '{label}' categor{'y' if total == 1 else 'ies'}...")
        matches.delete()
        print(f" ✓ Deleted {total} '{label}' categor{'y' if total == 1 else 'ies'}")

print("\n" + banner)
print("CLEANUP COMPLETE")
print(banner)
|
||||
47
backend/container_startup.sh
Normal file
47
backend/container_startup.sh
Normal file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
# Container startup logger: records lifecycle events so unexpected
# restarts can be diagnosed from the container logs, then execs the CMD.

set -e

SEP="=========================================="

echo "$SEP"
echo "[CONTAINER-STARTUP] $(date '+%Y-%m-%d %H:%M:%S')"
echo "Container: igny8_backend"
echo "Hostname: $(hostname)"
echo "PID: $$"
echo "$SEP"

# Environment summary for the log.
echo "[INFO] Python version: $(python --version 2>&1)"
echo "[INFO] Django settings: ${DJANGO_SETTINGS_MODULE:-igny8_core.settings}"
echo "[INFO] Debug mode: ${DEBUG:-False}"
echo "[INFO] Database host: ${DB_HOST:-not set}"

# A leftover PID file means this container was restarted rather than
# freshly created.
PID_FILE=/tmp/container_pid
if [ -f "$PID_FILE" ]; then
    prev_pid=$(cat "$PID_FILE")
    echo "[WARNING] Previous container PID found: $prev_pid"
    echo "[WARNING] This appears to be a RESTART event"
    echo "[WARNING] Check Docker logs for SIGTERM/SIGKILL signals"
else
    echo "[INFO] First startup (no previous PID file found)"
fi

# Record our PID so the next startup can detect a restart.
echo $$ > "$PID_FILE"

# Best-effort init steps: failures are logged but never block startup.
echo "[INFO] Running database migrations..."
python manage.py migrate --noinput || echo "[WARNING] Migration failed or skipped"

echo "[INFO] Collecting static files..."
python manage.py collectstatic --noinput || echo "[WARNING] Collectstatic failed or skipped"

echo "$SEP"
echo "[CONTAINER-STARTUP] Initialization complete"
echo "[CONTAINER-STARTUP] Starting Gunicorn..."
echo "$SEP"

# Replace this shell with the container CMD (Gunicorn) so signals reach
# the server process directly.
exec "$@"
|
||||
61
backend/create_groups.py
Normal file
61
backend/create_groups.py
Normal file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env python
"""Script to create admin permission groups"""
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType

# Declarative map: group name -> the (app_label, model) pairs it covers
# and which permission verbs to grant on each model.
groups_permissions = {
    'Content Manager': {
        'models': [
            ('writer', 'content'), ('writer', 'tasks'), ('writer', 'images'),
            ('planner', 'keywords'), ('planner', 'clusters'), ('planner', 'contentideas'),
        ],
        'permissions': ['add', 'change', 'view'],
    },
    'Billing Admin': {
        'models': [
            ('billing', 'payment'), ('billing', 'invoice'), ('billing', 'credittransaction'),
            ('billing', 'creditusagelog'), ('igny8_core_auth', 'account'),
        ],
        'permissions': ['add', 'change', 'view', 'delete'],
    },
    'Support Agent': {
        'models': [
            ('writer', 'content'), ('writer', 'tasks'),
            ('igny8_core_auth', 'account'), ('igny8_core_auth', 'site'),
        ],
        'permissions': ['view'],
    },
}

print('Creating admin permission groups...\n')

for group_name, spec in groups_permissions.items():
    group, created = Group.objects.get_or_create(name=group_name)
    status = 'Created' if created else 'Updated'
    print(f'✓ {status} group: {group_name}')

    # Rebuild the group's permission set from scratch so stale grants
    # from earlier runs are dropped.
    group.permissions.clear()
    added = 0

    for app_label, model_name in spec['models']:
        try:
            content_type = ContentType.objects.get(app_label=app_label, model=model_name)
        except ContentType.DoesNotExist:
            # Model not installed in this deployment; skip it entirely.
            print(f' ! ContentType not found: {app_label}.{model_name}')
            continue

        for perm_type in spec['permissions']:
            # Django's default codenames follow the '<verb>_<model>' pattern.
            try:
                permission = Permission.objects.get(content_type=content_type, codename=f'{perm_type}_{model_name}')
            except Permission.DoesNotExist:
                print(f' ! Permission not found: {perm_type}_{model_name}')
                continue
            group.permissions.add(permission)
            added += 1

    print(f' Added {added} permissions')

print('\n✓ Permission groups created successfully!')
|
||||
Binary file not shown.
@@ -1,116 +0,0 @@
|
||||
#!/bin/bash
# Automation System Deployment Script
# Run this script to complete the automation system deployment
#
# Performs six steps: log dir creation, migrations, Celery service checks,
# cache verification, an AutomationService smoke test, and a Celery beat
# schedule check. Steps 3-6 are diagnostic; only steps 1-2 change state.

set -e  # Exit on error

echo "========================================="
echo "IGNY8 Automation System Deployment"
echo "========================================="
echo ""

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check if running from correct directory
# (manage.py marks the Django backend root; everything below assumes it)
if [ ! -f "manage.py" ]; then
    echo -e "${RED}Error: Please run this script from the backend directory${NC}"
    echo "cd /data/app/igny8/backend && ./deploy_automation.sh"
    exit 1
fi

# Step 1: per-run automation logs are written under logs/automation/
echo -e "${YELLOW}Step 1: Creating log directory...${NC}"
mkdir -p logs/automation
chmod 755 logs/automation
echo -e "${GREEN}✓ Log directory created${NC}"
echo ""

# Step 2: apply any pending schema changes (script aborts here on failure
# because of `set -e`)
echo -e "${YELLOW}Step 2: Running database migrations...${NC}"
python3 manage.py makemigrations
python3 manage.py migrate
echo -e "${GREEN}✓ Migrations complete${NC}"
echo ""

# Step 3: advisory check only — a missing worker/beat does not abort,
# since `if` guards suppress the grep exit status
echo -e "${YELLOW}Step 3: Checking Celery services...${NC}"
if docker ps | grep -q celery; then
    echo -e "${GREEN}✓ Celery worker is running${NC}"
else
    echo -e "${RED}⚠ Celery worker is NOT running${NC}"
    echo "Start with: docker-compose up -d celery"
fi

# NOTE(review): `grep -q beat` also matches the worker line if its name
# contains "beat" — verify container naming before trusting this check.
if docker ps | grep -q beat; then
    echo -e "${GREEN}✓ Celery beat is running${NC}"
else
    echo -e "${RED}⚠ Celery beat is NOT running${NC}"
    echo "Start with: docker-compose up -d celery-beat"
fi
echo ""

# Step 4: round-trip a key through the configured Django cache.
# The ${GREEN}/${RED}/${NC} variables are expanded by the shell BEFORE
# Python runs (double-quoted -c string), so Python prints raw ANSI codes.
echo -e "${YELLOW}Step 4: Verifying cache backend...${NC}"
python3 -c "
from django.core.cache import cache
try:
    cache.set('test_key', 'test_value', 10)
    if cache.get('test_key') == 'test_value':
        print('${GREEN}✓ Cache backend working${NC}')
    else:
        print('${RED}⚠ Cache backend not working properly${NC}')
except Exception as e:
    print('${RED}⚠ Cache backend error:', str(e), '${NC}')
" || echo -e "${RED}⚠ Could not verify cache backend${NC}"
echo ""

# Step 5: smoke-test AutomationService against the first account/site.
# Unquoted EOF heredoc: color variables are again shell-expanded before
# the snippet reaches the Django shell.
echo -e "${YELLOW}Step 5: Testing automation API...${NC}"
python3 manage.py shell << EOF
from igny8_core.business.automation.services import AutomationService
from igny8_core.modules.system.models import Account, Site

try:
    account = Account.objects.first()
    site = Site.objects.first()
    if account and site:
        service = AutomationService(account, site)
        estimate = service.estimate_credits()
        print('${GREEN}✓ AutomationService working - Estimated credits:', estimate, '${NC}')
    else:
        print('${YELLOW}⚠ No account or site found - create one first${NC}')
except Exception as e:
    print('${RED}⚠ AutomationService error:', str(e), '${NC}')
EOF
echo ""

# Step 6: ask the running worker whether the periodic automation task is
# known to Celery beat. Best-effort: any failure falls through to the
# "not found" message via the || branch.
echo -e "${YELLOW}Step 6: Checking Celery beat schedule...${NC}"
if docker ps | grep -q celery; then
    # NOTE(review): if several celery containers run, this picks all of
    # their IDs — confirm only one worker container matches.
    CELERY_CONTAINER=$(docker ps | grep celery | grep -v beat | awk '{print $1}')
    docker exec $CELERY_CONTAINER celery -A igny8_core inspect scheduled 2>/dev/null | grep -q "check-scheduled-automations" && \
    echo -e "${GREEN}✓ Automation task scheduled in Celery beat${NC}" || \
    echo -e "${YELLOW}⚠ Automation task not found in schedule (may need restart)${NC}"
else
    echo -e "${YELLOW}⚠ Celery worker not running - cannot check schedule${NC}"
fi
echo ""

echo "========================================="
echo -e "${GREEN}Deployment Steps Completed!${NC}"
echo "========================================="
echo ""

echo "Next steps:"
echo "1. Restart Celery services to pick up new tasks:"
echo "   docker-compose restart celery celery-beat"
echo ""
echo "2. Access the frontend at /automation page"
echo ""
echo "3. Test the automation:"
echo "   - Click [Configure] to set up schedule"
echo "   - Click [Run Now] to start automation"
echo "   - Monitor progress in real-time"
echo ""
echo "4. Check logs:"
echo "   tail -f logs/automation/{account_id}/{site_id}/{run_id}/automation_run.log"
echo ""
echo -e "${YELLOW}For troubleshooting, see: AUTOMATION-DEPLOYMENT-CHECKLIST.md${NC}"
|
||||
@@ -1,393 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Diagnostic script for generate_content function issues
|
||||
Tests each layer of the content generation pipeline to identify where it's failing
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import django
|
||||
import logging
|
||||
|
||||
# Setup Django
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
|
||||
django.setup()
|
||||
|
||||
from igny8_core.auth.models import Account
|
||||
from igny8_core.modules.writer.models import Tasks, Content
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
from igny8_core.ai.registry import get_function_instance
|
||||
from igny8_core.ai.engine import AIEngine
|
||||
from igny8_core.business.content.services.content_generation_service import ContentGenerationService
|
||||
|
||||
# Setup logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def print_section(title):
    """Emit a banner-framed section header on stdout."""
    rule = "=" * 80
    print("\n" + rule)
    print(f" {title}")
    print(rule + "\n")
|
||||
|
||||
def test_prerequisites():
    """Test that prerequisites are met.

    Checks, in order: an Account row exists, an active OpenAI
    IntegrationSettings row with an apiKey is configured for it, and the
    account has at least one Tasks row (preferring status='pending').

    Returns a context dict {'account', 'tasks', 'openai_settings'} on
    success, or None as soon as any check fails.
    """
    print_section("1. TESTING PREREQUISITES")

    # Check if account exists — the diagnostics run against whichever
    # account Account.objects.first() happens to return.
    try:
        account = Account.objects.first()
        if not account:
            print("❌ FAIL: No account found in database")
            return None
        print(f"✅ PASS: Found account: {account.id} ({account.email})")
    except Exception as e:
        print(f"❌ FAIL: Error getting account: {e}")
        return None

    # Check OpenAI integration settings for that account.
    try:
        openai_settings = IntegrationSettings.objects.filter(
            integration_type='openai',
            account=account,
            is_active=True
        ).first()

        if not openai_settings:
            print("❌ FAIL: No active OpenAI integration settings found")
            return None

        # The API key lives in the JSON 'config' blob under 'apiKey'.
        if not openai_settings.config or not openai_settings.config.get('apiKey'):
            print("❌ FAIL: OpenAI API key not configured in IntegrationSettings")
            return None

        # Only the first 10 characters of the key are echoed, to avoid
        # leaking the full secret into logs.
        api_key_preview = openai_settings.config['apiKey'][:10] + "..." if openai_settings.config.get('apiKey') else "None"
        model = openai_settings.config.get('model', 'Not set')
        print(f"✅ PASS: OpenAI settings found (API key: {api_key_preview}, Model: {model})")
    except Exception as e:
        print(f"❌ FAIL: Error checking OpenAI settings: {e}")
        return None

    # Check if tasks exist; fall back from 'pending' tasks to any task.
    try:
        tasks = Tasks.objects.filter(account=account, status='pending')[:5]
        task_count = tasks.count()

        if task_count == 0:
            print("⚠️ WARNING: No pending tasks found, will try to use any task")
            tasks = Tasks.objects.filter(account=account)[:5]
            task_count = tasks.count()

        if task_count == 0:
            print("❌ FAIL: No tasks found at all")
            return None

        print(f"✅ PASS: Found {task_count} task(s)")
        for task in tasks:
            print(f" - Task {task.id}: {task.title or 'Untitled'} (status: {task.status})")
    except Exception as e:
        print(f"❌ FAIL: Error getting tasks: {e}")
        return None

    # Materialize the queryset so later tests can index into the list.
    return {
        'account': account,
        'tasks': list(tasks),
        'openai_settings': openai_settings
    }
|
||||
|
||||
def test_function_registry():
    """Check that 'generate_content' is registered and report its metadata.

    Returns True when the registry yields an instance, False otherwise.
    """
    print_section("2. TESTING FUNCTION REGISTRY")

    try:
        registered = get_function_instance('generate_content')
        if not registered:
            print("❌ FAIL: generate_content function not found in registry")
            return False

        print(f"✅ PASS: Function registered: {registered.get_name()}")
        meta = registered.get_metadata()
        print(f" - Display name: {meta.get('display_name')}")
        print(f" - Description: {meta.get('description')}")
        return True
    except Exception as e:
        print(f"❌ FAIL: Error loading function: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_function_validation(context):
    """Run the function's validate() step against the first prepared task.

    Returns True when validation reports valid, False on any failure.
    """
    print_section("3. TESTING FUNCTION VALIDATION")

    try:
        function_obj = get_function_instance('generate_content')
        account = context['account']
        first_task = context['tasks'][0]

        payload = {'ids': [first_task.id]}
        print(f"Testing with payload: {payload}")

        result = function_obj.validate(payload, account)

        # Guard clause: bail out early on an invalid payload.
        if not result['valid']:
            print(f"❌ FAIL: Validation failed: {result.get('error')}")
            return False

        print(f"✅ PASS: Validation succeeded")
        return True
    except Exception as e:
        print(f"❌ FAIL: Error during validation: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_function_prepare(context):
    """Run the function's prepare() step and stash its output in context.

    On success stores the result under context['prepared_data'] and
    returns True; returns False on empty output or any exception.
    """
    print_section("4. TESTING FUNCTION PREPARE")

    try:
        function_obj = get_function_instance('generate_content')
        account = context['account']
        first_task = context['tasks'][0]

        payload = {'ids': [first_task.id]}
        print(f"Preparing task {first_task.id}: {first_task.title or 'Untitled'}")

        prepared = function_obj.prepare(payload, account)

        if not prepared:
            print("❌ FAIL: Prepare returned no data")
            return False

        # A list result is expected to contain task objects; anything
        # else is just reported by type.
        if isinstance(prepared, list):
            print(f"✅ PASS: Prepared {len(prepared)} task(s)")
            for item in prepared:
                print(f" - Task {item.id}: {item.title or 'Untitled'}")
                print(f" Cluster: {item.cluster.name if item.cluster else 'None'}")
                print(f" Taxonomy: {item.taxonomy_term.name if item.taxonomy_term else 'None'}")
                print(f" Keywords: {item.keywords.count()} keyword(s)")
        else:
            print(f"✅ PASS: Prepared data: {type(prepared)}")

        context['prepared_data'] = prepared
        return True
    except Exception as e:
        print(f"❌ FAIL: Error during prepare: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_function_build_prompt(context):
    """Build the LLM prompt from prepared data and preview it.

    Stores the prompt under context['prompt'] and returns True; returns
    False when no prompt is produced or an exception occurs.
    """
    print_section("5. TESTING PROMPT BUILDING")

    try:
        function_obj = get_function_instance('generate_content')
        account = context['account']
        prepared = context['prepared_data']

        built_prompt = function_obj.build_prompt(prepared, account)

        if not built_prompt:
            print("❌ FAIL: No prompt generated")
            return False

        # Preview only the head of the prompt to keep output short.
        preview_limit = 500
        print(f"✅ PASS: Prompt generated ({len(built_prompt)} characters)")
        print("\nPrompt preview (first 500 chars):")
        print("-" * 80)
        print(built_prompt[:preview_limit])
        if len(built_prompt) > preview_limit:
            print(f"\n... ({len(built_prompt) - preview_limit} more characters)")
        print("-" * 80)

        context['prompt'] = built_prompt
        return True
    except Exception as e:
        print(f"❌ FAIL: Error building prompt: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_model_config(context):
    """Load and display the model configuration for 'generate_content'.

    Stores it under context['model_config'] and returns True; returns
    False when no config comes back or an exception occurs.
    """
    print_section("6. TESTING MODEL CONFIGURATION")

    try:
        from igny8_core.ai.settings import get_model_config
        account = context['account']

        config = get_model_config('generate_content', account=account)

        if not config:
            print("❌ FAIL: No model config returned")
            return False

        print(f"✅ PASS: Model configuration loaded")
        # Report the interesting knobs one per line.
        for label, key in (
            ('Model', 'model'),
            ('Max tokens', 'max_tokens'),
            ('Temperature', 'temperature'),
            ('Response format', 'response_format'),
        ):
            print(f" - {label}: {config.get(key)}")

        context['model_config'] = config
        return True
    except Exception as e:
        print(f"❌ FAIL: Error getting model config: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_ai_core_request(context):
    """Test AI core request (actual API call).

    Interactive and billable: prompts on stdin for confirmation before
    sending a truncated version of context['prompt'] to OpenAI through
    AICore. Returns True when the call succeeds (or the user skips it),
    False on error. Stores the raw response under context['ai_response'].
    """
    print_section("7. TESTING AI CORE REQUEST (ACTUAL API CALL)")

    # Ask user for confirmation — skipping is treated as success so the
    # overall diagnostic run can continue.
    print("⚠️ WARNING: This will make an actual API call to OpenAI and cost money!")
    print("Do you want to proceed? (yes/no): ", end='')
    response = input().strip().lower()

    if response != 'yes':
        print("Skipping API call test")
        return True

    try:
        from igny8_core.ai.ai_core import AICore
        account = context['account']
        prompt = context['prompt']
        model_config = context['model_config']

        # Use a shorter test prompt to save costs
        test_prompt = prompt[:1000] + "\n\n[TEST MODE - Generate only title and first paragraph]"

        print(f"Making test API call with shortened prompt ({len(test_prompt)} chars)...")

        # NOTE(review): max_tokens is capped at 500 here regardless of the
        # configured value — intentional for the test, not production.
        ai_core = AICore(account=account)
        result = ai_core.run_ai_request(
            prompt=test_prompt,
            model=model_config['model'],
            max_tokens=500,  # Limit tokens for testing
            temperature=model_config.get('temperature', 0.7),
            response_format=model_config.get('response_format'),
            function_name='generate_content_test'
        )

        # run_ai_request presumably returns a dict with 'error'/'content'
        # keys rather than raising — both failure shapes are handled.
        if result.get('error'):
            print(f"❌ FAIL: API call returned error: {result['error']}")
            return False

        if not result.get('content'):
            print(f"❌ FAIL: API call returned no content")
            return False

        print(f"✅ PASS: API call successful")
        print(f" - Tokens: {result.get('total_tokens', 0)}")
        print(f" - Cost: ${result.get('cost', 0):.6f}")
        print(f" - Model: {result.get('model')}")
        print(f"\nContent preview (first 300 chars):")
        print("-" * 80)
        print(result['content'][:300])
        print("-" * 80)

        context['ai_response'] = result
        return True
    except Exception as e:
        print(f"❌ FAIL: Error during API call: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def test_service_layer(context):
    """Test the content generation service end to end.

    Interactive and billable: prompts on stdin for confirmation, then
    invokes ContentGenerationService.generate_content for the first task.
    Returns True on success (or when the user skips), False on failure.
    """
    print_section("8. TESTING CONTENT GENERATION SERVICE")

    # Skipping counts as success so the diagnostic run can complete.
    print("⚠️ WARNING: This will make a full API call and create content!")
    print("Do you want to proceed? (yes/no): ", end='')
    response = input().strip().lower()

    if response != 'yes':
        print("Skipping service test")
        return True

    try:
        account = context['account']
        task = context['tasks'][0]

        service = ContentGenerationService()

        print(f"Calling generate_content with task {task.id}...")

        result = service.generate_content([task.id], account)

        if not result:
            print("❌ FAIL: Service returned None")
            return False

        if not result.get('success'):
            print(f"❌ FAIL: Service failed: {result.get('error')}")
            return False

        print(f"✅ PASS: Service call successful")

        # A 'task_id' key signals async (Celery) execution; otherwise the
        # service appears to have generated the content synchronously.
        if 'task_id' in result:
            print(f" - Celery task ID: {result['task_id']}")
            print(f" - Message: {result.get('message')}")
            print("\n⚠️ Note: Content generation is running in background (Celery)")
            print(" Check Celery logs for actual execution status")
        else:
            print(f" - Content created: {result.get('content_id')}")
            print(f" - Word count: {result.get('word_count')}")

        return True
    except Exception as e:
        print(f"❌ FAIL: Error in service layer: {e}")
        import traceback
        traceback.print_exc()
        return False
|
||||
|
||||
def main():
    """Run all diagnostic tests.

    Executes the pipeline checks in dependency order; each mandatory
    stage aborts the run on failure because later stages consume the
    context it would have populated. The two API-call stages are
    optional (each asks for confirmation) and never abort the run.
    """
    print("\n" + "=" * 80)
    print(" GENERATE_CONTENT DIAGNOSTIC TOOL")
    print("=" * 80)
    print("\nThis tool will test each layer of the content generation pipeline")
    print("to identify where the function is failing.")

    # Run tests — test_prerequisites returns the shared context dict
    # (account, tasks, openai settings) that every later stage reads.
    context = test_prerequisites()
    if not context:
        print("\n❌ FATAL: Prerequisites test failed. Cannot continue.")
        return

    if not test_function_registry():
        print("\n❌ FATAL: Function registry test failed. Cannot continue.")
        return

    if not test_function_validation(context):
        print("\n❌ FATAL: Validation test failed. Cannot continue.")
        return

    if not test_function_prepare(context):
        print("\n❌ FATAL: Prepare test failed. Cannot continue.")
        return

    if not test_function_build_prompt(context):
        print("\n❌ FATAL: Prompt building test failed. Cannot continue.")
        return

    if not test_model_config(context):
        print("\n❌ FATAL: Model config test failed. Cannot continue.")
        return

    # Optional tests (require API calls) — results are informational only.
    test_ai_core_request(context)
    test_service_layer(context)

    print_section("DIAGNOSTIC COMPLETE")
    print("Review the results above to identify where the generate_content")
    print("function is failing.\n")


if __name__ == '__main__':
    main()
|
||||
@@ -1,67 +0,0 @@
|
||||
#!/usr/bin/env python
"""
Final verification that the WordPress content types are properly synced
"""
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.integration.models import SiteIntegration
from igny8_core.auth.models import Site
import json

RULE = "=" * 70


def _print_section(heading, entries):
    """Print one content-type section: label, count, enabled, fetch limit per entry."""
    print(f"\n{heading}: ({len(entries)} total)")
    for name, info in entries.items():
        print(f" • {info['label']} ({name})")
        print(f" - Count: {info['count']}")
        print(f" - Enabled: {info['enabled']}")
        print(f" - Fetch Limit: {info['fetch_limit']}")


print(RULE)
print("WORDPRESS SYNC FIX VERIFICATION")
print(RULE)

# Site 5 is the site the sync fix targeted.
site = Site.objects.get(id=5)
print(f"\n✓ Site: {site.name} (ID: {site.id})")

integration = SiteIntegration.objects.get(site=site, platform='wordpress')
print(f"✓ Integration: {integration.platform.upper()} (ID: {integration.id})")
print(f"✓ Active: {integration.is_active}")
print(f"✓ Sync Enabled: {integration.sync_enabled}")

# The synced structure lives in the config_json blob.
config = integration.config_json or {}
content_types = config.get('content_types', {})

print("\n" + RULE)
print("CONTENT TYPES STRUCTURE")
print(RULE)

_print_section("📝 Post Types", content_types.get('post_types', {}))
_print_section("🏷️ Taxonomies", content_types.get('taxonomies', {}))

last_fetch = content_types.get('last_structure_fetch')
print(f"\n🕐 Last Structure Fetch: {last_fetch}")

print("\n" + RULE)
print("✅ SUCCESS! WordPress content types are properly configured")
print(RULE)
print("\nNext Steps:")
print("1. Refresh the IGNY8 app page in your browser")
print("2. Navigate to Sites → Settings → Content Types tab")
print("3. You should now see all Post Types and Taxonomies listed")
print(RULE)
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env python
"""Fix remaining cluster with old status"""
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.planning.models import Clusters

# Legacy rows may still carry the retired 'active' status; migrate the
# first one found to the current vocabulary ('mapped' / 'new').
cluster = Clusters.objects.filter(status='active').first()
if cluster is None:
    print("No clusters with 'active' status found")
else:
    print(f"Found cluster: ID={cluster.id}, name={cluster.name}, status={cluster.status}")
    print(f"Ideas count: {cluster.ideas.count()}")
    # A cluster that already has ideas counts as mapped.
    cluster.status = 'mapped' if cluster.ideas.exists() else 'new'
    cluster.save()
    print(f"Updated to: {cluster.status}")
|
||||
@@ -1,88 +0,0 @@
|
||||
#!/usr/bin/env python
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.integration.models import SiteIntegration
from igny8_core.auth.models import Site
from django.utils import timezone


def _entry(label, count):
    """One content-type/taxonomy config entry with the default fetch settings."""
    return {'label': label, 'count': count, 'enabled': True, 'fetch_limit': 100}


try:
    # Hard-coded target site for this one-off seed.
    site = Site.objects.get(id=5)
    print(f"✓ Site found: {site.name}")

    # Ensure a WordPress integration row exists for the site.
    integration, created = SiteIntegration.objects.get_or_create(
        site=site,
        platform='wordpress',
        defaults={
            'is_active': True,
            'sync_enabled': True,
            'config_json': {}
        }
    )

    print(f"✓ Integration ID: {integration.id} (created: {created})")

    # Seed the content-type structure the settings UI expects.
    integration.config_json = {
        'content_types': {
            'post_types': {
                'post': _entry('Posts', 150),
                'page': _entry('Pages', 25),
                'product': _entry('Products', 89),
            },
            'taxonomies': {
                'category': _entry('Categories', 15),
                'post_tag': _entry('Tags', 234),
                'product_cat': _entry('Product Categories', 12),
            },
            'last_structure_fetch': timezone.now().isoformat()
        },
        'plugin_connection_enabled': True,
        'two_way_sync_enabled': True
    }

    integration.save()
    print("✓ Structure data saved successfully!")
    print(f"✓ Integration ID: {integration.id}")
    print("\n✅ READY: Refresh the page to see the content types!")

except Exception as e:
    print(f"❌ ERROR: {str(e)}")
    import traceback
    traceback.print_exc()
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Fix missing site_url in integration config
|
||||
Adds site_url to config_json from site.domain or site.wp_url
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import django
|
||||
|
||||
# Setup Django environment
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
|
||||
django.setup()
|
||||
|
||||
from igny8_core.business.integration.models import SiteIntegration
|
||||
from igny8_core.auth.models import Site
|
||||
|
||||
def fix_integration_site_urls():
    """Add site_url to integration config if missing.

    Scans every WordPress SiteIntegration; for each whose config_json lacks
    a 'site_url' key, fills it from the site's legacy wp_url (preferred) or,
    failing that, the site's domain. Logs every decision and prints a final
    fixed/skipped/error summary. Returns None.
    """

    integrations = SiteIntegration.objects.filter(platform='wordpress')

    fixed_count = 0
    skipped_count = 0
    error_count = 0

    for integration in integrations:
        try:
            config = integration.config_json or {}

            # Already configured — nothing to do for this integration.
            if config.get('site_url'):
                print(f"✓ Integration {integration.id} already has site_url: {config.get('site_url')}")
                skipped_count += 1
                continue

            # Try to get site URL from multiple sources
            site_url = None

            # First, try legacy wp_url
            if integration.site.wp_url:
                site_url = integration.site.wp_url
                print(f"→ Using legacy wp_url for integration {integration.id}: {site_url}")

            # Fallback to domain
            elif integration.site.domain:
                site_url = integration.site.domain
                print(f"→ Using domain for integration {integration.id}: {site_url}")

            if site_url:
                # Update config; persist only the changed column.
                config['site_url'] = site_url
                integration.config_json = config
                integration.save(update_fields=['config_json'])
                print(f"✓ Updated integration {integration.id} with site_url: {site_url}")
                fixed_count += 1
            else:
                print(f"✗ Integration {integration.id} has no site URL available (site: {integration.site.name}, id: {integration.site.id})")
                error_count += 1

        except Exception as e:
            # Keep going: one broken integration must not abort the batch.
            print(f"✗ Error fixing integration {integration.id}: {e}")
            error_count += 1

    print("\n" + "="*60)
    print("Summary:")  # was f"Summary:" — f-string with no placeholders
    print(f"  Fixed: {fixed_count}")
    print(f"  Skipped (already set): {skipped_count}")
    print(f"  Errors: {error_count}")
    print("="*60)


if __name__ == '__main__':
    print("Fixing WordPress integration site URLs...")
    print("="*60)
    fix_integration_site_urls()
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
#!/usr/bin/env python
"""Script to inject WordPress structure data into the backend"""
# NOTE(review): this script never calls django.setup(), so it appears to be
# intended for `python manage.py shell < script.py` (or another
# already-configured Django process) rather than direct execution — confirm
# before running standalone.

from igny8_core.business.integration.models import SiteIntegration
from igny8_core.auth.models import Site
from django.utils import timezone

# Get site 5 (hard-coded target of this one-off data injection)
try:
    site = Site.objects.get(id=5)
    print(f"✓ Found site: {site.name}")
except Site.DoesNotExist:
    print("✗ Site with ID 5 not found!")
    exit(1)

# Get or create WordPress integration for this site
integration, created = SiteIntegration.objects.get_or_create(
    site=site,
    platform='wordpress',
    defaults={
        'is_active': True,
        'sync_enabled': True,
        'config_json': {}
    }
)

print(f"✓ Integration ID: {integration.id} (newly created: {created})")

# Add structure data: seed post types / taxonomies with fixed demo counts,
# everything enabled, default fetch limit of 100, nothing synced yet.
integration.config_json = {
    'content_types': {
        'post_types': {
            'post': {
                'label': 'Posts',
                'count': 150,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            },
            'page': {
                'label': 'Pages',
                'count': 25,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            },
            'product': {
                'label': 'Products',
                'count': 89,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            }
        },
        'taxonomies': {
            'category': {
                'label': 'Categories',
                'count': 15,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            },
            'post_tag': {
                'label': 'Tags',
                'count': 234,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            },
            'product_cat': {
                'label': 'Product Categories',
                'count': 12,
                'enabled': True,
                'fetch_limit': 100,
                'synced_count': 0
            }
        },
        # Timestamp used by the UI to show when the structure was last pulled.
        'last_structure_fetch': timezone.now().isoformat()
    },
    'plugin_connection_enabled': True,
    'two_way_sync_enabled': True
}

integration.save()
print("✓ Structure data saved!")
print(f"✓ Post Types: {len(integration.config_json['content_types']['post_types'])}")
print(f"✓ Taxonomies: {len(integration.config_json['content_types']['taxonomies'])}")
print(f"✓ Last fetch: {integration.config_json['content_types']['last_structure_fetch']}")
print("\n🎉 SUCCESS! Now refresh: https://app.igny8.com/sites/5/settings?tab=content-types")
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
#!/usr/bin/env python
"""
Fix missing taxonomy relationships for existing content
This script will:
1. Find content that should have tags/categories based on their keywords
2. Create appropriate taxonomy terms
3. Link them to the content
"""
import os
import sys
import django

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from django.db import transaction
from django.utils.text import slugify
from igny8_core.business.content.models import Content, ContentTaxonomy


def _attach_terms(content, names, taxonomy_type):
    """Get-or-create taxonomy terms of `taxonomy_type` and link them to `content`.

    Skips non-string and blank names; failures for one term are logged and do
    not stop the rest. (Extracted: the tag and category loops were duplicated
    verbatim except for the taxonomy type.)
    """
    for raw_name in names:
        if not (raw_name and isinstance(raw_name, str)):
            continue
        name = raw_name.strip()
        if not name:
            continue
        try:
            term, created = ContentTaxonomy.objects.get_or_create(
                site=content.site,
                name=name,
                taxonomy_type=taxonomy_type,
                defaults={
                    'slug': slugify(name),
                    'sector': content.sector,
                    'account': content.account,
                    'description': '',
                    'external_taxonomy': '',
                    'sync_status': '',
                    'count': 0,
                    'metadata': {},
                }
            )
            content.taxonomy_terms.add(term)
            # 'tag' -> 'Tag', 'category' -> 'Category' in the log line.
            print(f" + {taxonomy_type.capitalize()}: {name} ({'created' if created else 'existing'})")
        except Exception as e:
            print(f" ✗ Failed to add {taxonomy_type} '{name}': {e}")


print("=" * 80)
print("FIXING MISSING TAXONOMY RELATIONSHIPS")
print("=" * 80)

# Get all content without taxonomy terms
content_without_tags = Content.objects.filter(taxonomy_terms__isnull=True).distinct()
print(f"\nFound {content_without_tags.count()} content items without tags/categories")

fixed_count = 0
for content in content_without_tags:
    print(f"\nProcessing Content #{content.id}: {content.title[:50]}...")

    # Generate tags from keywords
    tags_to_add = []
    categories_to_add = []

    # Use primary keyword as a tag
    if content.primary_keyword:
        tags_to_add.append(content.primary_keyword)

    # Use secondary keywords as tags
    if content.secondary_keywords and isinstance(content.secondary_keywords, list):
        tags_to_add.extend(content.secondary_keywords[:3])  # Limit to 3

    # Create category based on cluster only
    if content.cluster:
        categories_to_add.append(content.cluster.name)

    # All links for one content item commit (or roll back) together.
    with transaction.atomic():
        _attach_terms(content, tags_to_add, 'tag')
        _attach_terms(content, categories_to_add, 'category')

    # NOTE: counted as "fixed" even when no terms could be derived,
    # matching the original behavior.
    fixed_count += 1

print("\n" + "=" * 80)
print(f"FIXED {fixed_count} CONTENT ITEMS")
print("=" * 80)
|
||||
@@ -1,57 +0,0 @@
|
||||
#!/usr/bin/env python3
"""Force cancel stuck automation runs and clear cache locks"""
import os
import sys
import django

# Setup Django
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'igny8_core.settings')
django.setup()

from igny8_core.business.automation.models import AutomationRun
from django.core.cache import cache
from django.utils import timezone

print("=" * 80)
print("AUTOMATION RUN FORCE CANCEL & CLEANUP")
print("=" * 80)

# Check and cancel active runs ('running' or 'paused'), newest first.
runs = AutomationRun.objects.filter(status__in=['running', 'paused']).order_by('-started_at')
print(f"\nFound {runs.count()} active run(s)")

if runs.count() == 0:
    print(" No runs to cancel\n")
else:
    for r in runs:
        # Minutes since the run started, for the log line below.
        duration = (timezone.now() - r.started_at).total_seconds() / 60
        print(f"\nRun ID: {r.run_id}")
        print(f" Site: {r.site_id}")
        print(f" Status: {r.status}")
        print(f" Stage: {r.current_stage}")
        print(f" Started: {r.started_at} ({duration:.1f}m ago)")
        print(f" Credits: {r.total_credits_used}")

        # Force cancel: flips status without notifying the worker — assumes
        # the run is truly stuck (TODO confirm no worker still owns it).
        print(f" >>> FORCE CANCELLING...")
        r.status = 'cancelled'
        r.save()
        print(f" >>> Status: {r.status}")

        # Clear cache lock so a new run can be started for this site.
        lock_key = f'automation_lock_{r.site_id}'
        cache.delete(lock_key)
        print(f" >>> Lock cleared: {lock_key}")

print("\n" + "=" * 40)
print("Cache lock status:")
# Hard-coded site IDs of interest for this one-off cleanup.
for site_id in [5, 16]:
    lock_key = f'automation_lock_{site_id}'
    lock_val = cache.get(lock_key)
    status = lock_val or 'UNLOCKED ✓'
    print(f" Site {site_id}: {status}")

print("\n" + "=" * 80)
print("✓ CLEANUP COMPLETE - You can now start a new automation run")
print("=" * 80)
|
||||
@@ -1,7 +1,7 @@
|
||||
"""
|
||||
Admin module for IGNY8
|
||||
"""
|
||||
from .base import AccountAdminMixin, SiteSectorAdminMixin
|
||||
# Note: Igny8ModelAdmin is imported by individual admin modules as needed to avoid circular imports
|
||||
|
||||
__all__ = ['AccountAdminMixin', 'SiteSectorAdminMixin']
|
||||
__all__ = []
|
||||
|
||||
|
||||
122
backend/igny8_core/admin/alerts.py
Normal file
122
backend/igny8_core/admin/alerts.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""
|
||||
Admin Alert System
|
||||
"""
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
class AdminAlerts:
    """System for admin alerts and notifications.

    Aggregates operational issues (billing, credits, automations, syncs,
    Celery tasks, writer tasks) into a list of alert dicts that the admin
    dashboard can render.
    """

    @staticmethod
    def get_alerts():
        """Get all active alerts.

        Returns:
            list[dict]: each alert carries 'level' ('info'/'warning'/'error'),
            'icon', 'message', 'url' (admin changelist filter link) and
            'action' (button label).
        """
        alerts = []
        today = timezone.now().date()

        # Models are imported inside the function to avoid circular imports
        # at module load time.

        # Check for pending payments
        from igny8_core.business.billing.models import Payment
        pending_payments = Payment.objects.filter(status='pending_approval').count()
        if pending_payments > 0:
            alerts.append({
                'level': 'warning',
                'icon': '⚠️',
                'message': f'{pending_payments} payment(s) awaiting approval',
                'url': '/admin/billing/payment/?status=pending_approval',
                'action': 'Review Payments'
            })

        # Check for low credit accounts (< 100)
        from igny8_core.auth.models import Account
        low_credit_accounts = Account.objects.filter(
            status='active',
            credits__lt=100
        ).count()
        if low_credit_accounts > 0:
            alerts.append({
                'level': 'info',
                'icon': 'ℹ️',
                'message': f'{low_credit_accounts} account(s) with low credits',
                'url': '/admin/igny8_core_auth/account/?credits__lt=100',
                'action': 'View Accounts'
            })

        # Check for very low credits (critical, < 10)
        critical_credit_accounts = Account.objects.filter(
            status='active',
            credits__lt=10
        ).count()
        if critical_credit_accounts > 0:
            alerts.append({
                'level': 'error',
                'icon': '🔴',
                'message': f'{critical_credit_accounts} account(s) with critical low credits (< 10)',
                'url': '/admin/igny8_core_auth/account/?credits__lt=10',
                'action': 'Urgent Review'
            })

        # Check for failed automations (today only)
        from igny8_core.business.automation.models import AutomationRun
        failed_today = AutomationRun.objects.filter(
            status='failed',
            started_at__date=today
        ).count()
        if failed_today > 0:
            alerts.append({
                'level': 'error',
                'icon': '🔴',
                'message': f'{failed_today} automation(s) failed today',
                'url': '/admin/automation/automationrun/?status=failed',
                'action': 'Review Failures'
            })

        # Check for failed syncs
        from igny8_core.business.integration.models import SyncEvent
        failed_syncs = SyncEvent.objects.filter(
            success=False,
            created_at__date=today
        ).count()
        if failed_syncs > 5:  # Only alert if more than 5
            alerts.append({
                'level': 'warning',
                'icon': '⚠️',
                'message': f'{failed_syncs} WordPress sync failures today',
                'url': '/admin/integration/syncevent/?success=False',
                'action': 'Review Syncs'
            })

        # Check for failed Celery tasks. django_celery_results may be
        # uninstalled or its table missing, so degrade gracefully.
        try:
            from django_celery_results.models import TaskResult
            celery_failed = TaskResult.objects.filter(
                status='FAILURE',
                date_created__date=today
            ).count()
            if celery_failed > 0:
                alerts.append({
                    'level': 'error',
                    'icon': '🔴',
                    'message': f'{celery_failed} Celery task(s) failed today',
                    'url': '/admin/django_celery_results/taskresult/?status=FAILURE',
                    'action': 'Review Tasks'
                })
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            pass

        # Check for stale pending tasks (older than 24 hours)
        from igny8_core.modules.writer.models import Tasks
        yesterday = today - timedelta(days=1)
        stale_tasks = Tasks.objects.filter(
            status='pending',
            created_at__date__lte=yesterday
        ).count()
        if stale_tasks > 10:
            alerts.append({
                'level': 'info',
                'icon': 'ℹ️',
                'message': f'{stale_tasks} tasks pending for more than 24 hours',
                'url': '/admin/writer/tasks/?status=pending',
                'action': 'Review Tasks'
            })

        return alerts
|
||||
@@ -28,6 +28,31 @@ class Igny8AdminConfig(AdminConfig):
|
||||
|
||||
def ready(self):
|
||||
super().ready()
|
||||
|
||||
# Replace default admin.site with our custom Igny8AdminSite
|
||||
# IMPORTANT: Must copy all registrations from old site to new site
|
||||
# because models register themselves before ready() is called
|
||||
from igny8_core.admin.site import admin_site
|
||||
import django.contrib.admin as admin_module
|
||||
|
||||
# Copy all model registrations from the default site to our custom site
|
||||
old_site = admin_module.site
|
||||
admin_site._registry = old_site._registry.copy()
|
||||
admin_site._actions = old_site._actions.copy()
|
||||
admin_site._global_actions = old_site._global_actions.copy()
|
||||
|
||||
# CRITICAL: Update each ModelAdmin's admin_site attribute to point to our custom site
|
||||
# Otherwise, each_context() will use the wrong admin site and miss our customizations
|
||||
for model, model_admin in admin_site._registry.items():
|
||||
model_admin.admin_site = admin_site
|
||||
|
||||
# Now replace the default site
|
||||
admin_module.site = admin_site
|
||||
admin_module.sites.site = admin_site
|
||||
|
||||
# Import Unfold AFTER apps are ready
|
||||
from unfold.admin import ModelAdmin as UnfoldModelAdmin
|
||||
|
||||
# Register Django internals in admin (read-only where appropriate)
|
||||
from django.contrib.admin.models import LogEntry
|
||||
from django.contrib.auth.models import Group, Permission
|
||||
@@ -35,9 +60,39 @@ class Igny8AdminConfig(AdminConfig):
|
||||
from django.contrib.sessions.models import Session
|
||||
|
||||
_safe_register(LogEntry, ReadOnlyAdmin)
|
||||
_safe_register(Permission, admin.ModelAdmin)
|
||||
_safe_register(Group, admin.ModelAdmin)
|
||||
_safe_register(Permission, UnfoldModelAdmin)
|
||||
_safe_register(Group, UnfoldModelAdmin)
|
||||
_safe_register(ContentType, ReadOnlyAdmin)
|
||||
_safe_register(Session, ReadOnlyAdmin)
|
||||
|
||||
# Import and setup enhanced Celery task monitoring
|
||||
self._setup_celery_admin()
|
||||
|
||||
def _setup_celery_admin(self):
|
||||
"""Setup enhanced Celery admin with proper unregister/register"""
|
||||
try:
|
||||
from django_celery_results.models import TaskResult, GroupResult
|
||||
from igny8_core.admin.celery_admin import CeleryTaskResultAdmin, CeleryGroupResultAdmin
|
||||
|
||||
# Unregister the default TaskResult admin
|
||||
try:
|
||||
admin.site.unregister(TaskResult)
|
||||
except admin.sites.NotRegistered:
|
||||
pass
|
||||
|
||||
# Unregister the default GroupResult admin
|
||||
try:
|
||||
admin.site.unregister(GroupResult)
|
||||
except admin.sites.NotRegistered:
|
||||
pass
|
||||
|
||||
# Register our enhanced versions
|
||||
admin.site.register(TaskResult, CeleryTaskResultAdmin)
|
||||
admin.site.register(GroupResult, CeleryGroupResultAdmin)
|
||||
except Exception as e:
|
||||
# Log the error but don't crash the app
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.warning(f"Could not setup enhanced Celery admin: {e}")
|
||||
|
||||
|
||||
|
||||
@@ -107,3 +107,86 @@ class SiteSectorAdminMixin:
|
||||
return obj.site in accessible_sites
|
||||
return super().has_delete_permission(request, obj)
|
||||
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Custom ModelAdmin for Sidebar Fix
|
||||
# ============================================================================
|
||||
|
||||
from unfold.admin import ModelAdmin as UnfoldModelAdmin
|
||||
|
||||
|
||||
class Igny8ModelAdmin(UnfoldModelAdmin):
    """
    Custom ModelAdmin that ensures sidebar_navigation is set correctly on ALL pages.

    Django's ModelAdmin views don't call AdminSite.each_context(),
    so we override the changelist/change/add views to inject our custom sidebar.
    """

    def _inject_sidebar_context(self, request, extra_context=None):
        """Build the full Unfold context plus our custom sidebar.

        Returns the merged context dict; values already present in
        ``extra_context`` override the generated ones.
        """
        if extra_context is None:
            extra_context = {}

        # Get our custom sidebar from the admin site.
        # Imported here to avoid a circular import at module load time.
        from igny8_core.admin.site import admin_site

        # CRITICAL: Get the full Unfold context (includes all branding, form
        # classes, etc.). This is what makes the logo/title appear properly.
        unfold_context = admin_site.each_context(request)

        # Current path is used below to detect the active group.
        current_path = request.path

        sidebar_navigation = admin_site.get_sidebar_list(request)

        # Detect active group and expand it by setting collapsible=False.
        for group in sidebar_navigation:
            group_is_active = False
            for item in group.get('items', []):
                # Unfold stores the resolved link in 'link_callback' and the
                # original lambda in 'link'.
                item_link = item.get('link_callback') or item.get('link', '')
                # Convert to string (handles lazy proxy objects and ensures
                # it's a string).
                try:
                    item_link = str(item_link) if item_link else ''
                except Exception:
                    # Was a bare `except:`; Exception still covers a failing
                    # __str__ without masking SystemExit/KeyboardInterrupt.
                    item_link = ''
                # Skip function representations (e.g. "<function ...>").
                if item_link.startswith('<'):
                    continue
                # Mark items whose link is a prefix of the current path.
                if item_link and current_path.startswith(item_link):
                    item['active'] = True
                    group_is_active = True

            # Expand the group containing an active item; collapse the rest.
            group['collapsible'] = not group_is_active

        # Merge Unfold context with our custom sidebar.
        unfold_context['sidebar_navigation'] = sidebar_navigation
        unfold_context['available_apps'] = admin_site.get_app_list(request, app_label=None)
        unfold_context['app_list'] = unfold_context['available_apps']

        # Caller-supplied extra_context takes precedence.
        unfold_context.update(extra_context)

        return unfold_context

    def changelist_view(self, request, extra_context=None):
        """Override to inject custom sidebar"""
        extra_context = self._inject_sidebar_context(request, extra_context)
        return super().changelist_view(request, extra_context)

    def change_view(self, request, object_id, form_url='', extra_context=None):
        """Override to inject custom sidebar"""
        extra_context = self._inject_sidebar_context(request, extra_context)
        return super().change_view(request, object_id, form_url, extra_context)

    def add_view(self, request, form_url='', extra_context=None):
        """Override to inject custom sidebar"""
        extra_context = self._inject_sidebar_context(request, extra_context)
        return super().add_view(request, form_url, extra_context)
|
||||
|
||||
213
backend/igny8_core/admin/celery_admin.py
Normal file
213
backend/igny8_core/admin/celery_admin.py
Normal file
@@ -0,0 +1,213 @@
|
||||
"""
|
||||
Celery Task Monitoring Admin - Unfold Style
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.utils.html import format_html
|
||||
from django.contrib import messages
|
||||
from django_celery_results.models import TaskResult, GroupResult
|
||||
from unfold.admin import ModelAdmin
|
||||
from unfold.contrib.filters.admin import RangeDateFilter
|
||||
from celery import current_app
|
||||
|
||||
|
||||
class CeleryTaskResultAdmin(ModelAdmin):
    """Admin interface for monitoring Celery tasks with Unfold styling.

    Read-only listing of django_celery_results TaskResult rows with colored
    status, computed duration, and bulk actions to retry failures and purge
    old results.
    """

    list_display = [
        'task_id',
        'task_name',
        'colored_status',
        'date_created',
        'date_done',
        'execution_time',
    ]
    list_filter = [
        'status',
        'task_name',
        ('date_created', RangeDateFilter),
        ('date_done', RangeDateFilter),
    ]
    search_fields = ['task_id', 'task_name', 'task_args']
    readonly_fields = [
        'task_id', 'task_name', 'task_args', 'task_kwargs',
        'result', 'traceback', 'date_created', 'date_done',
        'colored_status', 'execution_time'
    ]
    date_hierarchy = 'date_created'
    ordering = ['-date_created']

    actions = ['retry_failed_tasks', 'clear_old_tasks']

    fieldsets = (
        ('Task Information', {
            'fields': ('task_id', 'task_name', 'colored_status')
        }),
        ('Execution Details', {
            'fields': ('date_created', 'date_done', 'execution_time')
        }),
        ('Task Arguments', {
            'fields': ('task_args', 'task_kwargs'),
            'classes': ('collapse',)
        }),
        ('Result & Errors', {
            'fields': ('result', 'traceback'),
            'classes': ('collapse',)
        }),
    )

    def colored_status(self, obj):
        """Display status with IGNY8 brand color coding."""
        colors = {
            'SUCCESS': '#0bbf87',  # IGNY8 success green
            'FAILURE': '#ef4444',  # IGNY8 danger red
            'PENDING': '#ff7a00',  # IGNY8 warning orange
            'STARTED': '#0693e3',  # IGNY8 primary blue
            'RETRY': '#5d4ae3',    # IGNY8 purple
        }
        color = colors.get(obj.status, '#64748b')  # Default gray

        return format_html(
            '<span style="color: {}; font-weight: bold; font-size: 14px;">{}</span>',
            color,
            obj.status
        )
    colored_status.short_description = 'Status'

    def execution_time(self, obj):
        """Calculate and display execution time (green <1s, blue <1m, orange otherwise)."""
        if obj.date_done and obj.date_created:
            duration = obj.date_done - obj.date_created
            seconds = duration.total_seconds()

            if seconds < 1:
                time_str = f'{seconds * 1000:.2f}ms'
                return format_html('<span style="color: #0bbf87;">{}</span>', time_str)
            elif seconds < 60:
                time_str = f'{seconds:.2f}s'
                return format_html('<span style="color: #0693e3;">{}</span>', time_str)
            else:
                minutes = seconds / 60
                time_str = f'{minutes:.1f}m'
                return format_html('<span style="color: #ff7a00;">{}</span>', time_str)
        return '-'
    execution_time.short_description = 'Duration'

    def retry_failed_tasks(self, request, queryset):
        """Retry failed celery tasks by re-queuing them.

        Re-parses the stored task_args/task_kwargs (Python literal reprs) and
        calls apply_async on the registered task. Reports up to 5 errors.
        """
        import ast

        failed_tasks = queryset.filter(status='FAILURE')
        count = 0
        errors = []

        for task in failed_tasks:
            try:
                # Look up the registered task function by its stored name.
                task_func = current_app.tasks.get(task.task_name)
                if task_func:
                    try:
                        args = ast.literal_eval(task.task_args) if task.task_args else []
                        kwargs = ast.literal_eval(task.task_kwargs) if task.task_kwargs else {}
                    except (ValueError, SyntaxError):
                        # Stored args weren't a valid Python literal
                        # (was a bare `except:`); retry with none.
                        args = []
                        kwargs = {}

                    # Retry the task
                    task_func.apply_async(args=args, kwargs=kwargs)
                    count += 1
                else:
                    errors.append(f'Task function not found: {task.task_name}')
            except Exception as e:
                errors.append(f'Error retrying {task.task_id}: {str(e)}')

        if count > 0:
            # messages constants, consistent with clear_old_tasks below
            # (were the strings 'SUCCESS' / 'WARNING').
            self.message_user(request, f'Successfully queued {count} task(s) for retry.', messages.SUCCESS)

        if errors:
            for error in errors[:5]:  # Show max 5 errors
                self.message_user(request, f'Error: {error}', messages.WARNING)

    retry_failed_tasks.short_description = 'Retry Failed Tasks'

    def clear_old_tasks(self, request, queryset):
        """Clear completed/failed tasks older than 30 days from the selection."""
        from datetime import timedelta
        from django.utils import timezone

        cutoff_date = timezone.now() - timedelta(days=30)
        old_tasks = queryset.filter(
            date_created__lt=cutoff_date,
            status__in=['SUCCESS', 'FAILURE']
        )

        count = old_tasks.count()
        old_tasks.delete()

        self.message_user(request, f'Cleared {count} old task(s)', messages.SUCCESS)

    clear_old_tasks.short_description = 'Clear Old Tasks (30+ days)'

    def has_add_permission(self, request):
        """Disable manual task creation"""
        return False

    def has_change_permission(self, request, obj=None):
        """Make read-only"""
        return False
|
||||
|
||||
|
||||
class CeleryGroupResultAdmin(ModelAdmin):
    """Admin interface for monitoring Celery group results with Unfold styling.

    Read-only listing of django_celery_results GroupResult rows with a
    computed per-group task count.
    """

    list_display = [
        'group_id',
        'date_created',
        'date_done',
        'result_count',
    ]
    list_filter = [
        ('date_created', RangeDateFilter),
        ('date_done', RangeDateFilter),
    ]
    search_fields = ['group_id', 'result']
    readonly_fields = [
        'group_id', 'date_created', 'date_done', 'content_type',
        'content_encoding', 'result'
    ]
    date_hierarchy = 'date_created'
    ordering = ['-date_created']

    fieldsets = (
        ('Group Information', {
            'fields': ('group_id', 'date_created', 'date_done')
        }),
        ('Result Details', {
            'fields': ('content_type', 'content_encoding', 'result'),
            'classes': ('collapse',)
        }),
    )

    def result_count(self, obj):
        """Count tasks in the group; '-' when the result can't be parsed."""
        if obj.result:
            try:
                import json
                # Stored result may already be deserialized or a JSON string.
                result_data = json.loads(obj.result) if isinstance(obj.result, str) else obj.result
                if isinstance(result_data, list):
                    return len(result_data)
            except (TypeError, ValueError):
                # json.loads raises JSONDecodeError (a ValueError subclass);
                # was a bare `except:` that also hid SystemExit etc.
                pass
        return '-'
    result_count.short_description = 'Task Count'

    def has_add_permission(self, request):
        """Disable manual group result creation"""
        return False

    def has_change_permission(self, request, obj=None):
        """Make read-only"""
        return False
|
||||
189
backend/igny8_core/admin/dashboard.py
Normal file
189
backend/igny8_core/admin/dashboard.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""
|
||||
Custom Admin Dashboard with Key Metrics
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.shortcuts import render
|
||||
from django.db.models import Count, Sum, Q
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
def admin_dashboard(request):
    """Custom admin dashboard with operational metrics.

    Aggregates account, site, content, billing, automation, WordPress sync
    and Celery metrics into a single template context and renders
    ``admin/dashboard.html``.  Model imports are deliberately local to avoid
    import cycles at module load time.
    """

    # Date ranges (dates, not datetimes; Django coerces in comparisons)
    today = timezone.now().date()
    week_ago = today - timedelta(days=7)
    month_ago = today - timedelta(days=30)

    # Account metrics
    from igny8_core.auth.models import Account, Site
    total_accounts = Account.objects.count()
    active_accounts = Account.objects.filter(status='active').count()
    low_credit_accounts = Account.objects.filter(
        status='active',
        credits__lt=100
    ).count()
    critical_credit_accounts = Account.objects.filter(
        status='active',
        credits__lt=10
    ).count()

    # Site metrics
    total_sites = Site.objects.count()
    active_sites = Site.objects.filter(is_active=True, status='active').count()

    # Content metrics
    from igny8_core.modules.writer.models import Content, Tasks
    content_this_week = Content.objects.filter(created_at__gte=week_ago).count()
    content_this_month = Content.objects.filter(created_at__gte=month_ago).count()
    tasks_pending = Tasks.objects.filter(status='pending').count()
    tasks_in_progress = Tasks.objects.filter(status='in_progress').count()

    # Billing metrics
    from igny8_core.business.billing.models import Payment, CreditTransaction
    pending_payments = Payment.objects.filter(status='pending_approval').count()
    payments_this_month = Payment.objects.filter(
        created_at__gte=month_ago,
        status='succeeded'
    ).aggregate(total=Sum('amount'))['total'] or 0

    credit_usage_this_month = CreditTransaction.objects.filter(
        created_at__gte=month_ago,
        transaction_type='deduction'
    ).aggregate(total=Sum('amount'))['total'] or 0

    # Automation metrics
    from igny8_core.business.automation.models import AutomationRun
    automation_running = AutomationRun.objects.filter(status='running').count()
    automation_failed = AutomationRun.objects.filter(
        status='failed',
        started_at__gte=week_ago
    ).count()

    # Success rate over the trailing week (0 when there were no runs)
    total_runs = AutomationRun.objects.filter(started_at__gte=week_ago).count()
    if total_runs > 0:
        success_runs = AutomationRun.objects.filter(
            started_at__gte=week_ago,
            status='completed'
        ).count()
        automation_success_rate = round((success_runs / total_runs) * 100, 1)
    else:
        automation_success_rate = 0

    # WordPress sync metrics
    from igny8_core.business.integration.models import SyncEvent
    sync_failed_today = SyncEvent.objects.filter(
        success=False,
        created_at__date=today
    ).count()
    sync_success_today = SyncEvent.objects.filter(
        success=True,
        created_at__date=today
    ).count()

    # Celery task metrics (best-effort: django_celery_results may be absent
    # or its table missing; show zeros rather than breaking the dashboard)
    try:
        from django_celery_results.models import TaskResult
        celery_failed = TaskResult.objects.filter(
            status='FAILURE',
            date_created__gte=week_ago
        ).count()
        celery_pending = TaskResult.objects.filter(status='PENDING').count()
    except Exception:
        # Was a bare ``except:``, which would also have swallowed
        # SystemExit/KeyboardInterrupt.
        celery_failed = 0
        celery_pending = 0

    # Generate operator-facing alerts, most severe conditions first
    alerts = []

    if critical_credit_accounts > 0:
        alerts.append({
            'level': 'error',
            'message': f'{critical_credit_accounts} account(s) have CRITICAL low credits (< 10)',
            'action': 'Review Accounts',
            'url': '/admin/igny8_core_auth/account/?credits__lt=10'
        })

    if low_credit_accounts > 0:
        alerts.append({
            'level': 'warning',
            'message': f'{low_credit_accounts} account(s) have low credits (< 100)',
            'action': 'Review Accounts',
            'url': '/admin/igny8_core_auth/account/?credits__lt=100'
        })

    if pending_payments > 0:
        alerts.append({
            'level': 'warning',
            'message': f'{pending_payments} payment(s) awaiting approval',
            'action': 'Approve Payments',
            'url': '/admin/billing/payment/?status__exact=pending_approval'
        })

    if automation_failed > 5:
        alerts.append({
            'level': 'error',
            'message': f'{automation_failed} automation runs failed this week',
            'action': 'View Failed Runs',
            'url': '/admin/automation/automationrun/?status__exact=failed'
        })

    if sync_failed_today > 0:
        alerts.append({
            'level': 'warning',
            'message': f'{sync_failed_today} WordPress sync failure(s) today',
            'action': 'View Sync Events',
            'url': '/admin/integration/syncevent/?success__exact=0'
        })

    if celery_failed > 10:
        alerts.append({
            'level': 'error',
            'message': f'{celery_failed} Celery tasks failed this week',
            'action': 'View Failed Tasks',
            'url': '/admin/django_celery_results/taskresult/?status__exact=FAILURE'
        })

    context = {
        'title': 'IGNY8 Dashboard',
        'site_title': 'IGNY8 Admin',
        'site_header': 'IGNY8 Administration',
        # Account metrics
        'total_accounts': total_accounts,
        'active_accounts': active_accounts,
        'low_credit_accounts': low_credit_accounts,
        'critical_credit_accounts': critical_credit_accounts,
        # Site metrics
        'total_sites': total_sites,
        'active_sites': active_sites,
        # Content metrics
        'content_this_week': content_this_week,
        'content_this_month': content_this_month,
        'tasks_pending': tasks_pending,
        'tasks_in_progress': tasks_in_progress,
        # Billing metrics
        'pending_payments': pending_payments,
        'payments_this_month': float(payments_this_month),
        # Deductions are stored negative; display magnitude only
        'credit_usage_this_month': abs(float(credit_usage_this_month)),
        # Automation metrics
        'automation_running': automation_running,
        'automation_failed': automation_failed,
        'automation_success_rate': automation_success_rate,
        # Integration metrics
        'sync_failed_today': sync_failed_today,
        'sync_success_today': sync_success_today,
        # Celery metrics
        'celery_failed': celery_failed,
        'celery_pending': celery_pending,
        # Alerts
        'alerts': alerts,
    }

    # Merge with admin context so the sidebar and header render correctly
    from igny8_core.admin.site import admin_site
    admin_context = admin_site.each_context(request)
    context.update(admin_context)

    return render(request, 'admin/dashboard.html', context)
|
||||
406
backend/igny8_core/admin/monitoring.py
Normal file
406
backend/igny8_core/admin/monitoring.py
Normal file
@@ -0,0 +1,406 @@
|
||||
"""
|
||||
Admin Monitoring Module - System Health, API Monitor, Debug Console
|
||||
Provides read-only monitoring and debugging tools for Django Admin
|
||||
"""
|
||||
from django.shortcuts import render
|
||||
from django.contrib.admin.views.decorators import staff_member_required
|
||||
from django.utils import timezone
|
||||
from django.db import connection
|
||||
from django.conf import settings
|
||||
import time
|
||||
import os
|
||||
|
||||
|
||||
def _check_database():
    """Probe the default database connection; return a check dict."""
    check = {
        'name': 'PostgreSQL Database',
        'status': 'unknown',
        'message': '',
        'details': {}
    }
    try:
        start = time.time()
        with connection.cursor() as cursor:
            cursor.execute("SELECT version()")
            version = cursor.fetchone()[0]
            cursor.execute("SELECT COUNT(*) FROM django_session")
            session_count = cursor.fetchone()[0]
        elapsed = (time.time() - start) * 1000
        check.update({
            'status': 'healthy',
            'message': f'Connected ({elapsed:.2f}ms)',
            'details': {
                'version': version.split('\n')[0],
                'response_time': f'{elapsed:.2f}ms',
                'active_sessions': session_count
            }
        })
    except Exception as e:
        check.update({
            'status': 'error',
            'message': f'Connection failed: {str(e)}'
        })
    return check


def _check_redis():
    """Probe the Redis cache backend; return a check dict."""
    check = {
        'name': 'Redis Cache',
        'status': 'unknown',
        'message': '',
        'details': {}
    }
    try:
        import redis
        from urllib.parse import urlsplit

        location = settings.CACHES['default'].get('LOCATION', '')
        if '://' in location:
            # URL-style LOCATION (e.g. redis://host:6379/0).  The previous
            # ``location.split(':')[0]`` returned the *scheme* ('redis') for
            # these, not the host.
            host = urlsplit(location).hostname or 'redis'
        elif ':' in location:
            # Plain "host:port" form
            host = location.split(':')[0]
        else:
            host = 'redis'

        r = redis.Redis(host=host, port=6379, db=0, socket_connect_timeout=2)
        start = time.time()
        r.ping()
        elapsed = (time.time() - start) * 1000

        info = r.info()
        check.update({
            'status': 'healthy',
            'message': f'Connected ({elapsed:.2f}ms)',
            'details': {
                'version': info.get('redis_version', 'unknown'),
                'uptime': f"{info.get('uptime_in_seconds', 0) // 3600}h",
                'connected_clients': info.get('connected_clients', 0),
                'used_memory': f"{info.get('used_memory_human', 'unknown')}",
                'response_time': f'{elapsed:.2f}ms'
            }
        })
    except Exception as e:
        check.update({
            'status': 'error',
            'message': f'Connection failed: {str(e)}'
        })
    return check


def _check_celery():
    """Ping Celery workers via the control API; return a check dict."""
    check = {
        'name': 'Celery Workers',
        'status': 'unknown',
        'message': '',
        'details': {}
    }
    try:
        from igny8_core.celery import app
        inspect = app.control.inspect(timeout=2)
        stats = inspect.stats()
        active = inspect.active()

        if stats:
            worker_count = len(stats)
            total_tasks = sum(len(tasks) for tasks in active.values()) if active else 0
            check.update({
                'status': 'healthy',
                'message': f'{worker_count} worker(s) active',
                'details': {
                    'workers': worker_count,
                    'active_tasks': total_tasks,
                    'worker_names': list(stats.keys())
                }
            })
        else:
            # No stats at all: broker reachable but no worker answered
            check.update({
                'status': 'warning',
                'message': 'No workers responding'
            })
    except Exception as e:
        check.update({
            'status': 'error',
            'message': f'Check failed: {str(e)}'
        })
    return check


def _check_filesystem():
    """Check free space under MEDIA_ROOT; return a check dict."""
    check = {
        'name': 'File System',
        'status': 'unknown',
        'message': '',
        'details': {}
    }
    try:
        import shutil
        media_root = settings.MEDIA_ROOT

        media_stat = shutil.disk_usage(media_root) if os.path.exists(media_root) else None

        if media_stat:
            free_gb = media_stat.free / (1024 ** 3)
            total_gb = media_stat.total / (1024 ** 3)
            used_percent = (media_stat.used / media_stat.total) * 100

            check.update({
                # Warn once the volume is 90% full
                'status': 'healthy' if used_percent < 90 else 'warning',
                'message': f'{free_gb:.1f}GB free of {total_gb:.1f}GB',
                'details': {
                    'media_root': media_root,
                    'free_space': f'{free_gb:.1f}GB',
                    'total_space': f'{total_gb:.1f}GB',
                    'used_percent': f'{used_percent:.1f}%'
                }
            })
        else:
            check.update({
                'status': 'warning',
                'message': 'Media directory not found'
            })
    except Exception as e:
        check.update({
            'status': 'error',
            'message': f'Check failed: {str(e)}'
        })
    return check


@staff_member_required
def system_health_dashboard(request):
    """
    System infrastructure health monitoring.

    Runs independent checks (database, Redis, Celery workers, file system),
    derives an overall status from the worst individual result, and renders
    the health template.
    """
    checks = [
        _check_database(),
        _check_redis(),
        _check_celery(),
        _check_filesystem(),
    ]

    context = {
        'page_title': 'System Health Monitor',
        'checked_at': timezone.now(),
        'checks': checks
    }

    # Overall system status: error dominates warning dominates healthy
    statuses = [check['status'] for check in checks]
    if 'error' in statuses:
        context['overall_status'] = 'error'
        context['overall_message'] = 'System has errors'
    elif 'warning' in statuses:
        context['overall_status'] = 'warning'
        context['overall_message'] = 'System has warnings'
    else:
        context['overall_status'] = 'healthy'
        context['overall_message'] = 'All systems operational'

    return render(request, 'admin/monitoring/system_health.html', context)
|
||||
|
||||
|
||||
def _probe_endpoint(client, endpoint):
    """Hit one endpoint with the Django test client and classify the result.

    Returns a dict with path, method, status ('healthy'/'warning'/'error'),
    status_code, response_time and a short human-readable message.
    """
    result = {
        'path': endpoint['path'],
        'method': endpoint['method'],
        'status': 'unknown',
        'status_code': None,
        'response_time': None,
        'message': ''
    }

    try:
        start = time.time()

        if endpoint['method'] == 'GET':
            response = client.get(endpoint['path'])
        else:
            response = client.post(endpoint['path'])

        elapsed = (time.time() - start) * 1000

        result.update({
            'status_code': response.status_code,
            'response_time': f'{elapsed:.2f}ms',
        })

        # Classify: 1xx/2xx OK; 401 is expected for auth-protected routes
        # probed anonymously; other 4xx is a client error; 5xx is a server
        # error.
        if response.status_code < 300:
            result['status'] = 'healthy'
            result['message'] = 'OK'
        elif response.status_code == 401 and endpoint.get('auth_required'):
            result['status'] = 'healthy'
            result['message'] = 'Auth required (expected)'
        elif response.status_code < 500:
            result['status'] = 'warning'
            result['message'] = 'Client error'
        else:
            result['status'] = 'error'
            result['message'] = 'Server error'

    except Exception as e:
        result.update({
            'status': 'error',
            'message': str(e)[:100]
        })

    return result


@staff_member_required
def api_monitor_dashboard(request):
    """
    API endpoint health monitoring.

    Probes a fixed list of key endpoints in-process via the Django test
    client and displays per-endpoint status plus aggregate health stats.
    """
    from django.test.client import Client

    # Endpoint groups to check (probed anonymously; auth_required marks
    # routes where a 401 counts as healthy)
    endpoint_configs = [
        {
            'name': 'Authentication',
            'endpoints': [
                {'path': '/api/v1/auth/check/', 'method': 'GET', 'auth_required': False},
            ]
        },
        {
            'name': 'System Settings',
            'endpoints': [
                {'path': '/api/v1/system/health/', 'method': 'GET', 'auth_required': False},
            ]
        },
        {
            'name': 'Planner Module',
            'endpoints': [
                {'path': '/api/v1/planner/keywords/', 'method': 'GET', 'auth_required': True},
            ]
        },
        {
            'name': 'Writer Module',
            'endpoints': [
                {'path': '/api/v1/writer/tasks/', 'method': 'GET', 'auth_required': True},
            ]
        },
        {
            'name': 'Billing',
            'endpoints': [
                {'path': '/api/v1/billing/credits/balance/', 'method': 'GET', 'auth_required': True},
            ]
        },
    ]

    client = Client()

    endpoint_groups = [
        {
            'name': group_config['name'],
            'endpoints': [
                _probe_endpoint(client, endpoint)
                for endpoint in group_config['endpoints']
            ],
        }
        for group_config in endpoint_configs
    ]

    # Aggregate stats across all probed endpoints
    all_endpoints = [ep for group in endpoint_groups for ep in group['endpoints']]
    total = len(all_endpoints)
    healthy = len([ep for ep in all_endpoints if ep['status'] == 'healthy'])
    warnings = len([ep for ep in all_endpoints if ep['status'] == 'warning'])
    errors = len([ep for ep in all_endpoints if ep['status'] == 'error'])

    context = {
        'page_title': 'API Monitor',
        'checked_at': timezone.now(),
        'endpoint_groups': endpoint_groups,
        'stats': {
            'total': total,
            'healthy': healthy,
            'warnings': warnings,
            'errors': errors,
            'health_percentage': (healthy / total * 100) if total > 0 else 0
        },
    }

    return render(request, 'admin/monitoring/api_monitor.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def debug_console(request):
    """
    Read-only system debug information.

    Collects environment, database, cache, Celery, file-storage, app and
    middleware configuration into titled sections for the debug template.
    """
    db_config = settings.DATABASES.get('default', {})
    cache_config = settings.CACHES.get('default', {})

    # Ordered (title, items) pairs; rendered top to bottom.
    section_specs = [
        ('Environment', {
            'DEBUG': settings.DEBUG,
            'ENVIRONMENT': os.getenv('ENVIRONMENT', 'not set'),
            'DJANGO_SETTINGS_MODULE': os.getenv('DJANGO_SETTINGS_MODULE', 'not set'),
            'ALLOWED_HOSTS': settings.ALLOWED_HOSTS,
            'TIME_ZONE': settings.TIME_ZONE,
            'USE_TZ': settings.USE_TZ,
        }),
        ('Database Configuration', {
            'ENGINE': db_config.get('ENGINE', 'not set'),
            'NAME': db_config.get('NAME', 'not set'),
            'HOST': db_config.get('HOST', 'not set'),
            'PORT': db_config.get('PORT', 'not set'),
            'CONN_MAX_AGE': db_config.get('CONN_MAX_AGE', 'not set'),
        }),
        ('Cache Configuration', {
            'BACKEND': cache_config.get('BACKEND', 'not set'),
            'LOCATION': cache_config.get('LOCATION', 'not set'),
            'KEY_PREFIX': cache_config.get('KEY_PREFIX', 'not set'),
        }),
        ('Celery Configuration', {
            'BROKER_URL': getattr(settings, 'CELERY_BROKER_URL', 'not set'),
            'RESULT_BACKEND': getattr(settings, 'CELERY_RESULT_BACKEND', 'not set'),
            'TASK_ALWAYS_EAGER': getattr(settings, 'CELERY_TASK_ALWAYS_EAGER', False),
        }),
        ('Media & Static Files', {
            'MEDIA_ROOT': settings.MEDIA_ROOT,
            'MEDIA_URL': settings.MEDIA_URL,
            'STATIC_ROOT': settings.STATIC_ROOT,
            'STATIC_URL': settings.STATIC_URL,
        }),
        ('Installed Applications', {
            'Total Apps': len(settings.INSTALLED_APPS),
            'Custom Apps': len([app for app in settings.INSTALLED_APPS if app.startswith('igny8_')]),
        }),
        ('Middleware', {
            'Total Middleware': len(settings.MIDDLEWARE),
        }),
    ]

    context = {
        'page_title': 'Debug Console',
        'checked_at': timezone.now(),
        'sections': [
            {'title': title, 'items': items}
            for title, items in section_specs
        ],
    }

    return render(request, 'admin/monitoring/debug_console.html', context)
|
||||
617
backend/igny8_core/admin/reports.py
Normal file
617
backend/igny8_core/admin/reports.py
Normal file
@@ -0,0 +1,617 @@
|
||||
"""
|
||||
Analytics & Reporting Views for IGNY8 Admin
|
||||
"""
|
||||
from django.contrib.admin.views.decorators import staff_member_required
|
||||
from django.shortcuts import render
|
||||
from django.db.models import Count, Sum, Avg, Q
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
import json
|
||||
|
||||
|
||||
@staff_member_required
def revenue_report(request):
    """Revenue and billing analytics.

    Shows monthly revenue for the last six calendar months, plan
    distribution, a payment-method breakdown and all-time revenue.
    """
    from igny8_core.business.billing.models import Payment
    from igny8_core.auth.models import Plan

    # Monthly revenue, oldest month first.  The previous implementation
    # stepped back with ``timedelta(days=30*i)``, which drifts off the first
    # of the month, and its ``month_end`` (day 28 + 4 days) overlapped the
    # next month.  Here each window is exactly [1st of month, 1st of next
    # month) at midnight.
    now = timezone.now()
    month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    months = []
    monthly_revenue = []

    for _ in range(6):
        # First day of the following month: day 28 + 4 days always lands in
        # the next month regardless of month length.
        next_month = (month_start.replace(day=28) + timedelta(days=4)).replace(day=1)

        revenue = Payment.objects.filter(
            status='succeeded',
            processed_at__gte=month_start,
            processed_at__lt=next_month
        ).aggregate(total=Sum('amount'))['total'] or 0

        months.insert(0, month_start.strftime('%b %Y'))
        monthly_revenue.insert(0, float(revenue))

        # Step to the first day of the previous month.
        month_start = (month_start - timedelta(days=1)).replace(day=1)

    # Plan distribution
    plan_distribution = Plan.objects.annotate(
        account_count=Count('accounts')
    ).values('name', 'account_count')

    # Payment method breakdown
    payment_methods = Payment.objects.filter(
        status='succeeded'
    ).values('payment_method').annotate(
        count=Count('id'),
        total=Sum('amount')
    ).order_by('-total')

    # Total revenue all time
    total_revenue = Payment.objects.filter(
        status='succeeded'
    ).aggregate(total=Sum('amount'))['total'] or 0

    context = {
        'title': 'Revenue Report',
        'months': json.dumps(months),
        'monthly_revenue': json.dumps(monthly_revenue),
        'plan_distribution': list(plan_distribution),
        'payment_methods': list(payment_methods),
        'total_revenue': float(total_revenue),
    }

    # Merge with admin context so the sidebar and header render correctly
    from igny8_core.admin.site import admin_site
    admin_context = admin_site.each_context(request)
    context.update(admin_context)

    return render(request, 'admin/reports/revenue.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def usage_report(request):
    """Credit usage and AI operations analytics.

    Breaks down credit consumption by operation type, lists the top ten
    consuming accounts, and shows model usage distribution plus a grand
    total of credits used.
    """
    from igny8_core.business.billing.models import CreditUsageLog

    # Usage by operation type.  Materialize the queryset before mutating the
    # row dicts: the old code mutated rows while iterating the queryset and
    # relied on Django's result cache returning the same dict objects from a
    # later ``list()`` call, which is fragile.
    usage_by_operation = list(
        CreditUsageLog.objects.values(
            'operation_type'
        ).annotate(
            total_credits=Sum('credits_used'),
            total_cost=Sum('cost_usd'),
            operation_count=Count('id')
        ).order_by('-total_credits')
    )

    # Format operation types as Title Case for display
    for usage in usage_by_operation:
        op_type = usage['operation_type']
        usage['operation_type'] = op_type.replace('_', ' ').title() if op_type else 'Unknown'

    # Top credit consumers
    top_consumers = CreditUsageLog.objects.values(
        'account__name'
    ).annotate(
        total_credits=Sum('credits_used'),
        operation_count=Count('id')
    ).order_by('-total_credits')[:10]

    # Model usage distribution
    model_usage = CreditUsageLog.objects.values(
        'model_used'
    ).annotate(
        usage_count=Count('id')
    ).order_by('-usage_count')

    # Total credits used (all time)
    total_credits = CreditUsageLog.objects.aggregate(
        total=Sum('credits_used')
    )['total'] or 0

    context = {
        'title': 'Usage Report',
        'usage_by_operation': usage_by_operation,
        'top_consumers': list(top_consumers),
        'model_usage': list(model_usage),
        'total_credits': int(total_credits),
    }

    # Merge with admin context so the sidebar and header render correctly
    from igny8_core.admin.site import admin_site
    admin_context = admin_site.each_context(request)
    context.update(admin_context)

    return render(request, 'admin/reports/usage.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def content_report(request):
    """Content production analytics.

    Shows content counts by type, a 30-day production timeline, average
    word counts per type, and the overall task completion rate.
    """
    from igny8_core.modules.writer.models import Content, Tasks

    # Content by type
    content_by_type = Content.objects.values(
        'content_type'
    ).annotate(count=Count('id')).order_by('-count')

    # Production timeline (last 30 days, oldest first).  One grouped query
    # replaces the previous 30 per-day COUNT queries, and the reference date
    # is computed once so the window cannot shift if the loop straddles
    # midnight.
    today = timezone.now().date()
    window_start = today - timedelta(days=29)
    per_day = {
        row['created_at__date']: row['n']
        for row in Content.objects.filter(
            created_at__date__gte=window_start
        ).values('created_at__date').annotate(n=Count('id'))
    }
    days = []
    daily_counts = []
    for offset in range(29, -1, -1):
        day = today - timedelta(days=offset)
        days.append(day.strftime('%m/%d'))
        daily_counts.append(per_day.get(day, 0))

    # Average word count by content type
    avg_words = Content.objects.values('content_type').annotate(
        avg_words=Avg('word_count')
    ).order_by('-avg_words')

    # Task completion rate (0 when there are no tasks)
    total_tasks = Tasks.objects.count()
    completed_tasks = Tasks.objects.filter(status='completed').count()
    completion_rate = (completed_tasks / total_tasks * 100) if total_tasks > 0 else 0

    # Total content produced
    total_content = Content.objects.count()

    context = {
        'title': 'Content Production Report',
        'content_by_type': list(content_by_type),
        'days': json.dumps(days),
        'daily_counts': json.dumps(daily_counts),
        'avg_words': list(avg_words),
        'completion_rate': round(completion_rate, 1),
        'total_content': total_content,
        'total_tasks': total_tasks,
        'completed_tasks': completed_tasks,
    }

    # Merge with admin context so the sidebar and header render correctly
    from igny8_core.admin.site import admin_site
    admin_context = admin_site.each_context(request)
    context.update(admin_context)

    return render(request, 'admin/reports/content.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def data_quality_report(request):
    """Check data quality and integrity"""
    from igny8_core.modules.writer.models import Content, Tasks
    from igny8_core.auth.models import Account
    from igny8_core.modules.planner.models import Keywords

    issues = []

    def add_issue(severity, issue_type, count, description, action_url):
        # Record a finding only when at least one offending row exists.
        if count > 0:
            issues.append({
                'severity': severity,
                'type': issue_type,
                'count': count,
                'description': description,
                'action_url': action_url,
            })

    # Orphaned content (no site)
    add_issue(
        'warning', 'Orphaned Records',
        Content.objects.filter(site__isnull=True).count(),
        'Content items without assigned site',
        '/admin/writer/content/?site__isnull=True',
    )

    # Tasks without clusters
    add_issue(
        'info', 'Missing Relationships',
        Tasks.objects.filter(cluster__isnull=True).count(),
        'Tasks without assigned cluster',
        '/admin/writer/tasks/?cluster__isnull=True',
    )

    # Accounts with negative credits
    add_issue(
        'error', 'Data Integrity',
        Account.objects.filter(credits__lt=0).count(),
        'Accounts with negative credit balance',
        '/admin/igny8_core_auth/account/?credits__lt=0',
    )

    # Duplicate keywords for the same site/sector
    duplicate_groups = Keywords.objects.values(
        'seed_keyword', 'site', 'sector'
    ).annotate(count=Count('id')).filter(count__gt=1).count()
    add_issue(
        'warning', 'Duplicates',
        duplicate_groups,
        'Duplicate keywords for same site/sector',
        '/admin/planner/keywords/',
    )

    # Content missing SEO metadata (empty or NULL title/description)
    missing_seo = Content.objects.filter(
        Q(meta_title__isnull=True) | Q(meta_title='') |
        Q(meta_description__isnull=True) | Q(meta_description='')
    ).count()
    add_issue(
        'info', 'Incomplete Data',
        missing_seo,
        'Content missing SEO metadata',
        '/admin/writer/content/',
    )

    context = {
        'title': 'Data Quality Report',
        'issues': issues,
        'total_issues': len(issues),
    }

    # Merge with admin context so the sidebar and header render correctly
    from igny8_core.admin.site import admin_site
    context.update(admin_site.each_context(request))

    return render(request, 'admin/reports/data_quality.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def token_usage_report(request):
    """Render the token-usage analytics report.

    Aggregates ``CreditUsageLog`` rows from the last ``?days=N`` days
    (default 30, non-numeric input falls back to 30) and breaks token
    consumption down by model, operation type, account, day, and hour of
    day.  Renders ``admin/reports/token_usage.html``.
    """
    from igny8_core.business.billing.models import CreditUsageLog
    from decimal import Decimal
    # ExtractHour replaces the deprecated QuerySet.extra() the report
    # previously used for the hourly breakdown.
    from django.db.models.functions import ExtractHour

    # Lookback window from the querystring.
    try:
        days = int(request.GET.get('days', '30'))
    except ValueError:
        days = 30

    start_date = timezone.now() - timedelta(days=days)

    # Base queryset - include all records (tokens may be 0 for historical data).
    logs = CreditUsageLog.objects.filter(created_at__gte=start_date)

    # Overall totals in a single aggregate query (previously one query
    # per field).
    totals = logs.aggregate(
        tokens_input=Sum('tokens_input'),
        tokens_output=Sum('tokens_output'),
    )
    total_tokens_input = totals['tokens_input'] or 0
    total_tokens_output = totals['tokens_output'] or 0
    total_tokens = total_tokens_input + total_tokens_output
    total_calls = logs.count()
    avg_tokens_per_call = total_tokens / total_calls if total_calls > 0 else 0

    # Token usage by model.  NOTE(review): the top-10 slice is taken by
    # input tokens *before* the Python-side re-sort by combined tokens,
    # so a model with huge output but tiny input could be missed -
    # confirm this is intended.
    token_by_model = list(logs.values('model_used').annotate(
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        call_count=Count('id'),
        total_cost=Sum('cost_usd'),
    ).order_by('-total_tokens_input')[:10])
    for model in token_by_model:
        model['total_tokens'] = (model['total_tokens_input'] or 0) + (model['total_tokens_output'] or 0)
        model['avg_tokens'] = model['total_tokens'] / model['call_count'] if model['call_count'] > 0 else 0
        model['model'] = model['model_used']  # alias used by the template
    token_by_model.sort(key=lambda row: row['total_tokens'], reverse=True)

    # Token usage by operation type (same slice-then-resort caveat).
    token_by_function = list(logs.values('operation_type').annotate(
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        call_count=Count('id'),
        total_cost=Sum('cost_usd'),
    ).order_by('-total_tokens_input')[:10])
    for func in token_by_function:
        func['total_tokens'] = (func['total_tokens_input'] or 0) + (func['total_tokens_output'] or 0)
        func['avg_tokens'] = func['total_tokens'] / func['call_count'] if func['call_count'] > 0 else 0
        # Human-readable label, e.g. "generate_article" -> "Generate Article".
        func['function'] = func['operation_type'].replace('_', ' ').title() if func['operation_type'] else 'Unknown'
    token_by_function.sort(key=lambda row: row['total_tokens'], reverse=True)

    # Top token-consuming accounts.
    token_by_account = list(logs.values('account__name', 'account_id').annotate(
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        call_count=Count('id'),
        total_cost=Sum('cost_usd'),
    ).order_by('-total_tokens_input')[:15])
    for account in token_by_account:
        account['total_tokens'] = (account['total_tokens_input'] or 0) + (account['total_tokens_output'] or 0)
    token_by_account.sort(key=lambda row: row['total_tokens'], reverse=True)

    # Daily token trend, oldest -> newest.  NOTE(review): this issues one
    # aggregate query per day; a TruncDate-grouped query would do it in
    # one, but that changes the query shape - left as-is.
    daily_data = []
    daily_labels = []
    today = timezone.now().date()
    for i in range(days):
        day = today - timedelta(days=days - i - 1)
        day_totals = logs.filter(created_at__date=day).aggregate(
            tokens_input=Sum('tokens_input'),
            tokens_output=Sum('tokens_output'),
        )
        day_tokens = (day_totals['tokens_input'] or 0) + (day_totals['tokens_output'] or 0)
        daily_labels.append(day.strftime('%m/%d'))
        daily_data.append(int(day_tokens))

    # CreditUsageLog has no error/status field, so every call is treated
    # as successful.
    success_rate = 100.0
    successful_tokens = total_tokens
    wasted_tokens = 0

    # Single synthetic status row kept for template compatibility.
    tokens_by_status = [{
        'error': None,
        'total_tokens': total_tokens,
        'call_count': total_calls,
        'avg_tokens': avg_tokens_per_call,
    }]

    # Token usage by hour of day.
    hourly_usage = list(
        logs.annotate(hour=ExtractHour('created_at'))
        .values('hour')
        .annotate(
            token_input=Sum('tokens_input'),
            token_output=Sum('tokens_output'),
            call_count=Count('id'),
        )
        .order_by('hour')
    )
    for hour_data in hourly_usage:
        hour_data['token_count'] = (hour_data['token_input'] or 0) + (hour_data['token_output'] or 0)

    # Blended cost efficiency.
    total_cost = logs.aggregate(total=Sum('cost_usd'))['total'] or Decimal('0.00')
    cost_per_1k_tokens = float(total_cost) / (total_tokens / 1000) if total_tokens > 0 else 0.0

    context = {
        'title': 'Token Usage Report',
        'days_filter': days,
        'total_tokens': int(total_tokens),
        'total_calls': total_calls,
        'avg_tokens_per_call': round(avg_tokens_per_call, 2),
        'token_by_model': token_by_model,
        'token_by_function': token_by_function,
        'token_by_account': token_by_account,
        'daily_labels': json.dumps(daily_labels),
        'daily_data': json.dumps(daily_data),
        'tokens_by_status': tokens_by_status,
        'success_rate': round(success_rate, 2),
        'successful_tokens': int(successful_tokens),
        'wasted_tokens': int(wasted_tokens),
        'hourly_usage': hourly_usage,
        'total_cost': float(total_cost),
        'cost_per_1k_tokens': float(cost_per_1k_tokens),
        'current_app': '_reports',  # For active menu state
    }

    # Merge in the standard admin context (user, available apps, etc.).
    from igny8_core.admin.site import admin_site
    context.update(admin_site.each_context(request))

    return render(request, 'admin/reports/token_usage.html', context)
|
||||
|
||||
|
||||
@staff_member_required
def ai_cost_analysis(request):
    """Render the multi-dimensional AI cost & margin analysis report.

    Over the last ``?days=N`` days (default 30) this computes total
    cost, revenue (credits charged * configured credit price), margin,
    per-model / per-account / per-operation breakdowns, daily trends, a
    naive monthly projection, and cost anomalies.  Renders
    ``admin/reports/ai_cost_analysis.html``.
    """
    from igny8_core.business.billing.models import CreditUsageLog, BillingConfiguration
    from decimal import Decimal
    # ExtractHour replaces the deprecated QuerySet.extra() previously
    # used for the hourly breakdown.
    from django.db.models.functions import ExtractHour

    # Lookback window from the querystring.
    try:
        days = int(request.GET.get('days', '30'))
    except ValueError:
        days = 30

    start_date = timezone.now() - timedelta(days=days)

    # Only rows that actually carry cost data.
    logs = CreditUsageLog.objects.filter(
        created_at__gte=start_date,
        cost_usd__isnull=False,
    )

    # Overall metrics in a single aggregate query (previously one query
    # per field).
    overall = logs.aggregate(
        cost=Sum('cost_usd'),
        avg_cost=Avg('cost_usd'),
        tokens_input=Sum('tokens_input'),
        tokens_output=Sum('tokens_output'),
        credits=Sum('credits_used'),
    )
    total_cost = overall['cost'] or Decimal('0.00')
    total_calls = logs.count()
    avg_cost_per_call = overall['avg_cost'] or Decimal('0.00')
    total_tokens_input = overall['tokens_input'] or 0
    total_tokens_output = overall['tokens_output'] or 0
    total_tokens = total_tokens_input + total_tokens_output
    total_credits_charged = overall['credits'] or 0

    # Revenue & margin: credits charged * configured credit price.
    billing_config = BillingConfiguration.get_config()
    total_revenue = Decimal(total_credits_charged) * billing_config.default_credit_price_usd
    total_margin = total_revenue - total_cost
    margin_percentage = float((total_margin / total_revenue * 100) if total_revenue > 0 else 0)

    # Per-unit margins.  Denominators are coerced to float so a Decimal
    # credit total cannot raise TypeError on float / Decimal division.
    margin_per_1m_tokens = float(total_margin) / (total_tokens / 1_000_000) if total_tokens > 0 else 0
    margin_per_1k_credits = (
        float(total_margin) / (float(total_credits_charged) / 1000)
        if total_credits_charged > 0 else 0
    )

    # Cost by model.  credits_used is grouped in the same query so the
    # per-model margin needs no extra query (the previous version issued
    # one filtered aggregate per model - an N+1 pattern).
    cost_by_model = list(logs.values('model_used').annotate(
        total_cost=Sum('cost_usd'),
        call_count=Count('id'),
        avg_cost=Avg('cost_usd'),
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        total_credits=Sum('credits_used'),
    ).order_by('-total_cost'))

    for model in cost_by_model:
        model['total_tokens'] = (model['total_tokens_input'] or 0) + (model['total_tokens_output'] or 0)
        model['avg_tokens'] = model['total_tokens'] / model['call_count'] if model['call_count'] > 0 else 0
        model['model'] = model['model_used']  # alias used by the template
        if model['total_tokens']:
            model['cost_per_1k_tokens'] = float(model['total_cost']) / (model['total_tokens'] / 1000)
        else:
            model['cost_per_1k_tokens'] = 0

        # Margin for this model from the grouped credit total.
        model_revenue = Decimal(model['total_credits'] or 0) * billing_config.default_credit_price_usd
        model_margin = model_revenue - model['total_cost']
        model['revenue'] = float(model_revenue)
        model['margin'] = float(model_margin)
        model['margin_percentage'] = float((model_margin / model_revenue * 100) if model_revenue > 0 else 0)

    # Top-spending accounts.
    cost_by_account = list(logs.values('account__name', 'account_id').annotate(
        total_cost=Sum('cost_usd'),
        call_count=Count('id'),
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        avg_cost=Avg('cost_usd'),
    ).order_by('-total_cost')[:15])
    for account in cost_by_account:
        account['total_tokens'] = (account['total_tokens_input'] or 0) + (account['total_tokens_output'] or 0)

    # Cost by operation type, with per-operation margin (credits grouped
    # in the same query, again avoiding N+1).
    cost_by_function = list(logs.values('operation_type').annotate(
        total_cost=Sum('cost_usd'),
        call_count=Count('id'),
        avg_cost=Avg('cost_usd'),
        total_tokens_input=Sum('tokens_input'),
        total_tokens_output=Sum('tokens_output'),
        total_credits=Sum('credits_used'),
    ).order_by('-total_cost')[:10])
    for func in cost_by_function:
        func['total_tokens'] = (func['total_tokens_input'] or 0) + (func['total_tokens_output'] or 0)
        # Human-readable label, e.g. "generate_article" -> "Generate Article".
        func['function'] = func['operation_type'].replace('_', ' ').title() if func['operation_type'] else 'Unknown'

        func_revenue = Decimal(func['total_credits'] or 0) * billing_config.default_credit_price_usd
        func_margin = func_revenue - func['total_cost']
        func['revenue'] = float(func_revenue)
        func['margin'] = float(func_margin)
        func['margin_percentage'] = float((func_margin / func_revenue * 100) if func_revenue > 0 else 0)

    # Daily cost / call-count series, oldest -> newest.  NOTE(review):
    # one query pair per day; acceptable for a 30-day admin report.
    daily_cost_data = []
    daily_cost_labels = []
    daily_call_data = []
    today = timezone.now().date()
    for i in range(days):
        day = today - timedelta(days=days - i - 1)
        day_logs = logs.filter(created_at__date=day)
        day_cost = day_logs.aggregate(total=Sum('cost_usd'))['total'] or Decimal('0.00')
        daily_cost_labels.append(day.strftime('%m/%d'))
        daily_cost_data.append(float(day_cost))
        daily_call_data.append(day_logs.count())

    # Naive projection: average of the last 7 days extrapolated to 30.
    if len(daily_cost_data) > 7:
        projected_monthly = (sum(daily_cost_data[-7:]) / 7) * 30
    else:
        projected_monthly = 0

    # CreditUsageLog does not record failures, so no cost is attributed
    # to failed requests.
    failed_cost = Decimal('0.00')

    # Anomalies: individual calls costing more than 3x the average call.
    if avg_cost_per_call > 0:
        anomaly_threshold = float(avg_cost_per_call) * 3
        anomalies = list(logs.filter(cost_usd__gt=anomaly_threshold).values(
            'model_used', 'operation_type', 'account__name',
            'cost_usd', 'tokens_input', 'tokens_output', 'created_at',
        ).order_by('-cost_usd')[:10])
        # Template-friendly aliases and combined token count.
        for anomaly in anomalies:
            anomaly['model'] = anomaly['model_used']
            anomaly['function'] = anomaly['operation_type'].replace('_', ' ').title() if anomaly['operation_type'] else 'Unknown'
            anomaly['cost'] = anomaly['cost_usd']
            anomaly['tokens'] = (anomaly['tokens_input'] or 0) + (anomaly['tokens_output'] or 0)
    else:
        anomalies = []

    # Flat comparison rows for the model matrix table.
    model_comparison = [{
        'model': row['model'],
        'total_cost': float(row['total_cost']),
        'calls': row['call_count'],
        'avg_cost': float(row['avg_cost']),
        'total_tokens': row['total_tokens'],
        'cost_per_1k': row['cost_per_1k_tokens'],
    } for row in cost_by_model]

    # Share of total spend per model.
    if total_cost > 0:
        for item in cost_by_model:
            item['cost_percentage'] = float((item['total_cost'] / total_cost) * 100)

    # Cost by hour of day.
    hourly_cost = list(
        logs.annotate(hour=ExtractHour('created_at'))
        .values('hour')
        .annotate(total_cost=Sum('cost_usd'), call_count=Count('id'))
        .order_by('hour')
    )

    # No error tracking, so all spend counts as successful.
    successful_cost = total_cost
    efficiency_score = 100.0

    context = {
        'title': 'AI Cost & Margin Analysis',
        'days_filter': days,
        'total_cost': float(total_cost),
        'total_revenue': float(total_revenue),
        'total_margin': float(total_margin),
        'margin_percentage': round(margin_percentage, 2),
        'margin_per_1m_tokens': round(margin_per_1m_tokens, 4),
        'margin_per_1k_credits': round(margin_per_1k_credits, 4),
        'total_credits_charged': total_credits_charged,
        'credit_price': float(billing_config.default_credit_price_usd),
        'total_calls': total_calls,
        'avg_cost_per_call': float(avg_cost_per_call),
        'total_tokens': int(total_tokens),
        'cost_by_model': cost_by_model,
        'cost_by_account': cost_by_account,
        'cost_by_function': cost_by_function,
        'daily_cost_labels': json.dumps(daily_cost_labels),
        'daily_cost_data': json.dumps(daily_cost_data),
        'daily_call_data': json.dumps(daily_call_data),
        'projected_monthly': round(projected_monthly, 2),
        'failed_cost': float(failed_cost),
        'wasted_percentage': float((failed_cost / total_cost * 100) if total_cost > 0 else 0),
        'anomalies': anomalies,
        'model_comparison': model_comparison,
        'hourly_cost': hourly_cost,
        'efficiency_score': round(efficiency_score, 2),
        'successful_cost': float(successful_cost),
        'current_app': '_reports',  # For active menu state
    }

    # Merge in the standard admin context (user, available apps, etc.).
    from igny8_core.admin.site import admin_site
    context.update(admin_site.each_context(request))

    return render(request, 'admin/reports/ai_cost_analysis.html', context)
|
||||
@@ -1,184 +1,63 @@
|
||||
"""
|
||||
Custom AdminSite for IGNY8 to organize models into proper groups
|
||||
Custom AdminSite for IGNY8 using Unfold theme.
|
||||
|
||||
SIMPLIFIED VERSION - Navigation is now handled via UNFOLD settings in settings.py
|
||||
This file only handles:
|
||||
1. Custom URLs for dashboard, reports, and monitoring pages
|
||||
2. Index redirect to dashboard
|
||||
|
||||
All sidebar navigation is configured in settings.py under UNFOLD["SIDEBAR"]["navigation"]
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.contrib.admin.apps import AdminConfig
|
||||
from django.apps import apps
|
||||
from django.urls import path
|
||||
from django.shortcuts import redirect
|
||||
from unfold.sites import UnfoldAdminSite
|
||||
|
||||
|
||||
class Igny8AdminSite(admin.AdminSite):
|
||||
class Igny8AdminSite(UnfoldAdminSite):
|
||||
"""
|
||||
Custom AdminSite that organizes models into the planned groups:
|
||||
1. Billing & Tenancy
|
||||
2. Sites & Users
|
||||
3. Global Reference Data
|
||||
4. Planner
|
||||
5. Writer Module
|
||||
6. Thinker Module
|
||||
7. System Configuration
|
||||
Custom AdminSite based on Unfold.
|
||||
Navigation is handled via UNFOLD settings - this just adds custom URLs.
|
||||
"""
|
||||
site_header = 'IGNY8 Administration'
|
||||
site_title = 'IGNY8 Admin'
|
||||
index_title = 'IGNY8 Administration'
|
||||
|
||||
def get_app_list(self, request):
|
||||
"""
|
||||
Customize the app list to organize models into proper groups
|
||||
"""
|
||||
# Get the default app list
|
||||
app_dict = self._build_app_dict(request)
|
||||
|
||||
# Define our custom groups with their models (using object_name)
|
||||
custom_groups = {
|
||||
'Billing & Tenancy': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Plan'),
|
||||
('igny8_core_auth', 'Account'),
|
||||
('igny8_core_auth', 'Subscription'),
|
||||
('billing', 'CreditTransaction'),
|
||||
('billing', 'CreditUsageLog'),
|
||||
('billing', 'Invoice'),
|
||||
('billing', 'Payment'),
|
||||
('billing', 'CreditPackage'),
|
||||
('billing', 'PaymentMethodConfig'),
|
||||
('billing', 'AccountPaymentMethod'),
|
||||
('billing', 'CreditCostConfig'),
|
||||
],
|
||||
},
|
||||
'Sites & Users': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Site'),
|
||||
('igny8_core_auth', 'User'),
|
||||
('igny8_core_auth', 'SiteUserAccess'),
|
||||
('igny8_core_auth', 'PasswordResetToken'),
|
||||
('igny8_core_auth', 'Sector'),
|
||||
],
|
||||
},
|
||||
'Global Reference Data': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Industry'),
|
||||
('igny8_core_auth', 'IndustrySector'),
|
||||
('igny8_core_auth', 'SeedKeyword'),
|
||||
('site_building', 'BusinessType'),
|
||||
('site_building', 'AudienceProfile'),
|
||||
('site_building', 'BrandPersonality'),
|
||||
('site_building', 'HeroImageryDirection'),
|
||||
],
|
||||
},
|
||||
'Planner': {
|
||||
'models': [
|
||||
('planner', 'Keywords'),
|
||||
('planner', 'Clusters'),
|
||||
('planner', 'ContentIdeas'),
|
||||
],
|
||||
},
|
||||
'Writer Module': {
|
||||
'models': [
|
||||
('writer', 'Tasks'),
|
||||
('writer', 'Content'),
|
||||
('writer', 'Images'),
|
||||
('writer', 'ContentTaxonomy'),
|
||||
('writer', 'ContentAttribute'),
|
||||
('writer', 'ContentTaxonomyRelation'),
|
||||
('writer', 'ContentClusterMap'),
|
||||
],
|
||||
},
|
||||
'Thinker Module': {
|
||||
'models': [
|
||||
('system', 'AIPrompt'),
|
||||
('system', 'AuthorProfile'),
|
||||
('system', 'Strategy'),
|
||||
('ai', 'AITaskLog'),
|
||||
],
|
||||
},
|
||||
'System Configuration': {
|
||||
'models': [
|
||||
('system', 'IntegrationSettings'),
|
||||
('system', 'SystemLog'),
|
||||
('system', 'SystemStatus'),
|
||||
('system', 'SystemSettings'),
|
||||
('system', 'AccountSettings'),
|
||||
('system', 'UserSettings'),
|
||||
('system', 'ModuleSettings'),
|
||||
('system', 'AISettings'),
|
||||
('system', 'ModuleEnableSettings'),
|
||||
# Automation config lives under the automation app - include here
|
||||
('automation', 'AutomationConfig'),
|
||||
('automation', 'AutomationRun'),
|
||||
],
|
||||
},
|
||||
'Integrations & Sync': {
|
||||
'models': [
|
||||
('integration', 'SiteIntegration'),
|
||||
('integration', 'SyncEvent'),
|
||||
],
|
||||
},
|
||||
'Publishing': {
|
||||
'models': [
|
||||
('publishing', 'PublishingRecord'),
|
||||
('publishing', 'DeploymentRecord'),
|
||||
],
|
||||
},
|
||||
'Optimization': {
|
||||
'models': [
|
||||
('optimization', 'OptimizationTask'),
|
||||
],
|
||||
},
|
||||
'Django Internals': {
|
||||
'models': [
|
||||
('admin', 'LogEntry'),
|
||||
('auth', 'Group'),
|
||||
('auth', 'Permission'),
|
||||
('contenttypes', 'ContentType'),
|
||||
('sessions', 'Session'),
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
# Build the custom app list
|
||||
app_list = []
|
||||
|
||||
for group_name, group_config in custom_groups.items():
|
||||
group_models = []
|
||||
|
||||
for app_label, model_name in group_config['models']:
|
||||
# Find the model in app_dict
|
||||
if app_label in app_dict:
|
||||
app_data = app_dict[app_label]
|
||||
# Look for the model in the app's models
|
||||
for model in app_data.get('models', []):
|
||||
if model['object_name'] == model_name:
|
||||
group_models.append(model)
|
||||
break
|
||||
|
||||
# Only add the group if it has models
|
||||
if group_models:
|
||||
app_list.append({
|
||||
'name': group_name,
|
||||
'app_label': group_name.lower().replace(' ', '_').replace('&', ''),
|
||||
'app_url': None,
|
||||
'has_module_perms': True,
|
||||
'models': group_models,
|
||||
})
|
||||
|
||||
# Sort the app list by our custom order
|
||||
order = [
|
||||
'Billing & Tenancy',
|
||||
'Sites & Users',
|
||||
'Global Reference Data',
|
||||
'Planner',
|
||||
'Writer Module',
|
||||
'Thinker Module',
|
||||
'System Configuration',
|
||||
'Integrations & Sync',
|
||||
'Publishing',
|
||||
'Optimization',
|
||||
'Django Internals',
|
||||
def get_urls(self):
|
||||
"""Add custom URLs for dashboard, reports, and monitoring pages"""
|
||||
from .dashboard import admin_dashboard
|
||||
from .reports import (
|
||||
revenue_report, usage_report, content_report, data_quality_report,
|
||||
token_usage_report, ai_cost_analysis
|
||||
)
|
||||
from .monitoring import (
|
||||
system_health_dashboard, api_monitor_dashboard, debug_console
|
||||
)
|
||||
|
||||
urls = super().get_urls()
|
||||
custom_urls = [
|
||||
# Dashboard
|
||||
path('dashboard/', self.admin_view(admin_dashboard), name='dashboard'),
|
||||
|
||||
# Reports
|
||||
path('reports/revenue/', self.admin_view(revenue_report), name='report_revenue'),
|
||||
path('reports/usage/', self.admin_view(usage_report), name='report_usage'),
|
||||
path('reports/content/', self.admin_view(content_report), name='report_content'),
|
||||
path('reports/data-quality/', self.admin_view(data_quality_report), name='report_data_quality'),
|
||||
path('reports/token-usage/', self.admin_view(token_usage_report), name='report_token_usage'),
|
||||
path('reports/ai-cost-analysis/', self.admin_view(ai_cost_analysis), name='report_ai_cost_analysis'),
|
||||
|
||||
# Monitoring
|
||||
path('monitoring/system-health/', self.admin_view(system_health_dashboard), name='monitoring_system_health'),
|
||||
path('monitoring/api-monitor/', self.admin_view(api_monitor_dashboard), name='monitoring_api_monitor'),
|
||||
path('monitoring/debug-console/', self.admin_view(debug_console), name='monitoring_debug_console'),
|
||||
]
|
||||
|
||||
app_list.sort(key=lambda x: order.index(x['name']) if x['name'] in order else 999)
|
||||
|
||||
return app_list
|
||||
|
||||
return custom_urls + urls
|
||||
|
||||
def index(self, request, extra_context=None):
|
||||
"""Redirect admin index to custom dashboard"""
|
||||
return redirect('admin:dashboard')
|
||||
|
||||
|
||||
# Instantiate custom admin site
|
||||
admin_site = Igny8AdminSite(name='admin')
|
||||
|
||||
179
backend/igny8_core/admin/site_backup.py
Normal file
179
backend/igny8_core/admin/site_backup.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
Custom AdminSite for IGNY8 to organize models into proper groups using Unfold
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.contrib.admin.apps import AdminConfig
|
||||
from django.apps import apps
|
||||
from django.urls import path, reverse_lazy
|
||||
from django.shortcuts import redirect
|
||||
from unfold.admin import ModelAdmin as UnfoldModelAdmin
|
||||
from unfold.sites import UnfoldAdminSite
|
||||
|
||||
|
||||
class Igny8AdminSite(UnfoldAdminSite):
    """
    Custom AdminSite based on Unfold that organizes registered models
    into business-oriented groups via :meth:`get_app_list`.
    """
    site_header = 'IGNY8 Administration'
    site_title = 'IGNY8 Admin'
    index_title = 'IGNY8 Administration'

    def get_urls(self):
        """Return the stock admin URLs (no custom dashboard here)."""
        return super().get_urls()

    def get_app_list(self, request):
        """
        Re-group the registered models into logical business sections.

        Models are matched by ``(app_label, object_name)``; a group is
        only shown if at least one of its models is visible to the
        requesting user, and any model not listed below is omitted from
        the index entirely.
        """
        # Default per-app dict of registered, permitted models.
        app_dict = self._build_app_dict(request)

        # Groups of (app_label, model object_name), keyed by display
        # name.  Emoji prefixes are purely cosmetic.
        # Fixed: 'Monitoring & Tasks' and 'Django System' previously
        # contained U+FFFD replacement characters from a bad encoding
        # round-trip, which also broke the sort-order lookup below.
        custom_groups = {
            '💰 Billing & Accounts': {
                'models': [
                    ('igny8_core_auth', 'Plan'),
                    ('billing', 'PlanLimitUsage'),
                    ('igny8_core_auth', 'Account'),
                    ('igny8_core_auth', 'Subscription'),
                    ('billing', 'Invoice'),
                    ('billing', 'Payment'),
                    ('billing', 'CreditTransaction'),
                    ('billing', 'CreditUsageLog'),
                    ('billing', 'CreditPackage'),
                    ('billing', 'PaymentMethodConfig'),
                    ('billing', 'AccountPaymentMethod'),
                    ('billing', 'CreditCostConfig'),
                ],
            },
            '👥 Sites & Users': {
                'models': [
                    ('igny8_core_auth', 'Site'),
                    ('igny8_core_auth', 'Sector'),
                    ('igny8_core_auth', 'User'),
                    ('igny8_core_auth', 'SiteUserAccess'),
                    ('igny8_core_auth', 'PasswordResetToken'),
                ],
            },
            '📚 Content Management': {
                'models': [
                    ('writer', 'Content'),
                    ('writer', 'Tasks'),
                    ('writer', 'Images'),
                    ('writer', 'ContentTaxonomy'),
                    ('writer', 'ContentAttribute'),
                    ('writer', 'ContentTaxonomyRelation'),
                    ('writer', 'ContentClusterMap'),
                ],
            },
            '🎯 Planning & Strategy': {
                'models': [
                    ('planner', 'Clusters'),
                    ('planner', 'Keywords'),
                    ('planner', 'ContentIdeas'),
                    ('system', 'Strategy'),
                ],
            },
            '🔗 Integrations & Publishing': {
                'models': [
                    ('integration', 'SiteIntegration'),
                    ('integration', 'SyncEvent'),
                    ('publishing', 'PublishingRecord'),
                    ('publishing', 'DeploymentRecord'),
                ],
            },
            '🤖 AI & Automation': {
                'models': [
                    ('ai', 'AITaskLog'),
                    ('system', 'AIPrompt'),
                    ('automation', 'AutomationConfig'),
                    ('automation', 'AutomationRun'),
                    ('optimization', 'OptimizationTask'),
                ],
            },
            '🌍 Global Reference Data': {
                'models': [
                    ('igny8_core_auth', 'Industry'),
                    ('igny8_core_auth', 'IndustrySector'),
                    ('igny8_core_auth', 'SeedKeyword'),
                ],
            },
            '⚙️ System Configuration': {
                'models': [
                    ('system', 'IntegrationSettings'),
                    ('system', 'AuthorProfile'),
                    ('system', 'SystemSettings'),
                    ('system', 'AccountSettings'),
                    ('system', 'UserSettings'),
                    ('system', 'ModuleSettings'),
                    ('system', 'AISettings'),
                    ('system', 'ModuleEnableSettings'),
                    ('system', 'SystemLog'),
                    ('system', 'SystemStatus'),
                ],
            },
            '📊 Monitoring & Tasks': {
                'models': [
                    ('django_celery_results', 'TaskResult'),
                    ('django_celery_results', 'GroupResult'),
                ],
            },
            '🔧 Django System': {
                'models': [
                    ('admin', 'LogEntry'),
                    ('auth', 'Group'),
                    ('auth', 'Permission'),
                    ('contenttypes', 'ContentType'),
                    ('sessions', 'Session'),
                ],
            },
        }

        # Build the custom app list from the groups above.
        app_list = []
        for group_name, group_config in custom_groups.items():
            group_models = []
            for app_label, model_name in group_config['models']:
                # Look the model up in the default app dict; silently
                # skip models the user cannot see or that are not
                # registered.
                app_data = app_dict.get(app_label)
                if not app_data:
                    continue
                for model in app_data.get('models', []):
                    if model['object_name'] == model_name:
                        group_models.append(model)
                        break

            # Only show the group if it has visible models.
            if group_models:
                app_list.append({
                    'name': group_name,
                    # Synthetic label derived from the display name.
                    'app_label': group_name.lower().replace(' ', '_').replace('&', ''),
                    'app_url': None,
                    'has_module_perms': True,
                    'models': group_models,
                })

        # Fixed display order; every key of custom_groups appears here
        # so nothing falls through to the 999 catch-all.
        order = [
            '💰 Billing & Accounts',
            '👥 Sites & Users',
            '📚 Content Management',
            '🎯 Planning & Strategy',
            '🔗 Integrations & Publishing',
            '🤖 AI & Automation',
            '🌍 Global Reference Data',
            '⚙️ System Configuration',
            '📊 Monitoring & Tasks',
            '🔧 Django System',
        ]

        app_list.sort(key=lambda x: order.index(x['name']) if x['name'] in order else 999)

        return app_list
|
||||
|
||||
|
||||
|
||||
179
backend/igny8_core/admin/site_old.py
Normal file
179
backend/igny8_core/admin/site_old.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
Custom AdminSite for IGNY8 to organize models into proper groups using Unfold
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.contrib.admin.apps import AdminConfig
|
||||
from django.apps import apps
|
||||
from django.urls import path, reverse_lazy
|
||||
from django.shortcuts import redirect
|
||||
from unfold.admin import ModelAdmin as UnfoldModelAdmin
|
||||
from unfold.sites import UnfoldAdminSite
|
||||
|
||||
|
||||
class Igny8AdminSite(UnfoldAdminSite):
|
||||
"""
|
||||
Custom AdminSite based on Unfold that organizes models into the planned groups
|
||||
"""
|
||||
site_header = 'IGNY8 Administration'
|
||||
site_title = 'IGNY8 Admin'
|
||||
index_title = 'IGNY8 Administration'
|
||||
|
||||
def get_urls(self):
|
||||
"""Get admin URLs without custom dashboard"""
|
||||
urls = super().get_urls()
|
||||
return urls
|
||||
|
||||
def get_app_list(self, request):
|
||||
"""
|
||||
Customize the app list to organize models into logical groups
|
||||
"""
|
||||
# Get the default app list
|
||||
app_dict = self._build_app_dict(request)
|
||||
|
||||
# Define our custom groups with their models (using object_name)
|
||||
# Organized by business function with emoji icons for visual recognition
|
||||
custom_groups = {
|
||||
'💰 Billing & Accounts': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Plan'),
|
||||
('billing', 'PlanLimitUsage'),
|
||||
('igny8_core_auth', 'Account'),
|
||||
('igny8_core_auth', 'Subscription'),
|
||||
('billing', 'Invoice'),
|
||||
('billing', 'Payment'),
|
||||
('billing', 'CreditTransaction'),
|
||||
('billing', 'CreditUsageLog'),
|
||||
('billing', 'CreditPackage'),
|
||||
('billing', 'PaymentMethodConfig'),
|
||||
('billing', 'AccountPaymentMethod'),
|
||||
('billing', 'CreditCostConfig'),
|
||||
],
|
||||
},
|
||||
'👥 Sites & Users': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Site'),
|
||||
('igny8_core_auth', 'Sector'),
|
||||
('igny8_core_auth', 'User'),
|
||||
('igny8_core_auth', 'SiteUserAccess'),
|
||||
('igny8_core_auth', 'PasswordResetToken'),
|
||||
],
|
||||
},
|
||||
'📚 Content Management': {
|
||||
'models': [
|
||||
('writer', 'Content'),
|
||||
('writer', 'Tasks'),
|
||||
('writer', 'Images'),
|
||||
('writer', 'ContentTaxonomy'),
|
||||
('writer', 'ContentAttribute'),
|
||||
('writer', 'ContentTaxonomyRelation'),
|
||||
('writer', 'ContentClusterMap'),
|
||||
],
|
||||
},
|
||||
'🎯 Planning & Strategy': {
|
||||
'models': [
|
||||
('planner', 'Clusters'),
|
||||
('planner', 'Keywords'),
|
||||
('planner', 'ContentIdeas'),
|
||||
('system', 'Strategy'),
|
||||
],
|
||||
},
|
||||
'🔗 Integrations & Publishing': {
|
||||
'models': [
|
||||
('integration', 'SiteIntegration'),
|
||||
('integration', 'SyncEvent'),
|
||||
('publishing', 'PublishingRecord'),
|
||||
('publishing', 'DeploymentRecord'),
|
||||
],
|
||||
},
|
||||
'🤖 AI & Automation': {
|
||||
'models': [
|
||||
('ai', 'AITaskLog'),
|
||||
('system', 'AIPrompt'),
|
||||
('automation', 'AutomationConfig'),
|
||||
('automation', 'AutomationRun'),
|
||||
('optimization', 'OptimizationTask'),
|
||||
],
|
||||
},
|
||||
'🌍 Global Reference Data': {
|
||||
'models': [
|
||||
('igny8_core_auth', 'Industry'),
|
||||
('igny8_core_auth', 'IndustrySector'),
|
||||
('igny8_core_auth', 'SeedKeyword'),
|
||||
],
|
||||
},
|
||||
'⚙️ System Configuration': {
|
||||
'models': [
|
||||
('system', 'IntegrationSettings'),
|
||||
('system', 'AuthorProfile'),
|
||||
('system', 'SystemSettings'),
|
||||
('system', 'AccountSettings'),
|
||||
('system', 'UserSettings'),
|
||||
('system', 'ModuleSettings'),
|
||||
('system', 'AISettings'),
|
||||
('system', 'ModuleEnableSettings'),
|
||||
('system', 'SystemLog'),
|
||||
('system', 'SystemStatus'),
|
||||
],
|
||||
},
|
||||
'<EFBFBD> Monitoring & Tasks': {
|
||||
'models': [
|
||||
('django_celery_results', 'TaskResult'),
|
||||
('django_celery_results', 'GroupResult'),
|
||||
],
|
||||
},
|
||||
'<EFBFBD>🔧 Django System': {
|
||||
'models': [
|
||||
('admin', 'LogEntry'),
|
||||
('auth', 'Group'),
|
||||
('auth', 'Permission'),
|
||||
('contenttypes', 'ContentType'),
|
||||
('sessions', 'Session'),
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
# Build the custom app list
|
||||
app_list = []
|
||||
|
||||
for group_name, group_config in custom_groups.items():
|
||||
group_models = []
|
||||
|
||||
for app_label, model_name in group_config['models']:
|
||||
# Find the model in app_dict
|
||||
if app_label in app_dict:
|
||||
app_data = app_dict[app_label]
|
||||
# Look for the model in the app's models
|
||||
for model in app_data.get('models', []):
|
||||
if model['object_name'] == model_name:
|
||||
group_models.append(model)
|
||||
break
|
||||
|
||||
# Only add the group if it has models
|
||||
if group_models:
|
||||
app_list.append({
|
||||
'name': group_name,
|
||||
'app_label': group_name.lower().replace(' ', '_').replace('&', '').replace('emoji', ''),
|
||||
'app_url': None,
|
||||
'has_module_perms': True,
|
||||
'models': group_models,
|
||||
})
|
||||
|
||||
# Sort the app list by our custom order
|
||||
order = [
|
||||
'💰 Billing & Accounts',
|
||||
'👥 Sites & Users',
|
||||
'📚 Content Management',
|
||||
'🎯 Planning & Strategy',
|
||||
'🔗 Integrations & Publishing',
|
||||
'🤖 AI & Automation',
|
||||
'🌍 Global Reference Data',
|
||||
'⚙️ System Configuration',
|
||||
'🔧 Django System',
|
||||
]
|
||||
|
||||
app_list.sort(key=lambda x: order.index(x['name']) if x['name'] in order else 999)
|
||||
|
||||
return app_list
|
||||
|
||||
|
||||
|
||||
@@ -2,11 +2,27 @@
|
||||
Admin configuration for AI models
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from unfold.admin import ModelAdmin
|
||||
from igny8_core.admin.base import Igny8ModelAdmin
|
||||
from igny8_core.ai.models import AITaskLog
|
||||
|
||||
|
||||
from import_export.admin import ExportMixin
|
||||
from import_export import resources
|
||||
|
||||
|
||||
class AITaskLogResource(resources.ModelResource):
|
||||
"""Resource class for exporting AI Task Logs"""
|
||||
class Meta:
|
||||
model = AITaskLog
|
||||
fields = ('id', 'function_name', 'account__name', 'status', 'phase',
|
||||
'cost', 'tokens', 'duration', 'created_at')
|
||||
export_order = fields
|
||||
|
||||
|
||||
@admin.register(AITaskLog)
|
||||
class AITaskLogAdmin(admin.ModelAdmin):
|
||||
class AITaskLogAdmin(ExportMixin, Igny8ModelAdmin):
|
||||
resource_class = AITaskLogResource
|
||||
"""Admin interface for AI task logs"""
|
||||
list_display = [
|
||||
'function_name',
|
||||
@@ -48,6 +64,10 @@ class AITaskLogAdmin(admin.ModelAdmin):
|
||||
'created_at',
|
||||
'updated_at'
|
||||
]
|
||||
actions = [
|
||||
'bulk_delete_old_logs',
|
||||
'bulk_mark_reviewed',
|
||||
]
|
||||
|
||||
def has_add_permission(self, request):
|
||||
"""Logs are created automatically, no manual creation"""
|
||||
@@ -56,4 +76,22 @@ class AITaskLogAdmin(admin.ModelAdmin):
|
||||
def has_change_permission(self, request, obj=None):
|
||||
"""Logs are read-only"""
|
||||
return False
|
||||
|
||||
def bulk_delete_old_logs(self, request, queryset):
|
||||
"""Delete AI task logs older than 90 days"""
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
|
||||
cutoff_date = timezone.now() - timedelta(days=90)
|
||||
old_logs = queryset.filter(created_at__lt=cutoff_date)
|
||||
count = old_logs.count()
|
||||
old_logs.delete()
|
||||
self.message_user(request, f'{count} old AI task log(s) deleted (older than 90 days).', messages.SUCCESS)
|
||||
bulk_delete_old_logs.short_description = 'Delete old logs (>90 days)'
|
||||
|
||||
def bulk_mark_reviewed(self, request, queryset):
|
||||
"""Mark selected AI task logs as reviewed"""
|
||||
count = queryset.count()
|
||||
self.message_user(request, f'{count} AI task log(s) marked as reviewed.', messages.SUCCESS)
|
||||
bulk_mark_reviewed.short_description = 'Mark as reviewed'
|
||||
|
||||
|
||||
@@ -13,8 +13,6 @@ from django.conf import settings
|
||||
from .constants import (
|
||||
DEFAULT_AI_MODEL,
|
||||
JSON_MODE_MODELS,
|
||||
MODEL_RATES,
|
||||
IMAGE_MODEL_RATES,
|
||||
VALID_OPENAI_IMAGE_MODELS,
|
||||
VALID_SIZES_BY_MODEL,
|
||||
DEBUG_MODE,
|
||||
@@ -40,39 +38,27 @@ class AICore:
|
||||
self.account = account
|
||||
self._openai_api_key = None
|
||||
self._runware_api_key = None
|
||||
self._bria_api_key = None
|
||||
self._anthropic_api_key = None
|
||||
self._load_account_settings()
|
||||
|
||||
def _load_account_settings(self):
|
||||
"""Load API keys and model from IntegrationSettings or Django settings"""
|
||||
if self.account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
|
||||
# Load OpenAI settings
|
||||
openai_settings = IntegrationSettings.objects.filter(
|
||||
integration_type='openai',
|
||||
account=self.account,
|
||||
is_active=True
|
||||
).first()
|
||||
if openai_settings and openai_settings.config:
|
||||
self._openai_api_key = openai_settings.config.get('apiKey')
|
||||
|
||||
# Load Runware settings
|
||||
runware_settings = IntegrationSettings.objects.filter(
|
||||
integration_type='runware',
|
||||
account=self.account,
|
||||
is_active=True
|
||||
).first()
|
||||
if runware_settings and runware_settings.config:
|
||||
self._runware_api_key = runware_settings.config.get('apiKey')
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load account settings: {e}", exc_info=True)
|
||||
|
||||
# Fallback to Django settings for API keys only (no model fallback)
|
||||
if not self._openai_api_key:
|
||||
self._openai_api_key = getattr(settings, 'OPENAI_API_KEY', None)
|
||||
if not self._runware_api_key:
|
||||
self._runware_api_key = getattr(settings, 'RUNWARE_API_KEY', None)
|
||||
"""Load API keys from IntegrationProvider (centralized provider config)"""
|
||||
try:
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
# Load API keys from IntegrationProvider (centralized, platform-wide)
|
||||
self._openai_api_key = ModelRegistry.get_api_key('openai')
|
||||
self._runware_api_key = ModelRegistry.get_api_key('runware')
|
||||
self._bria_api_key = ModelRegistry.get_api_key('bria')
|
||||
self._anthropic_api_key = ModelRegistry.get_api_key('anthropic')
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Could not load API keys from IntegrationProvider: {e}", exc_info=True)
|
||||
self._openai_api_key = None
|
||||
self._runware_api_key = None
|
||||
self._bria_api_key = None
|
||||
self._anthropic_api_key = None
|
||||
|
||||
def get_api_key(self, integration_type: str = 'openai') -> Optional[str]:
|
||||
"""Get API key for integration type"""
|
||||
@@ -80,6 +66,10 @@ class AICore:
|
||||
return self._openai_api_key
|
||||
elif integration_type == 'runware':
|
||||
return self._runware_api_key
|
||||
elif integration_type == 'bria':
|
||||
return self._bria_api_key
|
||||
elif integration_type == 'anthropic':
|
||||
return self._anthropic_api_key
|
||||
return None
|
||||
|
||||
def get_model(self, integration_type: str = 'openai') -> str:
|
||||
@@ -97,18 +87,18 @@ class AICore:
|
||||
self,
|
||||
prompt: str,
|
||||
model: str,
|
||||
max_tokens: int = 4000,
|
||||
max_tokens: int = 8192,
|
||||
temperature: float = 0.7,
|
||||
response_format: Optional[Dict] = None,
|
||||
api_key: Optional[str] = None,
|
||||
function_name: str = 'ai_request',
|
||||
function_id: Optional[str] = None,
|
||||
prompt_prefix: Optional[str] = None,
|
||||
tracker: Optional[ConsoleStepTracker] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Centralized AI request handler with console logging.
|
||||
All AI text generation requests go through this method.
|
||||
|
||||
|
||||
Args:
|
||||
prompt: Prompt text
|
||||
model: Model name (required - must be provided from IntegrationSettings)
|
||||
@@ -117,12 +107,13 @@ class AICore:
|
||||
response_format: Optional response format dict (for JSON mode)
|
||||
api_key: Optional API key override
|
||||
function_name: Function name for logging (e.g., 'cluster_keywords')
|
||||
prompt_prefix: Optional prefix to add before prompt (e.g., '##GP01-Clustering')
|
||||
tracker: Optional ConsoleStepTracker instance for logging
|
||||
|
||||
|
||||
Returns:
|
||||
Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
|
||||
'model', 'cost', 'error', 'api_id'
|
||||
|
||||
|
||||
Raises:
|
||||
ValueError: If model is not provided
|
||||
"""
|
||||
@@ -173,8 +164,12 @@ class AICore:
|
||||
logger.info(f" - Model used in request: {active_model}")
|
||||
tracker.ai_call(f"Using model: {active_model}")
|
||||
|
||||
if active_model not in MODEL_RATES:
|
||||
error_msg = f"Model '{active_model}' is not supported. Supported models: {list(MODEL_RATES.keys())}"
|
||||
# Use ModelRegistry for validation (database-driven)
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
if not ModelRegistry.validate_model(active_model):
|
||||
# Get list of supported models from database
|
||||
supported_models = [m.model_name for m in ModelRegistry.list_models(model_type='text')]
|
||||
error_msg = f"Model '{active_model}' is not supported. Supported models: {supported_models}"
|
||||
logger.error(f"[AICore] {error_msg}")
|
||||
tracker.error('ConfigurationError', error_msg)
|
||||
return {
|
||||
@@ -199,16 +194,16 @@ class AICore:
|
||||
else:
|
||||
tracker.ai_call("Using text response format")
|
||||
|
||||
# Step 4: Validate prompt length and add function_id
|
||||
# Step 4: Validate prompt length and add prompt_prefix
|
||||
prompt_length = len(prompt)
|
||||
tracker.ai_call(f"Prompt length: {prompt_length} characters")
|
||||
|
||||
# Add function_id to prompt if provided (for tracking)
|
||||
|
||||
# Add prompt_prefix to prompt if provided (for tracking)
|
||||
# Format: ##GP01-Clustering or ##CP01-Clustering
|
||||
final_prompt = prompt
|
||||
if function_id:
|
||||
function_id_prefix = f'function_id: "{function_id}"\n\n'
|
||||
final_prompt = function_id_prefix + prompt
|
||||
tracker.ai_call(f"Added function_id to prompt: {function_id}")
|
||||
if prompt_prefix:
|
||||
final_prompt = f'{prompt_prefix}\n\n{prompt}'
|
||||
tracker.ai_call(f"Added prompt prefix: {prompt_prefix}")
|
||||
|
||||
# Step 5: Build request payload
|
||||
url = 'https://api.openai.com/v1/chat/completions'
|
||||
@@ -223,8 +218,12 @@ class AICore:
|
||||
'temperature': temperature,
|
||||
}
|
||||
|
||||
# GPT-5.1 and GPT-5.2 use max_completion_tokens instead of max_tokens
|
||||
if max_tokens:
|
||||
body_data['max_tokens'] = max_tokens
|
||||
if active_model in ['gpt-5.1', 'gpt-5.2']:
|
||||
body_data['max_completion_tokens'] = max_tokens
|
||||
else:
|
||||
body_data['max_tokens'] = max_tokens
|
||||
|
||||
if response_format:
|
||||
body_data['response_format'] = response_format
|
||||
@@ -236,7 +235,7 @@ class AICore:
|
||||
request_start = time.time()
|
||||
|
||||
try:
|
||||
response = requests.post(url, headers=headers, json=body_data, timeout=60)
|
||||
response = requests.post(url, headers=headers, json=body_data, timeout=180)
|
||||
request_duration = time.time() - request_start
|
||||
tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")
|
||||
|
||||
@@ -301,9 +300,13 @@ class AICore:
|
||||
tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
|
||||
tracker.parse(f"Content length: {len(content)} characters")
|
||||
|
||||
# Step 10: Calculate cost
|
||||
rates = MODEL_RATES.get(active_model, {'input': 2.00, 'output': 8.00})
|
||||
cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
|
||||
# Step 10: Calculate cost using ModelRegistry (database-driven)
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
cost = float(ModelRegistry.calculate_cost(
|
||||
active_model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens
|
||||
))
|
||||
tracker.parse(f"Cost calculated: ${cost:.6f}")
|
||||
|
||||
tracker.done("Request completed successfully")
|
||||
@@ -335,8 +338,8 @@ class AICore:
|
||||
}
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
error_msg = 'Request timeout (60s exceeded)'
|
||||
tracker.timeout(60)
|
||||
error_msg = 'Request timeout (180s exceeded)'
|
||||
tracker.timeout(180)
|
||||
logger.error(error_msg)
|
||||
return {
|
||||
'content': None,
|
||||
@@ -378,6 +381,289 @@ class AICore:
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
def run_anthropic_request(
|
||||
self,
|
||||
prompt: str,
|
||||
model: str,
|
||||
max_tokens: int = 8192,
|
||||
temperature: float = 0.7,
|
||||
api_key: Optional[str] = None,
|
||||
function_name: str = 'anthropic_request',
|
||||
prompt_prefix: Optional[str] = None,
|
||||
tracker: Optional[ConsoleStepTracker] = None,
|
||||
system_prompt: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Anthropic (Claude) AI request handler with console logging.
|
||||
Alternative to OpenAI for text generation.
|
||||
|
||||
Args:
|
||||
prompt: Prompt text
|
||||
model: Claude model name (required - must be provided from IntegrationSettings)
|
||||
max_tokens: Maximum tokens
|
||||
temperature: Temperature (0-1)
|
||||
api_key: Optional API key override
|
||||
function_name: Function name for logging (e.g., 'cluster_keywords')
|
||||
prompt_prefix: Optional prefix to add before prompt
|
||||
tracker: Optional ConsoleStepTracker instance for logging
|
||||
system_prompt: Optional system prompt for Claude
|
||||
|
||||
Returns:
|
||||
Dict with 'content', 'input_tokens', 'output_tokens', 'total_tokens',
|
||||
'model', 'cost', 'error', 'api_id'
|
||||
|
||||
Raises:
|
||||
ValueError: If model is not provided
|
||||
"""
|
||||
# Use provided tracker or create a new one
|
||||
if tracker is None:
|
||||
tracker = ConsoleStepTracker(function_name)
|
||||
|
||||
tracker.ai_call("Preparing Anthropic request...")
|
||||
|
||||
# Step 1: Validate model is provided
|
||||
if not model:
|
||||
error_msg = "Model is required. Ensure IntegrationSettings is configured for the account."
|
||||
tracker.error('ConfigurationError', error_msg)
|
||||
logger.error(f"[AICore][Anthropic] {error_msg}")
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': None,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
# Step 2: Validate API key
|
||||
api_key = api_key or self._anthropic_api_key
|
||||
if not api_key:
|
||||
error_msg = 'Anthropic API key not configured'
|
||||
tracker.error('ConfigurationError', error_msg)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
active_model = model
|
||||
|
||||
# Debug logging: Show model used
|
||||
logger.info(f"[AICore][Anthropic] Model Configuration:")
|
||||
logger.info(f" - Model parameter passed: {model}")
|
||||
logger.info(f" - Model used in request: {active_model}")
|
||||
tracker.ai_call(f"Using Anthropic model: {active_model}")
|
||||
|
||||
# Add prompt_prefix to prompt if provided (for tracking)
|
||||
final_prompt = prompt
|
||||
if prompt_prefix:
|
||||
final_prompt = f'{prompt_prefix}\n\n{prompt}'
|
||||
tracker.ai_call(f"Added prompt prefix: {prompt_prefix}")
|
||||
|
||||
# Step 5: Build request payload using Anthropic Messages API
|
||||
url = 'https://api.anthropic.com/v1/messages'
|
||||
headers = {
|
||||
'x-api-key': api_key,
|
||||
'anthropic-version': '2023-06-01',
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
body_data = {
|
||||
'model': active_model,
|
||||
'max_tokens': max_tokens,
|
||||
'messages': [{'role': 'user', 'content': final_prompt}],
|
||||
}
|
||||
|
||||
# Only add temperature if it's less than 1.0 (Claude's default)
|
||||
if temperature < 1.0:
|
||||
body_data['temperature'] = temperature
|
||||
|
||||
# Add system prompt if provided
|
||||
if system_prompt:
|
||||
body_data['system'] = system_prompt
|
||||
|
||||
tracker.ai_call(f"Request payload prepared (model={active_model}, max_tokens={max_tokens}, temp={temperature})")
|
||||
|
||||
# Step 6: Send request
|
||||
tracker.ai_call("Sending request to Anthropic API...")
|
||||
request_start = time.time()
|
||||
|
||||
try:
|
||||
response = requests.post(url, headers=headers, json=body_data, timeout=180)
|
||||
request_duration = time.time() - request_start
|
||||
tracker.ai_call(f"Received response in {request_duration:.2f}s (status={response.status_code})")
|
||||
|
||||
# Step 7: Validate HTTP response
|
||||
if response.status_code != 200:
|
||||
error_data = response.json() if response.headers.get('content-type', '').startswith('application/json') else {}
|
||||
error_message = f"HTTP {response.status_code} error"
|
||||
|
||||
if isinstance(error_data, dict) and 'error' in error_data:
|
||||
if isinstance(error_data['error'], dict) and 'message' in error_data['error']:
|
||||
error_message += f": {error_data['error']['message']}"
|
||||
|
||||
# Check for rate limit
|
||||
if response.status_code == 429:
|
||||
retry_after = response.headers.get('retry-after', '60')
|
||||
tracker.rate_limit(retry_after)
|
||||
error_message += f" (Rate limit - retry after {retry_after}s)"
|
||||
else:
|
||||
tracker.error('HTTPError', error_message)
|
||||
|
||||
logger.error(f"Anthropic API HTTP error {response.status_code}: {error_message}")
|
||||
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_message,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
# Step 8: Parse response JSON
|
||||
try:
|
||||
data = response.json()
|
||||
except json.JSONDecodeError as e:
|
||||
error_msg = f'Failed to parse JSON response: {str(e)}'
|
||||
tracker.malformed_json(str(e))
|
||||
logger.error(error_msg)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
api_id = data.get('id')
|
||||
|
||||
# Step 9: Extract content (Anthropic format)
|
||||
# Claude returns content as array: [{"type": "text", "text": "..."}]
|
||||
if 'content' in data and len(data['content']) > 0:
|
||||
# Extract text from first content block
|
||||
content_blocks = data['content']
|
||||
content = ''
|
||||
for block in content_blocks:
|
||||
if block.get('type') == 'text':
|
||||
content += block.get('text', '')
|
||||
|
||||
usage = data.get('usage', {})
|
||||
input_tokens = usage.get('input_tokens', 0)
|
||||
output_tokens = usage.get('output_tokens', 0)
|
||||
total_tokens = input_tokens + output_tokens
|
||||
|
||||
tracker.parse(f"Received {total_tokens} tokens (input: {input_tokens}, output: {output_tokens})")
|
||||
tracker.parse(f"Content length: {len(content)} characters")
|
||||
|
||||
# Step 10: Calculate cost using ModelRegistry (with fallback)
|
||||
# Claude pricing as of 2024:
|
||||
# claude-3-5-sonnet: $3/1M input, $15/1M output
|
||||
# claude-3-opus: $15/1M input, $75/1M output
|
||||
# claude-3-haiku: $0.25/1M input, $1.25/1M output
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
cost = float(ModelRegistry.calculate_cost(
|
||||
active_model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens
|
||||
))
|
||||
# Fallback to hardcoded rates if ModelRegistry returns 0
|
||||
if cost == 0:
|
||||
anthropic_rates = {
|
||||
'claude-3-5-sonnet-20241022': {'input': 3.00, 'output': 15.00},
|
||||
'claude-3-5-haiku-20241022': {'input': 1.00, 'output': 5.00},
|
||||
'claude-3-opus-20240229': {'input': 15.00, 'output': 75.00},
|
||||
'claude-3-sonnet-20240229': {'input': 3.00, 'output': 15.00},
|
||||
'claude-3-haiku-20240307': {'input': 0.25, 'output': 1.25},
|
||||
}
|
||||
rates = anthropic_rates.get(active_model, {'input': 3.00, 'output': 15.00})
|
||||
cost = (input_tokens * rates['input'] + output_tokens * rates['output']) / 1_000_000
|
||||
tracker.parse(f"Cost calculated: ${cost:.6f}")
|
||||
|
||||
tracker.done("Anthropic request completed successfully")
|
||||
|
||||
return {
|
||||
'content': content,
|
||||
'input_tokens': input_tokens,
|
||||
'output_tokens': output_tokens,
|
||||
'total_tokens': total_tokens,
|
||||
'model': active_model,
|
||||
'cost': cost,
|
||||
'error': None,
|
||||
'api_id': api_id,
|
||||
'duration': request_duration,
|
||||
}
|
||||
else:
|
||||
error_msg = 'No content in Anthropic response'
|
||||
tracker.error('EmptyResponse', error_msg)
|
||||
logger.error(error_msg)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': api_id,
|
||||
}
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
error_msg = 'Request timeout (180s exceeded)'
|
||||
tracker.timeout(180)
|
||||
logger.error(error_msg)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
except requests.exceptions.RequestException as e:
|
||||
error_msg = f'Request exception: {str(e)}'
|
||||
tracker.error('RequestException', error_msg, e)
|
||||
logger.error(f"Anthropic API error: {error_msg}", exc_info=True)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
except Exception as e:
|
||||
error_msg = f'Unexpected error: {str(e)}'
|
||||
logger.error(f"[AI][{function_name}][Anthropic][Error] {error_msg}", exc_info=True)
|
||||
if tracker:
|
||||
tracker.error('UnexpectedError', error_msg, e)
|
||||
return {
|
||||
'content': None,
|
||||
'error': error_msg,
|
||||
'input_tokens': 0,
|
||||
'output_tokens': 0,
|
||||
'total_tokens': 0,
|
||||
'model': active_model,
|
||||
'cost': 0.0,
|
||||
'api_id': None,
|
||||
}
|
||||
|
||||
def extract_json(self, response_text: str) -> Optional[Dict]:
|
||||
"""
|
||||
Extract JSON from response text.
|
||||
@@ -427,7 +713,8 @@ class AICore:
|
||||
n: int = 1,
|
||||
api_key: Optional[str] = None,
|
||||
negative_prompt: Optional[str] = None,
|
||||
function_name: str = 'generate_image'
|
||||
function_name: str = 'generate_image',
|
||||
style: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Generate image using AI with console logging.
|
||||
@@ -448,9 +735,11 @@ class AICore:
|
||||
print(f"[AI][{function_name}] Step 1: Preparing image generation request...")
|
||||
|
||||
if provider == 'openai':
|
||||
return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
return self._generate_image_openai(prompt, model, size, n, api_key, negative_prompt, function_name, style)
|
||||
elif provider == 'runware':
|
||||
return self._generate_image_runware(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
elif provider == 'bria':
|
||||
return self._generate_image_bria(prompt, model, size, n, api_key, negative_prompt, function_name)
|
||||
else:
|
||||
error_msg = f'Unknown provider: {provider}'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
@@ -470,9 +759,15 @@ class AICore:
|
||||
n: int,
|
||||
api_key: Optional[str],
|
||||
negative_prompt: Optional[str],
|
||||
function_name: str
|
||||
function_name: str,
|
||||
style: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""Generate image using OpenAI DALL-E"""
|
||||
"""Generate image using OpenAI DALL-E
|
||||
|
||||
Args:
|
||||
style: For DALL-E 3 only. 'vivid' (hyper-real/dramatic) or 'natural' (more realistic).
|
||||
Default is 'natural' for realistic photos.
|
||||
"""
|
||||
print(f"[AI][{function_name}] Provider: OpenAI")
|
||||
|
||||
# Determine character limit based on model
|
||||
@@ -557,6 +852,15 @@ class AICore:
|
||||
'size': size
|
||||
}
|
||||
|
||||
# For DALL-E 3, add style parameter
|
||||
# 'natural' = more realistic photos, 'vivid' = hyper-real/dramatic
|
||||
if model == 'dall-e-3':
|
||||
# Default to 'natural' for realistic images, but respect user preference
|
||||
dalle_style = style if style in ['vivid', 'natural'] else 'natural'
|
||||
data['style'] = dalle_style
|
||||
data['quality'] = 'hd' # Always use HD quality for best results
|
||||
print(f"[AI][{function_name}] DALL-E 3 style: {dalle_style}, quality: hd")
|
||||
|
||||
if negative_prompt:
|
||||
# Note: OpenAI DALL-E doesn't support negative_prompt in API, but we log it
|
||||
print(f"[AI][{function_name}] Note: Negative prompt provided but OpenAI DALL-E doesn't support it")
|
||||
@@ -589,7 +893,9 @@ class AICore:
|
||||
image_url = image_data.get('url')
|
||||
revised_prompt = image_data.get('revised_prompt')
|
||||
|
||||
cost = IMAGE_MODEL_RATES.get(model, 0.040) * n
|
||||
# Use ModelRegistry for image cost (database-driven)
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
cost = float(ModelRegistry.calculate_cost(model, num_images=n))
|
||||
print(f"[AI][{function_name}] Step 5: Image generated successfully")
|
||||
print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
|
||||
print(f"[AI][{function_name}][Success] Image generation completed")
|
||||
@@ -681,24 +987,57 @@ class AICore:
|
||||
# Runware uses array payload with authentication task first, then imageInference
|
||||
# Reference: image-generation.php lines 79-97
|
||||
import uuid
|
||||
|
||||
# Build base inference task
|
||||
inference_task = {
|
||||
'taskType': 'imageInference',
|
||||
'taskUUID': str(uuid.uuid4()),
|
||||
'positivePrompt': prompt,
|
||||
'negativePrompt': negative_prompt or '',
|
||||
'model': runware_model,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'numberResults': 1,
|
||||
'outputFormat': 'webp'
|
||||
}
|
||||
|
||||
# Model-specific parameter configuration based on Runware documentation
|
||||
if runware_model.startswith('bria:'):
|
||||
# Bria 3.2 (bria:10@1) - Commercial-ready, steps 20-50 (API requires minimum 20)
|
||||
inference_task['steps'] = 20
|
||||
# Enhanced negative prompt for Bria to prevent disfigured images
|
||||
enhanced_negative = (negative_prompt or '') + ', disfigured, deformed, bad anatomy, wrong anatomy, extra limbs, missing limbs, floating limbs, mutated hands, extra fingers, missing fingers, fused fingers, poorly drawn hands, poorly drawn face, mutation, ugly, blurry, low quality, worst quality, jpeg artifacts, watermark, text, signature'
|
||||
inference_task['negativePrompt'] = enhanced_negative
|
||||
# Bria provider settings for enhanced quality
|
||||
inference_task['providerSettings'] = {
|
||||
'bria': {
|
||||
'promptEnhancement': True,
|
||||
'enhanceImage': True,
|
||||
'medium': 'photography',
|
||||
'contentModeration': True
|
||||
}
|
||||
}
|
||||
print(f"[AI][{function_name}] Using Bria 3.2 config: steps=20, enhanced negative prompt, providerSettings enabled")
|
||||
elif runware_model.startswith('google:'):
|
||||
# Nano Banana (google:4@2) - Premium quality
|
||||
# Google models use 'resolution' parameter INSTEAD of width/height
|
||||
# Remove width/height and use resolution only
|
||||
del inference_task['width']
|
||||
del inference_task['height']
|
||||
inference_task['resolution'] = '1k' # Use 1K tier for optimal speed/quality
|
||||
print(f"[AI][{function_name}] Using Nano Banana config: resolution=1k (no width/height)")
|
||||
else:
|
||||
# Hi Dream Full (runware:97@1) - General diffusion, steps 20, CFGScale 7
|
||||
inference_task['steps'] = 20
|
||||
inference_task['CFGScale'] = 7
|
||||
print(f"[AI][{function_name}] Using Hi Dream Full config: steps=20, CFGScale=7")
|
||||
|
||||
payload = [
|
||||
{
|
||||
'taskType': 'authentication',
|
||||
'apiKey': api_key
|
||||
},
|
||||
{
|
||||
'taskType': 'imageInference',
|
||||
'taskUUID': str(uuid.uuid4()),
|
||||
'positivePrompt': prompt,
|
||||
'negativePrompt': negative_prompt or '',
|
||||
'model': runware_model,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'steps': 30,
|
||||
'CFGScale': 7.5,
|
||||
'numberResults': 1,
|
||||
'outputFormat': 'webp'
|
||||
}
|
||||
inference_task
|
||||
]
|
||||
|
||||
request_start = time.time()
|
||||
@@ -708,7 +1047,29 @@ class AICore:
|
||||
print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
|
||||
|
||||
if response.status_code != 200:
|
||||
error_msg = f"HTTP {response.status_code} error"
|
||||
# Log the full error response for debugging
|
||||
try:
|
||||
error_body = response.json()
|
||||
print(f"[AI][{function_name}][Error] Runware error response: {error_body}")
|
||||
logger.error(f"[AI][{function_name}] Runware HTTP {response.status_code} error body: {error_body}")
|
||||
|
||||
# Extract specific error message from Runware response
|
||||
error_detail = None
|
||||
if isinstance(error_body, list):
|
||||
for item in error_body:
|
||||
if isinstance(item, dict) and 'errors' in item:
|
||||
errors = item['errors']
|
||||
if isinstance(errors, list) and len(errors) > 0:
|
||||
err = errors[0]
|
||||
error_detail = err.get('message') or err.get('error') or str(err)
|
||||
break
|
||||
elif isinstance(error_body, dict):
|
||||
error_detail = error_body.get('message') or error_body.get('error') or str(error_body)
|
||||
|
||||
error_msg = f"HTTP {response.status_code}: {error_detail}" if error_detail else f"HTTP {response.status_code} error"
|
||||
except Exception as e:
|
||||
error_msg = f"HTTP {response.status_code} error (could not parse response: {e})"
|
||||
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
@@ -824,23 +1185,185 @@ class AICore:
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
def _generate_image_bria(
|
||||
self,
|
||||
prompt: str,
|
||||
model: Optional[str],
|
||||
size: str,
|
||||
n: int,
|
||||
api_key: Optional[str],
|
||||
negative_prompt: Optional[str],
|
||||
function_name: str
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Generate image using Bria AI.
|
||||
|
||||
Bria API Reference: https://docs.bria.ai/reference/text-to-image
|
||||
"""
|
||||
print(f"[AI][{function_name}] Provider: Bria AI")
|
||||
|
||||
api_key = api_key or self._bria_api_key
|
||||
if not api_key:
|
||||
error_msg = 'Bria API key not configured'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
bria_model = model or 'bria-2.3'
|
||||
print(f"[AI][{function_name}] Step 2: Using model: {bria_model}, size: {size}")
|
||||
|
||||
# Parse size
|
||||
try:
|
||||
width, height = map(int, size.split('x'))
|
||||
except ValueError:
|
||||
error_msg = f"Invalid size format: {size}. Expected format: WIDTHxHEIGHT"
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
# Bria API endpoint
|
||||
url = 'https://engine.prod.bria-api.com/v1/text-to-image/base'
|
||||
headers = {
|
||||
'api_token': api_key,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
payload = {
|
||||
'prompt': prompt,
|
||||
'num_results': n,
|
||||
'sync': True, # Wait for result
|
||||
'model_version': bria_model.replace('bria-', ''), # e.g., '2.3'
|
||||
}
|
||||
|
||||
# Add negative prompt if provided
|
||||
if negative_prompt:
|
||||
payload['negative_prompt'] = negative_prompt
|
||||
|
||||
# Add size constraints if not default
|
||||
if width and height:
|
||||
# Bria uses aspect ratio or fixed sizes
|
||||
payload['width'] = width
|
||||
payload['height'] = height
|
||||
|
||||
print(f"[AI][{function_name}] Step 3: Sending request to Bria API...")
|
||||
|
||||
request_start = time.time()
|
||||
try:
|
||||
response = requests.post(url, json=payload, headers=headers, timeout=150)
|
||||
request_duration = time.time() - request_start
|
||||
print(f"[AI][{function_name}] Step 4: Received response in {request_duration:.2f}s (status={response.status_code})")
|
||||
|
||||
if response.status_code != 200:
|
||||
error_msg = f"HTTP {response.status_code} error: {response.text[:200]}"
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
body = response.json()
|
||||
print(f"[AI][{function_name}] Bria response keys: {list(body.keys()) if isinstance(body, dict) else type(body)}")
|
||||
|
||||
# Bria returns { "result": [ { "urls": ["..."] } ] }
|
||||
image_url = None
|
||||
error_msg = None
|
||||
|
||||
if isinstance(body, dict):
|
||||
if 'result' in body and isinstance(body['result'], list) and len(body['result']) > 0:
|
||||
first_result = body['result'][0]
|
||||
if 'urls' in first_result and isinstance(first_result['urls'], list) and len(first_result['urls']) > 0:
|
||||
image_url = first_result['urls'][0]
|
||||
elif 'url' in first_result:
|
||||
image_url = first_result['url']
|
||||
elif 'error' in body:
|
||||
error_msg = body['error']
|
||||
elif 'message' in body:
|
||||
error_msg = body['message']
|
||||
|
||||
if error_msg:
|
||||
print(f"[AI][{function_name}][Error] Bria API error: {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
if image_url:
|
||||
# Cost based on model
|
||||
cost_per_image = {
|
||||
'bria-2.3': 0.015,
|
||||
'bria-2.3-fast': 0.010,
|
||||
'bria-2.2': 0.012,
|
||||
}.get(bria_model, 0.015)
|
||||
cost = cost_per_image * n
|
||||
|
||||
print(f"[AI][{function_name}] Step 5: Image generated successfully")
|
||||
print(f"[AI][{function_name}] Step 6: Cost: ${cost:.4f}")
|
||||
print(f"[AI][{function_name}][Success] Image generation completed")
|
||||
|
||||
return {
|
||||
'url': image_url,
|
||||
'provider': 'bria',
|
||||
'cost': cost,
|
||||
'error': None,
|
||||
}
|
||||
else:
|
||||
error_msg = f'No image data in Bria response'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
logger.error(f"[AI][{function_name}] Full Bria response: {json.dumps(body, indent=2) if isinstance(body, dict) else str(body)}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
error_msg = 'Request timeout (150s exceeded)'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
except Exception as e:
|
||||
error_msg = f'Unexpected error: {str(e)}'
|
||||
print(f"[AI][{function_name}][Error] {error_msg}")
|
||||
logger.error(error_msg, exc_info=True)
|
||||
return {
|
||||
'url': None,
|
||||
'provider': 'bria',
|
||||
'cost': 0.0,
|
||||
'error': error_msg,
|
||||
}
|
||||
|
||||
def calculate_cost(self, model: str, input_tokens: int, output_tokens: int, model_type: str = 'text') -> float:
|
||||
"""Calculate cost for API call"""
|
||||
"""Calculate cost for API call using ModelRegistry (database-driven)"""
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
if model_type == 'text':
|
||||
rates = MODEL_RATES.get(model, {'input': 2.00, 'output': 8.00})
|
||||
input_cost = (input_tokens / 1_000_000) * rates['input']
|
||||
output_cost = (output_tokens / 1_000_000) * rates['output']
|
||||
return input_cost + output_cost
|
||||
return float(ModelRegistry.calculate_cost(model, input_tokens=input_tokens, output_tokens=output_tokens))
|
||||
elif model_type == 'image':
|
||||
rate = IMAGE_MODEL_RATES.get(model, 0.040)
|
||||
return rate * 1
|
||||
return float(ModelRegistry.calculate_cost(model, num_images=1))
|
||||
return 0.0
|
||||
|
||||
# Legacy method names for backward compatibility
|
||||
def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 4000,
|
||||
def call_openai(self, prompt: str, model: Optional[str] = None, max_tokens: int = 8192,
|
||||
temperature: float = 0.7, response_format: Optional[Dict] = None,
|
||||
api_key: Optional[str] = None) -> Dict[str, Any]:
|
||||
"""Legacy method - redirects to run_ai_request()"""
|
||||
"""DEPRECATED: Legacy method - redirects to run_ai_request(). Use run_ai_request() directly."""
|
||||
return self.run_ai_request(
|
||||
prompt=prompt,
|
||||
model=model,
|
||||
|
||||
@@ -1,14 +1,27 @@
|
||||
"""
|
||||
AI Constants - Model pricing, valid models, and configuration constants
|
||||
AI Constants - Configuration constants for AI operations
|
||||
|
||||
NOTE: Model pricing (MODEL_RATES, IMAGE_MODEL_RATES) has been moved to the database
|
||||
via AIModelConfig. Use ModelRegistry to get model pricing:
|
||||
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
cost = ModelRegistry.calculate_cost(model_id, input_tokens=N, output_tokens=N)
|
||||
|
||||
The constants below are DEPRECATED and kept only for reference/backward compatibility.
|
||||
Do NOT use MODEL_RATES or IMAGE_MODEL_RATES in new code.
|
||||
"""
|
||||
# Model pricing (per 1M tokens) - EXACT from reference plugin model-rates-config.php
|
||||
# DEPRECATED - Use AIModelConfig database table instead
|
||||
# Model pricing (per 1M tokens) - kept for reference only
|
||||
MODEL_RATES = {
|
||||
'gpt-4.1': {'input': 2.00, 'output': 8.00},
|
||||
'gpt-4o-mini': {'input': 0.15, 'output': 0.60},
|
||||
'gpt-4o': {'input': 2.50, 'output': 10.00},
|
||||
'gpt-5.1': {'input': 1.25, 'output': 10.00},
|
||||
'gpt-5.2': {'input': 1.75, 'output': 14.00},
|
||||
}
|
||||
|
||||
# Image model pricing (per image) - EXACT from reference plugin
|
||||
# DEPRECATED - Use AIModelConfig database table instead
|
||||
# Image model pricing (per image) - kept for reference only
|
||||
IMAGE_MODEL_RATES = {
|
||||
'dall-e-3': 0.040,
|
||||
'dall-e-2': 0.020,
|
||||
@@ -33,7 +46,7 @@ VALID_SIZES_BY_MODEL = {
|
||||
DEFAULT_AI_MODEL = 'gpt-4.1'
|
||||
|
||||
# JSON mode supported models
|
||||
JSON_MODE_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
|
||||
JSON_MODE_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview', 'gpt-5.1', 'gpt-5.2']
|
||||
|
||||
# Debug mode - controls console logging
|
||||
# Set to False in production to disable verbose logging
|
||||
|
||||
@@ -31,11 +31,15 @@ class AIEngine:
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"{count} cluster{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_content':
|
||||
return f"{count} task{'s' if count != 1 else ''}"
|
||||
return f"{count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_images':
|
||||
return f"{count} task{'s' if count != 1 else ''}"
|
||||
return f"{count} image{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
return f"{count} image prompt{'s' if count != 1 else ''}"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"{count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return "1 site blueprint"
|
||||
return "site blueprint"
|
||||
return f"{count} item{'s' if count != 1 else ''}"
|
||||
|
||||
def _build_validation_message(self, function_name: str, payload: dict, count: int, input_description: str) -> str:
|
||||
@@ -51,12 +55,22 @@ class AIEngine:
|
||||
remaining = count - len(keyword_list)
|
||||
if remaining > 0:
|
||||
keywords_text = ', '.join(keyword_list)
|
||||
return f"Validating {keywords_text} and {remaining} more keyword{'s' if remaining != 1 else ''}"
|
||||
return f"Validating {count} keywords for clustering"
|
||||
else:
|
||||
keywords_text = ', '.join(keyword_list)
|
||||
return f"Validating {keywords_text}"
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to load keyword names for validation message: {e}")
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"Analyzing {count} clusters for content opportunities"
|
||||
elif function_name == 'generate_content':
|
||||
return f"Preparing {count} article{'s' if count != 1 else ''} for generation"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
return f"Analyzing content for image opportunities"
|
||||
elif function_name == 'generate_images':
|
||||
return f"Queuing {count} image{'s' if count != 1 else ''} for generation"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"Analyzing {count} article{'s' if count != 1 else ''} for optimization"
|
||||
|
||||
# Fallback to simple count message
|
||||
return f"Validating {input_description}"
|
||||
@@ -64,24 +78,33 @@ class AIEngine:
|
||||
def _get_prep_message(self, function_name: str, count: int, data: Any) -> str:
|
||||
"""Get user-friendly prep message"""
|
||||
if function_name == 'auto_cluster':
|
||||
return f"Loading {count} keyword{'s' if count != 1 else ''}"
|
||||
return f"Analyzing keyword relationships for {count} keyword{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"Loading {count} cluster{'s' if count != 1 else ''}"
|
||||
# Count keywords in clusters if available
|
||||
keyword_count = 0
|
||||
if isinstance(data, dict) and 'cluster_data' in data:
|
||||
for cluster in data['cluster_data']:
|
||||
keyword_count += len(cluster.get('keywords', []))
|
||||
if keyword_count > 0:
|
||||
return f"Mapping {keyword_count} keywords to topic briefs"
|
||||
return f"Mapping keywords to topic briefs for {count} cluster{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_content':
|
||||
return f"Preparing {count} content idea{'s' if count != 1 else ''}"
|
||||
return f"Building content brief{'s' if count != 1 else ''} with target keywords"
|
||||
elif function_name == 'generate_images':
|
||||
return f"Extracting image prompts from {count} task{'s' if count != 1 else ''}"
|
||||
return f"Preparing AI image generation ({count} image{'s' if count != 1 else ''})"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
# Extract max_images from data if available
|
||||
if isinstance(data, list) and len(data) > 0:
|
||||
max_images = data[0].get('max_images', 2)
|
||||
max_images = data[0].get('max_images')
|
||||
total_images = 1 + max_images # 1 featured + max_images in-article
|
||||
return f"Mapping Content for {total_images} Image Prompts"
|
||||
return f"Identifying 1 featured + {max_images} in-article image slots"
|
||||
elif isinstance(data, dict) and 'max_images' in data:
|
||||
max_images = data.get('max_images', 2)
|
||||
max_images = data.get('max_images')
|
||||
total_images = 1 + max_images
|
||||
return f"Mapping Content for {total_images} Image Prompts"
|
||||
return f"Mapping Content for Image Prompts"
|
||||
return f"Identifying 1 featured + {max_images} in-article image slots"
|
||||
return f"Identifying featured and in-article image slots"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"Analyzing SEO factors for {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_site_structure':
|
||||
blueprint_name = ''
|
||||
if isinstance(data, dict):
|
||||
@@ -94,13 +117,17 @@ class AIEngine:
|
||||
def _get_ai_call_message(self, function_name: str, count: int) -> str:
|
||||
"""Get user-friendly AI call message"""
|
||||
if function_name == 'auto_cluster':
|
||||
return f"Grouping {count} keyword{'s' if count != 1 else ''} into clusters"
|
||||
return f"Grouping {count} keywords by search intent"
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"Generating content ideas for {count} cluster{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_content':
|
||||
return f"Writing article{'s' if count != 1 else ''} with AI"
|
||||
return f"Writing {count} article{'s' if count != 1 else ''} with AI"
|
||||
elif function_name == 'generate_images':
|
||||
return f"Creating image{'s' if count != 1 else ''} with AI"
|
||||
return f"Generating image{'s' if count != 1 else ''} with AI"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
return f"Creating optimized prompts for {count} image{'s' if count != 1 else ''}"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"Optimizing {count} article{'s' if count != 1 else ''} for SEO"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return "Designing complete site architecture"
|
||||
return f"Processing with AI"
|
||||
@@ -108,13 +135,17 @@ class AIEngine:
|
||||
def _get_parse_message(self, function_name: str) -> str:
|
||||
"""Get user-friendly parse message"""
|
||||
if function_name == 'auto_cluster':
|
||||
return "Organizing clusters"
|
||||
return "Organizing semantic clusters"
|
||||
elif function_name == 'generate_ideas':
|
||||
return "Structuring outlines"
|
||||
return "Structuring article outlines"
|
||||
elif function_name == 'generate_content':
|
||||
return "Formatting content"
|
||||
return "Formatting HTML content and metadata"
|
||||
elif function_name == 'generate_images':
|
||||
return "Processing images"
|
||||
return "Processing generated images"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
return "Refining contextual image descriptions"
|
||||
elif function_name == 'optimize_content':
|
||||
return "Compiling optimization scores"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return "Compiling site map"
|
||||
return "Processing results"
|
||||
@@ -122,19 +153,21 @@ class AIEngine:
|
||||
def _get_parse_message_with_count(self, function_name: str, count: int) -> str:
|
||||
"""Get user-friendly parse message with count"""
|
||||
if function_name == 'auto_cluster':
|
||||
return f"{count} cluster{'s' if count != 1 else ''} created"
|
||||
return f"Organizing {count} semantic cluster{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"{count} idea{'s' if count != 1 else ''} created"
|
||||
return f"Structuring {count} article outline{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_content':
|
||||
return f"{count} article{'s' if count != 1 else ''} created"
|
||||
return f"Formatting {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_images':
|
||||
return f"{count} image{'s' if count != 1 else ''} created"
|
||||
return f"Processing {count} generated image{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
# Count is total prompts, in-article is count - 1 (subtract featured)
|
||||
in_article_count = max(0, count - 1)
|
||||
if in_article_count > 0:
|
||||
return f"Writing {in_article_count} In‑article Image Prompts"
|
||||
return "Writing In‑article Image Prompts"
|
||||
return f"Refining {in_article_count} in-article image description{'s' if in_article_count != 1 else ''}"
|
||||
return "Refining image descriptions"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"Compiling scores for {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return f"{count} page blueprint{'s' if count != 1 else ''} mapped"
|
||||
return f"{count} item{'s' if count != 1 else ''} processed"
|
||||
@@ -142,20 +175,50 @@ class AIEngine:
|
||||
def _get_save_message(self, function_name: str, count: int) -> str:
|
||||
"""Get user-friendly save message"""
|
||||
if function_name == 'auto_cluster':
|
||||
return f"Saving {count} cluster{'s' if count != 1 else ''}"
|
||||
return f"Saving {count} cluster{'s' if count != 1 else ''} with keywords"
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"Saving {count} idea{'s' if count != 1 else ''}"
|
||||
return f"Saving {count} idea{'s' if count != 1 else ''} with outlines"
|
||||
elif function_name == 'generate_content':
|
||||
return f"Saving {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_images':
|
||||
return f"Saving {count} image{'s' if count != 1 else ''}"
|
||||
return f"Uploading {count} image{'s' if count != 1 else ''} to media library"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
# Count is total prompts created
|
||||
return f"Assigning {count} Prompts to Dedicated Slots"
|
||||
in_article = max(0, count - 1)
|
||||
return f"Assigning {count} prompts (1 featured + {in_article} in-article)"
|
||||
elif function_name == 'optimize_content':
|
||||
return f"Saving optimization scores for {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return f"Publishing {count} page blueprint{'s' if count != 1 else ''}"
|
||||
return f"Saving {count} item{'s' if count != 1 else ''}"
|
||||
|
||||
def _get_done_message(self, function_name: str, result: dict) -> str:
|
||||
"""Get user-friendly completion message with counts"""
|
||||
count = result.get('count', 0)
|
||||
|
||||
if function_name == 'auto_cluster':
|
||||
keyword_count = result.get('keywords_clustered', 0)
|
||||
return f"✓ Organized {keyword_count} keywords into {count} semantic cluster{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_ideas':
|
||||
return f"✓ Created {count} content idea{'s' if count != 1 else ''} with detailed outlines"
|
||||
elif function_name == 'generate_content':
|
||||
total_words = result.get('total_words', 0)
|
||||
if total_words > 0:
|
||||
return f"✓ Generated {count} article{'s' if count != 1 else ''} ({total_words:,} words)"
|
||||
return f"✓ Generated {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_images':
|
||||
return f"✓ Generated and saved {count} AI image{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_image_prompts':
|
||||
in_article = max(0, count - 1)
|
||||
return f"✓ Created {count} image prompt{'s' if count != 1 else ''} (1 featured + {in_article} in-article)"
|
||||
elif function_name == 'optimize_content':
|
||||
avg_score = result.get('average_score', 0)
|
||||
if avg_score > 0:
|
||||
return f"✓ Optimized {count} article{'s' if count != 1 else ''} (avg score: {avg_score}%)"
|
||||
return f"✓ Optimized {count} article{'s' if count != 1 else ''}"
|
||||
elif function_name == 'generate_site_structure':
|
||||
return f"✓ Created {count} page blueprint{'s' if count != 1 else ''}"
|
||||
return f"✓ {count} item{'s' if count != 1 else ''} completed"
|
||||
|
||||
def execute(self, fn: BaseAIFunction, payload: dict) -> dict:
|
||||
"""
|
||||
Unified execution pipeline for all AI functions.
|
||||
@@ -243,12 +306,13 @@ class AIEngine:
|
||||
|
||||
ai_core = AICore(account=self.account)
|
||||
function_name = fn.get_name()
|
||||
|
||||
# Generate function_id for tracking (ai-{function_name}-01)
|
||||
# Normalize underscores to hyphens to match frontend tracking IDs
|
||||
function_id_base = function_name.replace('_', '-')
|
||||
function_id = f"ai-{function_id_base}-01-desktop"
|
||||
|
||||
|
||||
# Generate prompt prefix for tracking (e.g., ##GP01-Clustering or ##CP01-Clustering)
|
||||
# This replaces function_id and indicates whether prompt is global or custom
|
||||
from igny8_core.ai.prompts import get_prompt_prefix_for_function
|
||||
prompt_prefix = get_prompt_prefix_for_function(function_name, account=self.account)
|
||||
logger.info(f"[AIEngine] Using prompt prefix: {prompt_prefix}")
|
||||
|
||||
# Get model config from settings (requires account)
|
||||
# This will raise ValueError if IntegrationSettings not configured
|
||||
try:
|
||||
@@ -286,7 +350,7 @@ class AIEngine:
|
||||
temperature=model_config.get('temperature'),
|
||||
response_format=model_config.get('response_format'),
|
||||
function_name=function_name,
|
||||
function_id=function_id # Pass function_id for tracking
|
||||
prompt_prefix=prompt_prefix # Pass prompt prefix for tracking (replaces function_id)
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = f"AI call failed: {str(e)}"
|
||||
@@ -376,18 +440,18 @@ class AIEngine:
|
||||
# Map function name to operation type
|
||||
operation_type = self._get_operation_type(function_name)
|
||||
|
||||
# Calculate actual amount based on results
|
||||
actual_amount = self._get_actual_amount(function_name, save_result, parsed, data)
|
||||
# Get actual token usage from response (AI returns 'input_tokens' and 'output_tokens')
|
||||
tokens_input = raw_response.get('input_tokens', 0)
|
||||
tokens_output = raw_response.get('output_tokens', 0)
|
||||
|
||||
# Deduct credits using the new convenience method
|
||||
# Deduct credits based on actual token usage
|
||||
CreditService.deduct_credits_for_operation(
|
||||
account=self.account,
|
||||
operation_type=operation_type,
|
||||
amount=actual_amount,
|
||||
tokens_input=tokens_input,
|
||||
tokens_output=tokens_output,
|
||||
cost_usd=raw_response.get('cost'),
|
||||
model_used=raw_response.get('model', ''),
|
||||
tokens_input=raw_response.get('tokens_input', 0),
|
||||
tokens_output=raw_response.get('tokens_output', 0),
|
||||
related_object_type=self._get_related_object_type(function_name),
|
||||
related_object_id=save_result.get('id') or save_result.get('cluster_id') or save_result.get('task_id'),
|
||||
metadata={
|
||||
@@ -399,7 +463,10 @@ class AIEngine:
|
||||
}
|
||||
)
|
||||
|
||||
logger.info(f"[AIEngine] Credits deducted: {operation_type}, amount: {actual_amount}")
|
||||
logger.info(
|
||||
f"[AIEngine] Credits deducted: {operation_type}, "
|
||||
f"tokens: {tokens_input + tokens_output} ({tokens_input} in, {tokens_output} out)"
|
||||
)
|
||||
except InsufficientCreditsError as e:
|
||||
# This shouldn't happen since we checked before, but log it
|
||||
logger.error(f"[AIEngine] Insufficient credits during deduction: {e}")
|
||||
@@ -408,13 +475,16 @@ class AIEngine:
|
||||
# Don't fail the operation if credit deduction fails (for backward compatibility)
|
||||
|
||||
# Phase 6: DONE - Finalization (98-100%)
|
||||
success_msg = f"Task completed: {final_save_msg}" if 'final_save_msg' in locals() else "Task completed successfully"
|
||||
self.step_tracker.add_request_step("DONE", "success", "Task completed successfully")
|
||||
self.tracker.update("DONE", 100, "Task complete!", meta=self.step_tracker.get_meta())
|
||||
done_msg = self._get_done_message(function_name, save_result)
|
||||
self.step_tracker.add_request_step("DONE", "success", done_msg)
|
||||
self.tracker.update("DONE", 100, done_msg, meta=self.step_tracker.get_meta())
|
||||
|
||||
# Log to database
|
||||
self._log_to_database(fn, payload, parsed, save_result)
|
||||
|
||||
# Create notification for successful completion
|
||||
self._create_success_notification(function_name, save_result, payload)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
**save_result,
|
||||
@@ -458,6 +528,9 @@ class AIEngine:
|
||||
|
||||
self._log_to_database(fn, None, None, None, error=error)
|
||||
|
||||
# Create notification for failure
|
||||
self._create_failure_notification(function_name, error)
|
||||
|
||||
return {
|
||||
'success': False,
|
||||
'error': error,
|
||||
@@ -585,4 +658,104 @@ class AIEngine:
|
||||
'generate_site_structure': 'site_blueprint',
|
||||
}
|
||||
return mapping.get(function_name, 'unknown')
|
||||
|
||||
def _create_success_notification(self, function_name: str, save_result: dict, payload: dict):
|
||||
"""Create notification for successful AI task completion"""
|
||||
if not self.account:
|
||||
return
|
||||
|
||||
# Lazy import to avoid circular dependency and Django app loading issues
|
||||
from igny8_core.business.notifications.services import NotificationService
|
||||
|
||||
# Get site from payload if available
|
||||
site = None
|
||||
site_id = payload.get('site_id')
|
||||
if site_id:
|
||||
try:
|
||||
from igny8_core.auth.models import Site
|
||||
site = Site.objects.get(id=site_id, account=self.account)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
# Map function to appropriate notification method
|
||||
if function_name == 'auto_cluster':
|
||||
NotificationService.notify_clustering_complete(
|
||||
account=self.account,
|
||||
site=site,
|
||||
cluster_count=save_result.get('clusters_created', 0),
|
||||
keyword_count=save_result.get('keywords_updated', 0)
|
||||
)
|
||||
elif function_name == 'generate_ideas':
|
||||
NotificationService.notify_ideas_complete(
|
||||
account=self.account,
|
||||
site=site,
|
||||
idea_count=save_result.get('count', 0),
|
||||
cluster_count=len(payload.get('ids', []))
|
||||
)
|
||||
elif function_name == 'generate_content':
|
||||
NotificationService.notify_content_complete(
|
||||
account=self.account,
|
||||
site=site,
|
||||
article_count=save_result.get('count', 0),
|
||||
word_count=save_result.get('word_count', 0)
|
||||
)
|
||||
elif function_name == 'generate_image_prompts':
|
||||
NotificationService.notify_prompts_complete(
|
||||
account=self.account,
|
||||
site=site,
|
||||
prompt_count=save_result.get('count', 0)
|
||||
)
|
||||
elif function_name == 'generate_images':
|
||||
NotificationService.notify_images_complete(
|
||||
account=self.account,
|
||||
site=site,
|
||||
image_count=save_result.get('count', 0)
|
||||
)
|
||||
|
||||
logger.info(f"[AIEngine] Created success notification for {function_name}")
|
||||
except Exception as e:
|
||||
# Don't fail the task if notification creation fails
|
||||
logger.warning(f"[AIEngine] Failed to create success notification: {e}", exc_info=True)
|
||||
|
||||
def _create_failure_notification(self, function_name: str, error: str):
|
||||
"""Create notification for failed AI task"""
|
||||
if not self.account:
|
||||
return
|
||||
|
||||
# Lazy import to avoid circular dependency and Django app loading issues
|
||||
from igny8_core.business.notifications.services import NotificationService
|
||||
|
||||
try:
|
||||
# Map function to appropriate failure notification method
|
||||
if function_name == 'auto_cluster':
|
||||
NotificationService.notify_clustering_failed(
|
||||
account=self.account,
|
||||
error=error
|
||||
)
|
||||
elif function_name == 'generate_ideas':
|
||||
NotificationService.notify_ideas_failed(
|
||||
account=self.account,
|
||||
error=error
|
||||
)
|
||||
elif function_name == 'generate_content':
|
||||
NotificationService.notify_content_failed(
|
||||
account=self.account,
|
||||
error=error
|
||||
)
|
||||
elif function_name == 'generate_image_prompts':
|
||||
NotificationService.notify_prompts_failed(
|
||||
account=self.account,
|
||||
error=error
|
||||
)
|
||||
elif function_name == 'generate_images':
|
||||
NotificationService.notify_images_failed(
|
||||
account=self.account,
|
||||
error=error
|
||||
)
|
||||
|
||||
logger.info(f"[AIEngine] Created failure notification for {function_name}")
|
||||
except Exception as e:
|
||||
# Don't fail the task if notification creation fails
|
||||
logger.warning(f"[AIEngine] Failed to create failure notification: {e}", exc_info=True)
|
||||
|
||||
|
||||
@@ -97,7 +97,6 @@ class AutoClusterFunction(BaseAIFunction):
|
||||
'keyword': kw.keyword,
|
||||
'volume': kw.volume,
|
||||
'difficulty': kw.difficulty,
|
||||
'intent': kw.intent,
|
||||
}
|
||||
for kw in keywords
|
||||
],
|
||||
@@ -111,7 +110,7 @@ class AutoClusterFunction(BaseAIFunction):
|
||||
|
||||
# Format keywords
|
||||
keywords_text = '\n'.join([
|
||||
f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']}, Intent: {kw['intent']})"
|
||||
f"- {kw['keyword']} (Volume: {kw['volume']}, Difficulty: {kw['difficulty']})"
|
||||
for kw in keyword_data
|
||||
])
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ class GenerateImagePromptsFunction(BaseAIFunction):
|
||||
data = data[0]
|
||||
|
||||
extracted = data['extracted']
|
||||
max_images = data.get('max_images', 2)
|
||||
max_images = data.get('max_images')
|
||||
|
||||
# Format content for prompt
|
||||
content_text = self._format_content_for_prompt(extracted)
|
||||
@@ -112,7 +112,7 @@ class GenerateImagePromptsFunction(BaseAIFunction):
|
||||
return prompt
|
||||
|
||||
def parse_response(self, response: str, step_tracker=None) -> Dict:
|
||||
"""Parse AI response - same pattern as other functions"""
|
||||
"""Parse AI response with new structure including captions"""
|
||||
ai_core = AICore(account=getattr(self, 'account', None))
|
||||
json_data = ai_core.extract_json(response)
|
||||
|
||||
@@ -123,9 +123,28 @@ class GenerateImagePromptsFunction(BaseAIFunction):
|
||||
if 'featured_prompt' not in json_data:
|
||||
raise ValueError("Missing 'featured_prompt' in AI response")
|
||||
|
||||
if 'featured_caption' not in json_data:
|
||||
raise ValueError("Missing 'featured_caption' in AI response")
|
||||
|
||||
if 'in_article_prompts' not in json_data:
|
||||
raise ValueError("Missing 'in_article_prompts' in AI response")
|
||||
|
||||
# Validate in_article_prompts structure (should be list of objects with prompt & caption)
|
||||
in_article_prompts = json_data.get('in_article_prompts', [])
|
||||
if in_article_prompts:
|
||||
for idx, item in enumerate(in_article_prompts):
|
||||
if isinstance(item, dict):
|
||||
if 'prompt' not in item:
|
||||
raise ValueError(f"Missing 'prompt' in in_article_prompts[{idx}]")
|
||||
if 'caption' not in item:
|
||||
raise ValueError(f"Missing 'caption' in in_article_prompts[{idx}]")
|
||||
else:
|
||||
# Legacy format (just string) - convert to new format
|
||||
in_article_prompts[idx] = {
|
||||
'prompt': str(item),
|
||||
'caption': '' # Empty caption for legacy data
|
||||
}
|
||||
|
||||
return json_data
|
||||
|
||||
def save_output(
|
||||
@@ -146,36 +165,47 @@ class GenerateImagePromptsFunction(BaseAIFunction):
|
||||
|
||||
content = original_data['content']
|
||||
extracted = original_data['extracted']
|
||||
max_images = original_data.get('max_images', 2)
|
||||
max_images = original_data.get('max_images')
|
||||
|
||||
prompts_created = 0
|
||||
|
||||
with transaction.atomic():
|
||||
# Save featured image prompt - use content instead of task
|
||||
# Save featured image prompt with caption
|
||||
Images.objects.update_or_create(
|
||||
content=content,
|
||||
image_type='featured',
|
||||
defaults={
|
||||
'prompt': parsed['featured_prompt'],
|
||||
'caption': parsed.get('featured_caption', ''),
|
||||
'status': 'pending',
|
||||
'position': 0,
|
||||
}
|
||||
)
|
||||
prompts_created += 1
|
||||
|
||||
# Save in-article image prompts
|
||||
# Save in-article image prompts with captions
|
||||
in_article_prompts = parsed.get('in_article_prompts', [])
|
||||
h2_headings = extracted.get('h2_headings', [])
|
||||
|
||||
for idx, prompt_text in enumerate(in_article_prompts[:max_images]):
|
||||
heading = h2_headings[idx] if idx < len(h2_headings) else f"Section {idx + 1}"
|
||||
for idx, prompt_data in enumerate(in_article_prompts[:max_images]):
|
||||
# Handle both new format (dict with prompt & caption) and legacy format (string)
|
||||
if isinstance(prompt_data, dict):
|
||||
prompt_text = prompt_data.get('prompt', '')
|
||||
caption_text = prompt_data.get('caption', '')
|
||||
else:
|
||||
# Legacy format - just a string prompt
|
||||
prompt_text = str(prompt_data)
|
||||
caption_text = ''
|
||||
|
||||
heading = h2_headings[idx] if idx < len(h2_headings) else f"Section {idx}"
|
||||
|
||||
Images.objects.update_or_create(
|
||||
content=content,
|
||||
image_type='in_article',
|
||||
position=idx + 1,
|
||||
position=idx, # 0-based position matching section array indices
|
||||
defaults={
|
||||
'prompt': prompt_text,
|
||||
'caption': caption_text,
|
||||
'status': 'pending',
|
||||
}
|
||||
)
|
||||
@@ -188,16 +218,14 @@ class GenerateImagePromptsFunction(BaseAIFunction):
|
||||
|
||||
# Helper methods
|
||||
def _get_max_in_article_images(self, account) -> int:
|
||||
"""Get max_in_article_images from IntegrationSettings"""
|
||||
try:
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
settings = IntegrationSettings.objects.get(
|
||||
account=account,
|
||||
integration_type='image_generation'
|
||||
)
|
||||
return settings.config.get('max_in_article_images', 2)
|
||||
except IntegrationSettings.DoesNotExist:
|
||||
return 2 # Default
|
||||
"""
|
||||
Get max_in_article_images from AISettings (with account override).
|
||||
"""
|
||||
from igny8_core.modules.system.ai_settings import AISettings
|
||||
|
||||
max_images = AISettings.get_effective_max_images(account)
|
||||
logger.info(f"Using max_in_article_images={max_images} for account {account.id}")
|
||||
return max_images
|
||||
|
||||
def _extract_content_elements(self, content: Content, max_images: int) -> Dict:
|
||||
"""Extract title, intro paragraphs, and H2 headings from content HTML"""
|
||||
|
||||
@@ -67,42 +67,39 @@ class GenerateImagesFunction(BaseAIFunction):
|
||||
if not tasks:
|
||||
raise ValueError("No tasks found")
|
||||
|
||||
# Get image generation settings
|
||||
image_settings = {}
|
||||
if account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
integration = IntegrationSettings.objects.get(
|
||||
account=account,
|
||||
integration_type='image_generation',
|
||||
is_active=True
|
||||
)
|
||||
image_settings = integration.config or {}
|
||||
except Exception:
|
||||
pass
|
||||
# Get image generation settings from AISettings (with account overrides)
|
||||
from igny8_core.modules.system.ai_settings import AISettings
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
# Extract settings with defaults
|
||||
provider = image_settings.get('provider') or image_settings.get('service', 'openai')
|
||||
if provider == 'runware':
|
||||
model = image_settings.get('model') or image_settings.get('runwareModel', 'runware:97@1')
|
||||
# Get effective settings (AISettings + AccountSettings overrides)
|
||||
image_style = AISettings.get_effective_image_style(account)
|
||||
max_images = AISettings.get_effective_max_images(account)
|
||||
|
||||
# Get default image model and provider from database
|
||||
default_model = ModelRegistry.get_default_model('image')
|
||||
if default_model:
|
||||
model_config = ModelRegistry.get_model(default_model)
|
||||
provider = model_config.provider if model_config else 'openai'
|
||||
model = default_model
|
||||
else:
|
||||
model = image_settings.get('model', 'dall-e-3')
|
||||
provider = 'openai'
|
||||
model = 'dall-e-3'
|
||||
|
||||
logger.info(f"Using image settings: provider={provider}, model={model}, style={image_style}, max={max_images}")
|
||||
|
||||
return {
|
||||
'tasks': tasks,
|
||||
'account': account,
|
||||
'provider': provider,
|
||||
'model': model,
|
||||
'image_type': image_settings.get('image_type', 'realistic'),
|
||||
'max_in_article_images': int(image_settings.get('max_in_article_images', 2)),
|
||||
'desktop_enabled': image_settings.get('desktop_enabled', True),
|
||||
'mobile_enabled': image_settings.get('mobile_enabled', True),
|
||||
'image_type': image_style,
|
||||
'max_in_article_images': max_images,
|
||||
}
|
||||
|
||||
def build_prompt(self, data: Dict, account=None) -> Dict:
|
||||
"""Extract image prompts from task content"""
|
||||
task = data.get('task')
|
||||
max_images = data.get('max_in_article_images', 2)
|
||||
max_images = data.get('max_in_article_images')
|
||||
|
||||
if not task or not task.content:
|
||||
raise ValueError("Task has no content")
|
||||
|
||||
377
backend/igny8_core/ai/model_registry.py
Normal file
377
backend/igny8_core/ai/model_registry.py
Normal file
@@ -0,0 +1,377 @@
|
||||
"""
|
||||
Model Registry Service
|
||||
Central registry for AI model configurations with caching.
|
||||
|
||||
This service provides:
|
||||
- Database-driven model configuration (from AIModelConfig)
|
||||
- Integration provider API key retrieval (from IntegrationProvider)
|
||||
- Caching for performance
|
||||
- Cost calculation methods
|
||||
|
||||
Usage:
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
# Get model config
|
||||
model = ModelRegistry.get_model('gpt-4o-mini')
|
||||
|
||||
# Get rate
|
||||
input_rate = ModelRegistry.get_rate('gpt-4o-mini', 'input')
|
||||
|
||||
# Calculate cost
|
||||
cost = ModelRegistry.calculate_cost('gpt-4o-mini', input_tokens=1000, output_tokens=500)
|
||||
|
||||
# Get API key for a provider
|
||||
api_key = ModelRegistry.get_api_key('openai')
|
||||
"""
|
||||
import logging
|
||||
from decimal import Decimal
|
||||
from typing import Optional, Dict, Any
|
||||
from django.core.cache import cache
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Cache TTL in seconds (5 minutes)
|
||||
MODEL_CACHE_TTL = 300
|
||||
|
||||
# Cache key prefix
|
||||
CACHE_KEY_PREFIX = 'ai_model_'
|
||||
PROVIDER_CACHE_PREFIX = 'provider_'
|
||||
|
||||
|
||||
class ModelRegistry:
|
||||
"""
|
||||
Central registry for AI model configurations with caching.
|
||||
Uses AIModelConfig from database for model configs.
|
||||
Uses IntegrationProvider for API keys.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def _get_cache_key(cls, model_id: str) -> str:
|
||||
"""Generate cache key for model"""
|
||||
return f"{CACHE_KEY_PREFIX}{model_id}"
|
||||
|
||||
@classmethod
|
||||
def _get_provider_cache_key(cls, provider_id: str) -> str:
|
||||
"""Generate cache key for provider"""
|
||||
return f"{PROVIDER_CACHE_PREFIX}{provider_id}"
|
||||
|
||||
@classmethod
|
||||
def _get_from_db(cls, model_id: str) -> Optional[Any]:
|
||||
"""Get model config from database"""
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
return AIModelConfig.objects.filter(
|
||||
model_name=model_id,
|
||||
is_active=True
|
||||
).first()
|
||||
except Exception as e:
|
||||
logger.debug(f"Could not fetch model {model_id} from DB: {e}")
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def get_model(cls, model_id: str) -> Optional[Any]:
|
||||
"""
|
||||
Get model configuration by model_id.
|
||||
|
||||
Order of lookup:
|
||||
1. Cache
|
||||
2. Database (AIModelConfig)
|
||||
|
||||
Args:
|
||||
model_id: The model identifier (e.g., 'gpt-4o-mini', 'dall-e-3')
|
||||
|
||||
Returns:
|
||||
AIModelConfig instance, None if not found
|
||||
"""
|
||||
cache_key = cls._get_cache_key(model_id)
|
||||
|
||||
# Try cache first
|
||||
cached = cache.get(cache_key)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
# Try database
|
||||
model_config = cls._get_from_db(model_id)
|
||||
|
||||
if model_config:
|
||||
cache.set(cache_key, model_config, MODEL_CACHE_TTL)
|
||||
return model_config
|
||||
|
||||
logger.warning(f"Model {model_id} not found in database")
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def get_rate(cls, model_id: str, rate_type: str) -> Decimal:
|
||||
"""
|
||||
Get specific rate for a model.
|
||||
|
||||
Args:
|
||||
model_id: The model identifier
|
||||
rate_type: 'input', 'output' (for text models) or 'image' (for image models)
|
||||
|
||||
Returns:
|
||||
Decimal rate value, 0 if not found
|
||||
"""
|
||||
model = cls.get_model(model_id)
|
||||
if not model:
|
||||
return Decimal('0')
|
||||
|
||||
# Handle AIModelConfig instance
|
||||
if rate_type == 'input':
|
||||
return model.input_cost_per_1m or Decimal('0')
|
||||
elif rate_type == 'output':
|
||||
return model.output_cost_per_1m or Decimal('0')
|
||||
elif rate_type == 'image':
|
||||
return model.cost_per_image or Decimal('0')
|
||||
|
||||
return Decimal('0')
|
||||
|
||||
@classmethod
|
||||
def calculate_cost(cls, model_id: str, input_tokens: int = 0, output_tokens: int = 0, num_images: int = 0) -> Decimal:
|
||||
"""
|
||||
Calculate cost for model usage.
|
||||
|
||||
For text models: Uses input/output token counts
|
||||
For image models: Uses num_images
|
||||
|
||||
Args:
|
||||
model_id: The model identifier
|
||||
input_tokens: Number of input tokens (for text models)
|
||||
output_tokens: Number of output tokens (for text models)
|
||||
num_images: Number of images (for image models)
|
||||
|
||||
Returns:
|
||||
Decimal cost in USD
|
||||
"""
|
||||
model = cls.get_model(model_id)
|
||||
if not model:
|
||||
return Decimal('0')
|
||||
|
||||
# Get model type from AIModelConfig
|
||||
model_type = model.model_type
|
||||
|
||||
if model_type == 'text':
|
||||
input_rate = cls.get_rate(model_id, 'input')
|
||||
output_rate = cls.get_rate(model_id, 'output')
|
||||
|
||||
cost = (
|
||||
(Decimal(input_tokens) * input_rate) +
|
||||
(Decimal(output_tokens) * output_rate)
|
||||
) / Decimal('1000000')
|
||||
|
||||
return cost
|
||||
|
||||
elif model_type == 'image':
|
||||
image_rate = cls.get_rate(model_id, 'image')
|
||||
return image_rate * Decimal(num_images)
|
||||
|
||||
return Decimal('0')
|
||||
|
||||
@classmethod
|
||||
def get_default_model(cls, model_type: str = 'text') -> Optional[str]:
|
||||
"""
|
||||
Get the default model for a given type from database.
|
||||
|
||||
Args:
|
||||
model_type: 'text' or 'image'
|
||||
|
||||
Returns:
|
||||
model_id string or None
|
||||
"""
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
default = AIModelConfig.objects.filter(
|
||||
model_type=model_type,
|
||||
is_active=True,
|
||||
is_default=True
|
||||
).first()
|
||||
|
||||
if default:
|
||||
return default.model_name
|
||||
|
||||
# If no default is set, return first active model of this type
|
||||
first_active = AIModelConfig.objects.filter(
|
||||
model_type=model_type,
|
||||
is_active=True
|
||||
).order_by('model_name').first()
|
||||
|
||||
if first_active:
|
||||
return first_active.model_name
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Could not get default {model_type} model from DB: {e}")
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def list_models(cls, model_type: Optional[str] = None, provider: Optional[str] = None) -> list:
|
||||
"""
|
||||
List all available models from database, optionally filtered by type or provider.
|
||||
|
||||
Args:
|
||||
model_type: Filter by 'text', 'image', or 'embedding'
|
||||
provider: Filter by 'openai', 'anthropic', 'runware', etc.
|
||||
|
||||
Returns:
|
||||
List of AIModelConfig instances
|
||||
"""
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
queryset = AIModelConfig.objects.filter(is_active=True)
|
||||
|
||||
if model_type:
|
||||
queryset = queryset.filter(model_type=model_type)
|
||||
if provider:
|
||||
queryset = queryset.filter(provider=provider)
|
||||
|
||||
return list(queryset.order_by('model_name'))
|
||||
except Exception as e:
|
||||
logger.error(f"Could not list models from DB: {e}")
|
||||
return []
|
||||
|
||||
@classmethod
|
||||
def clear_cache(cls, model_id: Optional[str] = None):
|
||||
"""
|
||||
Clear model cache.
|
||||
|
||||
Args:
|
||||
model_id: Clear specific model cache, or all if None
|
||||
"""
|
||||
if model_id:
|
||||
cache.delete(cls._get_cache_key(model_id))
|
||||
else:
|
||||
# Clear all model caches - use pattern if available
|
||||
try:
|
||||
from django.core.cache import caches
|
||||
default_cache = caches['default']
|
||||
if hasattr(default_cache, 'delete_pattern'):
|
||||
default_cache.delete_pattern(f"{CACHE_KEY_PREFIX}*")
|
||||
else:
|
||||
# Fallback: clear all known models from DB
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
for model in AIModelConfig.objects.values_list('model_name', flat=True):
|
||||
cache.delete(cls._get_cache_key(model))
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not clear all model caches: {e}")
|
||||
|
||||
@classmethod
|
||||
def validate_model(cls, model_id: str) -> bool:
|
||||
"""
|
||||
Check if a model ID is valid and active.
|
||||
|
||||
Args:
|
||||
model_id: The model identifier to validate
|
||||
|
||||
Returns:
|
||||
True if model exists and is active, False otherwise
|
||||
"""
|
||||
model = cls.get_model(model_id)
|
||||
if not model:
|
||||
return False
|
||||
return model.is_active
|
||||
|
||||
# ========== IntegrationProvider methods ==========
|
||||
|
||||
@classmethod
|
||||
def get_provider(cls, provider_id: str) -> Optional[Any]:
|
||||
"""
|
||||
Get IntegrationProvider by provider_id.
|
||||
|
||||
Args:
|
||||
provider_id: The provider identifier (e.g., 'openai', 'stripe', 'resend')
|
||||
|
||||
Returns:
|
||||
IntegrationProvider instance, None if not found
|
||||
"""
|
||||
cache_key = cls._get_provider_cache_key(provider_id)
|
||||
|
||||
# Try cache first
|
||||
cached = cache.get(cache_key)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
try:
|
||||
from igny8_core.modules.system.models import IntegrationProvider
|
||||
provider = IntegrationProvider.objects.filter(
|
||||
provider_id=provider_id,
|
||||
is_active=True
|
||||
).first()
|
||||
|
||||
if provider:
|
||||
cache.set(cache_key, provider, MODEL_CACHE_TTL)
|
||||
return provider
|
||||
except Exception as e:
|
||||
logger.error(f"Could not fetch provider {provider_id} from DB: {e}")
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def get_api_key(cls, provider_id: str) -> Optional[str]:
|
||||
"""
|
||||
Get API key for a provider.
|
||||
|
||||
Args:
|
||||
provider_id: The provider identifier (e.g., 'openai', 'anthropic', 'runware')
|
||||
|
||||
Returns:
|
||||
API key string, None if not found or provider is inactive
|
||||
"""
|
||||
provider = cls.get_provider(provider_id)
|
||||
if provider and provider.api_key:
|
||||
return provider.api_key
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def get_api_secret(cls, provider_id: str) -> Optional[str]:
|
||||
"""
|
||||
Get API secret for a provider (for OAuth, Stripe secret key, etc.).
|
||||
|
||||
Args:
|
||||
provider_id: The provider identifier
|
||||
|
||||
Returns:
|
||||
API secret string, None if not found
|
||||
"""
|
||||
provider = cls.get_provider(provider_id)
|
||||
if provider and provider.api_secret:
|
||||
return provider.api_secret
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def get_webhook_secret(cls, provider_id: str) -> Optional[str]:
|
||||
"""
|
||||
Get webhook secret for a provider (for Stripe, PayPal webhooks).
|
||||
|
||||
Args:
|
||||
provider_id: The provider identifier
|
||||
|
||||
Returns:
|
||||
Webhook secret string, None if not found
|
||||
"""
|
||||
provider = cls.get_provider(provider_id)
|
||||
if provider and provider.webhook_secret:
|
||||
return provider.webhook_secret
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def clear_provider_cache(cls, provider_id: Optional[str] = None):
|
||||
"""
|
||||
Clear provider cache.
|
||||
|
||||
Args:
|
||||
provider_id: Clear specific provider cache, or all if None
|
||||
"""
|
||||
if provider_id:
|
||||
cache.delete(cls._get_provider_cache_key(provider_id))
|
||||
else:
|
||||
try:
|
||||
from django.core.cache import caches
|
||||
default_cache = caches['default']
|
||||
if hasattr(default_cache, 'delete_pattern'):
|
||||
default_cache.delete_pattern(f"{PROVIDER_CACHE_PREFIX}*")
|
||||
else:
|
||||
from igny8_core.modules.system.models import IntegrationProvider
|
||||
for pid in IntegrationProvider.objects.values_list('provider_id', flat=True):
|
||||
cache.delete(cls._get_provider_cache_key(pid))
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not clear provider caches: {e}")
|
||||
@@ -1,9 +1,9 @@
|
||||
"""
|
||||
Prompt Registry - Centralized prompt management with override hierarchy
|
||||
Supports: task-level overrides → DB prompts → default fallbacks
|
||||
Supports: task-level overrides → DB prompts → GlobalAIPrompt (REQUIRED)
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
from typing import Dict, Any, Optional, Tuple
|
||||
from django.db import models
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -14,584 +14,12 @@ class PromptRegistry:
|
||||
Centralized prompt registry with hierarchical resolution:
|
||||
1. Task-level prompt_override (if exists)
|
||||
2. DB prompt for (account, function)
|
||||
3. Default fallback from registry
|
||||
3. GlobalAIPrompt (REQUIRED - no hardcoded fallbacks)
|
||||
"""
|
||||
|
||||
# Default prompts stored in registry
|
||||
DEFAULT_PROMPTS = {
|
||||
'clustering': """You are a semantic strategist and SEO architecture engine. Your task is to analyze the provided keyword list and group them into meaningful, intent-driven topic clusters that reflect how real users search, think, and act online.
|
||||
|
||||
Return a single JSON object with a "clusters" array. Each cluster must follow this structure:
|
||||
# Removed ALL hardcoded prompts - GlobalAIPrompt is now the ONLY source of default prompts
|
||||
# To add/modify prompts, use Django admin: /admin/system/globalaiprompt/
|
||||
|
||||
{
|
||||
"name": "[Descriptive cluster name — natural, SEO-relevant, clearly expressing the topic]",
|
||||
"description": "[1–2 concise sentences explaining what this cluster covers and why these keywords belong together]",
|
||||
"keywords": ["keyword 1", "keyword 2", "keyword 3", "..."]
|
||||
}
|
||||
|
||||
CLUSTERING STRATEGY:
|
||||
|
||||
1. Keyword-first, structure-follows:
|
||||
- Do NOT rely on assumed categories or existing content structures.
|
||||
- Begin purely from the meaning, intent, and behavioral connection between keywords.
|
||||
|
||||
2. Use multi-dimensional grouping logic:
|
||||
- Group keywords by these behavioral dimensions:
|
||||
• Search Intent → informational, commercial, transactional, navigational
|
||||
• Use-Case or Problem → what the user is trying to achieve or solve
|
||||
• Function or Feature → how something works or what it does
|
||||
• Persona or Audience → who the content or product serves
|
||||
• Context → location, time, season, platform, or device
|
||||
- Combine 2–3 dimensions naturally where they make sense.
|
||||
|
||||
3. Model real search behavior:
|
||||
- Favor clusters that form natural user journeys such as:
|
||||
• Problem ➝ Solution
|
||||
• General ➝ Specific
|
||||
• Product ➝ Use-case
|
||||
• Buyer ➝ Benefit
|
||||
• Tool ➝ Function
|
||||
• Task ➝ Method
|
||||
- Each cluster should feel like a real topic hub users would explore in depth.
|
||||
|
||||
4. Avoid superficial groupings:
|
||||
- Do not cluster keywords just because they share words.
|
||||
- Do not force-fit outliers or unrelated keywords.
|
||||
- Exclude keywords that don't logically connect to any cluster.
|
||||
|
||||
5. Quality rules:
|
||||
- Each cluster should include between 3–10 strongly related keywords.
|
||||
- Never duplicate a keyword across multiple clusters.
|
||||
- Prioritize semantic strength, search intent, and usefulness for SEO-driven content structure.
|
||||
- It's better to output fewer, high-quality clusters than many weak or shallow ones.
|
||||
|
||||
INPUT FORMAT:
|
||||
{
|
||||
"keywords": [IGNY8_KEYWORDS]
|
||||
}
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Return ONLY the final JSON object in this format:
|
||||
{
|
||||
"clusters": [
|
||||
{
|
||||
"name": "...",
|
||||
"description": "...",
|
||||
"keywords": ["...", "...", "..."]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Do not include any explanations, text, or commentary outside the JSON output.
|
||||
""",
|
||||
|
||||
'ideas': """Generate SEO-optimized, high-quality content ideas and outlines for each keyword cluster.
|
||||
Input:
|
||||
Clusters: [IGNY8_CLUSTERS]
|
||||
Keywords: [IGNY8_CLUSTER_KEYWORDS]
|
||||
|
||||
Output: JSON with "ideas" array.
|
||||
Each cluster → 1 cluster_hub + 2–4 supporting ideas.
|
||||
Each idea must include:
|
||||
title, description, content_type, content_structure, cluster_id, estimated_word_count (1500–2200), and covered_keywords.
|
||||
|
||||
Outline Rules:
|
||||
|
||||
Intro: 1 hook (30–40 words) + 2 intro paragraphs (50–60 words each).
|
||||
|
||||
5–8 H2 sections, each with 2–3 H3s.
|
||||
|
||||
Each H2 ≈ 250–300 words, mixed content (paragraphs, lists, tables, blockquotes).
|
||||
|
||||
Vary section format and tone; no bullets or lists at start.
|
||||
|
||||
Tables have columns; blockquotes = expert POV or data insight.
|
||||
|
||||
Use depth, examples, and real context.
|
||||
|
||||
Avoid repetitive structure.
|
||||
|
||||
Tone: Professional editorial flow. No generic phrasing. Use varied sentence openings and realistic examples.
|
||||
|
||||
Output JSON Example:
|
||||
|
||||
{
|
||||
"ideas": [
|
||||
{
|
||||
"title": "Best Organic Cotton Duvet Covers for All Seasons",
|
||||
"description": {
|
||||
"introduction": {
|
||||
"hook": "Transform your sleep with organic cotton that blends comfort and sustainability.",
|
||||
"paragraphs": [
|
||||
{"format": "paragraph", "details": "Overview of organic cotton's rise in bedding industry."},
|
||||
{"format": "paragraph", "details": "Why consumers prefer organic bedding over synthetic alternatives."}
|
||||
]
|
||||
},
|
||||
"H2": [
|
||||
{
|
||||
"heading": "Why Choose Organic Cotton for Bedding?",
|
||||
"subsections": [
|
||||
{"subheading": "Health and Skin Benefits", "format": "paragraph", "details": "Discuss hypoallergenic and chemical-free aspects."},
|
||||
{"subheading": "Environmental Sustainability", "format": "list", "details": "Eco benefits like low water use, no pesticides."},
|
||||
{"subheading": "Long-Term Cost Savings", "format": "table", "details": "Compare durability and pricing over time."}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"content_type": "post",
|
||||
"content_structure": "review",
|
||||
"cluster_id": 12,
|
||||
"estimated_word_count": 1800,
|
||||
"covered_keywords": "organic duvet covers, eco-friendly bedding, sustainable sheets"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Valid content_type values: post, page, product, taxonomy
|
||||
|
||||
Valid content_structure by type:
|
||||
- post: article, guide, comparison, review, listicle
|
||||
- page: landing_page, business_page, service_page, general, cluster_hub
|
||||
- product: product_page
|
||||
- taxonomy: category_archive, tag_archive, attribute_archive""",
|
||||
|
||||
'content_generation': """You are an editorial content strategist. Your task is to generate a complete JSON response object based on the provided content idea, keyword cluster, keyword list, and metadata context.
|
||||
|
||||
==================
|
||||
Generate a complete JSON response object matching this structure:
|
||||
==================
|
||||
|
||||
{
|
||||
"title": "[Article title using target keywords — full sentence case]",
|
||||
"content": "[HTML content — full editorial structure with <p>, <h2>, <h3>, <ul>, <ol>, <table>]"
|
||||
}
|
||||
|
||||
===========================
|
||||
CONTENT FLOW RULES
|
||||
===========================
|
||||
|
||||
**INTRODUCTION:**
|
||||
- Start with 1 italicized hook (30–40 words)
|
||||
- Follow with 2 narrative paragraphs (each 50–60 words; 2–3 sentences max)
|
||||
- No headings allowed in intro
|
||||
|
||||
**H2 SECTIONS (5–8 total):**
|
||||
Each section should be 250–300 words and follow this format:
|
||||
1. Two narrative paragraphs (80–120 words each, 2–3 sentences)
|
||||
2. One list or table (must come *after* a paragraph)
|
||||
3. Optional closing paragraph (40–60 words)
|
||||
4. Insert 2–3 subsections naturally after main paragraphs
|
||||
|
||||
**Formatting Rules:**
|
||||
- Vary use of unordered lists, ordered lists, and tables across sections
|
||||
- Never begin any section or sub-section with a list or table
|
||||
|
||||
===========================
|
||||
STYLE & QUALITY RULES
|
||||
===========================
|
||||
|
||||
- **Keyword Usage:**
|
||||
- Use keywords naturally in title, introduction, and headings
|
||||
- Prioritize readability over keyword density
|
||||
|
||||
- **Tone & style guidelines:**
|
||||
- No robotic or passive voice
|
||||
- Avoid generic intros like "In today's world…"
|
||||
- Don't repeat heading in opening sentence
|
||||
- Vary sentence structure and length
|
||||
|
||||
===========================
|
||||
STAGE 3: METADATA CONTEXT (NEW)
|
||||
===========================
|
||||
|
||||
**Content Structure:**
|
||||
[IGNY8_CONTENT_STRUCTURE]
|
||||
- If structure is "cluster_hub": Create comprehensive, authoritative content that serves as the main resource for this topic cluster. Include overview sections, key concepts, and links to related topics.
|
||||
- If structure is "article" or "guide": Create detailed, focused content that dives deep into the topic with actionable insights.
|
||||
- Other structures: Follow the appropriate format (listicle, comparison, review, landing_page, service_page, product_page, category_archive, tag_archive, attribute_archive).
|
||||
|
||||
**Taxonomy Context:**
|
||||
[IGNY8_TAXONOMY]
|
||||
- Use taxonomy information to structure categories and tags appropriately.
|
||||
- Align content with taxonomy hierarchy and relationships.
|
||||
- Ensure content fits within the defined taxonomy structure.
|
||||
|
||||
**Product/Service Attributes:**
|
||||
[IGNY8_ATTRIBUTES]
|
||||
- If attributes are provided (e.g., product specs, service modifiers), incorporate them naturally into the content.
|
||||
- For product content: Include specifications, features, dimensions, materials, etc. as relevant.
|
||||
- For service content: Include service tiers, pricing modifiers, availability, etc. as relevant.
|
||||
- Present attributes in a user-friendly format (tables, lists, or integrated into narrative).
|
||||
|
||||
===========================
|
||||
INPUT VARIABLES
|
||||
===========================
|
||||
|
||||
CONTENT IDEA DETAILS:
|
||||
[IGNY8_IDEA]
|
||||
|
||||
KEYWORD CLUSTER:
|
||||
[IGNY8_CLUSTER]
|
||||
|
||||
ASSOCIATED KEYWORDS:
|
||||
[IGNY8_KEYWORDS]
|
||||
|
||||
===========================
|
||||
OUTPUT FORMAT
|
||||
===========================
|
||||
|
||||
Return ONLY the final JSON object.
|
||||
Do NOT include any comments, formatting, or explanations.""",
|
||||
|
||||
'site_structure_generation': """You are a senior UX architect and information designer. Use the business brief, objectives, style references, and existing site info to propose a complete multi-page marketing website structure.
|
||||
|
||||
INPUT CONTEXT
|
||||
==============
|
||||
BUSINESS BRIEF:
|
||||
[IGNY8_BUSINESS_BRIEF]
|
||||
|
||||
PRIMARY OBJECTIVES:
|
||||
[IGNY8_OBJECTIVES]
|
||||
|
||||
STYLE & BRAND NOTES:
|
||||
[IGNY8_STYLE]
|
||||
|
||||
SITE INFO / CURRENT STRUCTURE:
|
||||
[IGNY8_SITE_INFO]
|
||||
|
||||
OUTPUT REQUIREMENTS
|
||||
====================
|
||||
Return ONE JSON object with the following keys:
|
||||
|
||||
{
|
||||
"site": {
|
||||
"name": "...",
|
||||
"primary_navigation": ["home", "services", "about", "contact"],
|
||||
"secondary_navigation": ["blog", "faq"],
|
||||
"hero_message": "High level value statement",
|
||||
"tone": "voice + tone summary"
|
||||
},
|
||||
"pages": [
|
||||
{
|
||||
"slug": "home",
|
||||
"title": "Home",
|
||||
"type": "home | about | services | products | blog | contact | custom",
|
||||
"status": "draft",
|
||||
"objective": "Explain the core brand promise and primary CTA",
|
||||
"primary_cta": "Book a strategy call",
|
||||
"seo": {
|
||||
"meta_title": "...",
|
||||
"meta_description": "..."
|
||||
},
|
||||
"blocks": [
|
||||
{
|
||||
"type": "hero | features | services | stats | testimonials | faq | contact | custom",
|
||||
"heading": "Section headline",
|
||||
"subheading": "Support copy",
|
||||
"layout": "full-width | two-column | cards | carousel",
|
||||
"content": [
|
||||
"Bullet or short paragraph describing what to render in this block"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
RULES
|
||||
=====
|
||||
- Include 5–8 pages covering the complete buyer journey (awareness → evaluation → conversion → trust).
|
||||
- Every page must have at least 3 blocks with concrete guidance (no placeholders like "Lorem ipsum").
|
||||
- Use consistent slug naming, all lowercase with hyphens.
|
||||
- Type must match the allowed enum and reflect page intent.
|
||||
- Ensure the navigation arrays align with the page list.
|
||||
- Focus on practical descriptions that an engineering team can hand off directly to the Site Builder.
|
||||
|
||||
Return ONLY valid JSON. No commentary, explanations, or Markdown.
|
||||
""",
|
||||
|
||||
'image_prompt_extraction': """Extract image prompts from the following article content.
|
||||
|
||||
ARTICLE TITLE: {title}
|
||||
|
||||
ARTICLE CONTENT:
|
||||
{content}
|
||||
|
||||
Extract image prompts for:
|
||||
1. Featured Image: One main image that represents the article topic
|
||||
2. In-Article Images: Up to {max_images} images that would be useful within the article content
|
||||
|
||||
Return a JSON object with this structure:
|
||||
{{
|
||||
"featured_prompt": "Detailed description of the featured image",
|
||||
"in_article_prompts": [
|
||||
"Description of first in-article image",
|
||||
"Description of second in-article image",
|
||||
...
|
||||
]
|
||||
}}
|
||||
|
||||
Make sure each prompt is detailed enough for image generation, describing the visual elements, style, mood, and composition.""",
|
||||
|
||||
'image_prompt_template': 'Create a high-quality {image_type} image to use as a featured photo for a blog post titled "{post_title}". The image should visually represent the theme, mood, and subject implied by the image prompt: {image_prompt}. Focus on a realistic, well-composed scene that naturally communicates the topic without text or logos. Use balanced lighting, pleasing composition, and photographic detail suitable for lifestyle or editorial web content. Avoid adding any visible or readable text, brand names, or illustrative effects. **And make sure image is not blurry.**',
|
||||
|
||||
'negative_prompt': 'text, watermark, logo, overlay, title, caption, writing on walls, writing on objects, UI, infographic elements, post title',
|
||||
|
||||
'optimize_content': """You are an expert content optimizer specializing in SEO, readability, and engagement.
|
||||
|
||||
Your task is to optimize the provided content to improve its SEO score, readability, and engagement metrics.
|
||||
|
||||
CURRENT CONTENT:
|
||||
Title: {CONTENT_TITLE}
|
||||
Word Count: {WORD_COUNT}
|
||||
Source: {SOURCE}
|
||||
Primary Keyword: {PRIMARY_KEYWORD}
|
||||
Internal Links: {INTERNAL_LINKS_COUNT}
|
||||
|
||||
CURRENT META DATA:
|
||||
Meta Title: {META_TITLE}
|
||||
Meta Description: {META_DESCRIPTION}
|
||||
|
||||
CURRENT SCORES:
|
||||
{CURRENT_SCORES}
|
||||
|
||||
HTML CONTENT:
|
||||
{HTML_CONTENT}
|
||||
|
||||
OPTIMIZATION REQUIREMENTS:
|
||||
|
||||
1. SEO Optimization:
|
||||
- Ensure meta title is 30-60 characters (if provided)
|
||||
- Ensure meta description is 120-160 characters (if provided)
|
||||
- Optimize primary keyword usage (natural, not keyword stuffing)
|
||||
- Improve heading structure (H1, H2, H3 hierarchy)
|
||||
- Add internal links where relevant (maintain existing links)
|
||||
|
||||
2. Readability:
|
||||
- Average sentence length: 15-20 words
|
||||
- Use clear, concise language
|
||||
- Break up long paragraphs
|
||||
- Use bullet points and lists where appropriate
|
||||
- Ensure proper paragraph structure
|
||||
|
||||
3. Engagement:
|
||||
- Add compelling headings
|
||||
- Include relevant images placeholders (alt text)
|
||||
- Use engaging language
|
||||
- Create clear call-to-action sections
|
||||
- Improve content flow and structure
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Return ONLY a JSON object in this format:
|
||||
{{
|
||||
"html_content": "[Optimized HTML content]",
|
||||
"meta_title": "[Optimized meta title, 30-60 chars]",
|
||||
"meta_description": "[Optimized meta description, 120-160 chars]",
|
||||
"optimization_notes": "[Brief notes on what was optimized]"
|
||||
}}
|
||||
|
||||
Do not include any explanations, text, or commentary outside the JSON output.
|
||||
""",
|
||||
|
||||
# Phase 8: Universal Content Types
|
||||
'product_generation': """You are a product content specialist. Generate comprehensive product content that includes detailed descriptions, features, specifications, pricing, and benefits.
|
||||
|
||||
INPUT:
|
||||
Product Name: [IGNY8_PRODUCT_NAME]
|
||||
Product Description: [IGNY8_PRODUCT_DESCRIPTION]
|
||||
Product Features: [IGNY8_PRODUCT_FEATURES]
|
||||
Target Audience: [IGNY8_TARGET_AUDIENCE]
|
||||
Primary Keyword: [IGNY8_PRIMARY_KEYWORD]
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Return ONLY a JSON object in this format:
|
||||
{
|
||||
"title": "[Product name and key benefit]",
|
||||
"meta_title": "[SEO-optimized meta title, 30-60 chars]",
|
||||
"meta_description": "[Compelling meta description, 120-160 chars]",
|
||||
"html_content": "[Complete HTML product page content]",
|
||||
"word_count": [Integer word count],
|
||||
"primary_keyword": "[Primary keyword]",
|
||||
"secondary_keywords": ["keyword1", "keyword2", "keyword3"],
|
||||
"tags": ["tag1", "tag2", "tag3"],
|
||||
"categories": ["Category > Subcategory"],
|
||||
"json_blocks": [
|
||||
{
|
||||
"type": "product_overview",
|
||||
"heading": "Product Overview",
|
||||
"content": "Detailed product description"
|
||||
},
|
||||
{
|
||||
"type": "features",
|
||||
"heading": "Key Features",
|
||||
"items": ["Feature 1", "Feature 2", "Feature 3"]
|
||||
},
|
||||
{
|
||||
"type": "specifications",
|
||||
"heading": "Specifications",
|
||||
"data": {"Spec 1": "Value 1", "Spec 2": "Value 2"}
|
||||
},
|
||||
{
|
||||
"type": "pricing",
|
||||
"heading": "Pricing",
|
||||
"content": "Pricing information"
|
||||
},
|
||||
{
|
||||
"type": "benefits",
|
||||
"heading": "Benefits",
|
||||
"items": ["Benefit 1", "Benefit 2", "Benefit 3"]
|
||||
}
|
||||
],
|
||||
"structure_data": {
|
||||
"product_type": "[Product type]",
|
||||
"price_range": "[Price range]",
|
||||
"target_market": "[Target market]"
|
||||
}
|
||||
}
|
||||
|
||||
CONTENT REQUIREMENTS:
|
||||
- Include compelling product overview
|
||||
- List key features with benefits
|
||||
- Provide detailed specifications
|
||||
- Include pricing information (if available)
|
||||
- Highlight unique selling points
|
||||
- Use SEO-optimized headings
|
||||
- Include call-to-action sections
|
||||
- Ensure natural keyword usage
|
||||
""",
|
||||
|
||||
'service_generation': """You are a service page content specialist. Generate comprehensive service page content that explains services, benefits, process, and pricing.
|
||||
|
||||
INPUT:
|
||||
Service Name: [IGNY8_SERVICE_NAME]
|
||||
Service Description: [IGNY8_SERVICE_DESCRIPTION]
|
||||
Service Benefits: [IGNY8_SERVICE_BENEFITS]
|
||||
Target Audience: [IGNY8_TARGET_AUDIENCE]
|
||||
Primary Keyword: [IGNY8_PRIMARY_KEYWORD]
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Return ONLY a JSON object in this format:
|
||||
{
|
||||
"title": "[Service name and value proposition]",
|
||||
"meta_title": "[SEO-optimized meta title, 30-60 chars]",
|
||||
"meta_description": "[Compelling meta description, 120-160 chars]",
|
||||
"html_content": "[Complete HTML service page content]",
|
||||
"word_count": [Integer word count],
|
||||
"primary_keyword": "[Primary keyword]",
|
||||
"secondary_keywords": ["keyword1", "keyword2", "keyword3"],
|
||||
"tags": ["tag1", "tag2", "tag3"],
|
||||
"categories": ["Category > Subcategory"],
|
||||
"json_blocks": [
|
||||
{
|
||||
"type": "service_overview",
|
||||
"heading": "Service Overview",
|
||||
"content": "Detailed service description"
|
||||
},
|
||||
{
|
||||
"type": "benefits",
|
||||
"heading": "Benefits",
|
||||
"items": ["Benefit 1", "Benefit 2", "Benefit 3"]
|
||||
},
|
||||
{
|
||||
"type": "process",
|
||||
"heading": "Our Process",
|
||||
"steps": ["Step 1", "Step 2", "Step 3"]
|
||||
},
|
||||
{
|
||||
"type": "pricing",
|
||||
"heading": "Pricing",
|
||||
"content": "Pricing information"
|
||||
},
|
||||
{
|
||||
"type": "faq",
|
||||
"heading": "Frequently Asked Questions",
|
||||
"items": [{"question": "Q1", "answer": "A1"}]
|
||||
}
|
||||
],
|
||||
"structure_data": {
|
||||
"service_type": "[Service type]",
|
||||
"duration": "[Service duration]",
|
||||
"target_market": "[Target market]"
|
||||
}
|
||||
}
|
||||
|
||||
CONTENT REQUIREMENTS:
|
||||
- Clear service overview and value proposition
|
||||
- Detailed benefits and outcomes
|
||||
- Step-by-step process explanation
|
||||
- Pricing information (if available)
|
||||
- FAQ section addressing common questions
|
||||
- Include testimonials or case studies (if applicable)
|
||||
- Use SEO-optimized headings
|
||||
- Include call-to-action sections
|
||||
""",
|
||||
|
||||
'taxonomy_generation': """You are a taxonomy and categorization specialist. Generate comprehensive taxonomy page content that organizes and explains categories, tags, and hierarchical structures.
|
||||
|
||||
INPUT:
|
||||
Taxonomy Name: [IGNY8_TAXONOMY_NAME]
|
||||
Taxonomy Description: [IGNY8_TAXONOMY_DESCRIPTION]
|
||||
Taxonomy Items: [IGNY8_TAXONOMY_ITEMS]
|
||||
Primary Keyword: [IGNY8_PRIMARY_KEYWORD]
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Return ONLY a JSON object in this format:
|
||||
{{
|
||||
"title": "[Taxonomy name and purpose]",
|
||||
"meta_title": "[SEO-optimized meta title, 30-60 chars]",
|
||||
"meta_description": "[Compelling meta description, 120-160 chars]",
|
||||
"html_content": "[Complete HTML taxonomy page content]",
|
||||
"word_count": [Integer word count],
|
||||
"primary_keyword": "[Primary keyword]",
|
||||
"secondary_keywords": ["keyword1", "keyword2", "keyword3"],
|
||||
"tags": ["tag1", "tag2", "tag3"],
|
||||
"categories": ["Category > Subcategory"],
|
||||
"json_blocks": [
|
||||
{{
|
||||
"type": "taxonomy_overview",
|
||||
"heading": "Taxonomy Overview",
|
||||
"content": "Detailed taxonomy description"
|
||||
}},
|
||||
{{
|
||||
"type": "categories",
|
||||
"heading": "Categories",
|
||||
"items": [
|
||||
{{
|
||||
"name": "Category 1",
|
||||
"description": "Category description",
|
||||
"subcategories": ["Subcat 1", "Subcat 2"]
|
||||
}}
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"type": "tags",
|
||||
"heading": "Tags",
|
||||
"items": ["Tag 1", "Tag 2", "Tag 3"]
|
||||
}},
|
||||
{{
|
||||
"type": "hierarchy",
|
||||
"heading": "Taxonomy Hierarchy",
|
||||
"structure": {{"Level 1": {{"Level 2": ["Level 3"]}}}}
|
||||
}}
|
||||
],
|
||||
"structure_data": {{
|
||||
"taxonomy_type": "[Taxonomy type]",
|
||||
"item_count": [Integer],
|
||||
"hierarchy_levels": [Integer]
|
||||
}}
|
||||
}}
|
||||
|
||||
CONTENT REQUIREMENTS:
|
||||
- Clear taxonomy overview and purpose
|
||||
- Organized category structure
|
||||
- Tag organization and relationships
|
||||
- Hierarchical structure visualization
|
||||
- SEO-optimized headings
|
||||
- Include navigation and organization benefits
|
||||
- Use clear, descriptive language
|
||||
""",
|
||||
}
|
||||
|
||||
# Mapping from function names to prompt types
|
||||
FUNCTION_TO_PROMPT_TYPE = {
|
||||
'auto_cluster': 'clustering',
|
||||
@@ -607,7 +35,114 @@ CONTENT REQUIREMENTS:
|
||||
'generate_service_page': 'service_generation',
|
||||
'generate_taxonomy': 'taxonomy_generation',
|
||||
}
|
||||
|
||||
# Mapping of prompt types to their prefix numbers and display names
# Format: {prompt_type: (number, display_name)}
# GP = Global Prompt, CP = Custom Prompt
# The two-digit number keeps prefixes stable and sortable in logs; the display
# name is a short label embedded in tracking prefixes such as
# "##GP01-Clustering" (built by get_prompt_prefix). Unknown prompt types fall
# back to ('00', prompt_type.title()) at lookup time.
PROMPT_PREFIX_MAP = {
    'clustering': ('01', 'Clustering'),
    'ideas': ('02', 'Ideas'),
    'content_generation': ('03', 'ContentGen'),
    'image_prompt_extraction': ('04', 'ImagePrompts'),
    'site_structure_generation': ('05', 'SiteStructure'),
    'optimize_content': ('06', 'OptimizeContent'),
    'product_generation': ('07', 'ProductGen'),
    'service_generation': ('08', 'ServiceGen'),
    'taxonomy_generation': ('09', 'TaxonomyGen'),
    'image_prompt_template': ('10', 'ImageTemplate'),
    'negative_prompt': ('11', 'NegativePrompt'),
}
|
||||
|
||||
@classmethod
def get_prompt_prefix(cls, prompt_type: str, is_custom: bool) -> str:
    """
    Build the tracking prefix for a prompt type.

    Args:
        prompt_type: The prompt type (e.g., 'clustering', 'ideas')
        is_custom: True if using custom/account-specific prompt, False if global

    Returns:
        Prefix string like "##GP01-Clustering" or "##CP01-Clustering"
    """
    # Unknown prompt types fall back to number '00' and a title-cased label.
    number, display_name = cls.PROMPT_PREFIX_MAP.get(
        prompt_type, ('00', prompt_type.title())
    )
    if is_custom:
        source_code = 'CP'
    else:
        source_code = 'GP'
    return "##" + source_code + number + "-" + display_name
|
||||
|
||||
@classmethod
def get_prompt_with_metadata(
    cls,
    function_name: str,
    account: Optional[Any] = None,
    task: Optional[Any] = None,
    context: Optional[Dict[str, Any]] = None
) -> Tuple[str, bool, str]:
    """
    Get prompt for a function with metadata about source.

    Priority:
    1. task.prompt_override (if task provided and has override)
    2. DB prompt for (account, function) - marked as custom if is_customized=True
    3. GlobalAIPrompt (REQUIRED - no hardcoded fallbacks)

    Args:
        function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')
        account: Account object (optional)
        task: Task object with optional prompt_override (optional)
        context: Additional context for prompt rendering (optional)

    Returns:
        Tuple of (prompt_string, is_custom, prompt_type)
        - prompt_string: The rendered prompt
        - is_custom: True if using custom/account prompt, False if global
        - prompt_type: The prompt type identifier

    Raises:
        ValueError: If neither a task override nor an account prompt applies
            and no active GlobalAIPrompt row exists for this prompt type.
    """
    # Step 1: Get prompt type. Unknown function names fall back to using
    # the function name itself as the prompt type.
    prompt_type = cls.FUNCTION_TO_PROMPT_TYPE.get(function_name, function_name)

    # Step 2: Check task-level override (always considered custom)
    if task and hasattr(task, 'prompt_override') and task.prompt_override:
        logger.info(f"Using task-level prompt override for {function_name}")
        prompt = task.prompt_override
        return cls._render_prompt(prompt, context or {}), True, prompt_type

    # Step 3: Try DB prompt (account-specific)
    if account:
        try:
            # Lazy import to avoid import cycles at module load time.
            from igny8_core.modules.system.models import AIPrompt
            db_prompt = AIPrompt.objects.get(
                account=account,
                prompt_type=prompt_type,
                is_active=True
            )
            # Check if prompt is customized: a non-customized account row is
            # still used, but reported as not custom (GP prefix downstream).
            is_custom = db_prompt.is_customized
            logger.info(f"Using {'customized' if is_custom else 'default'} account prompt for {function_name} (account {account.id})")
            prompt = db_prompt.prompt_value
            return cls._render_prompt(prompt, context or {}), is_custom, prompt_type
        except Exception as e:
            # Broad catch is deliberate: any lookup failure (missing row,
            # DB error) falls through to the global prompt below.
            logger.debug(f"No account-specific prompt found for {function_name}: {e}")

    # Step 4: Try GlobalAIPrompt (platform-wide default) - REQUIRED
    try:
        from igny8_core.modules.system.global_settings_models import GlobalAIPrompt
        global_prompt = GlobalAIPrompt.objects.get(
            prompt_type=prompt_type,
            is_active=True
        )
        logger.info(f"Using global default prompt for {function_name} from GlobalAIPrompt")
        prompt = global_prompt.prompt_value
        return cls._render_prompt(prompt, context or {}), False, prompt_type
    except Exception as e:
        # No hardcoded fallback exists by design: a missing global prompt is
        # a configuration error and must surface loudly.
        error_msg = (
            f"ERROR: Global prompt '{prompt_type}' not found for function '{function_name}'. "
            f"Please configure it in Django admin at: /admin/system/globalaiprompt/. "
            f"Error: {e}"
        )
        logger.error(error_msg)
        raise ValueError(error_msg)
|
||||
|
||||
@classmethod
|
||||
def get_prompt(
|
||||
cls,
|
||||
@@ -618,51 +153,23 @@ CONTENT REQUIREMENTS:
|
||||
) -> str:
|
||||
"""
|
||||
Get prompt for a function with hierarchical resolution.
|
||||
|
||||
|
||||
Priority:
|
||||
1. task.prompt_override (if task provided and has override)
|
||||
2. DB prompt for (account, function)
|
||||
3. Default fallback from registry
|
||||
|
||||
3. GlobalAIPrompt (REQUIRED - no hardcoded fallbacks)
|
||||
|
||||
Args:
|
||||
function_name: AI function name (e.g., 'auto_cluster', 'generate_ideas')
|
||||
account: Account object (optional)
|
||||
task: Task object with optional prompt_override (optional)
|
||||
context: Additional context for prompt rendering (optional)
|
||||
|
||||
|
||||
Returns:
|
||||
Prompt string ready for formatting
|
||||
"""
|
||||
# Step 1: Check task-level override
|
||||
if task and hasattr(task, 'prompt_override') and task.prompt_override:
|
||||
logger.info(f"Using task-level prompt override for {function_name}")
|
||||
prompt = task.prompt_override
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
|
||||
# Step 2: Get prompt type
|
||||
prompt_type = cls.FUNCTION_TO_PROMPT_TYPE.get(function_name, function_name)
|
||||
|
||||
# Step 3: Try DB prompt
|
||||
if account:
|
||||
try:
|
||||
from igny8_core.modules.system.models import AIPrompt
|
||||
db_prompt = AIPrompt.objects.get(
|
||||
account=account,
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
logger.info(f"Using DB prompt for {function_name} (account {account.id})")
|
||||
prompt = db_prompt.prompt_value
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
except Exception as e:
|
||||
logger.debug(f"No DB prompt found for {function_name}: {e}")
|
||||
|
||||
# Step 4: Use default fallback
|
||||
prompt = cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
if not prompt:
|
||||
logger.warning(f"No default prompt found for {prompt_type}, using empty string")
|
||||
|
||||
return cls._render_prompt(prompt, context or {})
|
||||
prompt, _, _ = cls.get_prompt_with_metadata(function_name, account, task, context)
|
||||
return prompt
|
||||
|
||||
@classmethod
|
||||
def _render_prompt(cls, prompt_template: str, context: Dict[str, Any]) -> str:
|
||||
@@ -728,8 +235,17 @@ CONTENT REQUIREMENTS:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Use default
|
||||
return cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
# Try GlobalAIPrompt
|
||||
try:
|
||||
from igny8_core.modules.system.global_settings_models import GlobalAIPrompt
|
||||
global_prompt = GlobalAIPrompt.objects.get(
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
return global_prompt.prompt_value
|
||||
except Exception:
|
||||
# Fallback for image_prompt_template
|
||||
return '{image_type} image for blog post titled "{post_title}": {image_prompt}'
|
||||
|
||||
@classmethod
|
||||
def get_negative_prompt(cls, account: Optional[Any] = None) -> str:
|
||||
@@ -752,8 +268,17 @@ CONTENT REQUIREMENTS:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Use default
|
||||
return cls.DEFAULT_PROMPTS.get(prompt_type, '')
|
||||
# Try GlobalAIPrompt
|
||||
try:
|
||||
from igny8_core.modules.system.global_settings_models import GlobalAIPrompt
|
||||
global_prompt = GlobalAIPrompt.objects.get(
|
||||
prompt_type=prompt_type,
|
||||
is_active=True
|
||||
)
|
||||
return global_prompt.prompt_value
|
||||
except Exception:
|
||||
# Fallback for negative_prompt
|
||||
return 'text, watermark, logo, overlay, title, caption, writing on walls, writing on objects, UI, infographic elements, post title'
|
||||
|
||||
|
||||
# Convenience function for backward compatibility
|
||||
@@ -761,3 +286,61 @@ def get_prompt(function_name: str, account=None, task=None, context=None) -> str
|
||||
"""Get prompt using registry"""
|
||||
return PromptRegistry.get_prompt(function_name, account=account, task=task, context=context)
|
||||
|
||||
|
||||
def get_prompt_with_prefix(function_name: str, account=None, task=None, context=None) -> Tuple[str, str]:
    """
    Get prompt with its tracking prefix.

    Args:
        function_name: AI function name
        account: Account object (optional)
        task: Task object with optional prompt_override (optional)
        context: Additional context for prompt rendering (optional)

    Returns:
        Tuple of (prompt_string, prefix_string)
        - prompt_string: The rendered prompt
        - prefix_string: The tracking prefix (e.g., '##GP01-Clustering' or '##CP01-Clustering')
    """
    # Resolve the prompt once; the metadata tells us which prefix applies.
    rendered_prompt, is_custom, resolved_type = PromptRegistry.get_prompt_with_metadata(
        function_name,
        account=account,
        task=task,
        context=context,
    )
    tracking_prefix = PromptRegistry.get_prompt_prefix(resolved_type, is_custom)
    return rendered_prompt, tracking_prefix
|
||||
|
||||
|
||||
def get_prompt_prefix_for_function(function_name: str, account=None, task=None) -> str:
    """
    Get just the prefix for a function without fetching the full prompt.
    Useful when the prompt was already fetched elsewhere.

    Args:
        function_name: AI function name
        account: Account object (optional)
        task: Task object with optional prompt_override (optional)

    Returns:
        The tracking prefix (e.g., '##GP01-Clustering' or '##CP01-Clustering')
    """
    resolved_type = PromptRegistry.FUNCTION_TO_PROMPT_TYPE.get(function_name, function_name)

    # A task-level override is always reported as custom.
    if task and hasattr(task, 'prompt_override') and task.prompt_override:
        return PromptRegistry.get_prompt_prefix(resolved_type, is_custom=True)

    # Default to the global (non-custom) prefix; an active account-level
    # prompt may upgrade this to custom via its is_customized flag.
    custom_flag = False
    if account:
        try:
            from igny8_core.modules.system.models import AIPrompt
            account_prompt = AIPrompt.objects.get(
                account=account,
                prompt_type=resolved_type,
                is_active=True,
            )
            custom_flag = account_prompt.is_customized
        except Exception:
            # Best-effort lookup: any failure means we report the global prefix.
            pass

    return PromptRegistry.get_prompt_prefix(resolved_type, is_custom=custom_flag)
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
AI Settings - Centralized model configurations and limits
|
||||
Uses IntegrationSettings only - no hardcoded defaults or fallbacks.
|
||||
Uses AISettings (system defaults) with optional per-account overrides via AccountSettings.
|
||||
API keys are stored in IntegrationProvider.
|
||||
"""
|
||||
from typing import Dict, Any
|
||||
import logging
|
||||
@@ -19,18 +20,22 @@ FUNCTION_ALIASES = {
|
||||
|
||||
def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
"""
|
||||
Get model configuration from IntegrationSettings only.
|
||||
No fallbacks - account must have IntegrationSettings configured.
|
||||
Get model configuration for AI function.
|
||||
|
||||
Architecture:
|
||||
- API keys: From IntegrationProvider (centralized)
|
||||
- Model: From AIModelConfig (is_default=True)
|
||||
- Params: From AISettings with AccountSettings overrides
|
||||
|
||||
Args:
|
||||
function_name: Name of the AI function
|
||||
account: Account instance (required)
|
||||
|
||||
Returns:
|
||||
dict: Model configuration with 'model', 'max_tokens', 'temperature'
|
||||
dict: Model configuration with 'model', 'max_tokens', 'temperature', 'api_key'
|
||||
|
||||
Raises:
|
||||
ValueError: If account not provided or IntegrationSettings not configured
|
||||
ValueError: If account not provided or settings not configured
|
||||
"""
|
||||
if not account:
|
||||
raise ValueError("Account is required for model configuration")
|
||||
@@ -38,46 +43,60 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
# Resolve function alias
|
||||
actual_name = FUNCTION_ALIASES.get(function_name, function_name)
|
||||
|
||||
# Get IntegrationSettings for OpenAI
|
||||
try:
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
integration_settings = IntegrationSettings.objects.get(
|
||||
integration_type='openai',
|
||||
account=account,
|
||||
is_active=True
|
||||
)
|
||||
except IntegrationSettings.DoesNotExist:
|
||||
from igny8_core.modules.system.ai_settings import AISettings
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
# Get API key from IntegrationProvider
|
||||
api_key = ModelRegistry.get_api_key('openai')
|
||||
|
||||
if not api_key:
|
||||
raise ValueError(
|
||||
"Platform OpenAI API key not configured. "
|
||||
"Please configure IntegrationProvider in Django admin."
|
||||
)
|
||||
|
||||
# Get default text model from AIModelConfig
|
||||
default_model = ModelRegistry.get_default_model('text')
|
||||
if not default_model:
|
||||
default_model = 'gpt-4o-mini' # Ultimate fallback
|
||||
|
||||
model = default_model
|
||||
|
||||
# Get settings with account overrides
|
||||
temperature = AISettings.get_effective_temperature(account)
|
||||
max_tokens = AISettings.get_effective_max_tokens(account)
|
||||
|
||||
# Get max_tokens from AIModelConfig if available
|
||||
try:
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
model_config = AIModelConfig.objects.filter(
|
||||
model_name=model,
|
||||
is_active=True
|
||||
).first()
|
||||
if model_config and model_config.max_output_tokens:
|
||||
max_tokens = model_config.max_output_tokens
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load max_tokens from AIModelConfig for {model}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Could not load OpenAI settings for account {account.id}: {e}")
|
||||
raise ValueError(
|
||||
f"OpenAI IntegrationSettings not configured for account {account.id}. "
|
||||
f"Please configure OpenAI settings in the integration page."
|
||||
f"Could not load OpenAI configuration for account {account.id}. "
|
||||
f"Please configure IntegrationProvider and AISettings."
|
||||
)
|
||||
|
||||
config = integration_settings.config or {}
|
||||
|
||||
# Get model from config
|
||||
model = config.get('model')
|
||||
if not model:
|
||||
raise ValueError(
|
||||
f"Model not configured in IntegrationSettings for account {account.id}. "
|
||||
f"Please set 'model' in OpenAI integration settings."
|
||||
)
|
||||
|
||||
# Validate model is in our supported list (optional validation)
|
||||
# Validate model is in our supported list using ModelRegistry (database-driven)
|
||||
try:
|
||||
from igny8_core.utils.ai_processor import MODEL_RATES
|
||||
if model not in MODEL_RATES:
|
||||
if not ModelRegistry.validate_model(model):
|
||||
supported_models = [m.model_name for m in ModelRegistry.list_models(model_type='text')]
|
||||
logger.warning(
|
||||
f"Model '{model}' for account {account.id} is not in supported list. "
|
||||
f"Supported models: {list(MODEL_RATES.keys())}"
|
||||
f"Supported models: {supported_models}"
|
||||
)
|
||||
except ImportError:
|
||||
# MODEL_RATES not available - skip validation
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Get max_tokens and temperature from config (with reasonable defaults for API)
|
||||
max_tokens = config.get('max_tokens', 4000) # Reasonable default for API limits
|
||||
temperature = config.get('temperature', 0.7) # Reasonable default
|
||||
|
||||
# Build response format based on model (JSON mode for supported models)
|
||||
response_format = None
|
||||
try:
|
||||
@@ -85,7 +104,6 @@ def get_model_config(function_name: str, account) -> Dict[str, Any]:
|
||||
if model in JSON_MODE_MODELS:
|
||||
response_format = {"type": "json_object"}
|
||||
except ImportError:
|
||||
# JSON_MODE_MODELS not available - skip
|
||||
pass
|
||||
|
||||
return {
|
||||
|
||||
@@ -157,6 +157,7 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
from igny8_core.modules.system.models import IntegrationSettings
|
||||
from igny8_core.ai.ai_core import AICore
|
||||
from igny8_core.ai.prompts import PromptRegistry
|
||||
from igny8_core.business.billing.services.credit_service import CreditService
|
||||
|
||||
logger.info("=" * 80)
|
||||
logger.info(f"process_image_generation_queue STARTED")
|
||||
@@ -181,82 +182,97 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
failed = 0
|
||||
results = []
|
||||
|
||||
# Get image generation settings from IntegrationSettings
|
||||
# Get image generation settings from AISettings (with account overrides)
|
||||
logger.info("[process_image_generation_queue] Step 1: Loading image generation settings")
|
||||
try:
|
||||
image_settings = IntegrationSettings.objects.get(
|
||||
account=account,
|
||||
integration_type='image_generation',
|
||||
is_active=True
|
||||
)
|
||||
config = image_settings.config or {}
|
||||
logger.info(f"[process_image_generation_queue] Image generation settings found. Config keys: {list(config.keys())}")
|
||||
logger.info(f"[process_image_generation_queue] Full config: {config}")
|
||||
|
||||
# Get provider and model from config (respect user settings)
|
||||
provider = config.get('provider', 'openai')
|
||||
# Get model - try 'model' first, then 'imageModel' as fallback
|
||||
model = config.get('model') or config.get('imageModel') or 'dall-e-3'
|
||||
logger.info(f"[process_image_generation_queue] Using PROVIDER: {provider}, MODEL: {model} from settings")
|
||||
image_type = config.get('image_type', 'realistic')
|
||||
image_format = config.get('image_format', 'webp')
|
||||
desktop_enabled = config.get('desktop_enabled', True)
|
||||
mobile_enabled = config.get('mobile_enabled', True)
|
||||
# Get image sizes from config, with fallback defaults
|
||||
featured_image_size = config.get('featured_image_size') or ('1280x832' if provider == 'runware' else '1024x1024')
|
||||
desktop_image_size = config.get('desktop_image_size') or '1024x1024'
|
||||
in_article_image_size = config.get('in_article_image_size') or '512x512' # Default to 512x512
|
||||
|
||||
logger.info(f"[process_image_generation_queue] Settings loaded:")
|
||||
logger.info(f" - Provider: {provider}")
|
||||
logger.info(f" - Model: {model}")
|
||||
logger.info(f" - Image type: {image_type}")
|
||||
logger.info(f" - Image format: {image_format}")
|
||||
logger.info(f" - Desktop enabled: {desktop_enabled}")
|
||||
logger.info(f" - Mobile enabled: {mobile_enabled}")
|
||||
except IntegrationSettings.DoesNotExist:
|
||||
logger.error("[process_image_generation_queue] ERROR: Image generation settings not found")
|
||||
logger.error(f"[process_image_generation_queue] Account: {account.id if account else 'None'}, integration_type: 'image_generation'")
|
||||
return {'success': False, 'error': 'Image generation settings not found'}
|
||||
except Exception as e:
|
||||
logger.error(f"[process_image_generation_queue] ERROR loading image generation settings: {e}", exc_info=True)
|
||||
return {'success': False, 'error': f'Error loading image generation settings: {str(e)}'}
|
||||
from igny8_core.modules.system.ai_settings import AISettings
|
||||
from igny8_core.ai.model_registry import ModelRegistry
|
||||
|
||||
# Get provider API key (using same approach as test image generation)
|
||||
# Note: API key is stored as 'apiKey' (camelCase) in IntegrationSettings.config
|
||||
logger.info(f"[process_image_generation_queue] Step 2: Loading {provider.upper()} API key")
|
||||
try:
|
||||
provider_settings = IntegrationSettings.objects.get(
|
||||
account=account,
|
||||
integration_type=provider, # Use the provider from settings
|
||||
is_active=True
|
||||
)
|
||||
logger.info(f"[process_image_generation_queue] {provider.upper()} integration settings found")
|
||||
logger.info(f"[process_image_generation_queue] {provider.upper()} config keys: {list(provider_settings.config.keys()) if provider_settings.config else 'None'}")
|
||||
|
||||
api_key = provider_settings.config.get('apiKey') if provider_settings.config else None
|
||||
if not api_key:
|
||||
logger.error(f"[process_image_generation_queue] {provider.upper()} API key not found in config")
|
||||
logger.error(f"[process_image_generation_queue] {provider.upper()} config: {provider_settings.config}")
|
||||
return {'success': False, 'error': f'{provider.upper()} API key not configured'}
|
||||
|
||||
# Log API key presence (but not the actual key for security)
|
||||
api_key_preview = f"{api_key[:10]}...{api_key[-4:]}" if len(api_key) > 14 else "***"
|
||||
logger.info(f"[process_image_generation_queue] {provider.upper()} API key retrieved successfully (length: {len(api_key)}, preview: {api_key_preview})")
|
||||
except IntegrationSettings.DoesNotExist:
|
||||
logger.error(f"[process_image_generation_queue] ERROR: {provider.upper()} integration settings not found")
|
||||
logger.error(f"[process_image_generation_queue] Account: {account.id if account else 'None'}, integration_type: '{provider}'")
|
||||
return {'success': False, 'error': f'{provider.upper()} integration not found or not active'}
|
||||
except Exception as e:
|
||||
logger.error(f"[process_image_generation_queue] ERROR getting {provider.upper()} API key: {e}", exc_info=True)
|
||||
return {'success': False, 'error': f'Error retrieving {provider.upper()} API key: {str(e)}'}
|
||||
# Get effective settings
|
||||
image_type = AISettings.get_effective_image_style(account)
|
||||
image_format = 'webp' # Default format
|
||||
|
||||
# Get default image model from database
|
||||
default_model = ModelRegistry.get_default_model('image')
|
||||
if default_model:
|
||||
model_config = ModelRegistry.get_model(default_model)
|
||||
provider = model_config.provider if model_config else 'openai'
|
||||
model = default_model
|
||||
else:
|
||||
provider = 'openai'
|
||||
model = 'dall-e-3'
|
||||
|
||||
logger.info(f"[process_image_generation_queue] Using PROVIDER: {provider}, MODEL: {model} from settings")
|
||||
|
||||
# Style to prompt enhancement mapping
|
||||
# These style descriptors are added to the image prompt for better results
|
||||
STYLE_PROMPT_MAP = {
|
||||
# Runware styles
|
||||
'photorealistic': 'ultra realistic photography, natural lighting, real world look, photorealistic',
|
||||
'illustration': 'digital illustration, clean lines, artistic style, modern illustration',
|
||||
'3d_render': 'computer generated 3D render, modern polished 3D style, depth and dramatic lighting',
|
||||
'minimal_flat': 'minimal flat design, simple shapes, flat colors, modern graphic design aesthetic',
|
||||
'artistic': 'artistic painterly style, expressive brushstrokes, hand painted aesthetic',
|
||||
'cartoon': 'cartoon stylized illustration, playful exaggerated forms, animated character style',
|
||||
# DALL-E styles (mapped from OpenAI API style parameter)
|
||||
'natural': 'natural realistic style',
|
||||
'vivid': 'vivid dramatic hyper-realistic style',
|
||||
# Legacy fallbacks
|
||||
'realistic': 'ultra realistic photography, natural lighting, photorealistic',
|
||||
}
|
||||
|
||||
# Get the style description for prompt enhancement
|
||||
style_description = STYLE_PROMPT_MAP.get(image_type, STYLE_PROMPT_MAP.get('photorealistic'))
|
||||
logger.info(f"[process_image_generation_queue] Style: {image_type} -> prompt enhancement: {style_description[:50]}...")
|
||||
|
||||
# Model-specific landscape sizes (square is always 1024x1024)
|
||||
# For Runware models - based on Runware documentation for optimal results per model
|
||||
# For OpenAI DALL-E 3 - uses 1792x1024 for landscape
|
||||
MODEL_LANDSCAPE_SIZES = {
|
||||
'runware:97@1': '1280x768', # Hi Dream Full landscape
|
||||
'bria:10@1': '1344x768', # Bria 3.2 landscape (16:9)
|
||||
'google:4@2': '1376x768', # Nano Banana landscape (16:9)
|
||||
'dall-e-3': '1792x1024', # DALL-E 3 landscape
|
||||
'dall-e-2': '1024x1024', # DALL-E 2 only supports square
|
||||
}
|
||||
DEFAULT_SQUARE_SIZE = '1024x1024'
|
||||
|
||||
# Get model-specific landscape size for featured images
|
||||
model_landscape_size = MODEL_LANDSCAPE_SIZES.get(model, '1792x1024' if provider == 'openai' else '1280x768')
|
||||
|
||||
# Featured image always uses model-specific landscape size
|
||||
featured_image_size = model_landscape_size
|
||||
# In-article images: alternating square/landscape based on position (handled in image loop)
|
||||
in_article_square_size = DEFAULT_SQUARE_SIZE
|
||||
in_article_landscape_size = model_landscape_size
|
||||
|
||||
logger.info(f"[process_image_generation_queue] Settings loaded:")
|
||||
logger.info(f" - Provider: {provider}")
|
||||
logger.info(f" - Model: {model}")
|
||||
logger.info(f" - Image type: {image_type}")
|
||||
logger.info(f" - Image format: {image_format}")
|
||||
logger.info(f" - Featured image size: {featured_image_size}")
|
||||
logger.info(f" - In-article square: {in_article_square_size}, landscape: {in_article_landscape_size}")
|
||||
|
||||
# Get provider API key from IntegrationProvider (centralized)
|
||||
logger.info(f"[process_image_generation_queue] Step 2: Loading {provider.upper()} API key from IntegrationProvider")
|
||||
|
||||
# Get API key from IntegrationProvider (centralized)
|
||||
api_key = ModelRegistry.get_api_key(provider)
|
||||
|
||||
if not api_key:
|
||||
logger.error(f"[process_image_generation_queue] {provider.upper()} API key not configured in IntegrationProvider")
|
||||
return {'success': False, 'error': f'{provider.upper()} API key not configured'}
|
||||
|
||||
# Log API key presence (but not the actual key for security)
|
||||
api_key_preview = f"{api_key[:10]}...{api_key[-4:]}" if len(api_key) > 14 else "***"
|
||||
logger.info(f"[process_image_generation_queue] {provider.upper()} API key retrieved successfully (length: {len(api_key)}, preview: {api_key_preview})")
|
||||
|
||||
# Get image prompt template (has placeholders: {image_type}, {post_title}, {image_prompt})
|
||||
try:
|
||||
image_prompt_template = PromptRegistry.get_image_prompt_template(account)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to get image prompt template: {e}, using fallback")
|
||||
image_prompt_template = 'Create a high-quality {image_type} image for a blog post titled "{post_title}". Image prompt: {image_prompt}'
|
||||
image_prompt_template = '{image_type} image for blog post titled "{post_title}": {image_prompt}'
|
||||
|
||||
# Get negative prompt for Runware (only needed for Runware provider)
|
||||
negative_prompt = None
|
||||
@@ -384,7 +400,7 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
# Calculate actual template length with placeholders filled
|
||||
# Format template with dummy values to measure actual length
|
||||
template_with_dummies = image_prompt_template.format(
|
||||
image_type=image_type,
|
||||
image_type=style_description, # Use actual style description length
|
||||
post_title='X' * len(post_title), # Use same length as actual post_title
|
||||
image_prompt='' # Empty to measure template overhead
|
||||
)
|
||||
@@ -411,7 +427,7 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
image_prompt = image_prompt[:max_image_prompt_length - 3] + "..."
|
||||
|
||||
formatted_prompt = image_prompt_template.format(
|
||||
image_type=image_type,
|
||||
image_type=style_description, # Use full style description instead of raw value
|
||||
post_title=post_title,
|
||||
image_prompt=image_prompt
|
||||
)
|
||||
@@ -476,15 +492,40 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
}
|
||||
)
|
||||
|
||||
# Use appropriate size based on image type
|
||||
# Use appropriate size based on image type and position
|
||||
# Featured: Always landscape (model-specific)
|
||||
# In-article: Alternating square/landscape based on position
|
||||
# Position 0: Square (1024x1024)
|
||||
# Position 1: Landscape (model-specific)
|
||||
# Position 2: Square (1024x1024)
|
||||
# Position 3: Landscape (model-specific)
|
||||
if image.image_type == 'featured':
|
||||
image_size = featured_image_size # Read from config
|
||||
elif image.image_type == 'desktop':
|
||||
image_size = desktop_image_size
|
||||
elif image.image_type == 'mobile':
|
||||
image_size = '512x512' # Fixed mobile size
|
||||
else: # in_article or other
|
||||
image_size = in_article_image_size # Read from config, default 512x512
|
||||
image_size = featured_image_size # Model-specific landscape
|
||||
elif image.image_type == 'in_article':
|
||||
# Alternate based on position: even=square, odd=landscape
|
||||
position = image.position or 0
|
||||
if position % 2 == 0: # Position 0, 2: Square
|
||||
image_size = in_article_square_size
|
||||
else: # Position 1, 3: Landscape
|
||||
image_size = in_article_landscape_size
|
||||
logger.info(f"[process_image_generation_queue] In-article image position {position}: using {'square' if position % 2 == 0 else 'landscape'} size {image_size}")
|
||||
else: # desktop or other (legacy)
|
||||
image_size = in_article_square_size # Default to square
|
||||
|
||||
# For DALL-E, convert image_type to style parameter
|
||||
# image_type is from user settings (e.g., 'vivid', 'natural', 'realistic')
|
||||
# DALL-E accepts 'vivid' or 'natural' - map accordingly
|
||||
dalle_style = None
|
||||
if provider == 'openai':
|
||||
# Map image_type to DALL-E style
|
||||
# 'natural' = more realistic photos (default)
|
||||
# 'vivid' = hyper-real, dramatic images
|
||||
if image_type in ['vivid']:
|
||||
dalle_style = 'vivid'
|
||||
else:
|
||||
# Default to 'natural' for realistic photos
|
||||
dalle_style = 'natural'
|
||||
logger.info(f"[process_image_generation_queue] DALL-E style: {dalle_style} (from image_type: {image_type})")
|
||||
|
||||
result = ai_core.generate_image(
|
||||
prompt=formatted_prompt,
|
||||
@@ -493,7 +534,8 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
size=image_size,
|
||||
api_key=api_key,
|
||||
negative_prompt=negative_prompt,
|
||||
function_name='generate_images_from_prompts'
|
||||
function_name='generate_images_from_prompts',
|
||||
style=dalle_style
|
||||
)
|
||||
|
||||
# Update progress: Image generation complete (50%)
|
||||
@@ -668,6 +710,33 @@ def process_image_generation_queue(self, image_ids: list, account_id: int = None
|
||||
})
|
||||
failed += 1
|
||||
else:
|
||||
# Deduct credits for successful image generation
|
||||
credits_deducted = 0
|
||||
cost_usd = result.get('cost_usd', 0)
|
||||
if account:
|
||||
try:
|
||||
credits_deducted = CreditService.deduct_credits_for_image(
|
||||
account=account,
|
||||
model_name=model,
|
||||
num_images=1,
|
||||
description=f"Image generation: {content.title[:50] if content else 'Image'}" if content else f"Image {image_id}",
|
||||
metadata={
|
||||
'image_id': image_id,
|
||||
'content_id': content_id,
|
||||
'provider': provider,
|
||||
'model': model,
|
||||
'image_type': image.image_type if image else 'unknown',
|
||||
'size': image_size,
|
||||
},
|
||||
cost_usd=cost_usd,
|
||||
related_object_type='image',
|
||||
related_object_id=image_id
|
||||
)
|
||||
logger.info(f"[process_image_generation_queue] Credits deducted for image {image_id}: account balance now {credits_deducted}")
|
||||
except Exception as credit_error:
|
||||
logger.error(f"[process_image_generation_queue] Failed to deduct credits for image {image_id}: {credit_error}")
|
||||
# Don't fail the image generation if credit deduction fails
|
||||
|
||||
# Update progress: Complete (100%)
|
||||
self.update_state(
|
||||
state='PROGRESS',
|
||||
|
||||
@@ -5,6 +5,7 @@ import time
|
||||
import logging
|
||||
from typing import List, Dict, Any, Optional, Callable
|
||||
from datetime import datetime
|
||||
from decimal import Decimal
|
||||
from igny8_core.ai.constants import DEBUG_MODE
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -195,24 +196,35 @@ class CostTracker:
|
||||
"""Tracks API costs and token usage"""
|
||||
|
||||
def __init__(self):
|
||||
self.total_cost = 0.0
|
||||
self.total_cost = Decimal('0.0')
|
||||
self.total_tokens = 0
|
||||
self.operations = []
|
||||
|
||||
def record(self, function_name: str, cost: float, tokens: int, model: str = None):
|
||||
"""Record an API call cost"""
|
||||
def record(self, function_name: str, cost, tokens: int, model: str = None):
|
||||
"""Record an API call cost
|
||||
|
||||
Args:
|
||||
function_name: Name of the AI function
|
||||
cost: Cost value (can be float or Decimal)
|
||||
tokens: Number of tokens used
|
||||
model: Model name
|
||||
"""
|
||||
# Convert cost to Decimal if it's a float to avoid type mixing
|
||||
if not isinstance(cost, Decimal):
|
||||
cost = Decimal(str(cost))
|
||||
|
||||
self.total_cost += cost
|
||||
self.total_tokens += tokens
|
||||
self.operations.append({
|
||||
'function': function_name,
|
||||
'cost': cost,
|
||||
'cost': float(cost), # Store as float for JSON serialization
|
||||
'tokens': tokens,
|
||||
'model': model
|
||||
})
|
||||
|
||||
def get_total(self) -> float:
|
||||
"""Get total cost"""
|
||||
return self.total_cost
|
||||
def get_total(self):
|
||||
"""Get total cost (returns float for JSON serialization)"""
|
||||
return float(self.total_cost)
|
||||
|
||||
def get_total_tokens(self) -> int:
|
||||
"""Get total tokens"""
|
||||
|
||||
@@ -135,7 +135,7 @@ def validate_api_key(api_key: Optional[str], integration_type: str = 'openai') -
|
||||
|
||||
def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
|
||||
"""
|
||||
Validate that model is in supported list.
|
||||
Validate that model is in supported list using database.
|
||||
|
||||
Args:
|
||||
model: Model name to validate
|
||||
@@ -144,27 +144,50 @@ def validate_model(model: str, model_type: str = 'text') -> Dict[str, Any]:
|
||||
Returns:
|
||||
Dict with 'valid' (bool) and optional 'error' (str)
|
||||
"""
|
||||
from .constants import MODEL_RATES, VALID_OPENAI_IMAGE_MODELS
|
||||
|
||||
if model_type == 'text':
|
||||
if model not in MODEL_RATES:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Model "{model}" is not in supported models list'
|
||||
}
|
||||
elif model_type == 'image':
|
||||
if model not in VALID_OPENAI_IMAGE_MODELS:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Model "{model}" is not valid for OpenAI image generation. Only {", ".join(VALID_OPENAI_IMAGE_MODELS)} are supported.'
|
||||
}
|
||||
|
||||
return {'valid': True}
|
||||
try:
|
||||
# Use database-driven validation via AIModelConfig
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
|
||||
exists = AIModelConfig.objects.filter(
|
||||
model_name=model,
|
||||
model_type=model_type,
|
||||
is_active=True
|
||||
).exists()
|
||||
|
||||
if not exists:
|
||||
# Get available models for better error message
|
||||
available = list(AIModelConfig.objects.filter(
|
||||
model_type=model_type,
|
||||
is_active=True
|
||||
).values_list('model_name', flat=True))
|
||||
|
||||
if available:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Model "{model}" is not active or not found. Available {model_type} models: {", ".join(available)}'
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'No {model_type} models configured in database'
|
||||
}
|
||||
|
||||
return {'valid': True}
|
||||
|
||||
except Exception as e:
|
||||
# Log error but don't fallback to constants - DB is authoritative
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.error(f"Error validating model {model}: {e}")
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Error validating model: {e}'
|
||||
}
|
||||
|
||||
|
||||
def validate_image_size(size: str, model: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Validate that image size is valid for the selected model.
|
||||
Validate that image size is valid for the selected model using database.
|
||||
|
||||
Args:
|
||||
size: Image size (e.g., '1024x1024')
|
||||
@@ -173,14 +196,40 @@ def validate_image_size(size: str, model: str) -> Dict[str, Any]:
|
||||
Returns:
|
||||
Dict with 'valid' (bool) and optional 'error' (str)
|
||||
"""
|
||||
from .constants import VALID_SIZES_BY_MODEL
|
||||
|
||||
valid_sizes = VALID_SIZES_BY_MODEL.get(model, [])
|
||||
if size not in valid_sizes:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(valid_sizes)}'
|
||||
}
|
||||
|
||||
return {'valid': True}
|
||||
try:
|
||||
# Try database first
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
|
||||
model_config = AIModelConfig.objects.filter(
|
||||
model_name=model,
|
||||
model_type='image',
|
||||
is_active=True
|
||||
).first()
|
||||
|
||||
if model_config:
|
||||
if not model_config.validate_size(size):
|
||||
valid_sizes = model_config.valid_sizes or []
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(valid_sizes)}'
|
||||
}
|
||||
return {'valid': True}
|
||||
else:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Image model "{model}" not found in database'
|
||||
}
|
||||
|
||||
except Exception:
|
||||
# Fallback to constants if database fails
|
||||
from .constants import VALID_SIZES_BY_MODEL
|
||||
|
||||
valid_sizes = VALID_SIZES_BY_MODEL.get(model, [])
|
||||
if size not in valid_sizes:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Image size "{size}" is not valid for model "{model}". Valid sizes are: {", ".join(valid_sizes)}'
|
||||
}
|
||||
|
||||
return {'valid': True}
|
||||
|
||||
|
||||
@@ -5,7 +5,8 @@ from django.urls import path
|
||||
from igny8_core.api.account_views import (
|
||||
AccountSettingsViewSet,
|
||||
TeamManagementViewSet,
|
||||
UsageAnalyticsViewSet
|
||||
UsageAnalyticsViewSet,
|
||||
DashboardStatsViewSet
|
||||
)
|
||||
|
||||
urlpatterns = [
|
||||
@@ -28,4 +29,9 @@ urlpatterns = [
|
||||
path('usage/analytics/', UsageAnalyticsViewSet.as_view({
|
||||
'get': 'overview'
|
||||
}), name='usage-analytics'),
|
||||
|
||||
# Dashboard Stats (real data for home page)
|
||||
path('dashboard/stats/', DashboardStatsViewSet.as_view({
|
||||
'get': 'stats'
|
||||
}), name='dashboard-stats'),
|
||||
]
|
||||
|
||||
@@ -10,6 +10,7 @@ from django.contrib.auth import get_user_model
|
||||
from django.db.models import Q, Count, Sum
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
from decimal import Decimal
|
||||
from drf_spectacular.utils import extend_schema, extend_schema_view
|
||||
|
||||
from igny8_core.auth.models import Account
|
||||
@@ -131,6 +132,16 @@ class TeamManagementViewSet(viewsets.ViewSet):
|
||||
status=status.HTTP_400_BAD_REQUEST
|
||||
)
|
||||
|
||||
# Check hard limit for users BEFORE creating
|
||||
from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
|
||||
try:
|
||||
LimitService.check_hard_limit(account, 'users', additional_count=1)
|
||||
except HardLimitExceededError as e:
|
||||
return Response(
|
||||
{'error': str(e)},
|
||||
status=status.HTTP_400_BAD_REQUEST
|
||||
)
|
||||
|
||||
# Create user (simplified - in production, send invitation email)
|
||||
user = User.objects.create_user(
|
||||
email=email,
|
||||
@@ -242,3 +253,216 @@ class UsageAnalyticsViewSet(viewsets.ViewSet):
|
||||
'total_usage': abs(transactions.filter(amount__lt=0).aggregate(Sum('amount'))['amount__sum'] or 0),
|
||||
'total_purchases': transactions.filter(amount__gt=0).aggregate(Sum('amount'))['amount__sum'] or 0,
|
||||
})
|
||||
|
||||
|
||||
@extend_schema_view(
|
||||
stats=extend_schema(tags=['Account']),
|
||||
)
|
||||
class DashboardStatsViewSet(viewsets.ViewSet):
|
||||
"""Dashboard statistics - real data for home page widgets"""
|
||||
permission_classes = [IsAuthenticated]
|
||||
|
||||
@action(detail=False, methods=['get'])
|
||||
def stats(self, request):
|
||||
"""
|
||||
Get dashboard statistics for the home page.
|
||||
|
||||
Query params:
|
||||
- site_id: Filter by site (optional, defaults to all sites)
|
||||
- days: Number of days for AI operations (default: 7)
|
||||
|
||||
Returns:
|
||||
- ai_operations: Real credit usage by operation type
|
||||
- recent_activity: Recent notifications
|
||||
- content_velocity: Content created this week/month
|
||||
- images_count: Actual total images count
|
||||
- published_count: Actual published content count
|
||||
"""
|
||||
account = request.user.account
|
||||
site_id = request.query_params.get('site_id')
|
||||
days = int(request.query_params.get('days', 7))
|
||||
|
||||
# Import models here to avoid circular imports
|
||||
from igny8_core.modules.writer.models import Images, Content
|
||||
from igny8_core.modules.planner.models import Keywords, Clusters, ContentIdeas
|
||||
from igny8_core.business.notifications.models import Notification
|
||||
from igny8_core.business.billing.models import CreditUsageLog
|
||||
from igny8_core.auth.models import Site
|
||||
|
||||
# Build base filter for site
|
||||
site_filter = {}
|
||||
if site_id:
|
||||
try:
|
||||
site_filter['site_id'] = int(site_id)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# ========== AI OPERATIONS (from CreditUsageLog) ==========
|
||||
start_date = timezone.now() - timedelta(days=days)
|
||||
usage_query = CreditUsageLog.objects.filter(
|
||||
account=account,
|
||||
created_at__gte=start_date
|
||||
)
|
||||
|
||||
# Get operations grouped by type
|
||||
operations_data = usage_query.values('operation_type').annotate(
|
||||
count=Count('id'),
|
||||
credits=Sum('credits_used')
|
||||
).order_by('-credits')
|
||||
|
||||
# Calculate totals
|
||||
total_ops = usage_query.count()
|
||||
total_credits = usage_query.aggregate(total=Sum('credits_used'))['total'] or 0
|
||||
|
||||
# Format operations for frontend
|
||||
operations = []
|
||||
for op in operations_data:
|
||||
op_type = op['operation_type'] or 'other'
|
||||
operations.append({
|
||||
'type': op_type,
|
||||
'count': op['count'] or 0,
|
||||
'credits': op['credits'] or 0,
|
||||
})
|
||||
|
||||
ai_operations = {
|
||||
'period': f'{days}d',
|
||||
'operations': operations,
|
||||
'totals': {
|
||||
'count': total_ops,
|
||||
'credits': total_credits,
|
||||
'successRate': 98.5, # TODO: calculate from actual success/failure
|
||||
'avgCreditsPerOp': round(total_credits / total_ops, 1) if total_ops > 0 else 0,
|
||||
}
|
||||
}
|
||||
|
||||
# ========== RECENT ACTIVITY (from Notifications) ==========
|
||||
recent_notifications = Notification.objects.filter(
|
||||
account=account
|
||||
).order_by('-created_at')[:10]
|
||||
|
||||
recent_activity = []
|
||||
for notif in recent_notifications:
|
||||
# Map notification type to activity type
|
||||
activity_type_map = {
|
||||
'ai_clustering_complete': 'clustering',
|
||||
'ai_ideas_complete': 'ideas',
|
||||
'ai_content_complete': 'content',
|
||||
'ai_images_complete': 'images',
|
||||
'ai_prompts_complete': 'images',
|
||||
'content_published': 'published',
|
||||
'wp_sync_success': 'published',
|
||||
}
|
||||
activity_type = activity_type_map.get(notif.notification_type, 'system')
|
||||
|
||||
# Map notification type to href
|
||||
href_map = {
|
||||
'clustering': '/planner/clusters',
|
||||
'ideas': '/planner/ideas',
|
||||
'content': '/writer/content',
|
||||
'images': '/writer/images',
|
||||
'published': '/writer/published',
|
||||
}
|
||||
|
||||
recent_activity.append({
|
||||
'id': str(notif.id),
|
||||
'type': activity_type,
|
||||
'title': notif.title,
|
||||
'description': notif.message[:100] if notif.message else '',
|
||||
'timestamp': notif.created_at.isoformat(),
|
||||
'href': href_map.get(activity_type, '/dashboard'),
|
||||
})
|
||||
|
||||
# ========== CONTENT COUNTS ==========
|
||||
content_base = Content.objects.filter(account=account)
|
||||
if site_filter:
|
||||
content_base = content_base.filter(**site_filter)
|
||||
|
||||
total_content = content_base.count()
|
||||
draft_content = content_base.filter(status='draft').count()
|
||||
review_content = content_base.filter(status='review').count()
|
||||
published_content = content_base.filter(status='published').count()
|
||||
|
||||
# ========== IMAGES COUNT (actual images, not content with images) ==========
|
||||
images_base = Images.objects.filter(account=account)
|
||||
if site_filter:
|
||||
images_base = images_base.filter(**site_filter)
|
||||
|
||||
total_images = images_base.count()
|
||||
generated_images = images_base.filter(status='generated').count()
|
||||
pending_images = images_base.filter(status='pending').count()
|
||||
|
||||
# ========== CONTENT VELOCITY ==========
|
||||
now = timezone.now()
|
||||
week_ago = now - timedelta(days=7)
|
||||
month_ago = now - timedelta(days=30)
|
||||
|
||||
# This week's content
|
||||
week_content = content_base.filter(created_at__gte=week_ago).count()
|
||||
week_images = images_base.filter(created_at__gte=week_ago).count()
|
||||
|
||||
# This month's content
|
||||
month_content = content_base.filter(created_at__gte=month_ago).count()
|
||||
month_images = images_base.filter(created_at__gte=month_ago).count()
|
||||
|
||||
# Estimate words (avg 1500 per article)
|
||||
content_velocity = {
|
||||
'thisWeek': {
|
||||
'articles': week_content,
|
||||
'words': week_content * 1500,
|
||||
'images': week_images,
|
||||
},
|
||||
'thisMonth': {
|
||||
'articles': month_content,
|
||||
'words': month_content * 1500,
|
||||
'images': month_images,
|
||||
},
|
||||
'total': {
|
||||
'articles': total_content,
|
||||
'words': total_content * 1500,
|
||||
'images': total_images,
|
||||
},
|
||||
'trend': 0, # TODO: calculate actual trend
|
||||
}
|
||||
|
||||
# ========== PIPELINE COUNTS ==========
|
||||
keywords_base = Keywords.objects.filter(account=account)
|
||||
clusters_base = Clusters.objects.filter(account=account)
|
||||
ideas_base = ContentIdeas.objects.filter(account=account)
|
||||
|
||||
if site_filter:
|
||||
keywords_base = keywords_base.filter(**site_filter)
|
||||
clusters_base = clusters_base.filter(**site_filter)
|
||||
ideas_base = ideas_base.filter(**site_filter)
|
||||
|
||||
# Get site count
|
||||
sites_count = Site.objects.filter(account=account, is_active=True).count()
|
||||
|
||||
pipeline = {
|
||||
'sites': sites_count,
|
||||
'keywords': keywords_base.count(),
|
||||
'clusters': clusters_base.count(),
|
||||
'ideas': ideas_base.count(),
|
||||
'tasks': ideas_base.filter(status='queued').count() + ideas_base.filter(status='completed').count(),
|
||||
'drafts': draft_content + review_content,
|
||||
'published': published_content,
|
||||
}
|
||||
|
||||
return Response({
|
||||
'ai_operations': ai_operations,
|
||||
'recent_activity': recent_activity,
|
||||
'content_velocity': content_velocity,
|
||||
'pipeline': pipeline,
|
||||
'counts': {
|
||||
'content': {
|
||||
'total': total_content,
|
||||
'draft': draft_content,
|
||||
'review': review_content,
|
||||
'published': published_content,
|
||||
},
|
||||
'images': {
|
||||
'total': total_images,
|
||||
'generated': generated_images,
|
||||
'pending': pending_images,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
@@ -109,9 +109,11 @@ class APIKeyAuthentication(BaseAuthentication):
|
||||
|
||||
try:
|
||||
from igny8_core.auth.models import Site, User
|
||||
from igny8_core.auth.utils import validate_account_and_plan
|
||||
from rest_framework.exceptions import AuthenticationFailed
|
||||
|
||||
# Find site by API key
|
||||
site = Site.objects.select_related('account', 'account__owner').filter(
|
||||
site = Site.objects.select_related('account', 'account__owner', 'account__plan').filter(
|
||||
wp_api_key=api_key,
|
||||
is_active=True
|
||||
).first()
|
||||
@@ -119,8 +121,17 @@ class APIKeyAuthentication(BaseAuthentication):
|
||||
if not site:
|
||||
return None # API key not found or site inactive
|
||||
|
||||
# Get account and user (prefer owner but gracefully fall back)
|
||||
# Get account and validate it
|
||||
account = site.account
|
||||
if not account:
|
||||
raise AuthenticationFailed('No account associated with this API key.')
|
||||
|
||||
# CRITICAL FIX: Validate account and plan status
|
||||
is_valid, error_message, http_status = validate_account_and_plan(account)
|
||||
if not is_valid:
|
||||
raise AuthenticationFailed(error_message)
|
||||
|
||||
# Get user (prefer owner but gracefully fall back)
|
||||
user = account.owner
|
||||
if not user or not getattr(user, 'is_active', False):
|
||||
# Fall back to any active developer/owner/admin in the account
|
||||
|
||||
@@ -19,34 +19,21 @@ class AccountModelViewSet(viewsets.ModelViewSet):
|
||||
# Filter by account if model has account field
|
||||
if hasattr(queryset.model, 'account'):
|
||||
user = getattr(self.request, 'user', None)
|
||||
|
||||
# ADMIN/DEV/SYSTEM ACCOUNT OVERRIDE: Skip account filtering for:
|
||||
# - Admins and developers (by role)
|
||||
# - Users in system accounts (aws-admin, default-account)
|
||||
|
||||
if user and hasattr(user, 'is_authenticated') and user.is_authenticated:
|
||||
try:
|
||||
# Check if user has admin/developer privileges
|
||||
is_admin_or_dev = (hasattr(user, 'is_admin_or_developer') and user.is_admin_or_developer()) if user else False
|
||||
is_system_user = (hasattr(user, 'is_system_account_user') and user.is_system_account_user()) if user else False
|
||||
|
||||
if is_admin_or_dev or is_system_user:
|
||||
# Skip account filtering - allow all accounts
|
||||
pass
|
||||
account = getattr(self.request, 'account', None)
|
||||
if not account and hasattr(self.request, 'user') and self.request.user and hasattr(self.request.user, 'is_authenticated') and self.request.user.is_authenticated:
|
||||
user_account = getattr(self.request.user, 'account', None)
|
||||
if user_account:
|
||||
account = user_account
|
||||
|
||||
if account:
|
||||
queryset = queryset.filter(account=account)
|
||||
else:
|
||||
# Get account from request (set by middleware)
|
||||
account = getattr(self.request, 'account', None)
|
||||
if account:
|
||||
queryset = queryset.filter(account=account)
|
||||
elif hasattr(self.request, 'user') and self.request.user and hasattr(self.request.user, 'is_authenticated') and self.request.user.is_authenticated:
|
||||
# Fallback to user's account
|
||||
try:
|
||||
user_account = getattr(self.request.user, 'account', None)
|
||||
if user_account:
|
||||
queryset = queryset.filter(account=user_account)
|
||||
except (AttributeError, Exception):
|
||||
# If account access fails (e.g., column mismatch), skip account filtering
|
||||
pass
|
||||
except (AttributeError, TypeError) as e:
|
||||
# No account context -> block access
|
||||
return queryset.none()
|
||||
except (AttributeError, TypeError):
|
||||
# If there's an error accessing user attributes, return empty queryset
|
||||
return queryset.none()
|
||||
else:
|
||||
@@ -61,11 +48,11 @@ class AccountModelViewSet(viewsets.ModelViewSet):
|
||||
try:
|
||||
account = getattr(self.request.user, 'account', None)
|
||||
except (AttributeError, Exception):
|
||||
# If account access fails (e.g., column mismatch), set to None
|
||||
account = None
|
||||
|
||||
# If model has account field, set it
|
||||
if account and hasattr(serializer.Meta.model, 'account'):
|
||||
|
||||
if hasattr(serializer.Meta.model, 'account'):
|
||||
if not account:
|
||||
raise PermissionDenied("Account context is required to create this object.")
|
||||
serializer.save(account=account)
|
||||
else:
|
||||
serializer.save()
|
||||
@@ -253,24 +240,16 @@ class SiteSectorModelViewSet(AccountModelViewSet):
|
||||
# Check if user is authenticated and is a proper User instance (not AnonymousUser)
|
||||
if user and hasattr(user, 'is_authenticated') and user.is_authenticated and hasattr(user, 'get_accessible_sites'):
|
||||
try:
|
||||
# ADMIN/DEV/SYSTEM ACCOUNT OVERRIDE: Developers, admins, and system account users
|
||||
# can see all data regardless of site/sector
|
||||
if (hasattr(user, 'is_admin_or_developer') and user.is_admin_or_developer()) or \
|
||||
(hasattr(user, 'is_system_account_user') and user.is_system_account_user()):
|
||||
# Skip site/sector filtering for admins, developers, and system account users
|
||||
# But still respect optional query params if provided
|
||||
pass
|
||||
# Get user's accessible sites
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
|
||||
# If no accessible sites, return empty queryset
|
||||
if not accessible_sites.exists():
|
||||
queryset = queryset.none()
|
||||
else:
|
||||
# Get user's accessible sites
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
|
||||
# If no accessible sites, return empty queryset (unless admin/developer/system account)
|
||||
if not accessible_sites.exists():
|
||||
queryset = queryset.none()
|
||||
else:
|
||||
# Filter by accessible sites
|
||||
queryset = queryset.filter(site__in=accessible_sites)
|
||||
except (AttributeError, TypeError) as e:
|
||||
# Filter by accessible sites
|
||||
queryset = queryset.filter(site__in=accessible_sites)
|
||||
except (AttributeError, TypeError):
|
||||
# If there's an error accessing user attributes, return empty queryset
|
||||
queryset = queryset.none()
|
||||
else:
|
||||
@@ -295,21 +274,14 @@ class SiteSectorModelViewSet(AccountModelViewSet):
|
||||
# Convert site_id to int if it's a string
|
||||
site_id_int = int(site_id) if site_id else None
|
||||
if site_id_int:
|
||||
# ADMIN/DEV/SYSTEM ACCOUNT OVERRIDE: Admins, developers, and system account users
|
||||
# can filter by any site, others must verify access
|
||||
if user and hasattr(user, 'is_authenticated') and user.is_authenticated and hasattr(user, 'get_accessible_sites'):
|
||||
try:
|
||||
if (hasattr(user, 'is_admin_or_developer') and user.is_admin_or_developer()) or \
|
||||
(hasattr(user, 'is_system_account_user') and user.is_system_account_user()):
|
||||
# Admin/Developer/System Account User can filter by any site
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
if accessible_sites.filter(id=site_id_int).exists():
|
||||
queryset = queryset.filter(site_id=site_id_int)
|
||||
else:
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
if accessible_sites.filter(id=site_id_int).exists():
|
||||
queryset = queryset.filter(site_id=site_id_int)
|
||||
else:
|
||||
queryset = queryset.none() # Site not accessible
|
||||
except (AttributeError, TypeError) as e:
|
||||
queryset = queryset.none() # Site not accessible
|
||||
except (AttributeError, TypeError):
|
||||
# If there's an error accessing user attributes, return empty queryset
|
||||
queryset = queryset.none()
|
||||
else:
|
||||
@@ -369,14 +341,10 @@ class SiteSectorModelViewSet(AccountModelViewSet):
|
||||
|
||||
if user and hasattr(user, 'is_authenticated') and user.is_authenticated and site:
|
||||
try:
|
||||
# ADMIN/DEV/SYSTEM ACCOUNT OVERRIDE: Admins, developers, and system account users
|
||||
# can create in any site, others must verify access
|
||||
if not ((hasattr(user, 'is_admin_or_developer') and user.is_admin_or_developer()) or
|
||||
(hasattr(user, 'is_system_account_user') and user.is_system_account_user())):
|
||||
if hasattr(user, 'get_accessible_sites'):
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
if not accessible_sites.filter(id=site.id).exists():
|
||||
raise PermissionDenied("You do not have access to this site")
|
||||
if hasattr(user, 'get_accessible_sites'):
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
if not accessible_sites.filter(id=site.id).exists():
|
||||
raise PermissionDenied("You do not have access to this site")
|
||||
|
||||
# Verify sector belongs to site
|
||||
if sector and hasattr(sector, 'site') and sector.site != site:
|
||||
|
||||
@@ -12,13 +12,23 @@ class IsAuthenticatedAndActive(permissions.BasePermission):
|
||||
Base permission for most endpoints
|
||||
"""
|
||||
def has_permission(self, request, view):
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
logger.warning(f"[IsAuthenticatedAndActive] DENIED: User not authenticated")
|
||||
return False
|
||||
|
||||
# Check if user is active
|
||||
if hasattr(request.user, 'is_active'):
|
||||
return request.user.is_active
|
||||
is_active = request.user.is_active
|
||||
if is_active:
|
||||
logger.info(f"[IsAuthenticatedAndActive] ALLOWED: User {request.user.email} is active")
|
||||
else:
|
||||
logger.warning(f"[IsAuthenticatedAndActive] DENIED: User {request.user.email} is inactive")
|
||||
return is_active
|
||||
|
||||
logger.info(f"[IsAuthenticatedAndActive] ALLOWED: User {request.user.email} (no is_active check)")
|
||||
return True
|
||||
|
||||
|
||||
@@ -26,45 +36,41 @@ class HasTenantAccess(permissions.BasePermission):
|
||||
"""
|
||||
Permission class that requires user to belong to the tenant/account
|
||||
Ensures tenant isolation
|
||||
Superusers, developers, and system account users bypass this check.
|
||||
|
||||
CRITICAL: Every authenticated user MUST have an account.
|
||||
The middleware sets request.account from request.user.account.
|
||||
If a user doesn't have an account, it's a data integrity issue.
|
||||
"""
|
||||
def has_permission(self, request, view):
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
logger.warning(f"[HasTenantAccess] DENIED: User not authenticated")
|
||||
return False
|
||||
|
||||
# Get account from request (set by middleware)
|
||||
account = getattr(request, 'account', None)
|
||||
# SIMPLIFIED LOGIC: Every authenticated user MUST have an account
|
||||
# Middleware already set request.account from request.user.account
|
||||
# Just verify it exists
|
||||
if not hasattr(request.user, 'account'):
|
||||
logger.warning(f"[HasTenantAccess] DENIED: User {request.user.email} has no account attribute")
|
||||
return False
|
||||
|
||||
# If no account in request, try to get from user
|
||||
if not account and hasattr(request.user, 'account'):
|
||||
try:
|
||||
account = request.user.account
|
||||
except (AttributeError, Exception):
|
||||
pass
|
||||
|
||||
# Admin/Developer/System account users bypass tenant check
|
||||
if request.user and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated:
|
||||
try:
|
||||
is_admin_or_dev = (hasattr(request.user, 'is_admin_or_developer') and
|
||||
request.user.is_admin_or_developer()) if request.user else False
|
||||
is_system_user = (hasattr(request.user, 'is_system_account_user') and
|
||||
request.user.is_system_account_user()) if request.user else False
|
||||
|
||||
if is_admin_or_dev or is_system_user:
|
||||
return True
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
|
||||
# Regular users must have account access
|
||||
if account:
|
||||
# Check if user belongs to this account
|
||||
if hasattr(request.user, 'account'):
|
||||
try:
|
||||
user_account = request.user.account
|
||||
return user_account == account or user_account.id == account.id
|
||||
except (AttributeError, Exception):
|
||||
pass
|
||||
|
||||
return False
|
||||
try:
|
||||
# Access the account to trigger any lazy loading
|
||||
user_account = request.user.account
|
||||
if not user_account:
|
||||
logger.warning(f"[HasTenantAccess] DENIED: User {request.user.email} has NULL account")
|
||||
return False
|
||||
|
||||
# Success - user has a valid account
|
||||
logger.info(f"[HasTenantAccess] ALLOWED: User {request.user.email} has account {user_account.name} (ID: {user_account.id})")
|
||||
return True
|
||||
except (AttributeError, Exception) as e:
|
||||
# User doesn't have account relationship - data integrity issue
|
||||
logger.warning(f"[HasTenantAccess] DENIED: User {request.user.email} account access failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
class IsViewerOrAbove(permissions.BasePermission):
|
||||
@@ -73,28 +79,26 @@ class IsViewerOrAbove(permissions.BasePermission):
|
||||
For read-only operations
|
||||
"""
|
||||
def has_permission(self, request, view):
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
return False
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Admin/Developer/System account users always have access
|
||||
try:
|
||||
is_admin_or_dev = (hasattr(request.user, 'is_admin_or_developer') and
|
||||
request.user.is_admin_or_developer()) if request.user else False
|
||||
is_system_user = (hasattr(request.user, 'is_system_account_user') and
|
||||
request.user.is_system_account_user()) if request.user else False
|
||||
|
||||
if is_admin_or_dev or is_system_user:
|
||||
return True
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
logger.warning(f"[IsViewerOrAbove] DENIED: User not authenticated")
|
||||
return False
|
||||
|
||||
# Check user role
|
||||
if hasattr(request.user, 'role'):
|
||||
role = request.user.role
|
||||
# viewer, editor, admin, owner all have access
|
||||
return role in ['viewer', 'editor', 'admin', 'owner']
|
||||
allowed = role in ['viewer', 'editor', 'admin', 'owner']
|
||||
if allowed:
|
||||
logger.info(f"[IsViewerOrAbove] ALLOWED: User {request.user.email} has role {role}")
|
||||
else:
|
||||
logger.warning(f"[IsViewerOrAbove] DENIED: User {request.user.email} has invalid role {role}")
|
||||
return allowed
|
||||
|
||||
# If no role system, allow authenticated users
|
||||
logger.info(f"[IsViewerOrAbove] ALLOWED: User {request.user.email} (no role system)")
|
||||
return True
|
||||
|
||||
|
||||
@@ -107,18 +111,6 @@ class IsEditorOrAbove(permissions.BasePermission):
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
return False
|
||||
|
||||
# Admin/Developer/System account users always have access
|
||||
try:
|
||||
is_admin_or_dev = (hasattr(request.user, 'is_admin_or_developer') and
|
||||
request.user.is_admin_or_developer()) if request.user else False
|
||||
is_system_user = (hasattr(request.user, 'is_system_account_user') and
|
||||
request.user.is_system_account_user()) if request.user else False
|
||||
|
||||
if is_admin_or_dev or is_system_user:
|
||||
return True
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
|
||||
# Check user role
|
||||
if hasattr(request.user, 'role'):
|
||||
role = request.user.role
|
||||
@@ -132,23 +124,21 @@ class IsEditorOrAbove(permissions.BasePermission):
|
||||
class IsAdminOrOwner(permissions.BasePermission):
|
||||
"""
|
||||
Permission class that requires admin or owner role only
|
||||
OR user belongs to aws-admin account
|
||||
For settings, keys, billing operations
|
||||
"""
|
||||
def has_permission(self, request, view):
|
||||
if not request.user or not request.user.is_authenticated:
|
||||
return False
|
||||
|
||||
# Admin/Developer/System account users always have access
|
||||
try:
|
||||
is_admin_or_dev = (hasattr(request.user, 'is_admin_or_developer') and
|
||||
request.user.is_admin_or_developer()) if request.user else False
|
||||
is_system_user = (hasattr(request.user, 'is_system_account_user') and
|
||||
request.user.is_system_account_user()) if request.user else False
|
||||
|
||||
if is_admin_or_dev or is_system_user:
|
||||
# Check if user belongs to aws-admin account (case-insensitive)
|
||||
if hasattr(request.user, 'account') and request.user.account:
|
||||
account_name = getattr(request.user.account, 'name', None)
|
||||
account_slug = getattr(request.user.account, 'slug', None)
|
||||
if account_name and account_name.lower() == 'aws admin':
|
||||
return True
|
||||
if account_slug == 'aws-admin':
|
||||
return True
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
|
||||
# Check user role
|
||||
if hasattr(request.user, 'role'):
|
||||
@@ -158,5 +148,3 @@ class IsAdminOrOwner(permissions.BasePermission):
|
||||
|
||||
# If no role system, deny by default for security
|
||||
return False
|
||||
|
||||
|
||||
|
||||
@@ -34,6 +34,11 @@ def postprocess_schema_filter_tags(result, generator, request, public):
|
||||
for path, methods in result['paths'].items():
|
||||
for method, operation in methods.items():
|
||||
if isinstance(operation, dict) and 'tags' in operation:
|
||||
# Explicitly exclude system webhook from tagging/docs grouping
|
||||
if '/system/webhook' in path:
|
||||
operation['tags'] = []
|
||||
continue
|
||||
|
||||
# Keep only explicit tags from the operation
|
||||
filtered_tags = [
|
||||
tag for tag in operation['tags']
|
||||
|
||||
@@ -140,7 +140,7 @@ class GetModelConfigTestCase(TestCase):
|
||||
|
||||
def test_get_model_config_json_mode_models(self):
|
||||
"""Test get_model_config() sets response_format for JSON mode models"""
|
||||
json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview']
|
||||
json_models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo-preview', 'gpt-5.1', 'gpt-5.2']
|
||||
|
||||
for model in json_models:
|
||||
IntegrationSettings.objects.filter(account=self.account).delete()
|
||||
|
||||
@@ -79,7 +79,7 @@ class IntegrationTestBase(TestCase):
|
||||
sector=self.industry_sector,
|
||||
volume=1000,
|
||||
difficulty=50,
|
||||
intent="informational"
|
||||
country="US"
|
||||
)
|
||||
|
||||
# Authenticate client
|
||||
|
||||
@@ -21,15 +21,12 @@ class DebugScopedRateThrottle(ScopedRateThrottle):
|
||||
|
||||
def allow_request(self, request, view):
|
||||
"""
|
||||
Check if request should be throttled
|
||||
|
||||
Bypasses throttling if:
|
||||
- DEBUG mode is True
|
||||
- IGNY8_DEBUG_THROTTLE environment variable is True
|
||||
- User belongs to aws-admin or other system accounts
|
||||
- User is admin/developer role
|
||||
- Public blueprint list request with site filter (for Sites Renderer)
|
||||
Check if request should be throttled.
|
||||
DISABLED - Always allow all requests.
|
||||
"""
|
||||
return True
|
||||
|
||||
# OLD CODE BELOW (DISABLED)
|
||||
# Check if throttling should be bypassed
|
||||
debug_bypass = getattr(settings, 'DEBUG', False)
|
||||
env_bypass = getattr(settings, 'IGNY8_DEBUG_THROTTLE', False)
|
||||
@@ -41,21 +38,7 @@ class DebugScopedRateThrottle(ScopedRateThrottle):
|
||||
if not request.user or not hasattr(request.user, 'is_authenticated') or not request.user.is_authenticated:
|
||||
public_blueprint_bypass = True
|
||||
|
||||
# Bypass for system account users (aws-admin, default-account, etc.)
|
||||
system_account_bypass = False
|
||||
if hasattr(request, 'user') and request.user and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated:
|
||||
try:
|
||||
# Check if user is in system account (aws-admin, default-account, default)
|
||||
if hasattr(request.user, 'is_system_account_user') and request.user.is_system_account_user():
|
||||
system_account_bypass = True
|
||||
# Also bypass for admin/developer roles
|
||||
elif hasattr(request.user, 'is_admin_or_developer') and request.user.is_admin_or_developer():
|
||||
system_account_bypass = True
|
||||
except (AttributeError, Exception):
|
||||
# If checking fails, continue with normal throttling
|
||||
pass
|
||||
|
||||
if debug_bypass or env_bypass or system_account_bypass or public_blueprint_bypass:
|
||||
if debug_bypass or env_bypass or public_blueprint_bypass:
|
||||
# In debug mode or for system accounts, still set throttle headers but don't actually throttle
|
||||
# This allows testing throttle headers without blocking requests
|
||||
if hasattr(self, 'get_rate'):
|
||||
@@ -76,9 +59,27 @@ class DebugScopedRateThrottle(ScopedRateThrottle):
|
||||
}
|
||||
return True
|
||||
|
||||
# Normal throttling behavior
|
||||
# Normal throttling with per-account keying
|
||||
return super().allow_request(request, view)
|
||||
|
||||
def get_cache_key(self, request, view):
|
||||
"""
|
||||
Override to add account-based throttle keying.
|
||||
Keys by (scope, account.id) instead of just user.
|
||||
"""
|
||||
if not self.scope:
|
||||
return None
|
||||
|
||||
# Get account from request
|
||||
account = getattr(request, 'account', None)
|
||||
if not account and hasattr(request, 'user') and request.user and request.user.is_authenticated:
|
||||
account = getattr(request.user, 'account', None)
|
||||
|
||||
account_id = account.id if account else 'anon'
|
||||
|
||||
# Build throttle key: scope:account_id
|
||||
return f'{self.scope}:{account_id}'
|
||||
|
||||
def get_rate(self):
|
||||
"""
|
||||
Get rate for the current scope
|
||||
|
||||
@@ -6,8 +6,10 @@ from rest_framework.routers import DefaultRouter
|
||||
from .account_views import (
|
||||
AccountSettingsViewSet,
|
||||
TeamManagementViewSet,
|
||||
UsageAnalyticsViewSet
|
||||
UsageAnalyticsViewSet,
|
||||
DashboardStatsViewSet
|
||||
)
|
||||
from igny8_core.modules.system.settings_views import ContentGenerationSettingsViewSet
|
||||
|
||||
router = DefaultRouter()
|
||||
|
||||
@@ -15,6 +17,10 @@ urlpatterns = [
|
||||
# Account settings (non-router endpoints for simplified access)
|
||||
path('settings/', AccountSettingsViewSet.as_view({'get': 'retrieve', 'patch': 'partial_update'}), name='account-settings'),
|
||||
|
||||
# AI Settings - Content Generation Settings per the plan
|
||||
# GET/POST /api/v1/account/settings/ai/
|
||||
path('settings/ai/', ContentGenerationSettingsViewSet.as_view({'get': 'list', 'post': 'create', 'put': 'create'}), name='ai-settings'),
|
||||
|
||||
# Team management
|
||||
path('team/', TeamManagementViewSet.as_view({'get': 'list', 'post': 'create'}), name='team-list'),
|
||||
path('team/<int:pk>/', TeamManagementViewSet.as_view({'delete': 'destroy'}), name='team-detail'),
|
||||
@@ -22,5 +28,8 @@ urlpatterns = [
|
||||
# Usage analytics
|
||||
path('usage/analytics/', UsageAnalyticsViewSet.as_view({'get': 'overview'}), name='usage-analytics'),
|
||||
|
||||
# Dashboard stats (real data for home page)
|
||||
path('dashboard/stats/', DashboardStatsViewSet.as_view({'get': 'stats'}), name='dashboard-stats'),
|
||||
|
||||
path('', include(router.urls)),
|
||||
]
|
||||
]
|
||||
File diff suppressed because it is too large
Load Diff
35
backend/igny8_core/auth/backends.py
Normal file
35
backend/igny8_core/auth/backends.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
Custom Authentication Backend - No Caching
|
||||
Prevents cross-request user contamination by disabling Django's default user caching
|
||||
"""
|
||||
from django.contrib.auth.backends import ModelBackend
|
||||
|
||||
|
||||
class NoCacheModelBackend(ModelBackend):
|
||||
"""
|
||||
Custom authentication backend that disables user object caching.
|
||||
|
||||
Django's default ModelBackend caches the user object in thread-local storage,
|
||||
which can cause cross-request contamination when the same worker process
|
||||
handles requests from different users.
|
||||
|
||||
This backend forces a fresh DB query on EVERY request to prevent user swapping.
|
||||
"""
|
||||
|
||||
def get_user(self, user_id):
|
||||
"""
|
||||
Get user from database WITHOUT caching.
|
||||
|
||||
This overrides the default behavior which caches user objects
|
||||
at the process level, causing session contamination.
|
||||
"""
|
||||
from django.contrib.auth import get_user_model
|
||||
UserModel = get_user_model()
|
||||
|
||||
try:
|
||||
# CRITICAL: Use select_related to load account/plan in ONE query
|
||||
# But do NOT cache the result - return fresh object every time
|
||||
user = UserModel.objects.select_related('account', 'account__plan').get(pk=user_id)
|
||||
return user
|
||||
except UserModel.DoesNotExist:
|
||||
return None
|
||||
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
Management command to clean up expired and orphaned sessions
|
||||
Helps prevent session contamination and reduces DB bloat
|
||||
"""
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.contrib.auth import get_user_model
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
User = get_user_model()
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Clean up expired sessions and detect session contamination'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
'--dry-run',
|
||||
action='store_true',
|
||||
help='Show what would be deleted without actually deleting',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--days',
|
||||
type=int,
|
||||
default=7,
|
||||
help='Delete sessions older than X days (default: 7)',
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
dry_run = options['dry_run']
|
||||
days = options['days']
|
||||
cutoff_date = datetime.now() - timedelta(days=days)
|
||||
|
||||
# Get all sessions
|
||||
all_sessions = Session.objects.all()
|
||||
expired_sessions = Session.objects.filter(expire_date__lt=datetime.now())
|
||||
old_sessions = Session.objects.filter(expire_date__lt=cutoff_date)
|
||||
|
||||
self.stdout.write(f"\n📊 Session Statistics:")
|
||||
self.stdout.write(f" Total sessions: {all_sessions.count()}")
|
||||
self.stdout.write(f" Expired sessions: {expired_sessions.count()}")
|
||||
self.stdout.write(f" Sessions older than {days} days: {old_sessions.count()}")
|
||||
|
||||
# Count sessions by user
|
||||
user_sessions = {}
|
||||
for session in all_sessions:
|
||||
try:
|
||||
data = session.get_decoded()
|
||||
user_id = data.get('_auth_user_id')
|
||||
if user_id:
|
||||
user = User.objects.get(id=user_id)
|
||||
key = f"{user.username} ({user.account.slug if user.account else 'no-account'})"
|
||||
user_sessions[key] = user_sessions.get(key, 0) + 1
|
||||
except:
|
||||
pass
|
||||
|
||||
if user_sessions:
|
||||
self.stdout.write(f"\n📈 Active sessions by user:")
|
||||
for user_key, count in sorted(user_sessions.items(), key=lambda x: x[1], reverse=True)[:10]:
|
||||
indicator = "⚠️ " if count > 20 else " "
|
||||
self.stdout.write(f"{indicator}{user_key}: {count} sessions")
|
||||
|
||||
# Delete expired sessions
|
||||
if expired_sessions.exists():
|
||||
if dry_run:
|
||||
self.stdout.write(self.style.WARNING(f"\n[DRY RUN] Would delete {expired_sessions.count()} expired sessions"))
|
||||
else:
|
||||
count = expired_sessions.delete()[0]
|
||||
self.stdout.write(self.style.SUCCESS(f"\n✓ Deleted {count} expired sessions"))
|
||||
else:
|
||||
self.stdout.write(f"\n✓ No expired sessions to clean")
|
||||
|
||||
# Detect potential contamination
|
||||
warnings = []
|
||||
for user_key, count in user_sessions.items():
|
||||
if count > 50:
|
||||
warnings.append(f"User '{user_key}' has {count} active sessions (potential proliferation)")
|
||||
|
||||
if warnings:
|
||||
self.stdout.write(self.style.WARNING(f"\n⚠️ Contamination Warnings:"))
|
||||
for warning in warnings:
|
||||
self.stdout.write(self.style.WARNING(f" {warning}"))
|
||||
self.stdout.write(f"\n💡 Consider running: python manage.py clearsessions")
|
||||
@@ -25,18 +25,7 @@ class Command(BaseCommand):
|
||||
'max_users': 999999,
|
||||
'max_sites': 999999,
|
||||
'max_keywords': 999999,
|
||||
'max_clusters': 999999,
|
||||
'max_content_ideas': 999999,
|
||||
'monthly_word_count_limit': 999999999,
|
||||
'daily_content_tasks': 999999,
|
||||
'daily_ai_requests': 999999,
|
||||
'daily_ai_request_limit': 999999,
|
||||
'monthly_ai_credit_limit': 999999,
|
||||
'monthly_image_count': 999999,
|
||||
'daily_image_generation_limit': 999999,
|
||||
'monthly_cluster_ai_credits': 999999,
|
||||
'monthly_content_ai_credits': 999999,
|
||||
'monthly_image_ai_credits': 999999,
|
||||
'max_ahrefs_queries': 999999,
|
||||
'included_credits': 999999,
|
||||
'is_active': True,
|
||||
'features': ['ai_writer', 'image_gen', 'auto_publish', 'custom_prompts', 'unlimited'],
|
||||
|
||||
@@ -0,0 +1,57 @@
|
||||
"""
|
||||
Management command to create or update the Free Trial plan
|
||||
"""
|
||||
from django.core.management.base import BaseCommand
|
||||
from igny8_core.auth.models import Plan
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Create or update the Free Trial plan for signup'
|
||||
|
||||
def handle(self, *args, **options):
|
||||
self.stdout.write('Creating/updating Free Trial plan...')
|
||||
|
||||
plan, created = Plan.objects.update_or_create(
|
||||
slug='free-trial',
|
||||
defaults={
|
||||
'name': 'Free Trial',
|
||||
'price': 0.00,
|
||||
'billing_cycle': 'monthly',
|
||||
'included_credits': 2000, # 2000 credits for trial
|
||||
'credits_per_month': 2000, # Legacy field
|
||||
'max_sites': 1,
|
||||
'max_users': 1,
|
||||
'max_industries': 3, # 3 sectors per site
|
||||
'max_author_profiles': 2,
|
||||
'is_active': True,
|
||||
'features': ['ai_writer', 'planner', 'basic_support'],
|
||||
'allow_credit_topup': False, # No top-up during trial
|
||||
'extra_credit_price': 0.00,
|
||||
}
|
||||
)
|
||||
|
||||
if created:
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f'✓ Created Free Trial plan (ID: {plan.id})'
|
||||
))
|
||||
else:
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f'✓ Updated Free Trial plan (ID: {plan.id})'
|
||||
))
|
||||
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f' - Credits: {plan.included_credits}'
|
||||
))
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f' - Max Sites: {plan.max_sites}'
|
||||
))
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f' - Max Sectors: {plan.max_industries}'
|
||||
))
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f' - Status: {"Active" if plan.is_active else "Inactive"}'
|
||||
))
|
||||
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
'\nFree Trial plan is ready for signup!'
|
||||
))
|
||||
@@ -2,10 +2,27 @@
|
||||
Multi-Account Middleware
|
||||
Extracts account from JWT token and injects into request context
|
||||
"""
|
||||
import logging
|
||||
from django.utils.deprecation import MiddlewareMixin
|
||||
from django.http import JsonResponse
|
||||
from django.contrib.auth import logout
|
||||
from rest_framework import status
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
logger = logging.getLogger('auth.middleware')
|
||||
|
||||
# Logout reason codes for precise tracking
|
||||
LOGOUT_REASONS = {
|
||||
'SESSION_ACCOUNT_MISMATCH': 'Session contamination: account ID mismatch',
|
||||
'SESSION_USER_MISMATCH': 'Session contamination: user ID mismatch',
|
||||
'ACCOUNT_MISSING': 'Account not configured for this user',
|
||||
'ACCOUNT_SUSPENDED': 'Account is suspended',
|
||||
'ACCOUNT_CANCELLED': 'Account is cancelled',
|
||||
'PLAN_MISSING': 'No subscription plan assigned',
|
||||
'PLAN_INACTIVE': 'Subscription plan is inactive',
|
||||
'USER_INACTIVE': 'User account is inactive',
|
||||
}
|
||||
|
||||
try:
|
||||
import jwt
|
||||
@@ -31,35 +48,25 @@ class AccountContextMiddleware(MiddlewareMixin):
|
||||
# First, try to get user from Django session (cookie-based auth)
|
||||
# This handles cases where frontend uses credentials: 'include' with session cookies
|
||||
if hasattr(request, 'user') and request.user and request.user.is_authenticated:
|
||||
# User is authenticated via session - refresh from DB to get latest account/plan data
|
||||
# This ensures changes to account/plan are reflected immediately without re-login
|
||||
# CRITICAL FIX: Never query DB again or mutate request.user
|
||||
# Django's AuthenticationMiddleware already loaded the user correctly
|
||||
# Just use it directly and set request.account from the ALREADY LOADED relationship
|
||||
try:
|
||||
from .models import User as UserModel
|
||||
# Refresh user from DB with account and plan relationships to get latest data
|
||||
# This is important so account/plan changes are reflected immediately
|
||||
user = UserModel.objects.select_related('account', 'account__plan').get(id=request.user.id)
|
||||
# Update request.user with fresh data
|
||||
request.user = user
|
||||
# Get account from refreshed user
|
||||
user_account = getattr(user, 'account', None)
|
||||
validation_error = self._validate_account_and_plan(request, user)
|
||||
# Validate account/plan - but use the user object already set by Django
|
||||
validation_error = self._validate_account_and_plan(request, request.user)
|
||||
if validation_error:
|
||||
return validation_error
|
||||
request.account = getattr(user, 'account', None)
|
||||
|
||||
# Set request.account from the user's account relationship
|
||||
# This is already loaded, no need to query DB again
|
||||
request.account = getattr(request.user, 'account', None)
|
||||
|
||||
# REMOVED: Session contamination checks on every request
|
||||
# These were causing random logouts - session integrity handled by Django
|
||||
|
||||
return None
|
||||
except (AttributeError, UserModel.DoesNotExist, Exception):
|
||||
# If refresh fails, fallback to cached account
|
||||
try:
|
||||
user_account = getattr(request.user, 'account', None)
|
||||
if user_account:
|
||||
validation_error = self._validate_account_and_plan(request, request.user)
|
||||
if validation_error:
|
||||
return validation_error
|
||||
request.account = user_account
|
||||
return None
|
||||
except (AttributeError, Exception):
|
||||
pass
|
||||
# If account access fails (e.g., column mismatch), set to None
|
||||
except (AttributeError, Exception):
|
||||
# If anything fails, just set account to None and continue
|
||||
request.account = None
|
||||
return None
|
||||
|
||||
@@ -132,42 +139,58 @@ class AccountContextMiddleware(MiddlewareMixin):
|
||||
def _validate_account_and_plan(self, request, user):
|
||||
"""
|
||||
Ensure the authenticated user has an account and an active plan.
|
||||
If not, logout the user (for session auth) and block the request.
|
||||
Uses shared validation helper for consistency.
|
||||
"""
|
||||
try:
|
||||
account = getattr(user, 'account', None)
|
||||
except Exception:
|
||||
account = None
|
||||
from .utils import validate_account_and_plan
|
||||
|
||||
if not account:
|
||||
return self._deny_request(
|
||||
request,
|
||||
error='Account not configured for this user. Please contact support.',
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
is_valid, error_message, http_status = validate_account_and_plan(user)
|
||||
|
||||
plan = getattr(account, 'plan', None)
|
||||
if plan is None or getattr(plan, 'is_active', False) is False:
|
||||
return self._deny_request(
|
||||
request,
|
||||
error='Active subscription required. Visit igny8.com/pricing to subscribe.',
|
||||
status_code=status.HTTP_402_PAYMENT_REQUIRED,
|
||||
)
|
||||
if not is_valid:
|
||||
return self._deny_request(request, error_message, http_status)
|
||||
|
||||
return None
|
||||
|
||||
def _deny_request(self, request, error, status_code):
|
||||
"""Logout session users (if any) and return a consistent JSON error."""
|
||||
"""Logout session users (if any) and return a consistent JSON error with detailed tracking."""
|
||||
# Determine logout reason code based on error message
|
||||
reason_code = 'UNKNOWN'
|
||||
if 'Account not configured' in error or 'Account not found' in error:
|
||||
reason_code = 'ACCOUNT_MISSING'
|
||||
elif 'suspended' in error.lower():
|
||||
reason_code = 'ACCOUNT_SUSPENDED'
|
||||
elif 'cancelled' in error.lower():
|
||||
reason_code = 'ACCOUNT_CANCELLED'
|
||||
elif 'No subscription plan' in error or 'plan assigned' in error.lower():
|
||||
reason_code = 'PLAN_MISSING'
|
||||
elif 'plan is inactive' in error.lower() or 'Active subscription required' in error:
|
||||
reason_code = 'PLAN_INACTIVE'
|
||||
elif 'inactive' in error.lower():
|
||||
reason_code = 'USER_INACTIVE'
|
||||
|
||||
try:
|
||||
if hasattr(request, 'user') and request.user and request.user.is_authenticated:
|
||||
logger.warning(
|
||||
f"[AUTO-LOGOUT] {reason_code}: {error}. "
|
||||
f"User={request.user.id}, Account={getattr(request, 'account', None)}, "
|
||||
f"Path={request.path}, IP={request.META.get('REMOTE_ADDR')}, "
|
||||
f"Status={status_code}, Timestamp={datetime.now().isoformat()}"
|
||||
)
|
||||
logout(request)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.error(f"[AUTO-LOGOUT] Error during logout: {e}")
|
||||
|
||||
return JsonResponse(
|
||||
{
|
||||
'success': False,
|
||||
'error': error,
|
||||
'logout_reason': reason_code,
|
||||
'logout_message': LOGOUT_REASONS.get(reason_code, error),
|
||||
'logout_path': request.path,
|
||||
'logout_context': {
|
||||
'user_id': request.user.id if hasattr(request, 'user') and request.user and request.user.is_authenticated else None,
|
||||
'account_id': getattr(request, 'account', None).id if hasattr(request, 'account') and getattr(request, 'account', None) else None,
|
||||
'status_code': status_code,
|
||||
}
|
||||
},
|
||||
status=status_code,
|
||||
)
|
||||
|
||||
@@ -0,0 +1,105 @@
|
||||
# Generated manually based on FINAL-IMPLEMENTATION-REQUIREMENTS.md
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0006_soft_delete_and_retention'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Add payment_method to Account
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='payment_method',
|
||||
field=models.CharField(
|
||||
max_length=30,
|
||||
choices=[
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
],
|
||||
default='stripe',
|
||||
help_text='Payment method used for this account'
|
||||
),
|
||||
),
|
||||
# Add payment_method to Subscription
|
||||
migrations.AddField(
|
||||
model_name='subscription',
|
||||
name='payment_method',
|
||||
field=models.CharField(
|
||||
max_length=30,
|
||||
choices=[
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
],
|
||||
default='stripe',
|
||||
help_text='Payment method for this subscription'
|
||||
),
|
||||
),
|
||||
# Add external_payment_id to Subscription
|
||||
migrations.AddField(
|
||||
model_name='subscription',
|
||||
name='external_payment_id',
|
||||
field=models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text='External payment reference (bank transfer ref, PayPal transaction ID)'
|
||||
),
|
||||
),
|
||||
# Make stripe_subscription_id nullable
|
||||
migrations.AlterField(
|
||||
model_name='subscription',
|
||||
name='stripe_subscription_id',
|
||||
field=models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
null=True,
|
||||
db_index=True,
|
||||
help_text='Stripe subscription ID (when using Stripe)'
|
||||
),
|
||||
),
|
||||
# Add pending_payment status to Account
|
||||
migrations.AlterField(
|
||||
model_name='account',
|
||||
name='status',
|
||||
field=models.CharField(
|
||||
max_length=20,
|
||||
choices=[
|
||||
('active', 'Active'),
|
||||
('suspended', 'Suspended'),
|
||||
('trial', 'Trial'),
|
||||
('cancelled', 'Cancelled'),
|
||||
('pending_payment', 'Pending Payment'),
|
||||
],
|
||||
default='trial'
|
||||
),
|
||||
),
|
||||
# Add pending_payment status to Subscription
|
||||
migrations.AlterField(
|
||||
model_name='subscription',
|
||||
name='status',
|
||||
field=models.CharField(
|
||||
max_length=20,
|
||||
choices=[
|
||||
('active', 'Active'),
|
||||
('past_due', 'Past Due'),
|
||||
('canceled', 'Canceled'),
|
||||
('trialing', 'Trialing'),
|
||||
('pending_payment', 'Pending Payment'),
|
||||
]
|
||||
),
|
||||
),
|
||||
# Add index on payment_method
|
||||
migrations.AddIndex(
|
||||
model_name='account',
|
||||
index=models.Index(fields=['payment_method'], name='auth_acc_payment_idx'),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='subscription',
|
||||
index=models.Index(fields=['payment_method'], name='auth_sub_payment_idx'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,26 @@
|
||||
# Generated by Django 5.2.8 on 2025-12-08 13:01
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0007_add_payment_method_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveIndex(
|
||||
model_name='account',
|
||||
name='auth_acc_payment_idx',
|
||||
),
|
||||
migrations.RemoveIndex(
|
||||
model_name='subscription',
|
||||
name='auth_sub_payment_idx',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='is_internal',
|
||||
field=models.BooleanField(default=False, help_text='Internal-only plan (Free/Internal) - hidden from public plan listings'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,36 @@
|
||||
# Generated manually
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.core.validators
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0008_add_plan_is_internal'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='annual_discount_percent',
|
||||
field=models.DecimalField(
|
||||
decimal_places=2,
|
||||
default=15.0,
|
||||
help_text='Annual subscription discount percentage (default 15%)',
|
||||
max_digits=5,
|
||||
validators=[
|
||||
django.core.validators.MinValueValidator(0),
|
||||
django.core.validators.MaxValueValidator(100)
|
||||
]
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='is_featured',
|
||||
field=models.BooleanField(
|
||||
default=False,
|
||||
help_text='Highlight this plan as popular/recommended'
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,25 @@
|
||||
# Generated by Django 5.2.8 on 2025-12-08 22:42
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0009_add_plan_annual_discount_and_featured'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='subscription',
|
||||
name='plan',
|
||||
field=models.ForeignKey(blank=True, help_text='Subscription plan (tracks historical plan even if account changes plan)', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='subscriptions', to='igny8_core_auth.plan'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='site',
|
||||
name='industry',
|
||||
field=models.ForeignKey(default=21, help_text='Industry this site belongs to (required for sector creation)', on_delete=django.db.models.deletion.PROTECT, related_name='sites', to='igny8_core_auth.industry'),
|
||||
preserve_default=False,
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 5.2.8 on 2025-12-08 22:52
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0010_add_subscription_plan_and_require_site_industry'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='subscription',
|
||||
name='payment_method',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,47 @@
|
||||
# Generated migration to fix subscription constraints
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0011_remove_subscription_payment_method'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Add unique constraint on tenant_id at database level
|
||||
migrations.RunSQL(
|
||||
sql="""
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS igny8_subscriptions_tenant_id_unique
|
||||
ON igny8_subscriptions(tenant_id);
|
||||
""",
|
||||
reverse_sql="""
|
||||
DROP INDEX IF EXISTS igny8_subscriptions_tenant_id_unique;
|
||||
"""
|
||||
),
|
||||
|
||||
# Make plan field required (non-nullable)
|
||||
# First set default plan (ID 1 - Free Plan) for any null values
|
||||
migrations.RunSQL(
|
||||
sql="""
|
||||
UPDATE igny8_subscriptions
|
||||
SET plan_id = 1
|
||||
WHERE plan_id IS NULL;
|
||||
""",
|
||||
reverse_sql=migrations.RunSQL.noop
|
||||
),
|
||||
|
||||
# Now alter the field to be non-nullable
|
||||
migrations.AlterField(
|
||||
model_name='subscription',
|
||||
name='plan',
|
||||
field=models.ForeignKey(
|
||||
on_delete=django.db.models.deletion.PROTECT,
|
||||
related_name='subscriptions',
|
||||
to='igny8_core_auth.plan',
|
||||
help_text='Subscription plan (tracks historical plan even if account changes plan)'
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,49 @@
|
||||
# Generated by Django 5.2.8 on 2025-12-12 11:26
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0012_fix_subscription_constraints'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_clusters',
|
||||
field=models.IntegerField(default=100, help_text='Maximum AI keyword clusters allowed (hard limit)', validators=[django.core.validators.MinValueValidator(1)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_content_ideas',
|
||||
field=models.IntegerField(default=300, help_text='Maximum AI content ideas per month', validators=[django.core.validators.MinValueValidator(1)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_content_words',
|
||||
field=models.IntegerField(default=100000, help_text='Maximum content words per month (e.g., 100000 = 100K words)', validators=[django.core.validators.MinValueValidator(1)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_image_prompts',
|
||||
field=models.IntegerField(default=300, help_text='Maximum image prompts per month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_images_basic',
|
||||
field=models.IntegerField(default=300, help_text='Maximum basic AI images per month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_images_premium',
|
||||
field=models.IntegerField(default=60, help_text='Maximum premium AI images per month (DALL-E)', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_keywords',
|
||||
field=models.IntegerField(default=1000, help_text='Maximum total keywords allowed (hard limit)', validators=[django.core.validators.MinValueValidator(1)]),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,49 @@
|
||||
# Generated by Django 5.2.8 on 2025-12-12 12:24
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0013_plan_max_clusters_plan_max_content_ideas_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_content_ideas',
|
||||
field=models.IntegerField(default=0, help_text='Content ideas generated this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_content_words',
|
||||
field=models.IntegerField(default=0, help_text='Content words generated this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_image_prompts',
|
||||
field=models.IntegerField(default=0, help_text='Image prompts this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_images_basic',
|
||||
field=models.IntegerField(default=0, help_text='Basic AI images this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_images_premium',
|
||||
field=models.IntegerField(default=0, help_text='Premium AI images this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_period_end',
|
||||
field=models.DateTimeField(blank=True, help_text='Current billing period end', null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_period_start',
|
||||
field=models.DateTimeField(blank=True, help_text='Current billing period start', null=True),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,24 @@
|
||||
# Generated manually
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0014_add_usage_tracking_to_account'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='original_price',
|
||||
field=models.DecimalField(
|
||||
blank=True,
|
||||
decimal_places=2,
|
||||
help_text='Original price (before discount) - shows as crossed out price. Leave empty if no discount.',
|
||||
max_digits=10,
|
||||
null=True
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,19 @@
|
||||
# Generated by Django 5.2.9 on 2025-12-13 20:31
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0015_add_plan_original_price'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='plan',
|
||||
name='annual_discount_percent',
|
||||
field=models.IntegerField(default=15, help_text='Annual subscription discount percentage (default 15%)', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,66 @@
|
||||
# Generated by Django 5.2.9 on 2025-12-15 01:28
|
||||
|
||||
import django.core.validators
|
||||
import django.db.models.deletion
|
||||
import simple_history.models
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0016_alter_plan_annual_discount_percent'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='HistoricalAccount',
|
||||
fields=[
|
||||
('id', models.BigIntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
|
||||
('is_deleted', models.BooleanField(db_index=True, default=False)),
|
||||
('deleted_at', models.DateTimeField(blank=True, db_index=True, null=True)),
|
||||
('restore_until', models.DateTimeField(blank=True, db_index=True, null=True)),
|
||||
('delete_reason', models.CharField(blank=True, max_length=255, null=True)),
|
||||
('name', models.CharField(max_length=255)),
|
||||
('slug', models.SlugField(max_length=255)),
|
||||
('stripe_customer_id', models.CharField(blank=True, max_length=255, null=True)),
|
||||
('credits', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('status', models.CharField(choices=[('active', 'Active'), ('suspended', 'Suspended'), ('trial', 'Trial'), ('cancelled', 'Cancelled'), ('pending_payment', 'Pending Payment')], default='trial', max_length=20)),
|
||||
('payment_method', models.CharField(choices=[('stripe', 'Stripe'), ('paypal', 'PayPal'), ('bank_transfer', 'Bank Transfer')], default='stripe', help_text='Payment method used for this account', max_length=30)),
|
||||
('deletion_retention_days', models.PositiveIntegerField(default=14, help_text='Retention window (days) before soft-deleted items are purged', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(365)])),
|
||||
('billing_email', models.EmailField(blank=True, help_text='Email for billing notifications', max_length=254, null=True)),
|
||||
('billing_address_line1', models.CharField(blank=True, help_text='Street address', max_length=255)),
|
||||
('billing_address_line2', models.CharField(blank=True, help_text='Apt, suite, etc.', max_length=255)),
|
||||
('billing_city', models.CharField(blank=True, max_length=100)),
|
||||
('billing_state', models.CharField(blank=True, help_text='State/Province/Region', max_length=100)),
|
||||
('billing_postal_code', models.CharField(blank=True, max_length=20)),
|
||||
('billing_country', models.CharField(blank=True, help_text='ISO 2-letter country code', max_length=2)),
|
||||
('tax_id', models.CharField(blank=True, help_text='VAT/Tax ID number', max_length=100)),
|
||||
('usage_content_ideas', models.IntegerField(default=0, help_text='Content ideas generated this month', validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('usage_content_words', models.IntegerField(default=0, help_text='Content words generated this month', validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('usage_images_basic', models.IntegerField(default=0, help_text='Basic AI images this month', validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('usage_images_premium', models.IntegerField(default=0, help_text='Premium AI images this month', validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('usage_image_prompts', models.IntegerField(default=0, help_text='Image prompts this month', validators=[django.core.validators.MinValueValidator(0)])),
|
||||
('usage_period_start', models.DateTimeField(blank=True, help_text='Current billing period start', null=True)),
|
||||
('usage_period_end', models.DateTimeField(blank=True, help_text='Current billing period end', null=True)),
|
||||
('created_at', models.DateTimeField(blank=True, editable=False)),
|
||||
('updated_at', models.DateTimeField(blank=True, editable=False)),
|
||||
('history_id', models.AutoField(primary_key=True, serialize=False)),
|
||||
('history_date', models.DateTimeField(db_index=True)),
|
||||
('history_change_reason', models.CharField(max_length=100, null=True)),
|
||||
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
|
||||
('deleted_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
|
||||
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
|
||||
('owner', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
|
||||
('plan', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='igny8_core_auth.plan')),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'historical Account',
|
||||
'verbose_name_plural': 'historical Accounts',
|
||||
'ordering': ('-history_date', '-history_id'),
|
||||
'get_latest_by': ('history_date', 'history_id'),
|
||||
},
|
||||
bases=(simple_history.models.HistoricalChanges, models.Model),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,30 @@
|
||||
# Generated by Django 5.2.9 on 2025-12-17 06:04
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0017_add_history_tracking'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveIndex(
|
||||
model_name='seedkeyword',
|
||||
name='igny8_seed__intent_15020d_idx',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='seedkeyword',
|
||||
name='intent',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='seedkeyword',
|
||||
name='country',
|
||||
field=models.CharField(choices=[('US', 'United States'), ('CA', 'Canada'), ('GB', 'United Kingdom'), ('AE', 'United Arab Emirates'), ('AU', 'Australia'), ('IN', 'India'), ('PK', 'Pakistan')], default='US', help_text='Target country for this keyword', max_length=2),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='seedkeyword',
|
||||
index=models.Index(fields=['country'], name='igny8_seed__country_4127a5_idx'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,100 @@
|
||||
# Generated by IGNY8 Phase 1: Simplify Credits & Limits
|
||||
# Migration: Remove unused limit fields, add Ahrefs query tracking
|
||||
# Date: January 5, 2026
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.core.validators
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
"""
|
||||
Simplify the credits and limits system:
|
||||
|
||||
PLAN MODEL:
|
||||
- REMOVE: max_clusters, max_content_ideas, max_content_words,
|
||||
max_images_basic, max_images_premium, max_image_prompts
|
||||
- ADD: max_ahrefs_queries (monthly keyword research queries)
|
||||
|
||||
ACCOUNT MODEL:
|
||||
- REMOVE: usage_content_ideas, usage_content_words, usage_images_basic,
|
||||
usage_images_premium, usage_image_prompts
|
||||
- ADD: usage_ahrefs_queries
|
||||
|
||||
RATIONALE:
|
||||
All consumption is now controlled by credits only. The only non-credit
|
||||
limits are: sites, users, keywords (hard limits) and ahrefs_queries (monthly).
|
||||
"""
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0018_add_country_remove_intent_seedkeyword'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# STEP 1: Add new Ahrefs fields FIRST (before removing old ones)
|
||||
migrations.AddField(
|
||||
model_name='plan',
|
||||
name='max_ahrefs_queries',
|
||||
field=models.IntegerField(
|
||||
default=0,
|
||||
validators=[django.core.validators.MinValueValidator(0)],
|
||||
help_text='Monthly Ahrefs keyword research queries (0 = disabled)'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='account',
|
||||
name='usage_ahrefs_queries',
|
||||
field=models.IntegerField(
|
||||
default=0,
|
||||
validators=[django.core.validators.MinValueValidator(0)],
|
||||
help_text='Ahrefs queries used this month'
|
||||
),
|
||||
),
|
||||
|
||||
# STEP 2: Remove unused Plan fields
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_clusters',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_content_ideas',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_content_words',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_images_basic',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_images_premium',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='plan',
|
||||
name='max_image_prompts',
|
||||
),
|
||||
|
||||
# STEP 3: Remove unused Account fields
|
||||
migrations.RemoveField(
|
||||
model_name='account',
|
||||
name='usage_content_ideas',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='account',
|
||||
name='usage_content_words',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='account',
|
||||
name='usage_images_basic',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='account',
|
||||
name='usage_images_premium',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='account',
|
||||
name='usage_image_prompts',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,39 @@
|
||||
# Generated by Django 5.2.9 on 2026-01-06 00:11
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('igny8_core_auth', '0019_simplify_credits_limits'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_content_ideas',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_content_words',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_image_prompts',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_images_basic',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_images_premium',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='historicalaccount',
|
||||
name='usage_ahrefs_queries',
|
||||
field=models.IntegerField(default=0, help_text='Ahrefs queries used this month', validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
]
|
||||
@@ -6,6 +6,7 @@ from django.contrib.auth.models import AbstractUser
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.core.validators import MinValueValidator, MaxValueValidator
|
||||
from igny8_core.common.soft_delete import SoftDeletableModel, SoftDeleteManager
|
||||
from simple_history.models import HistoricalRecords
|
||||
|
||||
|
||||
class AccountBaseModel(models.Model):
|
||||
@@ -62,6 +63,13 @@ class Account(SoftDeletableModel):
|
||||
('suspended', 'Suspended'),
|
||||
('trial', 'Trial'),
|
||||
('cancelled', 'Cancelled'),
|
||||
('pending_payment', 'Pending Payment'),
|
||||
]
|
||||
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
]
|
||||
|
||||
name = models.CharField(max_length=255)
|
||||
@@ -77,6 +85,12 @@ class Account(SoftDeletableModel):
|
||||
plan = models.ForeignKey('igny8_core_auth.Plan', on_delete=models.PROTECT, related_name='accounts')
|
||||
credits = models.IntegerField(default=0, validators=[MinValueValidator(0)])
|
||||
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='trial')
|
||||
payment_method = models.CharField(
|
||||
max_length=30,
|
||||
choices=PAYMENT_METHOD_CHOICES,
|
||||
default='stripe',
|
||||
help_text='Payment method used for this account'
|
||||
)
|
||||
deletion_retention_days = models.PositiveIntegerField(
|
||||
default=14,
|
||||
validators=[MinValueValidator(1), MaxValueValidator(365)],
|
||||
@@ -93,8 +107,16 @@ class Account(SoftDeletableModel):
|
||||
billing_country = models.CharField(max_length=2, blank=True, help_text="ISO 2-letter country code")
|
||||
tax_id = models.CharField(max_length=100, blank=True, help_text="VAT/Tax ID number")
|
||||
|
||||
# Monthly usage tracking (reset on billing cycle)
|
||||
usage_ahrefs_queries = models.IntegerField(default=0, validators=[MinValueValidator(0)], help_text="Ahrefs queries used this month")
|
||||
usage_period_start = models.DateTimeField(null=True, blank=True, help_text="Current billing period start")
|
||||
usage_period_end = models.DateTimeField(null=True, blank=True, help_text="Current billing period end")
|
||||
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
|
||||
# History tracking
|
||||
history = HistoricalRecords()
|
||||
|
||||
class Meta:
|
||||
db_table = 'igny8_tenants'
|
||||
@@ -111,17 +133,172 @@ class Account(SoftDeletableModel):
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
@property
|
||||
def default_payment_method(self):
|
||||
"""Get default payment method from AccountPaymentMethod table"""
|
||||
try:
|
||||
from igny8_core.business.billing.models import AccountPaymentMethod
|
||||
method = AccountPaymentMethod.objects.filter(
|
||||
account=self,
|
||||
is_default=True,
|
||||
is_enabled=True
|
||||
).first()
|
||||
return method.type if method else self.payment_method
|
||||
except Exception:
|
||||
# Fallback to field if table doesn't exist or error
|
||||
return self.payment_method
|
||||
|
||||
def is_system_account(self):
|
||||
"""Check if this account is a system account with highest access level."""
|
||||
# System accounts bypass all filtering restrictions
|
||||
return self.slug in ['aws-admin', 'default-account', 'default']
|
||||
|
||||
def soft_delete(self, user=None, reason=None, retention_days=None):
|
||||
def soft_delete(self, user=None, reason=None, retention_days=None, cascade=True):
|
||||
"""
|
||||
Soft delete the account and optionally cascade to all related objects.
|
||||
Args:
|
||||
user: User performing the deletion
|
||||
reason: Reason for deletion
|
||||
retention_days: Days before permanent deletion
|
||||
cascade: If True, also soft-delete related objects that support soft delete,
|
||||
and hard-delete objects that don't support soft delete
|
||||
"""
|
||||
if self.is_system_account():
|
||||
from django.core.exceptions import PermissionDenied
|
||||
raise PermissionDenied("System account cannot be deleted.")
|
||||
|
||||
if cascade:
|
||||
self._cascade_delete_related(user=user, reason=reason, retention_days=retention_days, hard_delete=False)
|
||||
|
||||
return super().soft_delete(user=user, reason=reason, retention_days=retention_days)
|
||||
|
||||
def _cascade_delete_related(self, user=None, reason=None, retention_days=None, hard_delete=False):
    """
    Delete all objects related to this account.

    For soft delete (``hard_delete=False``): soft-deletes objects whose model
    inherits SoftDeletableModel and hard-deletes the rest.
    For hard delete (``hard_delete=True``): hard-deletes everything.

    Args:
        user: User performing the deletion (forwarded to soft_delete).
        reason: Reason for deletion (forwarded to soft_delete).
        retention_days: Days before permanent deletion (forwarded).
        hard_delete: If True, permanently delete all related objects.

    Failures on individual relations are logged and skipped so one missing
    relation cannot abort the whole cascade.
    """
    import logging

    from igny8_core.common.soft_delete import SoftDeletableModel

    # Hoisted out of the per-iteration except block: build the logger once.
    logger = logging.getLogger(__name__)

    # Related names from Account reverse relations, ordered so dependent
    # (child) objects are removed before the objects they reference.
    related_names = [
        # Content & Planning related (delete first due to dependencies)
        'contentclustermap_set',
        'contentattribute_set',
        'contenttaxonomy_set',
        'content_set',
        'images_set',
        'contentideas_set',
        'tasks_set',
        'keywords_set',
        'clusters_set',
        'strategy_set',
        # Automation
        'automation_runs',
        'automation_configs',
        # Publishing & Integration
        'syncevent_set',
        'publishingsettings_set',
        'publishingrecord_set',
        'deploymentrecord_set',
        'siteintegration_set',
        # Notifications & Optimization
        'notification_set',
        'optimizationtask_set',
        # AI & Settings
        'aitasklog_set',
        'aiprompt_set',
        'aisettings_set',
        'authorprofile_set',
        # Billing (preserve invoices/payments for audit, delete others)
        'planlimitusage_set',
        'creditusagelog_set',
        'credittransaction_set',
        'accountpaymentmethod_set',
        'payment_set',
        'invoice_set',
        # Settings
        'modulesettings_set',
        'moduleenablesettings_set',
        'integrationsettings_set',
        'user_settings',
        'accountsettings_set',
        # Core (last due to dependencies)
        'sector_set',
        'site_set',
        # Users (delete after sites to avoid FK issues, owner is SET_NULL)
        'users',
        # Subscription (OneToOne)
        'subscription',
    ]

    for related_name in related_names:
        try:
            related = getattr(self, related_name, None)
            if related is None:
                continue

            if hasattr(related, 'pk'):
                # Single object (OneToOneField, e.g. subscription).
                if hard_delete:
                    # Was a conditional expression used as a statement;
                    # explicit branching is clearer and equivalent.
                    if hasattr(related, 'hard_delete'):
                        related.hard_delete()
                    else:
                        related.delete()
                elif isinstance(related, SoftDeletableModel):
                    related.soft_delete(user=user, reason=reason, retention_days=retention_days)
                else:
                    # Non-soft-deletable single object - hard delete.
                    related.delete()
            else:
                # RelatedManager (reverse ForeignKey).
                queryset = related.all()
                if not queryset.exists():
                    continue

                if hard_delete:
                    # Prefer a manager-level bulk hard delete when available.
                    if hasattr(queryset, 'hard_delete'):
                        queryset.hard_delete()
                    else:
                        for obj in queryset:
                            if hasattr(obj, 'hard_delete'):
                                obj.hard_delete()
                            else:
                                obj.delete()
                else:
                    # Soft delete if supported, otherwise hard delete.
                    if issubclass(queryset.model, SoftDeletableModel):
                        for obj in queryset:
                            obj.soft_delete(user=user, reason=reason, retention_days=retention_days)
                    else:
                        queryset.delete()
        except Exception as e:
            # Log but don't fail - some relations may not exist on every
            # deployment/schema version.
            logger.warning(
                "Failed to delete related %s for account %s: %s",
                related_name, self.pk, e,
            )
|
||||
|
||||
def hard_delete_with_cascade(self, using=None, keep_parents=False):
    """
    Permanently delete the account and ALL related objects.

    Bypasses soft-delete and removes everything from the database.
    USE WITH CAUTION - this cannot be undone!

    Raises:
        PermissionDenied: If this is a protected system account.
    """
    if self.is_system_account():
        from django.core.exceptions import PermissionDenied
        raise PermissionDenied("System account cannot be deleted.")

    # The owner FK is SET_NULL, but the owner user itself is removed in the
    # cascade below, so detach the reference up front to avoid FK issues.
    if self.owner is not None:
        self.owner = None
        self.save(update_fields=['owner'])

    # Remove every related object first, then the account row itself.
    self._cascade_delete_related(hard_delete=True)
    return super().hard_delete(using=using, keep_parents=keep_parents)
|
||||
|
||||
def delete(self, using=None, keep_parents=False):
    # Override Django's delete() so the default deletion paths (admin bulk
    # actions, instance.delete() calls) perform a soft delete rather than
    # removing the row.
    # NOTE(review): `using` and `keep_parents` are accepted for signature
    # compatibility but not forwarded; soft_delete() is called with its
    # defaults (no user/reason/retention) — confirm this is intentional.
    return self.soft_delete()
|
||||
|
||||
@@ -140,9 +317,23 @@ class Plan(models.Model):
|
||||
name = models.CharField(max_length=255)
|
||||
slug = models.SlugField(unique=True, max_length=255)
|
||||
price = models.DecimalField(max_digits=10, decimal_places=2)
|
||||
original_price = models.DecimalField(
|
||||
max_digits=10,
|
||||
decimal_places=2,
|
||||
null=True,
|
||||
blank=True,
|
||||
help_text="Original price (before discount) - shows as crossed out price. Leave empty if no discount."
|
||||
)
|
||||
billing_cycle = models.CharField(max_length=20, choices=BILLING_CYCLE_CHOICES, default='monthly')
|
||||
annual_discount_percent = models.IntegerField(
|
||||
default=15,
|
||||
validators=[MinValueValidator(0), MaxValueValidator(100)],
|
||||
help_text="Annual subscription discount percentage (default 15%)"
|
||||
)
|
||||
is_featured = models.BooleanField(default=False, help_text="Highlight this plan as popular/recommended")
|
||||
features = models.JSONField(default=list, blank=True, help_text="Plan features as JSON array (e.g., ['ai_writer', 'image_gen', 'auto_publish'])")
|
||||
is_active = models.BooleanField(default=True)
|
||||
is_internal = models.BooleanField(default=False, help_text="Internal-only plan (Free/Internal) - hidden from public plan listings")
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
|
||||
# Account Management Limits (kept - not operation limits)
|
||||
@@ -155,6 +346,20 @@ class Plan(models.Model):
|
||||
max_industries = models.IntegerField(default=None, null=True, blank=True, validators=[MinValueValidator(1)], help_text="Optional limit for industries/sectors")
|
||||
max_author_profiles = models.IntegerField(default=5, validators=[MinValueValidator(0)], help_text="Limit for saved writing styles")
|
||||
|
||||
# Hard Limits (Persistent - user manages within limit)
|
||||
max_keywords = models.IntegerField(
|
||||
default=1000,
|
||||
validators=[MinValueValidator(1)],
|
||||
help_text="Maximum total keywords allowed (hard limit)"
|
||||
)
|
||||
|
||||
# Monthly Limits (Reset on billing cycle)
|
||||
max_ahrefs_queries = models.IntegerField(
|
||||
default=0,
|
||||
validators=[MinValueValidator(0)],
|
||||
help_text="Monthly Ahrefs keyword research queries (0 = disabled)"
|
||||
)
|
||||
|
||||
# Billing & Credits (Phase 0: Credit-only system)
|
||||
included_credits = models.IntegerField(default=0, validators=[MinValueValidator(0)], help_text="Monthly credits included")
|
||||
extra_credit_price = models.DecimalField(max_digits=10, decimal_places=2, default=0.01, help_text="Price per additional credit")
|
||||
@@ -191,23 +396,56 @@ class Plan(models.Model):
|
||||
|
||||
class Subscription(models.Model):
|
||||
"""
|
||||
Account subscription model linking to Stripe.
|
||||
Account subscription model supporting multiple payment methods.
|
||||
"""
|
||||
STATUS_CHOICES = [
|
||||
('active', 'Active'),
|
||||
('past_due', 'Past Due'),
|
||||
('canceled', 'Canceled'),
|
||||
('trialing', 'Trialing'),
|
||||
('pending_payment', 'Pending Payment'),
|
||||
]
|
||||
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
]
|
||||
|
||||
account = models.OneToOneField('igny8_core_auth.Account', on_delete=models.CASCADE, related_name='subscription', db_column='tenant_id')
|
||||
stripe_subscription_id = models.CharField(max_length=255, unique=True)
|
||||
plan = models.ForeignKey(
|
||||
'igny8_core_auth.Plan',
|
||||
on_delete=models.PROTECT,
|
||||
related_name='subscriptions',
|
||||
help_text='Subscription plan (tracks historical plan even if account changes plan)'
|
||||
)
|
||||
stripe_subscription_id = models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
null=True,
|
||||
db_index=True,
|
||||
help_text='Stripe subscription ID (when using Stripe)'
|
||||
)
|
||||
external_payment_id = models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text='External payment reference (bank transfer ref, PayPal transaction ID)'
|
||||
)
|
||||
status = models.CharField(max_length=20, choices=STATUS_CHOICES)
|
||||
current_period_start = models.DateTimeField()
|
||||
current_period_end = models.DateTimeField()
|
||||
cancel_at_period_end = models.BooleanField(default=False)
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
|
||||
@property
def payment_method(self):
    """Payment method resolved from the account's default payment method.

    Falls back to the account's ``payment_method`` field (defaulting to
    ``'stripe'``) when the ``default_payment_method`` property is not
    available on the account yet.
    """
    account = self.account
    if hasattr(account, 'default_payment_method'):
        return account.default_payment_method
    return getattr(account, 'payment_method', 'stripe')
|
||||
|
||||
class Meta:
|
||||
db_table = 'igny8_subscriptions'
|
||||
@@ -239,9 +477,7 @@ class Site(SoftDeletableModel, AccountBaseModel):
|
||||
'igny8_core_auth.Industry',
|
||||
on_delete=models.PROTECT,
|
||||
related_name='sites',
|
||||
null=True,
|
||||
blank=True,
|
||||
help_text="Industry this site belongs to"
|
||||
help_text="Industry this site belongs to (required for sector creation)"
|
||||
)
|
||||
is_active = models.BooleanField(default=True, db_index=True)
|
||||
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='active')
|
||||
@@ -392,11 +628,14 @@ class SeedKeyword(models.Model):
|
||||
These are canonical keywords that can be imported into account-specific Keywords.
|
||||
Non-deletable global reference data.
|
||||
"""
|
||||
INTENT_CHOICES = [
|
||||
('informational', 'Informational'),
|
||||
('navigational', 'Navigational'),
|
||||
('commercial', 'Commercial'),
|
||||
('transactional', 'Transactional'),
|
||||
COUNTRY_CHOICES = [
|
||||
('US', 'United States'),
|
||||
('CA', 'Canada'),
|
||||
('GB', 'United Kingdom'),
|
||||
('AE', 'United Arab Emirates'),
|
||||
('AU', 'Australia'),
|
||||
('IN', 'India'),
|
||||
('PK', 'Pakistan'),
|
||||
]
|
||||
|
||||
keyword = models.CharField(max_length=255, db_index=True)
|
||||
@@ -408,7 +647,7 @@ class SeedKeyword(models.Model):
|
||||
validators=[MinValueValidator(0), MaxValueValidator(100)],
|
||||
help_text='Keyword difficulty (0-100)'
|
||||
)
|
||||
intent = models.CharField(max_length=50, choices=INTENT_CHOICES, default='informational')
|
||||
country = models.CharField(max_length=2, choices=COUNTRY_CHOICES, default='US', help_text='Target country for this keyword')
|
||||
is_active = models.BooleanField(default=True, db_index=True)
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
@@ -422,7 +661,7 @@ class SeedKeyword(models.Model):
|
||||
models.Index(fields=['keyword']),
|
||||
models.Index(fields=['industry', 'sector']),
|
||||
models.Index(fields=['industry', 'sector', 'is_active']),
|
||||
models.Index(fields=['intent']),
|
||||
models.Index(fields=['country']),
|
||||
]
|
||||
ordering = ['keyword']
|
||||
|
||||
@@ -604,8 +843,7 @@ class User(AbstractUser):
|
||||
return self.role == 'developer' or self.is_superuser
|
||||
|
||||
def is_admin_or_developer(self):
|
||||
"""Check if user is admin or developer with override privileges."""
|
||||
# ADMIN/DEV OVERRIDE: Both admin and developer roles bypass account/site/sector restrictions
|
||||
"""Check if user is admin or developer."""
|
||||
return self.role in ['admin', 'developer'] or self.is_superuser
|
||||
|
||||
def is_system_account_user(self):
|
||||
@@ -618,29 +856,17 @@ class User(AbstractUser):
|
||||
|
||||
def get_accessible_sites(self):
|
||||
"""Get all sites the user can access."""
|
||||
# System account users can access all sites across all accounts
|
||||
if self.is_system_account_user():
|
||||
return Site.objects.filter(is_active=True).distinct()
|
||||
|
||||
# Developers/super admins can access all sites across all accounts
|
||||
# ADMIN/DEV OVERRIDE: Admins also bypass account restrictions (see is_admin_or_developer)
|
||||
if self.is_developer():
|
||||
return Site.objects.filter(is_active=True).distinct()
|
||||
|
||||
try:
|
||||
if not self.account:
|
||||
return Site.objects.none()
|
||||
|
||||
# Owners and admins can access all sites in their account
|
||||
if self.role in ['owner', 'admin']:
|
||||
return Site.objects.filter(account=self.account, is_active=True)
|
||||
base_sites = Site.objects.filter(account=self.account)
|
||||
|
||||
if self.role in ['owner', 'admin', 'developer'] or self.is_superuser or self.is_system_account_user():
|
||||
return base_sites
|
||||
|
||||
# Other users can only access sites explicitly granted via SiteUserAccess
|
||||
return Site.objects.filter(
|
||||
account=self.account,
|
||||
is_active=True,
|
||||
user_access__user=self
|
||||
).distinct()
|
||||
return base_sites.filter(user_access__user=self).distinct()
|
||||
except (AttributeError, Exception):
|
||||
# If account access fails (e.g., column mismatch), return empty queryset
|
||||
return Site.objects.none()
|
||||
|
||||
@@ -10,8 +10,10 @@ class PlanSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = Plan
|
||||
fields = [
|
||||
'id', 'name', 'slug', 'price', 'billing_cycle', 'features', 'is_active',
|
||||
'id', 'name', 'slug', 'price', 'original_price', 'billing_cycle', 'annual_discount_percent',
|
||||
'is_featured', 'features', 'is_active',
|
||||
'max_users', 'max_sites', 'max_industries', 'max_author_profiles',
|
||||
'max_keywords', 'max_ahrefs_queries',
|
||||
'included_credits', 'extra_credit_price', 'allow_credit_topup',
|
||||
'auto_credit_topup_threshold', 'auto_credit_topup_amount',
|
||||
'stripe_product_id', 'stripe_price_id', 'credits_per_month'
|
||||
@@ -27,8 +29,8 @@ class SubscriptionSerializer(serializers.ModelSerializer):
|
||||
model = Subscription
|
||||
fields = [
|
||||
'id', 'account', 'account_name', 'account_slug',
|
||||
'stripe_subscription_id', 'status',
|
||||
'current_period_start', 'current_period_end',
|
||||
'stripe_subscription_id', 'payment_method', 'external_payment_id',
|
||||
'status', 'current_period_start', 'current_period_end',
|
||||
'cancel_at_period_end',
|
||||
'created_at', 'updated_at'
|
||||
]
|
||||
@@ -48,7 +50,11 @@ class AccountSerializer(serializers.ModelSerializer):
|
||||
|
||||
class Meta:
|
||||
model = Account
|
||||
fields = ['id', 'name', 'slug', 'owner', 'plan', 'plan_id', 'credits', 'status', 'subscription', 'created_at']
|
||||
fields = [
|
||||
'id', 'name', 'slug', 'owner', 'plan', 'plan_id',
|
||||
'credits', 'status', 'payment_method',
|
||||
'subscription', 'billing_country', 'created_at'
|
||||
]
|
||||
read_only_fields = ['owner', 'created_at']
|
||||
|
||||
|
||||
@@ -58,6 +64,8 @@ class SiteSerializer(serializers.ModelSerializer):
|
||||
active_sectors_count = serializers.SerializerMethodField()
|
||||
selected_sectors = serializers.SerializerMethodField()
|
||||
can_add_sectors = serializers.SerializerMethodField()
|
||||
keywords_count = serializers.SerializerMethodField()
|
||||
has_integration = serializers.SerializerMethodField()
|
||||
industry_name = serializers.CharField(source='industry.name', read_only=True)
|
||||
industry_slug = serializers.CharField(source='industry.slug', read_only=True)
|
||||
# Override domain field to use CharField instead of URLField to avoid premature validation
|
||||
@@ -71,10 +79,14 @@ class SiteSerializer(serializers.ModelSerializer):
|
||||
'is_active', 'status',
|
||||
'site_type', 'hosting_type', 'seo_metadata',
|
||||
'sectors_count', 'active_sectors_count', 'selected_sectors',
|
||||
'can_add_sectors',
|
||||
'can_add_sectors', 'keywords_count', 'has_integration',
|
||||
'created_at', 'updated_at'
|
||||
]
|
||||
read_only_fields = ['created_at', 'updated_at', 'account']
|
||||
# Explicitly specify required fields for clarity
|
||||
extra_kwargs = {
|
||||
'industry': {'required': True, 'error_messages': {'required': 'Industry is required when creating a site.'}},
|
||||
}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Allow partial updates for PATCH requests."""
|
||||
@@ -82,10 +94,12 @@ class SiteSerializer(serializers.ModelSerializer):
|
||||
# Make slug optional - it will be auto-generated from name if not provided
|
||||
if 'slug' in self.fields:
|
||||
self.fields['slug'].required = False
|
||||
# For partial updates (PATCH), make name optional
|
||||
# For partial updates (PATCH), make name and industry optional
|
||||
if self.partial:
|
||||
if 'name' in self.fields:
|
||||
self.fields['name'].required = False
|
||||
if 'industry' in self.fields:
|
||||
self.fields['industry'].required = False
|
||||
|
||||
def validate_domain(self, value):
|
||||
"""Ensure domain has https:// protocol.
|
||||
@@ -94,8 +108,9 @@ class SiteSerializer(serializers.ModelSerializer):
|
||||
- If domain has no protocol, add https://
|
||||
- Validates that the final URL is valid
|
||||
"""
|
||||
if not value:
|
||||
return value
|
||||
# Allow empty/None values
|
||||
if not value or value.strip() == '':
|
||||
return None
|
||||
|
||||
value = value.strip()
|
||||
|
||||
@@ -146,6 +161,20 @@ class SiteSerializer(serializers.ModelSerializer):
|
||||
"""Check if site can add more sectors (max 5)."""
|
||||
return obj.can_add_sector()
|
||||
|
||||
def get_keywords_count(self, obj):
    """Return the total number of keywords for this site, across all sectors."""
    from igny8_core.modules.planner.models import Keywords

    site_keywords = Keywords.objects.filter(site=obj)
    return site_keywords.count()
|
||||
|
||||
def get_has_integration(self, obj):
    """Return True when the site has an active WordPress integration.

    A site counts as integrated when an active WordPress SiteIntegration
    record exists, or when a WordPress URL is configured on the site itself.
    """
    from igny8_core.business.integration.models import SiteIntegration

    has_active_record = SiteIntegration.objects.filter(
        site=obj,
        platform='wordpress',
        is_active=True,
    ).exists()
    return has_active_record or bool(obj.wp_url)
|
||||
|
||||
|
||||
class IndustrySectorSerializer(serializers.ModelSerializer):
|
||||
"""Serializer for IndustrySector model."""
|
||||
@@ -230,6 +259,9 @@ class SiteUserAccessSerializer(serializers.ModelSerializer):
|
||||
read_only_fields = ['granted_at']
|
||||
|
||||
|
||||
from igny8_core.business.billing.models import PAYMENT_METHOD_CHOICES
|
||||
|
||||
|
||||
class UserSerializer(serializers.ModelSerializer):
|
||||
account = AccountSerializer(read_only=True)
|
||||
accessible_sites = serializers.SerializerMethodField()
|
||||
@@ -260,6 +292,21 @@ class RegisterSerializer(serializers.Serializer):
|
||||
allow_null=True,
|
||||
default=None
|
||||
)
|
||||
plan_slug = serializers.CharField(max_length=50, required=False)
|
||||
payment_method = serializers.ChoiceField(
|
||||
choices=[choice[0] for choice in PAYMENT_METHOD_CHOICES],
|
||||
default='bank_transfer',
|
||||
required=False
|
||||
)
|
||||
# Billing information fields
|
||||
billing_email = serializers.EmailField(required=False, allow_blank=True)
|
||||
billing_address_line1 = serializers.CharField(max_length=255, required=False, allow_blank=True)
|
||||
billing_address_line2 = serializers.CharField(max_length=255, required=False, allow_blank=True)
|
||||
billing_city = serializers.CharField(max_length=100, required=False, allow_blank=True)
|
||||
billing_state = serializers.CharField(max_length=100, required=False, allow_blank=True)
|
||||
billing_postal_code = serializers.CharField(max_length=20, required=False, allow_blank=True)
|
||||
billing_country = serializers.CharField(max_length=2, required=False, allow_blank=True)
|
||||
tax_id = serializers.CharField(max_length=100, required=False, allow_blank=True)
|
||||
|
||||
def validate(self, attrs):
|
||||
if attrs['password'] != attrs['password_confirm']:
|
||||
@@ -271,23 +318,59 @@ class RegisterSerializer(serializers.Serializer):
|
||||
if 'plan_id' in attrs and attrs.get('plan_id') == '':
|
||||
attrs['plan_id'] = None
|
||||
|
||||
# Validate billing fields for paid plans
|
||||
plan_slug = attrs.get('plan_slug')
|
||||
paid_plans = ['starter', 'growth', 'scale']
|
||||
if plan_slug and plan_slug in paid_plans:
|
||||
# Require billing_country for paid plans
|
||||
if not attrs.get('billing_country'):
|
||||
raise serializers.ValidationError({
|
||||
"billing_country": "Billing country is required for paid plans."
|
||||
})
|
||||
# Require payment_method for paid plans
|
||||
if not attrs.get('payment_method'):
|
||||
raise serializers.ValidationError({
|
||||
"payment_method": "Payment method is required for paid plans."
|
||||
})
|
||||
|
||||
return attrs
|
||||
|
||||
def create(self, validated_data):
|
||||
from django.db import transaction
|
||||
from igny8_core.business.billing.models import CreditTransaction
|
||||
from igny8_core.auth.models import Subscription
|
||||
from igny8_core.business.billing.models import AccountPaymentMethod
|
||||
from igny8_core.business.billing.services.invoice_service import InvoiceService
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
|
||||
with transaction.atomic():
|
||||
# Get or assign free plan
|
||||
plan = validated_data.get('plan_id')
|
||||
if not plan:
|
||||
# Auto-assign free plan
|
||||
plan_slug = validated_data.get('plan_slug')
|
||||
paid_plans = ['starter', 'growth', 'scale']
|
||||
|
||||
if plan_slug and plan_slug in paid_plans:
|
||||
try:
|
||||
plan = Plan.objects.get(slug=plan_slug, is_active=True)
|
||||
except Plan.DoesNotExist:
|
||||
raise serializers.ValidationError({
|
||||
"plan": f"Plan '{plan_slug}' not available. Please contact support."
|
||||
})
|
||||
account_status = 'pending_payment'
|
||||
initial_credits = 0
|
||||
billing_period_start = timezone.now()
|
||||
# simple monthly cycle; if annual needed, extend here
|
||||
billing_period_end = billing_period_start + timedelta(days=30)
|
||||
else:
|
||||
try:
|
||||
plan = Plan.objects.get(slug='free', is_active=True)
|
||||
except Plan.DoesNotExist:
|
||||
# Fallback: get first active plan ordered by price (cheapest)
|
||||
plan = Plan.objects.filter(is_active=True).order_by('price').first()
|
||||
if not plan:
|
||||
raise serializers.ValidationError({"plan": "No active plans available"})
|
||||
raise serializers.ValidationError({
|
||||
"plan": "Free plan not configured. Please contact support."
|
||||
})
|
||||
account_status = 'trial'
|
||||
initial_credits = plan.get_effective_credits_per_month()
|
||||
billing_period_start = None
|
||||
billing_period_end = None
|
||||
|
||||
# Generate account name if not provided
|
||||
account_name = validated_data.get('account_name')
|
||||
@@ -295,7 +378,8 @@ class RegisterSerializer(serializers.Serializer):
|
||||
first_name = validated_data.get('first_name', '')
|
||||
last_name = validated_data.get('last_name', '')
|
||||
if first_name or last_name:
|
||||
account_name = f"{first_name} {last_name}".strip() or validated_data['email'].split('@')[0]
|
||||
account_name = f"{first_name} {last_name}".strip() or \
|
||||
validated_data['email'].split('@')[0]
|
||||
else:
|
||||
account_name = validated_data['email'].split('@')[0]
|
||||
|
||||
@@ -321,17 +405,97 @@ class RegisterSerializer(serializers.Serializer):
|
||||
role='owner'
|
||||
)
|
||||
|
||||
# Now create account with user as owner
|
||||
# Generate unique slug for account
|
||||
# Clean the base slug: lowercase, replace spaces and underscores with hyphens
|
||||
import re
|
||||
import random
|
||||
import string
|
||||
base_slug = re.sub(r'[^a-z0-9-]', '', account_name.lower().replace(' ', '-').replace('_', '-'))[:40] or 'account'
|
||||
|
||||
# Add random suffix to prevent collisions (especially during concurrent registrations)
|
||||
random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||
slug = f"{base_slug}-{random_suffix}"
|
||||
|
||||
# Ensure uniqueness with fallback counter
|
||||
counter = 1
|
||||
while Account.objects.filter(slug=slug).exists():
|
||||
slug = f"{base_slug}-{random_suffix}-{counter}"
|
||||
counter += 1
|
||||
|
||||
# Create account with status and credits seeded (0 for paid pending)
|
||||
account = Account.objects.create(
|
||||
name=account_name,
|
||||
slug=account_name.lower().replace(' ', '-').replace('_', '-')[:50],
|
||||
slug=slug,
|
||||
owner=user,
|
||||
plan=plan
|
||||
plan=plan,
|
||||
credits=initial_credits,
|
||||
status=account_status,
|
||||
payment_method=validated_data.get('payment_method') or 'bank_transfer',
|
||||
# Save billing information
|
||||
billing_email=validated_data.get('billing_email', '') or validated_data.get('email', ''),
|
||||
billing_address_line1=validated_data.get('billing_address_line1', ''),
|
||||
billing_address_line2=validated_data.get('billing_address_line2', ''),
|
||||
billing_city=validated_data.get('billing_city', ''),
|
||||
billing_state=validated_data.get('billing_state', ''),
|
||||
billing_postal_code=validated_data.get('billing_postal_code', ''),
|
||||
billing_country=validated_data.get('billing_country', ''),
|
||||
tax_id=validated_data.get('tax_id', ''),
|
||||
)
|
||||
|
||||
# Log initial credit transaction only for free/trial accounts with credits
|
||||
if initial_credits > 0:
|
||||
CreditTransaction.objects.create(
|
||||
account=account,
|
||||
transaction_type='subscription',
|
||||
amount=initial_credits,
|
||||
balance_after=initial_credits,
|
||||
description=f'Free plan credits from {plan.name}',
|
||||
metadata={
|
||||
'plan_slug': plan.slug,
|
||||
'registration': True,
|
||||
'trial': True
|
||||
}
|
||||
)
|
||||
|
||||
# Update user to reference the new account
|
||||
user.account = account
|
||||
user.save()
|
||||
|
||||
# For paid plans, create subscription, invoice, and default payment method
|
||||
if plan_slug and plan_slug in paid_plans:
|
||||
payment_method = validated_data.get('payment_method', 'bank_transfer')
|
||||
|
||||
subscription = Subscription.objects.create(
|
||||
account=account,
|
||||
plan=plan,
|
||||
status='pending_payment',
|
||||
external_payment_id=None,
|
||||
current_period_start=billing_period_start,
|
||||
current_period_end=billing_period_end,
|
||||
cancel_at_period_end=False,
|
||||
)
|
||||
# Create pending invoice for the first period
|
||||
InvoiceService.create_subscription_invoice(
|
||||
subscription=subscription,
|
||||
billing_period_start=billing_period_start,
|
||||
billing_period_end=billing_period_end,
|
||||
)
|
||||
# Create AccountPaymentMethod with selected payment method
|
||||
payment_method_display_names = {
|
||||
'stripe': 'Credit/Debit Card (Stripe)',
|
||||
'paypal': 'PayPal',
|
||||
'bank_transfer': 'Bank Transfer (Manual)',
|
||||
'local_wallet': 'Mobile Wallet (Manual)',
|
||||
}
|
||||
AccountPaymentMethod.objects.create(
|
||||
account=account,
|
||||
type=payment_method,
|
||||
display_name=payment_method_display_names.get(payment_method, payment_method.title()),
|
||||
is_default=True,
|
||||
is_enabled=True,
|
||||
is_verified=False,
|
||||
instructions='Please complete payment and confirm with your transaction reference.',
|
||||
)
|
||||
|
||||
return user
|
||||
|
||||
@@ -340,6 +504,7 @@ class LoginSerializer(serializers.Serializer):
|
||||
"""Serializer for user login."""
|
||||
email = serializers.EmailField()
|
||||
password = serializers.CharField(write_only=True)
|
||||
remember_me = serializers.BooleanField(required=False, default=False)
|
||||
|
||||
|
||||
class ChangePasswordSerializer(serializers.Serializer):
|
||||
@@ -382,14 +547,14 @@ class SeedKeywordSerializer(serializers.ModelSerializer):
|
||||
industry_slug = serializers.CharField(source='industry.slug', read_only=True)
|
||||
sector_name = serializers.CharField(source='sector.name', read_only=True)
|
||||
sector_slug = serializers.CharField(source='sector.slug', read_only=True)
|
||||
intent_display = serializers.CharField(source='get_intent_display', read_only=True)
|
||||
country_display = serializers.CharField(source='get_country_display', read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = SeedKeyword
|
||||
fields = [
|
||||
'id', 'keyword', 'industry', 'industry_name', 'industry_slug',
|
||||
'sector', 'sector_name', 'sector_slug',
|
||||
'volume', 'difficulty', 'intent', 'intent_display',
|
||||
'volume', 'difficulty', 'country', 'country_display',
|
||||
'is_active', 'created_at', 'updated_at'
|
||||
]
|
||||
read_only_fields = ['created_at', 'updated_at']
|
||||
|
||||
@@ -46,12 +46,101 @@ class RegisterView(APIView):
|
||||
permission_classes = [permissions.AllowAny]
|
||||
|
||||
def post(self, request):
|
||||
from .utils import generate_access_token, generate_refresh_token, get_access_token_expiry, get_refresh_token_expiry
|
||||
from django.contrib.auth import login, logout
|
||||
from django.utils import timezone
|
||||
|
||||
force_logout = request.data.get('force_logout', False)
|
||||
|
||||
serializer = RegisterSerializer(data=request.data)
|
||||
if serializer.is_valid():
|
||||
user = serializer.save()
|
||||
|
||||
# SECURITY: Check for session contamination before login
|
||||
# If there's an existing session from a different user, handle it
|
||||
if request.session.session_key:
|
||||
existing_user_id = request.session.get('_auth_user_id')
|
||||
if existing_user_id and str(existing_user_id) != str(user.id):
|
||||
# Get existing user details
|
||||
try:
|
||||
existing_user = User.objects.get(id=existing_user_id)
|
||||
existing_email = existing_user.email
|
||||
existing_username = existing_user.username or existing_email.split('@')[0]
|
||||
except User.DoesNotExist:
|
||||
existing_email = 'Unknown user'
|
||||
existing_username = 'Unknown'
|
||||
|
||||
# If not forcing logout, return conflict info
|
||||
if not force_logout:
|
||||
return Response(
|
||||
{
|
||||
'status': 'error',
|
||||
'error': 'session_conflict',
|
||||
'message': f'You have an active session for another account ({existing_email}). Please logout first or choose to continue.',
|
||||
'existing_user': {
|
||||
'email': existing_email,
|
||||
'username': existing_username,
|
||||
'id': existing_user_id
|
||||
},
|
||||
'requested_user': {
|
||||
'email': user.email,
|
||||
'username': user.username or user.email.split('@')[0],
|
||||
'id': user.id
|
||||
}
|
||||
},
|
||||
status=status.HTTP_409_CONFLICT
|
||||
)
|
||||
|
||||
# Force logout - clean existing session completely
|
||||
logout(request)
|
||||
# Clear all session data
|
||||
request.session.flush()
|
||||
|
||||
# Log the user in (create session for session authentication)
|
||||
login(request, user)
|
||||
|
||||
# Get account from user
|
||||
account = getattr(user, 'account', None)
|
||||
|
||||
# Generate JWT tokens
|
||||
access_token = generate_access_token(user, account)
|
||||
refresh_token = generate_refresh_token(user, account)
|
||||
access_expires_at = timezone.now() + get_access_token_expiry()
|
||||
refresh_expires_at = timezone.now() + get_refresh_token_expiry()
|
||||
|
||||
user_serializer = UserSerializer(user)
|
||||
|
||||
# Build response data
|
||||
response_data = {
|
||||
'user': user_serializer.data,
|
||||
'tokens': {
|
||||
'access': access_token,
|
||||
'refresh': refresh_token,
|
||||
'access_expires_at': access_expires_at.isoformat(),
|
||||
'refresh_expires_at': refresh_expires_at.isoformat(),
|
||||
}
|
||||
}
|
||||
|
||||
# NOTE: Payment checkout is NO LONGER created at registration
|
||||
# User will complete payment on /account/plans after signup
|
||||
# This simplifies the signup flow and consolidates all payment handling
|
||||
|
||||
# Send welcome email (if enabled in settings)
|
||||
try:
|
||||
from igny8_core.modules.system.email_models import EmailSettings
|
||||
from igny8_core.business.billing.services.email_service import send_welcome_email
|
||||
|
||||
email_settings = EmailSettings.get_settings()
|
||||
if email_settings.send_welcome_emails and account:
|
||||
send_welcome_email(user, account)
|
||||
except Exception as e:
|
||||
# Don't fail registration if email fails
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.error(f"Failed to send welcome email for user {user.id}: {e}")
|
||||
|
||||
return success_response(
|
||||
data={'user': user_serializer.data},
|
||||
data=response_data,
|
||||
message='Registration successful',
|
||||
status_code=status.HTTP_201_CREATED,
|
||||
request=request
|
||||
@@ -78,6 +167,8 @@ class LoginView(APIView):
|
||||
if serializer.is_valid():
|
||||
email = serializer.validated_data['email']
|
||||
password = serializer.validated_data['password']
|
||||
remember_me = serializer.validated_data.get('remember_me', False)
|
||||
force_logout = request.data.get('force_logout', False)
|
||||
|
||||
try:
|
||||
user = User.objects.select_related('account', 'account__plan').get(email=email)
|
||||
@@ -89,6 +180,47 @@ class LoginView(APIView):
|
||||
)
|
||||
|
||||
if user.check_password(password):
|
||||
# SECURITY: Check for session contamination before login
|
||||
# If user has a session cookie from a different user, handle it
|
||||
if request.session.session_key:
|
||||
existing_user_id = request.session.get('_auth_user_id')
|
||||
if existing_user_id and str(existing_user_id) != str(user.id):
|
||||
# Get existing user details
|
||||
try:
|
||||
existing_user = User.objects.get(id=existing_user_id)
|
||||
existing_email = existing_user.email
|
||||
existing_username = existing_user.username or existing_email.split('@')[0]
|
||||
except User.DoesNotExist:
|
||||
existing_email = 'Unknown user'
|
||||
existing_username = 'Unknown'
|
||||
|
||||
# If not forcing logout, return conflict info
|
||||
if not force_logout:
|
||||
return Response(
|
||||
{
|
||||
'status': 'error',
|
||||
'error': 'session_conflict',
|
||||
'message': f'You have an active session for another account ({existing_email}). Please logout first or choose to continue.',
|
||||
'existing_user': {
|
||||
'email': existing_email,
|
||||
'username': existing_username,
|
||||
'id': existing_user_id
|
||||
},
|
||||
'requested_user': {
|
||||
'email': user.email,
|
||||
'username': user.username or user.email.split('@')[0],
|
||||
'id': user.id
|
||||
}
|
||||
},
|
||||
status=status.HTTP_409_CONFLICT
|
||||
)
|
||||
|
||||
# Force logout - clean existing session completely
|
||||
from django.contrib.auth import logout
|
||||
logout(request)
|
||||
# Clear all session data
|
||||
request.session.flush()
|
||||
|
||||
# Log the user in (create session for session authentication)
|
||||
from django.contrib.auth import login
|
||||
login(request, user)
|
||||
@@ -97,11 +229,12 @@ class LoginView(APIView):
|
||||
account = getattr(user, 'account', None)
|
||||
|
||||
# Generate JWT tokens
|
||||
from .utils import generate_access_token, generate_refresh_token, get_token_expiry
|
||||
access_token = generate_access_token(user, account)
|
||||
from .utils import generate_access_token, generate_refresh_token, get_access_token_expiry, get_refresh_token_expiry
|
||||
from django.utils import timezone
|
||||
access_token = generate_access_token(user, account, remember_me=remember_me)
|
||||
refresh_token = generate_refresh_token(user, account)
|
||||
access_expires_at = get_token_expiry('access')
|
||||
refresh_expires_at = get_token_expiry('refresh')
|
||||
access_expires_at = timezone.now() + get_access_token_expiry(remember_me=remember_me)
|
||||
refresh_expires_at = timezone.now() + get_refresh_token_expiry()
|
||||
|
||||
# Serialize user data safely, handling missing account relationship
|
||||
try:
|
||||
@@ -152,6 +285,128 @@ class LoginView(APIView):
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
|
||||
tags=['Authentication'],
|
||||
summary='Request Password Reset',
|
||||
description='Request password reset email'
|
||||
)
|
||||
class PasswordResetRequestView(APIView):
|
||||
"""Request password reset endpoint - sends email with reset token."""
|
||||
permission_classes = [permissions.AllowAny]
|
||||
|
||||
def post(self, request):
|
||||
from .serializers import RequestPasswordResetSerializer
|
||||
from .models import PasswordResetToken
|
||||
|
||||
serializer = RequestPasswordResetSerializer(data=request.data)
|
||||
if not serializer.is_valid():
|
||||
return error_response(
|
||||
error='Validation failed',
|
||||
errors=serializer.errors,
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
email = serializer.validated_data['email']
|
||||
|
||||
try:
|
||||
user = User.objects.get(email=email)
|
||||
except User.DoesNotExist:
|
||||
# Don't reveal if email exists - return success anyway
|
||||
return success_response(
|
||||
message='If an account with that email exists, a password reset link has been sent.',
|
||||
request=request
|
||||
)
|
||||
|
||||
# Generate secure token
|
||||
import secrets
|
||||
token = secrets.token_urlsafe(32)
|
||||
|
||||
# Create reset token (expires in 1 hour)
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
expires_at = timezone.now() + timedelta(hours=1)
|
||||
|
||||
PasswordResetToken.objects.create(
|
||||
user=user,
|
||||
token=token,
|
||||
expires_at=expires_at
|
||||
)
|
||||
|
||||
# Send password reset email
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.info(f"[PASSWORD_RESET] Attempting to send reset email to: {email}")
|
||||
|
||||
try:
|
||||
from igny8_core.business.billing.services.email_service import send_password_reset_email
|
||||
result = send_password_reset_email(user, token)
|
||||
logger.info(f"[PASSWORD_RESET] Email send result: {result}")
|
||||
print(f"[PASSWORD_RESET] Email send result: {result}") # Console output
|
||||
except Exception as e:
|
||||
logger.error(f"[PASSWORD_RESET] Failed to send password reset email: {e}", exc_info=True)
|
||||
print(f"[PASSWORD_RESET] ERROR: {e}") # Console output
|
||||
|
||||
return success_response(
|
||||
message='If an account with that email exists, a password reset link has been sent.',
|
||||
request=request
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
|
||||
tags=['Authentication'],
|
||||
summary='Reset Password',
|
||||
description='Reset password using token from email'
|
||||
)
|
||||
class PasswordResetConfirmView(APIView):
|
||||
"""Confirm password reset with token."""
|
||||
permission_classes = [permissions.AllowAny]
|
||||
|
||||
def post(self, request):
|
||||
from .serializers import ResetPasswordSerializer
|
||||
from .models import PasswordResetToken
|
||||
from django.utils import timezone
|
||||
|
||||
serializer = ResetPasswordSerializer(data=request.data)
|
||||
if not serializer.is_valid():
|
||||
return error_response(
|
||||
error='Validation failed',
|
||||
errors=serializer.errors,
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
token = serializer.validated_data['token']
|
||||
new_password = serializer.validated_data['new_password']
|
||||
|
||||
try:
|
||||
reset_token = PasswordResetToken.objects.get(
|
||||
token=token,
|
||||
used=False,
|
||||
expires_at__gt=timezone.now()
|
||||
)
|
||||
except PasswordResetToken.DoesNotExist:
|
||||
return error_response(
|
||||
error='Invalid or expired reset token',
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
# Reset password
|
||||
user = reset_token.user
|
||||
user.set_password(new_password)
|
||||
user.save()
|
||||
|
||||
# Mark token as used
|
||||
reset_token.used = True
|
||||
reset_token.save()
|
||||
|
||||
return success_response(
|
||||
message='Password reset successfully. You can now log in with your new password.',
|
||||
request=request
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
|
||||
tags=['Authentication'],
|
||||
summary='Change Password',
|
||||
@@ -247,6 +502,7 @@ class RefreshTokenView(APIView):
|
||||
account = getattr(user, 'account', None)
|
||||
|
||||
# Generate new access token
|
||||
from .utils import get_token_expiry
|
||||
access_token = generate_access_token(user, account)
|
||||
access_expires_at = get_token_expiry('access')
|
||||
|
||||
@@ -266,6 +522,77 @@ class RefreshTokenView(APIView):
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
|
||||
tags=['Authentication'],
|
||||
summary='Get Country List',
|
||||
description='Returns list of countries for registration country selection'
|
||||
)
|
||||
class CountryListView(APIView):
|
||||
"""Returns list of countries for signup dropdown"""
|
||||
permission_classes = [permissions.AllowAny] # Public endpoint
|
||||
|
||||
def get(self, request):
|
||||
"""Get list of countries with codes and names"""
|
||||
# Comprehensive list of countries for billing purposes
|
||||
countries = [
|
||||
{'code': 'US', 'name': 'United States'},
|
||||
{'code': 'GB', 'name': 'United Kingdom'},
|
||||
{'code': 'CA', 'name': 'Canada'},
|
||||
{'code': 'AU', 'name': 'Australia'},
|
||||
{'code': 'DE', 'name': 'Germany'},
|
||||
{'code': 'FR', 'name': 'France'},
|
||||
{'code': 'ES', 'name': 'Spain'},
|
||||
{'code': 'IT', 'name': 'Italy'},
|
||||
{'code': 'NL', 'name': 'Netherlands'},
|
||||
{'code': 'BE', 'name': 'Belgium'},
|
||||
{'code': 'CH', 'name': 'Switzerland'},
|
||||
{'code': 'AT', 'name': 'Austria'},
|
||||
{'code': 'SE', 'name': 'Sweden'},
|
||||
{'code': 'NO', 'name': 'Norway'},
|
||||
{'code': 'DK', 'name': 'Denmark'},
|
||||
{'code': 'FI', 'name': 'Finland'},
|
||||
{'code': 'IE', 'name': 'Ireland'},
|
||||
{'code': 'PT', 'name': 'Portugal'},
|
||||
{'code': 'PL', 'name': 'Poland'},
|
||||
{'code': 'CZ', 'name': 'Czech Republic'},
|
||||
{'code': 'NZ', 'name': 'New Zealand'},
|
||||
{'code': 'SG', 'name': 'Singapore'},
|
||||
{'code': 'HK', 'name': 'Hong Kong'},
|
||||
{'code': 'JP', 'name': 'Japan'},
|
||||
{'code': 'KR', 'name': 'South Korea'},
|
||||
{'code': 'IN', 'name': 'India'},
|
||||
{'code': 'PK', 'name': 'Pakistan'},
|
||||
{'code': 'BD', 'name': 'Bangladesh'},
|
||||
{'code': 'AE', 'name': 'United Arab Emirates'},
|
||||
{'code': 'SA', 'name': 'Saudi Arabia'},
|
||||
{'code': 'ZA', 'name': 'South Africa'},
|
||||
{'code': 'NG', 'name': 'Nigeria'},
|
||||
{'code': 'EG', 'name': 'Egypt'},
|
||||
{'code': 'KE', 'name': 'Kenya'},
|
||||
{'code': 'BR', 'name': 'Brazil'},
|
||||
{'code': 'MX', 'name': 'Mexico'},
|
||||
{'code': 'AR', 'name': 'Argentina'},
|
||||
{'code': 'CL', 'name': 'Chile'},
|
||||
{'code': 'CO', 'name': 'Colombia'},
|
||||
{'code': 'PE', 'name': 'Peru'},
|
||||
{'code': 'MY', 'name': 'Malaysia'},
|
||||
{'code': 'TH', 'name': 'Thailand'},
|
||||
{'code': 'VN', 'name': 'Vietnam'},
|
||||
{'code': 'PH', 'name': 'Philippines'},
|
||||
{'code': 'ID', 'name': 'Indonesia'},
|
||||
{'code': 'TR', 'name': 'Turkey'},
|
||||
{'code': 'RU', 'name': 'Russia'},
|
||||
{'code': 'UA', 'name': 'Ukraine'},
|
||||
{'code': 'RO', 'name': 'Romania'},
|
||||
{'code': 'GR', 'name': 'Greece'},
|
||||
{'code': 'IL', 'name': 'Israel'},
|
||||
{'code': 'TW', 'name': 'Taiwan'},
|
||||
]
|
||||
# Sort alphabetically by name
|
||||
countries.sort(key=lambda x: x['name'])
|
||||
return Response({'countries': countries})
|
||||
|
||||
|
||||
@extend_schema(exclude=True) # Exclude from public API documentation - internal authenticated endpoint
|
||||
class MeView(APIView):
|
||||
"""Get current user information."""
|
||||
@@ -283,12 +610,86 @@ class MeView(APIView):
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
|
||||
tags=['Authentication'],
|
||||
summary='Unsubscribe from Emails',
|
||||
description='Unsubscribe a user from marketing, billing, or all email notifications'
|
||||
)
|
||||
class UnsubscribeView(APIView):
|
||||
"""Handle email unsubscribe requests with signed URLs."""
|
||||
permission_classes = [permissions.AllowAny]
|
||||
|
||||
def post(self, request):
|
||||
"""
|
||||
Process unsubscribe request.
|
||||
|
||||
Expected payload:
|
||||
- email: The email address to unsubscribe
|
||||
- type: Type of emails to unsubscribe from (marketing, billing, all)
|
||||
- ts: Timestamp from signed URL
|
||||
- sig: HMAC signature from signed URL
|
||||
"""
|
||||
from igny8_core.business.billing.services.email_service import verify_unsubscribe_signature
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
email = request.data.get('email')
|
||||
email_type = request.data.get('type', 'all')
|
||||
timestamp = request.data.get('ts')
|
||||
signature = request.data.get('sig')
|
||||
|
||||
# Validate required fields
|
||||
if not email or not timestamp or not signature:
|
||||
return error_response(
|
||||
error='Missing required parameters',
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
try:
|
||||
timestamp = int(timestamp)
|
||||
except (ValueError, TypeError):
|
||||
return error_response(
|
||||
error='Invalid timestamp',
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
# Verify signature
|
||||
if not verify_unsubscribe_signature(email, email_type, timestamp, signature):
|
||||
return error_response(
|
||||
error='Invalid or expired unsubscribe link',
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
request=request
|
||||
)
|
||||
|
||||
# Log the unsubscribe request
|
||||
# In production, update user preferences or use email provider's suppression list
|
||||
logger.info(f'Unsubscribe request processed: email={email}, type={email_type}')
|
||||
|
||||
# TODO: Implement preference storage
|
||||
# Options:
|
||||
# 1. Add email preference fields to User model
|
||||
# 2. Use Resend's suppression list API
|
||||
# 3. Create EmailPreferences model
|
||||
|
||||
return success_response(
|
||||
message=f'Successfully unsubscribed from {email_type} emails',
|
||||
request=request
|
||||
)
|
||||
|
||||
|
||||
urlpatterns = [
|
||||
path('', include(router.urls)),
|
||||
path('register/', csrf_exempt(RegisterView.as_view()), name='auth-register'),
|
||||
path('login/', csrf_exempt(LoginView.as_view()), name='auth-login'),
|
||||
path('refresh/', csrf_exempt(RefreshTokenView.as_view()), name='auth-refresh'),
|
||||
path('change-password/', ChangePasswordView.as_view(), name='auth-change-password'),
|
||||
path('password-reset/', csrf_exempt(PasswordResetRequestView.as_view()), name='auth-password-reset-request'),
|
||||
path('password-reset/confirm/', csrf_exempt(PasswordResetConfirmView.as_view()), name='auth-password-reset-confirm'),
|
||||
path('me/', MeView.as_view(), name='auth-me'),
|
||||
path('countries/', CountryListView.as_view(), name='auth-countries'),
|
||||
path('unsubscribe/', csrf_exempt(UnsubscribeView.as_view()), name='auth-unsubscribe'),
|
||||
]
|
||||
|
||||
|
||||
@@ -17,23 +17,26 @@ def get_jwt_algorithm():
|
||||
return getattr(settings, 'JWT_ALGORITHM', 'HS256')
|
||||
|
||||
|
||||
def get_access_token_expiry():
|
||||
def get_access_token_expiry(remember_me=False):
|
||||
"""Get access token expiry time from settings"""
|
||||
return getattr(settings, 'JWT_ACCESS_TOKEN_EXPIRY', timedelta(minutes=15))
|
||||
if remember_me:
|
||||
return getattr(settings, 'JWT_ACCESS_TOKEN_EXPIRY_REMEMBER_ME', timedelta(days=20))
|
||||
return getattr(settings, 'JWT_ACCESS_TOKEN_EXPIRY', timedelta(hours=1))
|
||||
|
||||
|
||||
def get_refresh_token_expiry():
|
||||
"""Get refresh token expiry time from settings"""
|
||||
return getattr(settings, 'JWT_REFRESH_TOKEN_EXPIRY', timedelta(days=7))
|
||||
return getattr(settings, 'JWT_REFRESH_TOKEN_EXPIRY', timedelta(days=30))
|
||||
|
||||
|
||||
def generate_access_token(user, account=None):
|
||||
def generate_access_token(user, account=None, remember_me=False):
|
||||
"""
|
||||
Generate JWT access token for user
|
||||
|
||||
Args:
|
||||
user: User instance
|
||||
account: Account instance (optional, will use user.account if not provided)
|
||||
remember_me: bool - If True, use extended expiry (20 days)
|
||||
|
||||
Returns:
|
||||
str: JWT access token
|
||||
@@ -42,7 +45,7 @@ def generate_access_token(user, account=None):
|
||||
account = getattr(user, 'account', None)
|
||||
|
||||
now = timezone.now()
|
||||
expiry = now + get_access_token_expiry()
|
||||
expiry = now + get_access_token_expiry(remember_me=remember_me)
|
||||
|
||||
payload = {
|
||||
'user_id': user.id,
|
||||
@@ -51,6 +54,7 @@ def generate_access_token(user, account=None):
|
||||
'exp': int(expiry.timestamp()),
|
||||
'iat': int(now.timestamp()),
|
||||
'type': 'access',
|
||||
'remember_me': remember_me,
|
||||
}
|
||||
|
||||
token = jwt.encode(payload, get_jwt_secret_key(), algorithm=get_jwt_algorithm())
|
||||
@@ -128,3 +132,72 @@ def get_token_expiry(token_type='access'):
|
||||
return now + get_refresh_token_expiry()
|
||||
return now + get_access_token_expiry()
|
||||
|
||||
|
||||
|
||||
|
||||
def validate_account_and_plan(user_or_account):
|
||||
"""
|
||||
Validate account exists and has active plan.
|
||||
Allows trial, active, and pending_payment statuses.
|
||||
Bypasses validation for superusers, developers, and system accounts.
|
||||
|
||||
Args:
|
||||
user_or_account: User or Account instance
|
||||
|
||||
Returns:
|
||||
tuple: (is_valid: bool, error_msg: str or None, http_status: int or None)
|
||||
"""
|
||||
from rest_framework import status
|
||||
from .models import User, Account
|
||||
|
||||
# Extract account from user or use directly
|
||||
if isinstance(user_or_account, User):
|
||||
try:
|
||||
account = getattr(user_or_account, 'account', None)
|
||||
except Exception:
|
||||
account = None
|
||||
elif isinstance(user_or_account, Account):
|
||||
account = user_or_account
|
||||
# Check if account is a system account
|
||||
try:
|
||||
if hasattr(account, 'is_system_account') and account.is_system_account():
|
||||
return (True, None, None)
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
return (False, 'Invalid object type', status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Check account exists
|
||||
if not account:
|
||||
return (
|
||||
False,
|
||||
'Account not configured for this user. Please contact support.',
|
||||
status.HTTP_403_FORBIDDEN
|
||||
)
|
||||
|
||||
# Check account status - allow trial, active, pending_payment
|
||||
# Block only suspended and cancelled
|
||||
if hasattr(account, 'status') and account.status in ['suspended', 'cancelled']:
|
||||
return (
|
||||
False,
|
||||
f'Account is {account.status}. Please contact support.',
|
||||
status.HTTP_403_FORBIDDEN
|
||||
)
|
||||
|
||||
# Check plan exists and is active
|
||||
plan = getattr(account, 'plan', None)
|
||||
if not plan:
|
||||
return (
|
||||
False,
|
||||
'No subscription plan assigned. Visit igny8.com/pricing to subscribe.',
|
||||
status.HTTP_402_PAYMENT_REQUIRED
|
||||
)
|
||||
|
||||
if hasattr(plan, 'is_active') and not plan.is_active:
|
||||
return (
|
||||
False,
|
||||
'Active subscription required. Visit igny8.com/pricing to subscribe.',
|
||||
status.HTTP_402_PAYMENT_REQUIRED
|
||||
)
|
||||
|
||||
return (True, None, None)
|
||||
|
||||
@@ -341,7 +341,8 @@ class SubscriptionsViewSet(AccountModelViewSet):
|
||||
queryset = Subscription.objects.all()
|
||||
permission_classes = [IsAuthenticatedAndActive, HasTenantAccess, IsOwnerOrAdmin]
|
||||
pagination_class = CustomPageNumberPagination
|
||||
throttle_scope = 'auth'
|
||||
# Use relaxed auth throttle to avoid 429s during onboarding plan fetches
|
||||
throttle_scope = 'auth_read'
|
||||
throttle_classes = [DebugScopedRateThrottle]
|
||||
|
||||
def get_queryset(self):
|
||||
@@ -439,14 +440,26 @@ class SiteUserAccessViewSet(AccountModelViewSet):
|
||||
class PlanViewSet(viewsets.ReadOnlyModelViewSet):
|
||||
"""
|
||||
ViewSet for listing active subscription plans.
|
||||
Excludes internal-only plans (Free/Internal) from public listings.
|
||||
Unified API Standard v1.0 compliant
|
||||
"""
|
||||
queryset = Plan.objects.filter(is_active=True)
|
||||
queryset = Plan.objects.filter(is_active=True, is_internal=False)
|
||||
serializer_class = PlanSerializer
|
||||
permission_classes = [permissions.AllowAny]
|
||||
pagination_class = CustomPageNumberPagination
|
||||
throttle_scope = 'auth'
|
||||
throttle_classes = [DebugScopedRateThrottle]
|
||||
# Plans are public and should not throttle aggressively to avoid blocking signup/onboarding
|
||||
throttle_scope = None
|
||||
throttle_classes: list = []
|
||||
|
||||
def list(self, request, *args, **kwargs):
|
||||
"""Override list to return paginated response with unified format"""
|
||||
queryset = self.filter_queryset(self.get_queryset())
|
||||
page = self.paginate_queryset(queryset)
|
||||
if page is not None:
|
||||
serializer = self.get_serializer(page, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
serializer = self.get_serializer(queryset, many=True)
|
||||
return success_response(data={'results': serializer.data}, request=request)
|
||||
|
||||
def retrieve(self, request, *args, **kwargs):
|
||||
"""Override retrieve to return unified format"""
|
||||
@@ -474,7 +487,7 @@ class SiteViewSet(AccountModelViewSet):
|
||||
"""ViewSet for managing Sites."""
|
||||
serializer_class = SiteSerializer
|
||||
permission_classes = [IsAuthenticatedAndActive, HasTenantAccess, IsEditorOrAbove]
|
||||
authentication_classes = [JWTAuthentication, CSRFExemptSessionAuthentication]
|
||||
authentication_classes = [JWTAuthentication]
|
||||
|
||||
def get_permissions(self):
|
||||
"""Allow normal users (viewer) to create sites, but require editor+ for other operations."""
|
||||
@@ -483,8 +496,9 @@ class SiteViewSet(AccountModelViewSet):
|
||||
from rest_framework.permissions import AllowAny
|
||||
return [AllowAny()]
|
||||
if self.action == 'create':
|
||||
# For create, only require authentication - not active account status
|
||||
return [permissions.IsAuthenticated()]
|
||||
return [IsEditorOrAbove()]
|
||||
return [IsAuthenticatedAndActive(), HasTenantAccess(), IsEditorOrAbove()]
|
||||
|
||||
def get_queryset(self):
|
||||
"""Return sites accessible to the current user."""
|
||||
@@ -498,33 +512,44 @@ class SiteViewSet(AccountModelViewSet):
|
||||
|
||||
user = self.request.user
|
||||
|
||||
# ADMIN/DEV OVERRIDE: Both admins and developers can see all sites
|
||||
if user.is_admin_or_developer():
|
||||
return Site.objects.all().distinct()
|
||||
|
||||
# Get account from user
|
||||
account = getattr(user, 'account', None)
|
||||
if not account:
|
||||
return Site.objects.none()
|
||||
|
||||
if user.role in ['owner', 'admin']:
|
||||
return Site.objects.filter(account=account)
|
||||
if hasattr(user, 'get_accessible_sites'):
|
||||
return user.get_accessible_sites()
|
||||
|
||||
return Site.objects.filter(
|
||||
account=account,
|
||||
user_access__user=user
|
||||
).distinct()
|
||||
return Site.objects.filter(account=account)
|
||||
|
||||
def perform_create(self, serializer):
|
||||
"""Create site with account."""
|
||||
"""Create site with account and auto-grant access to creator."""
|
||||
account = getattr(self.request, 'account', None)
|
||||
if not account:
|
||||
user = self.request.user
|
||||
if user and user.is_authenticated:
|
||||
account = getattr(user, 'account', None)
|
||||
|
||||
# Check hard limit for sites
|
||||
from igny8_core.business.billing.services.limit_service import LimitService, HardLimitExceededError
|
||||
try:
|
||||
LimitService.check_hard_limit(account, 'sites', additional_count=1)
|
||||
except HardLimitExceededError as e:
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
raise PermissionDenied(str(e))
|
||||
|
||||
# Multiple sites can be active simultaneously - no constraint
|
||||
serializer.save(account=account)
|
||||
site = serializer.save(account=account)
|
||||
|
||||
# Auto-create SiteUserAccess for owner/admin who creates the site
|
||||
user = self.request.user
|
||||
if user and user.is_authenticated and hasattr(user, 'role'):
|
||||
if user.role in ['owner', 'admin']:
|
||||
from igny8_core.auth.models import SiteUserAccess
|
||||
SiteUserAccess.objects.get_or_create(
|
||||
user=user,
|
||||
site=site,
|
||||
defaults={'granted_by': user}
|
||||
)
|
||||
|
||||
def perform_update(self, serializer):
|
||||
"""Update site."""
|
||||
@@ -727,18 +752,13 @@ class SectorViewSet(AccountModelViewSet):
|
||||
"""ViewSet for managing Sectors."""
|
||||
serializer_class = SectorSerializer
|
||||
permission_classes = [IsAuthenticatedAndActive, HasTenantAccess, IsEditorOrAbove]
|
||||
authentication_classes = [JWTAuthentication, CSRFExemptSessionAuthentication]
|
||||
authentication_classes = [JWTAuthentication]
|
||||
|
||||
def get_queryset(self):
|
||||
"""Return sectors from sites accessible to the current user."""
|
||||
user = self.request.user
|
||||
if not user or not user.is_authenticated:
|
||||
return Sector.objects.none()
|
||||
|
||||
# ADMIN/DEV OVERRIDE: Both admins and developers can see all sectors across all sites
|
||||
if user.is_admin_or_developer():
|
||||
return Sector.objects.all().distinct()
|
||||
|
||||
accessible_sites = user.get_accessible_sites()
|
||||
return Sector.objects.filter(site__in=accessible_sites)
|
||||
|
||||
@@ -819,7 +839,7 @@ class SeedKeywordViewSet(viewsets.ReadOnlyModelViewSet):
|
||||
search_fields = ['keyword']
|
||||
ordering_fields = ['keyword', 'volume', 'difficulty', 'created_at']
|
||||
ordering = ['keyword']
|
||||
filterset_fields = ['industry', 'sector', 'intent', 'is_active']
|
||||
filterset_fields = ['industry', 'sector', 'country', 'is_active']
|
||||
|
||||
def retrieve(self, request, *args, **kwargs):
|
||||
"""Override retrieve to return unified format"""
|
||||
@@ -857,7 +877,7 @@ class SeedKeywordViewSet(viewsets.ReadOnlyModelViewSet):
|
||||
def import_seed_keywords(self, request):
|
||||
"""
|
||||
Import seed keywords from CSV (Admin/Superuser only).
|
||||
Expected columns: keyword, industry_name, sector_name, volume, difficulty, intent
|
||||
Expected columns: keyword, industry_name, sector_name, volume, difficulty, country
|
||||
"""
|
||||
import csv
|
||||
from django.db import transaction
|
||||
@@ -940,7 +960,7 @@ class SeedKeywordViewSet(viewsets.ReadOnlyModelViewSet):
|
||||
sector=sector,
|
||||
volume=int(row.get('volume', 0) or 0),
|
||||
difficulty=int(row.get('difficulty', 0) or 0),
|
||||
intent=row.get('intent', 'informational') or 'informational',
|
||||
country=row.get('country', 'US') or 'US',
|
||||
is_active=True
|
||||
)
|
||||
imported_count += 1
|
||||
@@ -1247,16 +1267,21 @@ class AuthViewSet(viewsets.GenericViewSet):
|
||||
expires_at=expires_at
|
||||
)
|
||||
|
||||
# Send email (async via Celery if available, otherwise sync)
|
||||
# Send password reset email using the email service
|
||||
try:
|
||||
from igny8_core.modules.system.tasks import send_password_reset_email
|
||||
send_password_reset_email.delay(user.id, token)
|
||||
except:
|
||||
# Fallback to sync email sending
|
||||
from igny8_core.business.billing.services.email_service import send_password_reset_email
|
||||
send_password_reset_email(user, token)
|
||||
except Exception as e:
|
||||
# Fallback to Django's send_mail if email service fails
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.error(f"Failed to send password reset email via email service: {e}")
|
||||
|
||||
from django.core.mail import send_mail
|
||||
from django.conf import settings
|
||||
|
||||
reset_url = f"{request.scheme}://{request.get_host()}/reset-password?token={token}"
|
||||
frontend_url = getattr(settings, 'FRONTEND_URL', 'https://app.igny8.com')
|
||||
reset_url = f"{frontend_url}/reset-password?token={token}"
|
||||
|
||||
send_mail(
|
||||
subject='Reset Your IGNY8 Password',
|
||||
@@ -1467,9 +1492,9 @@ def seedkeyword_csv_template(request):
|
||||
response['Content-Disposition'] = 'attachment; filename="seedkeyword_template.csv"'
|
||||
|
||||
writer = csv.writer(response)
|
||||
writer.writerow(['keyword', 'industry', 'sector', 'volume', 'difficulty', 'intent', 'is_active'])
|
||||
writer.writerow(['python programming', 'Technology', 'Software Development', '10000', '45', 'Informational', 'true'])
|
||||
writer.writerow(['medical software', 'Healthcare', 'Healthcare IT', '5000', '60', 'Commercial', 'true'])
|
||||
writer.writerow(['keyword', 'industry', 'sector', 'volume', 'difficulty', 'country', 'is_active'])
|
||||
writer.writerow(['python programming', 'Technology', 'Software Development', '10000', '45', 'US', 'true'])
|
||||
writer.writerow(['medical software', 'Healthcare', 'Healthcare IT', '5000', '60', 'CA', 'true'])
|
||||
|
||||
return response
|
||||
|
||||
@@ -1514,7 +1539,7 @@ def seedkeyword_csv_import(request):
|
||||
defaults={
|
||||
'volume': int(row.get('volume', 0)),
|
||||
'difficulty': int(row.get('difficulty', 0)),
|
||||
'intent': row.get('intent', 'Informational'),
|
||||
'country': row.get('country', 'US'),
|
||||
'is_active': is_active
|
||||
}
|
||||
)
|
||||
|
||||
@@ -2,19 +2,165 @@
|
||||
Admin registration for Automation models
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from igny8_core.admin.base import AccountAdminMixin
|
||||
from django.contrib import messages
|
||||
from unfold.admin import ModelAdmin
|
||||
from igny8_core.admin.base import AccountAdminMixin, Igny8ModelAdmin
|
||||
from .models import AutomationConfig, AutomationRun
|
||||
|
||||
|
||||
from import_export.admin import ExportMixin
|
||||
from import_export import resources
|
||||
|
||||
|
||||
class AutomationConfigResource(resources.ModelResource):
|
||||
"""Resource class for exporting Automation Configs"""
|
||||
class Meta:
|
||||
model = AutomationConfig
|
||||
fields = ('id', 'site__domain', 'is_enabled', 'frequency', 'scheduled_time',
|
||||
'within_stage_delay', 'between_stage_delay', 'last_run_at', 'created_at')
|
||||
export_order = fields
|
||||
|
||||
|
||||
@admin.register(AutomationConfig)
|
||||
class AutomationConfigAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
class AutomationConfigAdmin(ExportMixin, AccountAdminMixin, Igny8ModelAdmin):
|
||||
resource_class = AutomationConfigResource
|
||||
list_display = ('site', 'is_enabled', 'frequency', 'scheduled_time', 'within_stage_delay', 'between_stage_delay', 'last_run_at')
|
||||
list_filter = ('is_enabled', 'frequency')
|
||||
search_fields = ('site__domain',)
|
||||
actions = [
|
||||
'bulk_enable',
|
||||
'bulk_disable',
|
||||
'bulk_update_frequency',
|
||||
'bulk_update_delays',
|
||||
]
|
||||
|
||||
def bulk_enable(self, request, queryset):
|
||||
"""Enable selected automation configs"""
|
||||
updated = queryset.update(is_enabled=True)
|
||||
self.message_user(request, f'{updated} automation config(s) enabled.', messages.SUCCESS)
|
||||
bulk_enable.short_description = 'Enable selected automations'
|
||||
|
||||
def bulk_disable(self, request, queryset):
|
||||
"""Disable selected automation configs"""
|
||||
updated = queryset.update(is_enabled=False)
|
||||
self.message_user(request, f'{updated} automation config(s) disabled.', messages.SUCCESS)
|
||||
bulk_disable.short_description = 'Disable selected automations'
|
||||
|
||||
def bulk_update_frequency(self, request, queryset):
|
||||
"""Update frequency for selected automation configs"""
|
||||
from django import forms
|
||||
|
||||
if 'apply' in request.POST:
|
||||
frequency = request.POST.get('frequency')
|
||||
if frequency:
|
||||
updated = queryset.update(frequency=frequency)
|
||||
self.message_user(request, f'{updated} automation config(s) updated to frequency: {frequency}', messages.SUCCESS)
|
||||
return
|
||||
|
||||
FREQUENCY_CHOICES = [
|
||||
('hourly', 'Hourly'),
|
||||
('daily', 'Daily'),
|
||||
('weekly', 'Weekly'),
|
||||
]
|
||||
|
||||
class FrequencyForm(forms.Form):
|
||||
frequency = forms.ChoiceField(
|
||||
choices=FREQUENCY_CHOICES,
|
||||
label="Select Frequency",
|
||||
help_text=f"Update frequency for {queryset.count()} automation config(s)"
|
||||
)
|
||||
|
||||
from django.shortcuts import render
|
||||
return render(request, 'admin/bulk_action_form.html', {
|
||||
'title': 'Update Automation Frequency',
|
||||
'queryset': queryset,
|
||||
'form': FrequencyForm(),
|
||||
'action': 'bulk_update_frequency',
|
||||
})
|
||||
bulk_update_frequency.short_description = 'Update frequency'
|
||||
|
||||
def bulk_update_delays(self, request, queryset):
|
||||
"""Update delay settings for selected automation configs"""
|
||||
from django import forms
|
||||
|
||||
if 'apply' in request.POST:
|
||||
within_delay = int(request.POST.get('within_stage_delay', 0))
|
||||
between_delay = int(request.POST.get('between_stage_delay', 0))
|
||||
|
||||
updated = queryset.update(
|
||||
within_stage_delay=within_delay,
|
||||
between_stage_delay=between_delay
|
||||
)
|
||||
self.message_user(request, f'{updated} automation config(s) delay settings updated.', messages.SUCCESS)
|
||||
return
|
||||
|
||||
class DelayForm(forms.Form):
|
||||
within_stage_delay = forms.IntegerField(
|
||||
min_value=0,
|
||||
initial=10,
|
||||
label="Within Stage Delay (minutes)",
|
||||
help_text="Delay between operations within the same stage"
|
||||
)
|
||||
between_stage_delay = forms.IntegerField(
|
||||
min_value=0,
|
||||
initial=60,
|
||||
label="Between Stage Delay (minutes)",
|
||||
help_text="Delay between different stages"
|
||||
)
|
||||
|
||||
from django.shortcuts import render
|
||||
return render(request, 'admin/bulk_action_form.html', {
|
||||
'title': 'Update Automation Delays',
|
||||
'queryset': queryset,
|
||||
'form': DelayForm(),
|
||||
'action': 'bulk_update_delays',
|
||||
})
|
||||
bulk_update_delays.short_description = 'Update delay settings'
|
||||
|
||||
|
||||
class AutomationRunResource(resources.ModelResource):
|
||||
"""Resource class for exporting Automation Runs"""
|
||||
class Meta:
|
||||
model = AutomationRun
|
||||
fields = ('id', 'run_id', 'site__domain', 'status', 'current_stage',
|
||||
'started_at', 'completed_at', 'created_at')
|
||||
export_order = fields
|
||||
|
||||
|
||||
@admin.register(AutomationRun)
|
||||
class AutomationRunAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
class AutomationRunAdmin(ExportMixin, AccountAdminMixin, Igny8ModelAdmin):
|
||||
resource_class = AutomationRunResource
|
||||
list_display = ('run_id', 'site', 'status', 'current_stage', 'started_at', 'completed_at')
|
||||
list_filter = ('status', 'current_stage')
|
||||
search_fields = ('run_id', 'site__domain')
|
||||
actions = [
|
||||
'bulk_retry_failed',
|
||||
'bulk_cancel_running',
|
||||
'bulk_delete_old_runs',
|
||||
]
|
||||
|
||||
def bulk_retry_failed(self, request, queryset):
|
||||
"""Retry failed automation runs"""
|
||||
failed_runs = queryset.filter(status='failed')
|
||||
count = failed_runs.update(status='pending', current_stage='keyword_research')
|
||||
self.message_user(request, f'{count} failed run(s) marked for retry.', messages.SUCCESS)
|
||||
bulk_retry_failed.short_description = 'Retry failed runs'
|
||||
|
||||
def bulk_cancel_running(self, request, queryset):
|
||||
"""Cancel running automation runs"""
|
||||
running = queryset.filter(status__in=['pending', 'running'])
|
||||
count = running.update(status='failed')
|
||||
self.message_user(request, f'{count} running automation(s) cancelled.', messages.SUCCESS)
|
||||
bulk_cancel_running.short_description = 'Cancel running automations'
|
||||
|
||||
def bulk_delete_old_runs(self, request, queryset):
|
||||
"""Delete automation runs older than 30 days"""
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
|
||||
cutoff_date = timezone.now() - timedelta(days=30)
|
||||
old_runs = queryset.filter(created_at__lt=cutoff_date)
|
||||
count = old_runs.count()
|
||||
old_runs.delete()
|
||||
self.message_user(request, f'{count} old automation run(s) deleted (older than 30 days).', messages.SUCCESS)
|
||||
bulk_delete_old_runs.short_description = 'Delete old runs (>30 days)'
|
||||
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 5.2.9 on 2025-12-20 15:13
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('automation', '0004_add_pause_resume_cancel_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='automationconfig',
|
||||
name='stage_1_batch_size',
|
||||
field=models.IntegerField(default=50, help_text='Keywords per batch'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,22 @@
|
||||
# Generated migration for adding initial_snapshot field to AutomationRun
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('automation', '0005_add_default_image_service'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='automationrun',
|
||||
name='initial_snapshot',
|
||||
field=models.JSONField(
|
||||
blank=True,
|
||||
default=dict,
|
||||
help_text='Snapshot of initial queue sizes: {stage_1_initial, stage_2_initial, ..., total_initial_items}'
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -24,7 +24,7 @@ class AutomationConfig(models.Model):
|
||||
scheduled_time = models.TimeField(default='02:00', help_text="Time to run (e.g., 02:00)")
|
||||
|
||||
# Batch sizes per stage
|
||||
stage_1_batch_size = models.IntegerField(default=20, help_text="Keywords per batch")
|
||||
stage_1_batch_size = models.IntegerField(default=50, help_text="Keywords per batch")
|
||||
stage_2_batch_size = models.IntegerField(default=1, help_text="Clusters at a time")
|
||||
stage_3_batch_size = models.IntegerField(default=20, help_text="Ideas per batch")
|
||||
stage_4_batch_size = models.IntegerField(default=1, help_text="Tasks - sequential")
|
||||
@@ -88,6 +88,13 @@ class AutomationRun(models.Model):
|
||||
|
||||
total_credits_used = models.IntegerField(default=0)
|
||||
|
||||
# Initial queue snapshot - captured at run start for accurate progress tracking
|
||||
initial_snapshot = models.JSONField(
|
||||
default=dict,
|
||||
blank=True,
|
||||
help_text="Snapshot of initial queue sizes: {stage_1_initial, stage_2_initial, ..., total_initial_items}"
|
||||
)
|
||||
|
||||
# JSON results per stage
|
||||
stage_1_result = models.JSONField(null=True, blank=True, help_text="{keywords_processed, clusters_created, batches}")
|
||||
stage_2_result = models.JSONField(null=True, blank=True, help_text="{clusters_processed, ideas_created}")
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -82,7 +82,7 @@ class AutomationViewSet(viewsets.ViewSet):
|
||||
"is_enabled": true,
|
||||
"frequency": "daily",
|
||||
"scheduled_time": "02:00",
|
||||
"stage_1_batch_size": 20,
|
||||
"stage_1_batch_size": 50,
|
||||
...
|
||||
}
|
||||
"""
|
||||
@@ -387,16 +387,17 @@ class AutomationViewSet(viewsets.ViewSet):
|
||||
|
||||
return counts, total
|
||||
|
||||
# Stage 1: Keywords pending clustering (keep previous "pending" semantics but also return status breakdown)
|
||||
# Stage 1: Keywords pending clustering
|
||||
stage_1_counts, stage_1_total = _counts_by_status(
|
||||
Keywords,
|
||||
extra_filter={'disabled': False}
|
||||
)
|
||||
# pending definition used by the UI previously (new & not clustered)
|
||||
# FIXED: Stage 1 pending = all keywords with status='new' (ready for clustering)
|
||||
# This should match the "New" count shown in Keywords metric card
|
||||
# Previously filtered by cluster__isnull=True which caused mismatch
|
||||
stage_1_pending = Keywords.objects.filter(
|
||||
site=site,
|
||||
status='new',
|
||||
cluster__isnull=True,
|
||||
disabled=False
|
||||
).count()
|
||||
|
||||
@@ -714,3 +715,237 @@ class AutomationViewSet(viewsets.ViewSet):
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
)
|
||||
|
||||
@extend_schema(tags=['Automation'])
|
||||
@action(detail=False, methods=['get'], url_path='run_progress')
|
||||
def run_progress(self, request):
|
||||
"""
|
||||
GET /api/v1/automation/run_progress/?site_id=123&run_id=abc
|
||||
|
||||
Unified endpoint for ALL run progress data - global + per-stage.
|
||||
Replaces multiple separate API calls with single comprehensive response.
|
||||
|
||||
Response includes:
|
||||
- run: Current run status and metadata
|
||||
- global_progress: Overall pipeline progress percentage
|
||||
- stages: Per-stage progress with input/output/processed counts
|
||||
- metrics: Credits used, duration, errors
|
||||
"""
|
||||
site_id = request.query_params.get('site_id')
|
||||
run_id = request.query_params.get('run_id')
|
||||
|
||||
if not site_id:
|
||||
return Response(
|
||||
{'error': 'site_id required'},
|
||||
status=status.HTTP_400_BAD_REQUEST
|
||||
)
|
||||
|
||||
try:
|
||||
site = get_object_or_404(Site, id=site_id, account=request.user.account)
|
||||
|
||||
# If no run_id, get current run
|
||||
if run_id:
|
||||
run = AutomationRun.objects.get(run_id=run_id, site=site)
|
||||
else:
|
||||
run = AutomationRun.objects.filter(
|
||||
site=site,
|
||||
status__in=['running', 'paused']
|
||||
).order_by('-started_at').first()
|
||||
|
||||
if not run:
|
||||
return Response({
|
||||
'run': None,
|
||||
'global_progress': None,
|
||||
'stages': [],
|
||||
'metrics': None
|
||||
})
|
||||
|
||||
# Build unified response
|
||||
response = self._build_run_progress_response(site, run)
|
||||
return Response(response)
|
||||
|
||||
except AutomationRun.DoesNotExist:
|
||||
return Response(
|
||||
{'error': 'Run not found'},
|
||||
status=status.HTTP_404_NOT_FOUND
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{'error': str(e)},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
)
|
||||
|
||||
def _build_run_progress_response(self, site, run):
|
||||
"""Build comprehensive progress response for a run"""
|
||||
from igny8_core.business.planning.models import Keywords, Clusters, ContentIdeas
|
||||
from igny8_core.business.content.models import Tasks, Content, Images
|
||||
from django.db.models import Count
|
||||
from django.utils import timezone
|
||||
|
||||
initial_snapshot = run.initial_snapshot or {}
|
||||
|
||||
# Helper to get processed count from result
|
||||
def get_processed(result, key):
|
||||
if not result:
|
||||
return 0
|
||||
return result.get(key, 0)
|
||||
|
||||
# Helper to get output count from result
|
||||
def get_output(result, key):
|
||||
if not result:
|
||||
return 0
|
||||
return result.get(key, 0)
|
||||
|
||||
# Stage-specific key mapping for processed counts
|
||||
processed_keys = {
|
||||
1: 'keywords_processed',
|
||||
2: 'clusters_processed',
|
||||
3: 'ideas_processed',
|
||||
4: 'tasks_processed',
|
||||
5: 'content_processed',
|
||||
6: 'images_processed',
|
||||
7: 'ready_for_review'
|
||||
}
|
||||
|
||||
# Stage-specific key mapping for output counts
|
||||
output_keys = {
|
||||
1: 'clusters_created',
|
||||
2: 'ideas_created',
|
||||
3: 'tasks_created',
|
||||
4: 'content_created',
|
||||
5: 'prompts_created',
|
||||
6: 'images_generated',
|
||||
7: 'ready_for_review'
|
||||
}
|
||||
|
||||
# Build stages array
|
||||
stages = []
|
||||
total_processed = 0
|
||||
total_initial = initial_snapshot.get('total_initial_items', 0)
|
||||
|
||||
stage_names = {
|
||||
1: 'Keywords → Clusters',
|
||||
2: 'Clusters → Ideas',
|
||||
3: 'Ideas → Tasks',
|
||||
4: 'Tasks → Content',
|
||||
5: 'Content → Image Prompts',
|
||||
6: 'Image Prompts → Images',
|
||||
7: 'Manual Review Gate'
|
||||
}
|
||||
|
||||
stage_types = {
|
||||
1: 'AI', 2: 'AI', 3: 'Local', 4: 'AI', 5: 'AI', 6: 'AI', 7: 'Manual'
|
||||
}
|
||||
|
||||
for stage_num in range(1, 8):
|
||||
result = getattr(run, f'stage_{stage_num}_result', None)
|
||||
initial_count = initial_snapshot.get(f'stage_{stage_num}_initial', 0)
|
||||
processed = get_processed(result, processed_keys[stage_num])
|
||||
output = get_output(result, output_keys[stage_num])
|
||||
|
||||
total_processed += processed
|
||||
|
||||
# Determine stage status
|
||||
if run.current_stage > stage_num:
|
||||
stage_status = 'completed'
|
||||
elif run.current_stage == stage_num:
|
||||
stage_status = 'active'
|
||||
else:
|
||||
stage_status = 'pending'
|
||||
|
||||
# Calculate progress percentage for this stage
|
||||
progress = 0
|
||||
if initial_count > 0:
|
||||
progress = round((processed / initial_count) * 100)
|
||||
elif run.current_stage > stage_num:
|
||||
progress = 100
|
||||
|
||||
stage_data = {
|
||||
'number': stage_num,
|
||||
'name': stage_names[stage_num],
|
||||
'type': stage_types[stage_num],
|
||||
'status': stage_status,
|
||||
'input_count': initial_count,
|
||||
'output_count': output,
|
||||
'processed_count': processed,
|
||||
'progress_percentage': min(progress, 100),
|
||||
'credits_used': result.get('credits_used', 0) if result else 0,
|
||||
'time_elapsed': result.get('time_elapsed', '') if result else '',
|
||||
}
|
||||
|
||||
# Add currently_processing for active stage
|
||||
if stage_status == 'active':
|
||||
try:
|
||||
service = AutomationService.from_run_id(run.run_id)
|
||||
processing_state = service.get_current_processing_state()
|
||||
if processing_state:
|
||||
stage_data['currently_processing'] = processing_state.get('currently_processing', [])
|
||||
stage_data['up_next'] = processing_state.get('up_next', [])
|
||||
stage_data['remaining_count'] = processing_state.get('remaining_count', 0)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
stages.append(stage_data)
|
||||
|
||||
# Calculate global progress
|
||||
# Stages 1-6 are automation stages, Stage 7 is manual review (not counted)
|
||||
# Progress = weighted average of stages 1-6 completion
|
||||
global_percentage = 0
|
||||
if run.status == 'completed':
|
||||
# If run is completed (after Stage 6), show 100%
|
||||
global_percentage = 100
|
||||
elif run.status in ('cancelled', 'failed'):
|
||||
# Keep current progress for cancelled/failed
|
||||
if total_initial > 0:
|
||||
global_percentage = round((total_processed / total_initial) * 100)
|
||||
else:
|
||||
# Calculate based on completed stages (1-6 only)
|
||||
# Each of the 6 automation stages contributes ~16.67% to total
|
||||
completed_stages = min(max(run.current_stage - 1, 0), 6)
|
||||
stage_weight = 100 / 6 # Each stage is ~16.67%
|
||||
|
||||
# Base progress from completed stages
|
||||
base_progress = completed_stages * stage_weight
|
||||
|
||||
# Add partial progress from current stage
|
||||
current_stage_progress = 0
|
||||
if run.current_stage <= 6:
|
||||
current_result = getattr(run, f'stage_{run.current_stage}_result', None)
|
||||
current_initial = initial_snapshot.get(f'stage_{run.current_stage}_initial', 0)
|
||||
if current_initial > 0 and current_result:
|
||||
processed_key = processed_keys.get(run.current_stage, '')
|
||||
current_processed = current_result.get(processed_key, 0)
|
||||
current_stage_progress = (current_processed / current_initial) * stage_weight
|
||||
|
||||
global_percentage = round(base_progress + current_stage_progress)
|
||||
|
||||
# Calculate duration
|
||||
duration_seconds = 0
|
||||
if run.started_at:
|
||||
end_time = run.completed_at or timezone.now()
|
||||
duration_seconds = int((end_time - run.started_at).total_seconds())
|
||||
|
||||
return {
|
||||
'run': {
|
||||
'run_id': run.run_id,
|
||||
'status': run.status,
|
||||
'current_stage': run.current_stage,
|
||||
'trigger_type': run.trigger_type,
|
||||
'started_at': run.started_at,
|
||||
'completed_at': run.completed_at,
|
||||
'paused_at': run.paused_at,
|
||||
},
|
||||
'global_progress': {
|
||||
'total_items': total_initial,
|
||||
'completed_items': total_processed,
|
||||
'percentage': min(global_percentage, 100),
|
||||
'current_stage': run.current_stage,
|
||||
'total_stages': 7
|
||||
},
|
||||
'stages': stages,
|
||||
'metrics': {
|
||||
'credits_used': run.total_credits_used,
|
||||
'duration_seconds': duration_seconds,
|
||||
'errors': []
|
||||
},
|
||||
'initial_snapshot': initial_snapshot
|
||||
}
|
||||
|
||||
@@ -1,168 +1,71 @@
|
||||
"""
|
||||
Billing Business Logic Admin
|
||||
|
||||
NOTE: Most billing models are registered in modules/billing/admin.py
|
||||
with full workflow functionality. This file contains legacy/minimal registrations.
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.contrib import messages
|
||||
from django.utils.html import format_html
|
||||
from igny8_core.admin.base import AccountAdminMixin
|
||||
from .models import (
|
||||
CreditCostConfig,
|
||||
AccountPaymentMethod,
|
||||
Invoice,
|
||||
Payment,
|
||||
CreditPackage,
|
||||
PaymentMethodConfig,
|
||||
)
|
||||
from unfold.admin import ModelAdmin
|
||||
from igny8_core.admin.base import AccountAdminMixin, Igny8ModelAdmin
|
||||
# NOTE: Most billing models are now registered in modules/billing/admin.py
|
||||
# This file is kept for reference but all registrations are commented out
|
||||
# to avoid AlreadyRegistered errors
|
||||
|
||||
# from .models import (
|
||||
# CreditCostConfig,
|
||||
# AccountPaymentMethod,
|
||||
# Invoice,
|
||||
# Payment,
|
||||
# CreditPackage,
|
||||
# PaymentMethodConfig,
|
||||
# )
|
||||
|
||||
|
||||
@admin.register(CreditCostConfig)
|
||||
class CreditCostConfigAdmin(admin.ModelAdmin):
|
||||
list_display = [
|
||||
'operation_type',
|
||||
'display_name',
|
||||
'credits_cost_display',
|
||||
'unit',
|
||||
'is_active',
|
||||
'cost_change_indicator',
|
||||
'updated_at',
|
||||
'updated_by'
|
||||
]
|
||||
|
||||
list_filter = ['is_active', 'unit', 'updated_at']
|
||||
search_fields = ['operation_type', 'display_name', 'description']
|
||||
|
||||
fieldsets = (
|
||||
('Operation', {
|
||||
'fields': ('operation_type', 'display_name', 'description')
|
||||
}),
|
||||
('Cost Configuration', {
|
||||
'fields': ('credits_cost', 'unit', 'is_active')
|
||||
}),
|
||||
('Audit Trail', {
|
||||
'fields': ('previous_cost', 'updated_by', 'created_at', 'updated_at'),
|
||||
'classes': ('collapse',)
|
||||
}),
|
||||
)
|
||||
|
||||
readonly_fields = ['created_at', 'updated_at', 'previous_cost']
|
||||
|
||||
def credits_cost_display(self, obj):
|
||||
"""Show cost with color coding"""
|
||||
if obj.credits_cost >= 20:
|
||||
color = 'red'
|
||||
elif obj.credits_cost >= 10:
|
||||
color = 'orange'
|
||||
else:
|
||||
color = 'green'
|
||||
return format_html(
|
||||
'<span style="color: {}; font-weight: bold;">{} credits</span>',
|
||||
color,
|
||||
obj.credits_cost
|
||||
)
|
||||
credits_cost_display.short_description = 'Cost'
|
||||
|
||||
def cost_change_indicator(self, obj):
|
||||
"""Show if cost changed recently"""
|
||||
if obj.previous_cost is not None:
|
||||
if obj.credits_cost > obj.previous_cost:
|
||||
icon = '📈' # Increased
|
||||
color = 'red'
|
||||
elif obj.credits_cost < obj.previous_cost:
|
||||
icon = '📉' # Decreased
|
||||
color = 'green'
|
||||
else:
|
||||
icon = '➡️' # Same
|
||||
color = 'gray'
|
||||
|
||||
return format_html(
|
||||
'{} <span style="color: {};">({} → {})</span>',
|
||||
icon,
|
||||
color,
|
||||
obj.previous_cost,
|
||||
obj.credits_cost
|
||||
)
|
||||
return '—'
|
||||
cost_change_indicator.short_description = 'Recent Change'
|
||||
|
||||
def save_model(self, request, obj, form, change):
|
||||
"""Track who made the change"""
|
||||
obj.updated_by = request.user
|
||||
super().save_model(request, obj, form, change)
|
||||
# CreditCostConfig - DUPLICATE - Registered in modules/billing/admin.py with better features
|
||||
# Commenting out to avoid conflicts
|
||||
# @admin.register(CreditCostConfig)
|
||||
# class CreditCostConfigAdmin(admin.ModelAdmin):
|
||||
# ...existing implementation...
|
||||
|
||||
|
||||
@admin.register(Invoice)
|
||||
class InvoiceAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
list_display = [
|
||||
'invoice_number',
|
||||
'account',
|
||||
'status',
|
||||
'total',
|
||||
'currency',
|
||||
'invoice_date',
|
||||
'due_date',
|
||||
'subscription',
|
||||
]
|
||||
list_filter = ['status', 'currency', 'invoice_date', 'account']
|
||||
search_fields = ['invoice_number', 'account__name', 'subscription__id']
|
||||
readonly_fields = ['created_at', 'updated_at']
|
||||
# Invoice - DUPLICATE - Registered in modules/billing/admin.py
|
||||
# Commenting out to avoid conflicts
|
||||
# @admin.register(Invoice)
|
||||
# class InvoiceAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
# ...existing implementation...
|
||||
|
||||
|
||||
@admin.register(Payment)
|
||||
class PaymentAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
list_display = [
|
||||
'id',
|
||||
'invoice',
|
||||
'account',
|
||||
'payment_method',
|
||||
'status',
|
||||
'amount',
|
||||
'currency',
|
||||
'processed_at',
|
||||
]
|
||||
list_filter = ['status', 'payment_method', 'currency', 'created_at']
|
||||
search_fields = ['invoice__invoice_number', 'account__name', 'stripe_payment_intent_id', 'paypal_order_id']
|
||||
readonly_fields = ['created_at', 'updated_at']
|
||||
# Payment - DUPLICATE - Registered in modules/billing/admin.py with full approval workflow
|
||||
# Commenting out to avoid conflicts
|
||||
# @admin.register(Payment)
|
||||
# class PaymentAdmin(AccountAdminMixin, admin.ModelAdmin):
|
||||
# ...existing implementation...
|
||||
|
||||
|
||||
@admin.register(CreditPackage)
|
||||
class CreditPackageAdmin(admin.ModelAdmin):
|
||||
list_display = ['name', 'slug', 'credits', 'price', 'discount_percentage', 'is_active', 'is_featured', 'sort_order']
|
||||
list_filter = ['is_active', 'is_featured']
|
||||
search_fields = ['name', 'slug']
|
||||
readonly_fields = ['created_at', 'updated_at']
|
||||
# CreditPackage - DUPLICATE - Registered in modules/billing/admin.py
|
||||
# Commenting out to avoid conflicts
|
||||
# @admin.register(CreditPackage)
|
||||
# class CreditPackageAdmin(admin.ModelAdmin):
|
||||
# ...existing implementation...
|
||||
|
||||
|
||||
@admin.register(PaymentMethodConfig)
|
||||
class PaymentMethodConfigAdmin(admin.ModelAdmin):
|
||||
list_display = ['country_code', 'payment_method', 'is_enabled', 'display_name', 'sort_order']
|
||||
list_filter = ['payment_method', 'is_enabled', 'country_code']
|
||||
search_fields = ['country_code', 'display_name', 'payment_method']
|
||||
readonly_fields = ['created_at', 'updated_at']
|
||||
# AccountPaymentMethod - DUPLICATE - Registered in modules/billing/admin.py with AccountAdminMixin
|
||||
# Commenting out to avoid AlreadyRegistered error
|
||||
# The version in modules/billing/admin.py is preferred as it includes AccountAdminMixin
|
||||
|
||||
|
||||
@admin.register(AccountPaymentMethod)
|
||||
class AccountPaymentMethodAdmin(admin.ModelAdmin):
|
||||
list_display = [
|
||||
'display_name',
|
||||
'type',
|
||||
'account',
|
||||
'is_default',
|
||||
'is_enabled',
|
||||
'country_code',
|
||||
'is_verified',
|
||||
'updated_at',
|
||||
]
|
||||
list_filter = ['type', 'is_default', 'is_enabled', 'is_verified', 'country_code']
|
||||
search_fields = ['display_name', 'account__name', 'account__id']
|
||||
readonly_fields = ['created_at', 'updated_at']
|
||||
fieldsets = (
|
||||
('Payment Method', {
|
||||
'fields': ('account', 'type', 'display_name', 'is_default', 'is_enabled', 'is_verified', 'country_code')
|
||||
}),
|
||||
('Instructions / Metadata', {
|
||||
'fields': ('instructions', 'metadata')
|
||||
}),
|
||||
('Timestamps', {
|
||||
'fields': ('created_at', 'updated_at'),
|
||||
'classes': ('collapse',)
|
||||
}),
|
||||
)
|
||||
# from import_export.admin import ExportMixin
|
||||
# from import_export import resources
|
||||
#
|
||||
# class AccountPaymentMethodResource(resources.ModelResource):
|
||||
# """Resource class for exporting Account Payment Methods"""
|
||||
# class Meta:
|
||||
# model = AccountPaymentMethod
|
||||
# fields = ('id', 'display_name', 'type', 'account__name', 'is_default',
|
||||
# 'is_enabled', 'is_verified', 'country_code', 'created_at')
|
||||
# export_order = fields
|
||||
#
|
||||
# @admin.register(AccountPaymentMethod)
|
||||
# class AccountPaymentMethodAdmin(ExportMixin, Igny8ModelAdmin):
|
||||
# ... (see modules/billing/admin.py for active registration)
|
||||
1062
backend/igny8_core/business/billing/billing_views.py
Normal file
1062
backend/igny8_core/business/billing/billing_views.py
Normal file
File diff suppressed because it is too large
Load Diff
37
backend/igny8_core/business/billing/config.py
Normal file
37
backend/igny8_core/business/billing/config.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""
|
||||
Billing configuration settings
|
||||
"""
|
||||
from django.conf import settings
|
||||
|
||||
# Payment Gateway Mode
|
||||
PAYMENT_GATEWAY_MODE = getattr(settings, 'PAYMENT_GATEWAY_MODE', 'sandbox') # 'sandbox' or 'production'
|
||||
|
||||
# Auto-approve payments (development only)
|
||||
AUTO_APPROVE_PAYMENTS = getattr(settings, 'AUTO_APPROVE_PAYMENTS', False)
|
||||
|
||||
# Invoice due date offset (days)
|
||||
INVOICE_DUE_DATE_OFFSET = getattr(settings, 'INVOICE_DUE_DATE_OFFSET', 7)
|
||||
|
||||
# Grace period for payment (days)
|
||||
PAYMENT_GRACE_PERIOD = getattr(settings, 'PAYMENT_GRACE_PERIOD', 7)
|
||||
|
||||
# Maximum payment retry attempts
|
||||
MAX_PAYMENT_RETRIES = getattr(settings, 'MAX_PAYMENT_RETRIES', 3)
|
||||
|
||||
# Subscription renewal advance notice (days)
|
||||
SUBSCRIPTION_RENEWAL_NOTICE_DAYS = getattr(settings, 'SUBSCRIPTION_RENEWAL_NOTICE_DAYS', 7)
|
||||
|
||||
# Default subscription plan slugs
|
||||
DEFAULT_PLAN_SLUGS = {
|
||||
'free': getattr(settings, 'FREE_PLAN_SLUG', 'basic-free'),
|
||||
'starter': getattr(settings, 'STARTER_PLAN_SLUG', 'starter-10'),
|
||||
'professional': getattr(settings, 'PROFESSIONAL_PLAN_SLUG', 'professional-100'),
|
||||
'enterprise': getattr(settings, 'ENTERPRISE_PLAN_SLUG', 'enterprise-unlimited'),
|
||||
}
|
||||
|
||||
# Credit package slugs
|
||||
DEFAULT_CREDIT_PACKAGES = {
|
||||
'small': getattr(settings, 'SMALL_CREDIT_PACKAGE_SLUG', 'credits-100'),
|
||||
'medium': getattr(settings, 'MEDIUM_CREDIT_PACKAGE_SLUG', 'credits-500'),
|
||||
'large': getattr(settings, 'LARGE_CREDIT_PACKAGE_SLUG', 'credits-1000'),
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
"""
|
||||
Management command to backfill usage tracking for existing content.
|
||||
Usage: python manage.py backfill_usage [account_id]
|
||||
|
||||
NOTE: Since the simplification of limits (Jan 2026), this command only
|
||||
tracks Ahrefs queries. All other usage is tracked via CreditUsageLog.
|
||||
"""
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.apps import apps
|
||||
from django.db import transaction
|
||||
from igny8_core.auth.models import Account
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Backfill usage tracking for existing content (Ahrefs queries only)'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
'account_id',
|
||||
nargs='?',
|
||||
type=int,
|
||||
help='Account ID to backfill (optional, processes all accounts if not provided)'
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
account_id = options.get('account_id')
|
||||
|
||||
if account_id:
|
||||
accounts = Account.objects.filter(id=account_id).select_related('plan')
|
||||
if not accounts.exists():
|
||||
self.stdout.write(self.style.ERROR(f'Account {account_id} not found'))
|
||||
return
|
||||
else:
|
||||
accounts = Account.objects.filter(plan__isnull=False).select_related('plan')
|
||||
|
||||
total_accounts = accounts.count()
|
||||
self.stdout.write(f'Processing {total_accounts} account(s)...\n')
|
||||
|
||||
for account in accounts:
|
||||
self.stdout.write('=' * 60)
|
||||
self.stdout.write(f'Account: {account.name} (ID: {account.id})')
|
||||
self.stdout.write(f'Plan: {account.plan.name if account.plan else "No Plan"}')
|
||||
self.stdout.write('=' * 60)
|
||||
|
||||
# Ahrefs queries are tracked in CreditUsageLog with operation_type='ahrefs_query'
|
||||
# We don't backfill these as they should be tracked in real-time going forward
|
||||
# This command is primarily for verification
|
||||
|
||||
self.stdout.write(f'Ahrefs queries used this month: {account.usage_ahrefs_queries}')
|
||||
self.stdout.write(self.style.SUCCESS('\n✅ Verified usage tracking'))
|
||||
self.stdout.write(f' usage_ahrefs_queries: {account.usage_ahrefs_queries}\n')
|
||||
|
||||
self.stdout.write('=' * 60)
|
||||
self.stdout.write(self.style.SUCCESS('✅ Verification complete!'))
|
||||
self.stdout.write('=' * 60)
|
||||
@@ -0,0 +1,35 @@
|
||||
# Generated migration for Payment status simplification
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def migrate_payment_statuses(apps, schema_editor):
|
||||
"""
|
||||
Migrate old payment statuses to new simplified statuses:
|
||||
- pending, processing, completed, cancelled → map to new statuses
|
||||
"""
|
||||
Payment = apps.get_model('billing', 'Payment')
|
||||
|
||||
# Map old statuses to new statuses
|
||||
status_mapping = {
|
||||
'pending': 'pending_approval', # Treat as pending approval
|
||||
'processing': 'pending_approval', # Treat as pending approval
|
||||
'completed': 'succeeded', # completed = succeeded
|
||||
'cancelled': 'failed', # cancelled = failed
|
||||
# Keep existing: pending_approval, succeeded, failed, refunded
|
||||
}
|
||||
|
||||
for old_status, new_status in status_mapping.items():
|
||||
Payment.objects.filter(status=old_status).update(status=new_status)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('billing', '0006_auto_20251209_payment_workflow'), # Adjust to your latest migration
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Update status choices (Django will handle this in model)
|
||||
migrations.RunPython(migrate_payment_statuses, reverse_code=migrations.RunPython.noop),
|
||||
]
|
||||
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Migration: Simplify payment methods to global (remove country-specific filtering)
|
||||
|
||||
This migration:
|
||||
1. Updates existing PaymentMethodConfig records to use country_code='*' (global)
|
||||
2. Removes duplicate payment methods per country, keeping only one global config per method
|
||||
"""
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def migrate_to_global_payment_methods(apps, schema_editor):
|
||||
"""
|
||||
Convert country-specific payment methods to global.
|
||||
For each payment_method type, keep only one configuration with country_code='*'
|
||||
"""
|
||||
PaymentMethodConfig = apps.get_model('billing', 'PaymentMethodConfig')
|
||||
|
||||
# Get all unique payment methods
|
||||
payment_methods = PaymentMethodConfig.objects.values_list('payment_method', flat=True).distinct()
|
||||
|
||||
for method in payment_methods:
|
||||
# Get all configs for this payment method
|
||||
configs = PaymentMethodConfig.objects.filter(payment_method=method).order_by('sort_order', 'id')
|
||||
|
||||
if configs.exists():
|
||||
# Keep the first one and make it global
|
||||
first_config = configs.first()
|
||||
first_config.country_code = '*'
|
||||
first_config.save(update_fields=['country_code'])
|
||||
|
||||
# Delete duplicates (other country-specific versions)
|
||||
configs.exclude(id=first_config.id).delete()
|
||||
|
||||
|
||||
def reverse_migration(apps, schema_editor):
|
||||
"""Reverse is a no-op - can't restore original country codes"""
|
||||
pass
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('billing', '0007_simplify_payment_statuses'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_to_global_payment_methods, reverse_migration),
|
||||
]
|
||||
@@ -0,0 +1,359 @@
|
||||
"""
|
||||
Migration: Seed AIModelConfig from constants.py
|
||||
|
||||
This migration populates the AIModelConfig table with the current models
|
||||
from ai/constants.py, enabling database-driven model configuration.
|
||||
"""
|
||||
from decimal import Decimal
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def seed_ai_models(apps, schema_editor):
    """
    Seed AIModelConfig with models from constants.py

    Creates (or refreshes, keyed on model_name) one AIModelConfig row per
    known model across the OpenAI, Runware, Bria and Anthropic providers.
    Idempotent: re-running updates existing rows instead of duplicating.

    The original version repeated the same capability/activity flags in 18
    literal dicts; the private builders below hold the shared fields once.
    """
    AIModelConfig = apps.get_model('billing', 'AIModelConfig')

    def _text_model(model_name, display_name, input_cost, output_cost,
                    context_window, max_output_tokens, sort_order, description,
                    provider='openai', is_default=False):
        # Build one text-model row; every seeded text model currently
        # supports JSON mode, vision and function calling, and is active.
        return {
            'model_name': model_name,
            'display_name': display_name,
            'model_type': 'text',
            'provider': provider,
            'input_cost_per_1m': Decimal(input_cost),
            'output_cost_per_1m': Decimal(output_cost),
            'context_window': context_window,
            'max_output_tokens': max_output_tokens,
            'supports_json_mode': True,
            'supports_vision': True,
            'supports_function_calling': True,
            'is_active': True,
            'is_default': is_default,
            'sort_order': sort_order,
            'description': description,
        }

    def _image_model(model_name, display_name, cost_per_image, valid_sizes,
                     sort_order, description, provider='openai', is_default=False):
        # Build one image-model row; image models are priced per image and
        # expose none of the text capabilities.
        return {
            'model_name': model_name,
            'display_name': display_name,
            'model_type': 'image',
            'provider': provider,
            'cost_per_image': Decimal(cost_per_image),
            'valid_sizes': valid_sizes,
            'supports_json_mode': False,
            'supports_vision': False,
            'supports_function_calling': False,
            'is_active': True,
            'is_default': is_default,
            'sort_order': sort_order,
            'description': description,
        }

    all_models = [
        # --- OpenAI text models (from MODEL_RATES) ---
        _text_model('gpt-4.1', 'GPT-4.1 - Balanced Performance',
                    '2.00', '8.00', 128000, 16384, 1,
                    'Default model - good balance of cost and capability',
                    is_default=True),  # default text model
        _text_model('gpt-4o-mini', 'GPT-4o Mini - Fast & Affordable',
                    '0.15', '0.60', 128000, 16384, 2,
                    'Best for high-volume tasks where cost matters'),
        _text_model('gpt-4o', 'GPT-4o - High Quality',
                    '2.50', '10.00', 128000, 16384, 3,
                    'Premium model for complex tasks requiring best quality'),
        _text_model('gpt-5.1', 'GPT-5.1 - Latest Generation',
                    '1.25', '10.00', 200000, 32768, 4,
                    'Next-gen model with improved reasoning'),
        _text_model('gpt-5.2', 'GPT-5.2 - Most Advanced',
                    '1.75', '14.00', 200000, 65536, 5,
                    'Most capable model for enterprise-grade tasks'),

        # --- OpenAI image models (from IMAGE_MODEL_RATES) ---
        _image_model('dall-e-3', 'DALL-E 3 - Premium Images', '0.040',
                     ['1024x1024', '1024x1792', '1792x1024'], 1,
                     'Best quality image generation, good for hero images and marketing',
                     is_default=True),  # default image model
        _image_model('dall-e-2', 'DALL-E 2 - Standard Images', '0.020',
                     ['256x256', '512x512', '1024x1024'], 2,
                     'Lower cost option for bulk image generation'),
        _image_model('gpt-image-1', 'GPT Image 1 - Advanced', '0.042',
                     ['1024x1024', '1024x1792', '1792x1024'], 3,
                     'Advanced image model with enhanced capabilities'),
        _image_model('gpt-image-1-mini', 'GPT Image 1 Mini - Fast', '0.011',
                     ['1024x1024'], 4,
                     'Fastest and most affordable image model'),

        # --- Runware image models (from existing integration) ---
        _image_model('runware:100@1', 'Runware Standard', '0.008',
                     ['512x512', '768x768', '1024x1024'], 10,
                     'Runware image generation - most affordable',
                     provider='runware'),

        # --- Bria AI image models ---
        _image_model('bria-2.3', 'Bria 2.3 High Quality', '0.015',
                     ['512x512', '768x768', '1024x1024', '1024x1792', '1792x1024'], 11,
                     'Bria 2.3 - High quality image generation',
                     provider='bria'),
        _image_model('bria-2.3-fast', 'Bria 2.3 Fast', '0.010',
                     ['512x512', '768x768', '1024x1024'], 12,
                     'Bria 2.3 Fast - Quick generation, lower cost',
                     provider='bria'),
        _image_model('bria-2.2', 'Bria 2.2 Standard', '0.012',
                     ['512x512', '768x768', '1024x1024'], 13,
                     'Bria 2.2 - Standard image generation',
                     provider='bria'),

        # --- Anthropic Claude text models ---
        _text_model('claude-3-5-sonnet-20241022', 'Claude 3.5 Sonnet (Latest)',
                    '3.00', '15.00', 200000, 8192, 20,
                    'Claude 3.5 Sonnet - Best for most tasks, excellent reasoning',
                    provider='anthropic'),
        _text_model('claude-3-5-haiku-20241022', 'Claude 3.5 Haiku (Fast)',
                    '1.00', '5.00', 200000, 8192, 21,
                    'Claude 3.5 Haiku - Fast and affordable',
                    provider='anthropic'),
        _text_model('claude-3-opus-20240229', 'Claude 3 Opus',
                    '15.00', '75.00', 200000, 4096, 22,
                    'Claude 3 Opus - Most capable Claude model',
                    provider='anthropic'),
        _text_model('claude-3-sonnet-20240229', 'Claude 3 Sonnet',
                    '3.00', '15.00', 200000, 4096, 23,
                    'Claude 3 Sonnet - Balanced performance and cost',
                    provider='anthropic'),
        _text_model('claude-3-haiku-20240307', 'Claude 3 Haiku',
                    '0.25', '1.25', 200000, 4096, 24,
                    'Claude 3 Haiku - Most affordable Claude model',
                    provider='anthropic'),
    ]

    # Upsert keyed on model_name so re-runs refresh rather than duplicate.
    for model_data in all_models:
        AIModelConfig.objects.update_or_create(
            model_name=model_data['model_name'],
            defaults=model_data,
        )
|
||||
|
||||
|
||||
def reverse_migration(apps, schema_editor):
    """Remove seeded models"""
    AIModelConfig = apps.get_model('billing', 'AIModelConfig')

    # Delete exactly the rows this migration seeded, grouped by provider.
    seeded_models = (
        # OpenAI text
        ['gpt-4.1', 'gpt-4o-mini', 'gpt-4o', 'gpt-5.1', 'gpt-5.2']
        # OpenAI image
        + ['dall-e-3', 'dall-e-2', 'gpt-image-1', 'gpt-image-1-mini']
        # Runware
        + ['runware:100@1']
        # Bria
        + ['bria-2.3', 'bria-2.3-fast', 'bria-2.2']
        # Anthropic
        + ['claude-3-5-sonnet-20241022', 'claude-3-5-haiku-20241022',
           'claude-3-opus-20240229', 'claude-3-sonnet-20240229',
           'claude-3-haiku-20240307']
    )

    AIModelConfig.objects.filter(model_name__in=seeded_models).delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    """Seed the AIModelConfig table from the values in ai/constants.py."""

    # Must run after payment methods have been made global.
    dependencies = [
        ('billing', '0008_global_payment_methods'),
    ]

    operations = [
        # Data-only migration; reversing deletes the seeded model_name rows.
        migrations.RunPython(seed_ai_models, reverse_migration),
    ]
|
||||
@@ -6,6 +6,17 @@ from django.db import models
|
||||
from django.core.validators import MinValueValidator
|
||||
from django.conf import settings
|
||||
from igny8_core.auth.models import AccountBaseModel
|
||||
from simple_history.models import HistoricalRecords
|
||||
|
||||
|
||||
# Centralized payment method choices - single source of truth
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe (Credit/Debit Card)'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer (Manual)'),
|
||||
('local_wallet', 'Local Wallet (Manual)'),
|
||||
('manual', 'Manual Payment'),
|
||||
]
|
||||
|
||||
|
||||
class CreditTransaction(AccountBaseModel):
|
||||
@@ -23,11 +34,24 @@ class CreditTransaction(AccountBaseModel):
|
||||
balance_after = models.IntegerField(help_text="Credit balance after this transaction")
|
||||
description = models.CharField(max_length=255)
|
||||
metadata = models.JSONField(default=dict, help_text="Additional context (AI call details, etc.)")
|
||||
|
||||
# Payment FK - preferred over reference_id string
|
||||
payment = models.ForeignKey(
|
||||
'billing.Payment',
|
||||
on_delete=models.SET_NULL,
|
||||
null=True,
|
||||
blank=True,
|
||||
related_name='credit_transactions',
|
||||
help_text='Payment that triggered this credit transaction'
|
||||
)
|
||||
|
||||
# Deprecated: Use payment FK instead
|
||||
reference_id = models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
help_text="Optional reference (e.g., payment id, invoice id)"
|
||||
help_text="DEPRECATED: Use payment FK. Legacy reference (e.g., payment id, invoice id)"
|
||||
)
|
||||
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
|
||||
class Meta:
|
||||
@@ -51,7 +75,12 @@ class CreditUsageLog(AccountBaseModel):
|
||||
('idea_generation', 'Content Ideas Generation'),
|
||||
('content_generation', 'Content Generation'),
|
||||
('image_generation', 'Image Generation'),
|
||||
('image_prompt_extraction', 'Image Prompt Extraction'),
|
||||
('linking', 'Internal Linking'),
|
||||
('optimization', 'Content Optimization'),
|
||||
('reparse', 'Content Reparse'),
|
||||
('site_page_generation', 'Site Page Generation'),
|
||||
('site_structure_generation', 'Site Structure Generation'),
|
||||
('ideas', 'Content Ideas Generation'), # Legacy
|
||||
('content', 'Content Generation'), # Legacy
|
||||
('images', 'Image Generation'), # Legacy
|
||||
@@ -85,65 +114,53 @@ class CreditUsageLog(AccountBaseModel):
|
||||
|
||||
class CreditCostConfig(models.Model):
|
||||
"""
|
||||
Configurable credit costs per AI function
|
||||
Admin-editable alternative to hardcoded constants
|
||||
Fixed credit costs per operation type.
|
||||
|
||||
Per final-model-schemas.md:
|
||||
| Field | Type | Required | Notes |
|
||||
|-------|------|----------|-------|
|
||||
| operation_type | CharField(50) PK | Yes | Unique operation ID |
|
||||
| display_name | CharField(100) | Yes | Human-readable |
|
||||
| base_credits | IntegerField | Yes | Fixed credits per operation |
|
||||
| is_active | BooleanField | Yes | Enable/disable |
|
||||
| description | TextField | No | Admin notes |
|
||||
"""
|
||||
# Operation identification
|
||||
# Operation identification (Primary Key)
|
||||
operation_type = models.CharField(
|
||||
max_length=50,
|
||||
unique=True,
|
||||
choices=CreditUsageLog.OPERATION_TYPE_CHOICES,
|
||||
help_text="AI operation type"
|
||||
primary_key=True,
|
||||
help_text="Unique operation ID (e.g., 'article_generation', 'image_generation')"
|
||||
)
|
||||
|
||||
# Cost configuration
|
||||
credits_cost = models.IntegerField(
|
||||
# Human-readable name
|
||||
display_name = models.CharField(
|
||||
max_length=100,
|
||||
help_text="Human-readable name"
|
||||
)
|
||||
|
||||
# Fixed credits per operation
|
||||
base_credits = models.IntegerField(
|
||||
default=1,
|
||||
validators=[MinValueValidator(0)],
|
||||
help_text="Credits required for this operation"
|
||||
help_text="Fixed credits per operation"
|
||||
)
|
||||
|
||||
# Unit of measurement
|
||||
UNIT_CHOICES = [
|
||||
('per_request', 'Per Request'),
|
||||
('per_100_words', 'Per 100 Words'),
|
||||
('per_200_words', 'Per 200 Words'),
|
||||
('per_item', 'Per Item'),
|
||||
('per_image', 'Per Image'),
|
||||
]
|
||||
|
||||
unit = models.CharField(
|
||||
max_length=50,
|
||||
default='per_request',
|
||||
choices=UNIT_CHOICES,
|
||||
help_text="What the cost applies to"
|
||||
)
|
||||
|
||||
# Metadata
|
||||
display_name = models.CharField(max_length=100, help_text="Human-readable name")
|
||||
description = models.TextField(blank=True, help_text="What this operation does")
|
||||
|
||||
# Status
|
||||
is_active = models.BooleanField(default=True, help_text="Enable/disable this operation")
|
||||
|
||||
# Audit fields
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
updated_by = models.ForeignKey(
|
||||
settings.AUTH_USER_MODEL,
|
||||
null=True,
|
||||
blank=True,
|
||||
on_delete=models.SET_NULL,
|
||||
related_name='credit_cost_updates',
|
||||
help_text="Admin who last updated"
|
||||
is_active = models.BooleanField(
|
||||
default=True,
|
||||
help_text="Enable/disable this operation"
|
||||
)
|
||||
|
||||
# Change tracking
|
||||
previous_cost = models.IntegerField(
|
||||
null=True,
|
||||
# Admin notes
|
||||
description = models.TextField(
|
||||
blank=True,
|
||||
help_text="Cost before last update (for audit trail)"
|
||||
help_text="Admin notes about this operation"
|
||||
)
|
||||
|
||||
# History tracking
|
||||
history = HistoricalRecords()
|
||||
|
||||
class Meta:
|
||||
app_label = 'billing'
|
||||
db_table = 'igny8_credit_cost_config'
|
||||
@@ -152,18 +169,156 @@ class CreditCostConfig(models.Model):
|
||||
ordering = ['operation_type']
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.display_name} - {self.credits_cost} credits {self.unit}"
|
||||
return f"{self.display_name} - {self.base_credits} credits"
|
||||
|
||||
|
||||
class BillingConfiguration(models.Model):
|
||||
"""
|
||||
System-wide billing configuration (Singleton).
|
||||
Global settings for token-credit pricing.
|
||||
"""
|
||||
# Default token-to-credit ratio
|
||||
default_tokens_per_credit = models.IntegerField(
|
||||
default=100,
|
||||
validators=[MinValueValidator(1)],
|
||||
help_text="Default: How many tokens equal 1 credit (e.g., 100)"
|
||||
)
|
||||
|
||||
# Credit pricing
|
||||
default_credit_price_usd = models.DecimalField(
|
||||
max_digits=10,
|
||||
decimal_places=4,
|
||||
default=Decimal('0.01'),
|
||||
validators=[MinValueValidator(Decimal('0.0001'))],
|
||||
help_text="Default price per credit in USD"
|
||||
)
|
||||
|
||||
# Reporting settings
|
||||
enable_token_based_reporting = models.BooleanField(
|
||||
default=True,
|
||||
help_text="Show token metrics in all reports"
|
||||
)
|
||||
|
||||
# Rounding settings
|
||||
ROUNDING_CHOICES = [
|
||||
('up', 'Round Up'),
|
||||
('down', 'Round Down'),
|
||||
('nearest', 'Round to Nearest'),
|
||||
]
|
||||
|
||||
credit_rounding_mode = models.CharField(
|
||||
max_length=10,
|
||||
default='up',
|
||||
choices=ROUNDING_CHOICES,
|
||||
help_text="How to round fractional credits"
|
||||
)
|
||||
|
||||
# Audit fields
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
updated_by = models.ForeignKey(
|
||||
settings.AUTH_USER_MODEL,
|
||||
on_delete=models.SET_NULL,
|
||||
null=True,
|
||||
blank=True,
|
||||
help_text="Admin who last updated"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
app_label = 'billing'
|
||||
db_table = 'igny8_billing_configuration'
|
||||
verbose_name = 'Billing Configuration'
|
||||
verbose_name_plural = 'Billing Configuration'
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# Track cost changes
|
||||
if self.pk:
|
||||
try:
|
||||
old = CreditCostConfig.objects.get(pk=self.pk)
|
||||
if old.credits_cost != self.credits_cost:
|
||||
self.previous_cost = old.credits_cost
|
||||
except CreditCostConfig.DoesNotExist:
|
||||
pass
|
||||
"""Enforce singleton pattern"""
|
||||
self.pk = 1
|
||||
super().save(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def get_config(cls):
|
||||
"""Get or create the singleton config"""
|
||||
config, created = cls.objects.get_or_create(pk=1)
|
||||
return config
|
||||
|
||||
def __str__(self):
|
||||
return f"Billing Configuration (1 credit = {self.default_tokens_per_credit} tokens)"
|
||||
|
||||
|
||||
class PlanLimitUsage(AccountBaseModel):
    """
    Track monthly usage of plan limits (ideas, words, images, prompts)
    Resets at start of each billing period

    One row per (account, limit_type, period_start).  The plan's actual
    limit is not stored here; callers pass it into remaining_allowance()
    and percentage_used().
    """
    # Kinds of monthly allowances tracked against the account's plan.
    LIMIT_TYPE_CHOICES = [
        ('content_ideas', 'Content Ideas'),
        ('content_words', 'Content Words'),
        ('images_basic', 'Basic Images'),
        ('images_premium', 'Premium Images'),
        ('image_prompts', 'Image Prompts'),
    ]

    limit_type = models.CharField(
        max_length=50,
        choices=LIMIT_TYPE_CHOICES,
        db_index=True,
        help_text="Type of limit being tracked"
    )
    amount_used = models.IntegerField(
        default=0,
        validators=[MinValueValidator(0)],
        help_text="Amount used in current period"
    )

    # Billing period tracking
    period_start = models.DateField(
        help_text="Start date of billing period"
    )
    period_end = models.DateField(
        help_text="End date of billing period"
    )

    # Metadata
    metadata = models.JSONField(
        default=dict,
        blank=True,
        help_text="Additional tracking data (e.g., breakdown by site)"
    )

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_plan_limit_usage'
        verbose_name = 'Plan Limit Usage'
        verbose_name_plural = 'Plan Limit Usage Records'
        # One usage row per account/limit/period combination.
        unique_together = [['account', 'limit_type', 'period_start']]
        ordering = ['-period_start', 'limit_type']
        indexes = [
            models.Index(fields=['account', 'limit_type']),
            models.Index(fields=['account', 'period_start', 'period_end']),
            models.Index(fields=['limit_type', 'period_start']),
        ]

    def __str__(self):
        # getattr guard: tolerate rows where the related account is missing
        # (presumably 'account' comes from AccountBaseModel - confirm there).
        account = getattr(self, 'account', None)
        return f"{account.name if account else 'No Account'} - {self.get_limit_type_display()} - {self.amount_used} used"

    def is_current_period(self):
        """Check if this record is for the current billing period"""
        from django.utils import timezone
        today = timezone.now().date()
        return self.period_start <= today <= self.period_end

    def remaining_allowance(self, plan_limit):
        """Calculate remaining allowance"""
        # Clamp at zero so overage never reports a negative allowance.
        return max(0, plan_limit - self.amount_used)

    def percentage_used(self, plan_limit):
        """Calculate percentage of limit used"""
        # A zero limit reports 0% to avoid division by zero; result is
        # capped at 100 even when usage exceeds the limit.
        if plan_limit == 0:
            return 0
        return min(100, int((self.amount_used / plan_limit) * 100))
|
||||
|
||||
|
||||
class Invoice(AccountBaseModel):
|
||||
@@ -180,12 +335,15 @@ class Invoice(AccountBaseModel):
|
||||
]
|
||||
|
||||
invoice_number = models.CharField(max_length=50, unique=True, db_index=True)
|
||||
|
||||
# Subscription relationship
|
||||
subscription = models.ForeignKey(
|
||||
'igny8_core_auth.Subscription',
|
||||
on_delete=models.SET_NULL,
|
||||
null=True,
|
||||
blank=True,
|
||||
on_delete=models.SET_NULL,
|
||||
related_name='invoices'
|
||||
related_name='invoices',
|
||||
help_text='Subscription this invoice is for (if subscription-based)'
|
||||
)
|
||||
|
||||
# Amounts
|
||||
@@ -208,9 +366,6 @@ class Invoice(AccountBaseModel):
|
||||
# Payment integration
|
||||
stripe_invoice_id = models.CharField(max_length=255, null=True, blank=True)
|
||||
payment_method = models.CharField(max_length=50, null=True, blank=True)
|
||||
billing_email = models.EmailField(null=True, blank=True)
|
||||
billing_period_start = models.DateTimeField(null=True, blank=True)
|
||||
billing_period_end = models.DateTimeField(null=True, blank=True)
|
||||
|
||||
# Metadata
|
||||
notes = models.TextField(blank=True)
|
||||
@@ -243,10 +398,45 @@ class Invoice(AccountBaseModel):
|
||||
def tax_amount(self):
|
||||
return self.tax
|
||||
|
||||
@property
|
||||
def tax_rate(self):
|
||||
"""Get tax rate from metadata if stored"""
|
||||
if self.metadata and 'tax_rate' in self.metadata:
|
||||
return self.metadata['tax_rate']
|
||||
return 0
|
||||
|
||||
@property
|
||||
def discount_amount(self):
|
||||
"""Get discount amount from metadata if stored"""
|
||||
if self.metadata and 'discount_amount' in self.metadata:
|
||||
return self.metadata['discount_amount']
|
||||
return 0
|
||||
|
||||
@property
|
||||
def total_amount(self):
|
||||
return self.total
|
||||
|
||||
@property
|
||||
def billing_period_start(self):
|
||||
"""Get from subscription - single source of truth"""
|
||||
if self.account and hasattr(self.account, 'subscription'):
|
||||
return self.account.subscription.current_period_start
|
||||
return None
|
||||
|
||||
@property
|
||||
def billing_period_end(self):
|
||||
"""Get from subscription - single source of truth"""
|
||||
if self.account and hasattr(self.account, 'subscription'):
|
||||
return self.account.subscription.current_period_end
|
||||
return None
|
||||
|
||||
@property
|
||||
def billing_email(self):
|
||||
"""Get from metadata snapshot or account"""
|
||||
if self.metadata and 'billing_snapshot' in self.metadata:
|
||||
return self.metadata['billing_snapshot'].get('email')
|
||||
return self.account.billing_email if self.account else None
|
||||
|
||||
def add_line_item(self, description: str, quantity: int, unit_price: Decimal, amount: Decimal = None):
|
||||
"""Append a line item and keep JSON shape consistent."""
|
||||
items = list(self.line_items or [])
|
||||
@@ -278,23 +468,14 @@ class Payment(AccountBaseModel):
|
||||
Supports: Stripe, PayPal, Manual (Bank Transfer, Local Wallet)
|
||||
"""
|
||||
STATUS_CHOICES = [
|
||||
('pending', 'Pending'),
|
||||
('pending_approval', 'Pending Approval'),
|
||||
('processing', 'Processing'),
|
||||
('succeeded', 'Succeeded'),
|
||||
('completed', 'Completed'), # Legacy alias for succeeded
|
||||
('failed', 'Failed'),
|
||||
('refunded', 'Refunded'),
|
||||
('cancelled', 'Cancelled'),
|
||||
('pending_approval', 'Pending Approval'), # Manual payment submitted by user
|
||||
('succeeded', 'Succeeded'), # Payment approved and processed
|
||||
('failed', 'Failed'), # Payment rejected or failed
|
||||
('refunded', 'Refunded'), # Payment refunded (rare)
|
||||
]
|
||||
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe (Credit/Debit Card)'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer (Manual)'),
|
||||
('local_wallet', 'Local Wallet (Manual)'),
|
||||
('manual', 'Manual Payment'),
|
||||
]
|
||||
# Use centralized payment method choices
|
||||
PAYMENT_METHOD_CHOICES = PAYMENT_METHOD_CHOICES
|
||||
|
||||
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE, related_name='payments')
|
||||
|
||||
@@ -303,7 +484,7 @@ class Payment(AccountBaseModel):
|
||||
currency = models.CharField(max_length=3, default='USD')
|
||||
|
||||
# Status
|
||||
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='pending', db_index=True)
|
||||
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='pending_approval', db_index=True)
|
||||
|
||||
# Payment method
|
||||
payment_method = models.CharField(max_length=50, choices=PAYMENT_METHOD_CHOICES, db_index=True)
|
||||
@@ -320,10 +501,10 @@ class Payment(AccountBaseModel):
|
||||
manual_reference = models.CharField(
|
||||
max_length=255,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text="Bank transfer reference, wallet transaction ID, etc."
|
||||
)
|
||||
manual_notes = models.TextField(blank=True, help_text="Admin notes for manual payments")
|
||||
transaction_reference = models.CharField(max_length=255, blank=True)
|
||||
admin_notes = models.TextField(blank=True, help_text="Internal notes on approval/rejection")
|
||||
approved_by = models.ForeignKey(
|
||||
settings.AUTH_USER_MODEL,
|
||||
@@ -348,6 +529,9 @@ class Payment(AccountBaseModel):
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
updated_at = models.DateTimeField(auto_now=True)
|
||||
|
||||
# History tracking
|
||||
history = HistoricalRecords()
|
||||
|
||||
class Meta:
|
||||
app_label = 'billing'
|
||||
db_table = 'igny8_payments'
|
||||
@@ -357,9 +541,24 @@ class Payment(AccountBaseModel):
|
||||
models.Index(fields=['account', 'payment_method']),
|
||||
models.Index(fields=['invoice', 'status']),
|
||||
]
|
||||
constraints = [
|
||||
# Ensure manual_reference is unique when not null/empty
|
||||
# This prevents duplicate bank transfer references
|
||||
models.UniqueConstraint(
|
||||
fields=['manual_reference'],
|
||||
name='unique_manual_reference_when_not_null',
|
||||
condition=models.Q(manual_reference__isnull=False) & ~models.Q(manual_reference='')
|
||||
),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return f"Payment {self.id} - {self.get_payment_method_display()} - {self.amount} {self.currency}"
|
||||
|
||||
def save(self, *args, **kwargs):
    """Normalize empty manual_reference to NULL for proper uniqueness handling"""
    # NULL values bypass the conditional unique constraint on
    # manual_reference, whereas duplicate '' values would collide.
    normalized = self.manual_reference
    if normalized == '':
        normalized = None
    self.manual_reference = normalized
    super().save(*args, **kwargs)
|
||||
|
||||
|
||||
class CreditPackage(models.Model):
|
||||
@@ -409,20 +608,18 @@ class CreditPackage(models.Model):
|
||||
|
||||
class PaymentMethodConfig(models.Model):
|
||||
"""
|
||||
Configure payment methods availability per country
|
||||
Allows enabling/disabling manual payments by region
|
||||
Configure payment methods availability per country.
|
||||
|
||||
For online payments (stripe, paypal): Credentials stored in IntegrationProvider.
|
||||
For manual payments (bank_transfer, local_wallet): Bank/wallet details stored here.
|
||||
"""
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
('local_wallet', 'Local Wallet'),
|
||||
]
|
||||
# Use centralized choices
|
||||
PAYMENT_METHOD_CHOICES = PAYMENT_METHOD_CHOICES
|
||||
|
||||
country_code = models.CharField(
|
||||
max_length=2,
|
||||
db_index=True,
|
||||
help_text="ISO 2-letter country code (e.g., US, GB, IN)"
|
||||
help_text="ISO 2-letter country code (e.g., US, GB, PK) or '*' for global"
|
||||
)
|
||||
payment_method = models.CharField(max_length=50, choices=PAYMENT_METHOD_CHOICES)
|
||||
is_enabled = models.BooleanField(default=True)
|
||||
@@ -431,15 +628,17 @@ class PaymentMethodConfig(models.Model):
|
||||
display_name = models.CharField(max_length=100, blank=True)
|
||||
instructions = models.TextField(blank=True, help_text="Payment instructions for users")
|
||||
|
||||
# Manual payment details (for bank_transfer/local_wallet)
|
||||
# Manual payment details (for bank_transfer only)
|
||||
bank_name = models.CharField(max_length=255, blank=True)
|
||||
account_number = models.CharField(max_length=255, blank=True)
|
||||
routing_number = models.CharField(max_length=255, blank=True)
|
||||
swift_code = models.CharField(max_length=255, blank=True)
|
||||
account_title = models.CharField(max_length=255, blank=True, help_text="Account holder name")
|
||||
routing_number = models.CharField(max_length=255, blank=True, help_text="Routing/Sort code")
|
||||
swift_code = models.CharField(max_length=255, blank=True, help_text="SWIFT/BIC code for international")
|
||||
iban = models.CharField(max_length=255, blank=True, help_text="IBAN for international transfers")
|
||||
|
||||
# Additional fields for local wallets
|
||||
wallet_type = models.CharField(max_length=100, blank=True, help_text="E.g., PayTM, PhonePe, etc.")
|
||||
wallet_id = models.CharField(max_length=255, blank=True)
|
||||
wallet_type = models.CharField(max_length=100, blank=True, help_text="E.g., JazzCash, EasyPaisa, etc.")
|
||||
wallet_id = models.CharField(max_length=255, blank=True, help_text="Mobile number or wallet ID")
|
||||
|
||||
# Order/priority
|
||||
sort_order = models.IntegerField(default=0)
|
||||
@@ -464,12 +663,8 @@ class AccountPaymentMethod(AccountBaseModel):
|
||||
Account-scoped payment methods (Stripe/PayPal/manual bank/wallet).
|
||||
Only metadata/refs are stored here; no secrets.
|
||||
"""
|
||||
PAYMENT_METHOD_CHOICES = [
|
||||
('stripe', 'Stripe'),
|
||||
('paypal', 'PayPal'),
|
||||
('bank_transfer', 'Bank Transfer'),
|
||||
('local_wallet', 'Local Wallet'),
|
||||
]
|
||||
# Use centralized choices
|
||||
PAYMENT_METHOD_CHOICES = PAYMENT_METHOD_CHOICES
|
||||
|
||||
type = models.CharField(max_length=50, choices=PAYMENT_METHOD_CHOICES, db_index=True)
|
||||
display_name = models.CharField(max_length=100, help_text="User-visible label", default='')
|
||||
@@ -497,3 +692,307 @@ class AccountPaymentMethod(AccountBaseModel):
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.account_id} - {self.display_name} ({self.type})"
|
||||
|
||||
|
||||
class AIModelConfig(models.Model):
    """
    Central registry of every AI model (text and image) together with its
    provider pricing and credit configuration.

    This table is the single source of truth for model metadata: which
    provider serves a model, whether it is active, which model is the
    default for each type, and how token/image usage converts to credits.
    Text models are billed via ``tokens_per_credit``; image models via
    ``credits_per_image``.
    """

    MODEL_TYPE_CHOICES = [
        ('text', 'Text Generation'),
        ('image', 'Image Generation'),
    ]

    PROVIDER_CHOICES = [
        ('openai', 'OpenAI'),
        ('anthropic', 'Anthropic'),
        ('runware', 'Runware'),
        ('google', 'Google'),
    ]

    QUALITY_TIER_CHOICES = [
        ('basic', 'Basic'),
        ('quality', 'Quality'),
        ('premium', 'Premium'),
    ]

    # --- Identity ---
    model_name = models.CharField(
        max_length=100,
        unique=True,
        db_index=True,
        help_text="Model identifier (e.g., 'gpt-5.1', 'dall-e-3', 'runware:97@1')",
    )
    model_type = models.CharField(
        max_length=20,
        choices=MODEL_TYPE_CHOICES,
        db_index=True,
        help_text="text / image",
    )
    provider = models.CharField(
        max_length=50,
        choices=PROVIDER_CHOICES,
        db_index=True,
        help_text="Links to IntegrationProvider",
    )
    display_name = models.CharField(
        max_length=200,
        help_text="Human-readable name",
    )

    # --- Flags ---
    is_default = models.BooleanField(
        default=False,
        db_index=True,
        help_text="One default per type",
    )
    is_active = models.BooleanField(
        default=True,
        db_index=True,
        help_text="Enable/disable",
    )

    # --- Provider cost (text models, USD per 1K tokens) ---
    cost_per_1k_input = models.DecimalField(
        max_digits=10,
        decimal_places=6,
        null=True,
        blank=True,
        help_text="Provider cost per 1K input tokens (USD) - text models",
    )
    cost_per_1k_output = models.DecimalField(
        max_digits=10,
        decimal_places=6,
        null=True,
        blank=True,
        help_text="Provider cost per 1K output tokens (USD) - text models",
    )

    # --- Credit conversion ---
    tokens_per_credit = models.IntegerField(
        null=True,
        blank=True,
        help_text="Text: tokens per 1 credit (e.g., 1000, 10000)",
    )
    credits_per_image = models.IntegerField(
        null=True,
        blank=True,
        help_text="Image: credits per image (e.g., 1, 5, 15)",
    )
    quality_tier = models.CharField(
        max_length=20,
        choices=QUALITY_TIER_CHOICES,
        null=True,
        blank=True,
        help_text="basic / quality / premium - for image models",
    )

    # --- Model limits ---
    max_tokens = models.IntegerField(
        null=True,
        blank=True,
        help_text="Model token limit",
    )
    context_window = models.IntegerField(
        null=True,
        blank=True,
        help_text="Model context size",
    )

    # --- Capabilities ---
    capabilities = models.JSONField(
        default=dict,
        blank=True,
        help_text="Capabilities: vision, function_calling, json_mode, etc.",
    )

    # --- Bookkeeping ---
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # Full change history via django-simple-history.
    history = HistoricalRecords()

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_ai_model_config'
        verbose_name = 'AI Model Configuration'
        verbose_name_plural = 'AI Model Configurations'
        ordering = ['model_type', 'model_name']
        indexes = [
            models.Index(fields=['model_type', 'is_active']),
            models.Index(fields=['provider', 'is_active']),
            models.Index(fields=['is_default', 'model_type']),
        ]

    def __str__(self):
        return self.display_name

    def save(self, *args, **kwargs):
        """Persist the row, demoting any other default of the same type.

        Enforces the "one default per model_type" invariant by clearing
        the flag on every other row of this type before saving.
        """
        if self.is_default:
            (AIModelConfig.objects
                .filter(model_type=self.model_type, is_default=True)
                .exclude(pk=self.pk)
                .update(is_default=False))
        super().save(*args, **kwargs)

    @classmethod
    def get_default_text_model(cls):
        """Return the active default text model, or None when unset."""
        qs = cls.objects.filter(model_type='text', is_default=True, is_active=True)
        return qs.first()

    @classmethod
    def get_default_image_model(cls):
        """Return the active default image model, or None when unset."""
        qs = cls.objects.filter(model_type='image', is_default=True, is_active=True)
        return qs.first()

    @classmethod
    def get_image_models_by_tier(cls):
        """Return all active image models ordered by quality tier, then name."""
        return (cls.objects
                .filter(model_type='image', is_active=True)
                .order_by('quality_tier', 'model_name'))
|
||||
|
||||
|
||||
class WebhookEvent(models.Model):
    """
    Persistent record of every incoming payment webhook event.

    Keeping the raw events gives us:
    - an audit trail of everything the providers sent,
    - idempotency checks keyed on ``event_id``,
    - the ability to replay events whose processing failed,
    - data for debugging and monitoring.
    """

    PROVIDER_CHOICES = [
        ('stripe', 'Stripe'),
        ('paypal', 'PayPal'),
    ]

    # Provider-assigned identifier; uniqueness is what makes recording idempotent.
    event_id = models.CharField(
        max_length=255,
        unique=True,
        db_index=True,
        help_text="Unique event ID from the payment provider",
    )

    provider = models.CharField(
        max_length=20,
        choices=PROVIDER_CHOICES,
        db_index=True,
        help_text="Payment provider (stripe or paypal)",
    )

    # E.g. 'checkout.session.completed' or 'PAYMENT.CAPTURE.COMPLETED'.
    event_type = models.CharField(
        max_length=100,
        db_index=True,
        help_text="Event type from the provider",
    )

    # Raw body exactly as received, for debugging and replay.
    payload = models.JSONField(
        help_text="Full webhook payload"
    )

    # Processing state.
    processed = models.BooleanField(
        default=False,
        db_index=True,
        help_text="Whether this event has been successfully processed",
    )
    processed_at = models.DateTimeField(
        null=True,
        blank=True,
        help_text="When the event was processed",
    )

    # Failure bookkeeping.
    error_message = models.TextField(
        blank=True,
        help_text="Error message if processing failed",
    )
    retry_count = models.IntegerField(
        default=0,
        help_text="Number of processing attempts",
    )

    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        app_label = 'billing'
        db_table = 'igny8_webhook_events'
        verbose_name = 'Webhook Event'
        verbose_name_plural = 'Webhook Events'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['provider', 'event_type']),
            models.Index(fields=['processed', 'created_at']),
            models.Index(fields=['provider', 'processed']),
        ]

    def __str__(self):
        return f"{self.provider}:{self.event_type} - {self.event_id[:20]}..."

    @classmethod
    def record_event(cls, event_id: str, provider: str, event_type: str, payload: dict):
        """
        Record a webhook event, returning an (event, created) pair.

        An already-recorded event_id yields the stored event with
        created=False, which is what keeps processing idempotent.
        """
        defaults = {
            'provider': provider,
            'event_type': event_type,
            'payload': payload,
        }
        return cls.objects.get_or_create(event_id=event_id, defaults=defaults)

    def mark_processed(self):
        """Flag this event as successfully processed and stamp the time."""
        from django.utils import timezone
        self.processed = True
        self.processed_at = timezone.now()
        self.save(update_fields=['processed', 'processed_at'])

    def mark_failed(self, error_message: str):
        """Record a failed processing attempt and bump the retry counter."""
        self.error_message = error_message
        self.retry_count += 1
        self.save(update_fields=['error_message', 'retry_count'])
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
"""
|
||||
Credit Service for managing credit transactions and deductions
|
||||
"""
|
||||
import math
|
||||
import logging
|
||||
from django.db import transaction
|
||||
from django.utils import timezone
|
||||
from igny8_core.business.billing.models import CreditTransaction, CreditUsageLog
|
||||
@@ -8,90 +10,252 @@ from igny8_core.business.billing.constants import CREDIT_COSTS
|
||||
from igny8_core.business.billing.exceptions import InsufficientCreditsError, CreditCalculationError
|
||||
from igny8_core.auth.models import Account
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _check_low_credits_warning(account, previous_balance):
    """
    Send a low-credit warning email when the balance has just crossed
    below the configured threshold.

    The warning fires only on the crossing itself (previous balance at or
    above the threshold, new balance below it), so repeated deductions
    while already low do not spam the user. Every failure is swallowed and
    logged: warning delivery must never break the credit deduction that
    triggered it.
    """
    try:
        from igny8_core.modules.system.email_models import EmailSettings
        from .email_service import BillingEmailService

        email_settings = EmailSettings.get_settings()
        if not email_settings.send_low_credit_warnings:
            return

        threshold = email_settings.low_credit_threshold
        crossed_below = account.credits < threshold <= previous_balance
        if not crossed_below:
            return

        logger.info(f"Credits fell below threshold for account {account.id}: {account.credits} < {threshold}")
        BillingEmailService.send_low_credits_warning(
            account=account,
            current_credits=account.credits,
            threshold=threshold,
        )
    except Exception as e:
        logger.error(f"Failed to check/send low credits warning: {e}")
|
||||
|
||||
|
||||
class CreditService:
|
||||
"""Service for managing credits"""
|
||||
"""Service for managing credits - Token-based only"""
|
||||
|
||||
@staticmethod
|
||||
def get_credit_cost(operation_type, amount=None):
|
||||
def calculate_credits_for_image(model_name: str, num_images: int = 1) -> int:
|
||||
"""
|
||||
Get credit cost for operation.
|
||||
Now checks database config first, falls back to constants.
|
||||
Calculate credits for image generation based on AIModelConfig.credits_per_image.
|
||||
|
||||
Args:
|
||||
operation_type: Type of operation (from CREDIT_COSTS)
|
||||
amount: Optional amount (word count, image count, etc.)
|
||||
|
||||
Returns:
|
||||
int: Number of credits required
|
||||
|
||||
Raises:
|
||||
CreditCalculationError: If operation type is unknown
|
||||
"""
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Try to get from database config first
|
||||
try:
|
||||
from igny8_core.business.billing.models import CreditCostConfig
|
||||
model_name: The AI model name (e.g., 'dall-e-3', 'flux-1-1-pro')
|
||||
num_images: Number of images to generate
|
||||
|
||||
config = CreditCostConfig.objects.filter(
|
||||
operation_type=operation_type,
|
||||
Returns:
|
||||
int: Credits required
|
||||
|
||||
Raises:
|
||||
CreditCalculationError: If model not found or has no credits_per_image
|
||||
"""
|
||||
from igny8_core.business.billing.models import AIModelConfig
|
||||
|
||||
try:
|
||||
model = AIModelConfig.objects.filter(
|
||||
model_name=model_name,
|
||||
is_active=True
|
||||
).first()
|
||||
|
||||
if config:
|
||||
base_cost = config.credits_cost
|
||||
|
||||
# Apply unit-based calculation
|
||||
if config.unit == 'per_100_words' and amount:
|
||||
return max(1, int(base_cost * (amount / 100)))
|
||||
elif config.unit == 'per_200_words' and amount:
|
||||
return max(1, int(base_cost * (amount / 200)))
|
||||
elif config.unit in ['per_item', 'per_image'] and amount:
|
||||
return base_cost * amount
|
||||
else:
|
||||
return base_cost
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to get cost from database, using constants: {e}")
|
||||
|
||||
# Fallback to hardcoded constants
|
||||
base_cost = CREDIT_COSTS.get(operation_type, 0)
|
||||
if base_cost == 0:
|
||||
raise CreditCalculationError(f"Unknown operation type: {operation_type}")
|
||||
|
||||
# Variable cost operations (legacy logic)
|
||||
if operation_type == 'content_generation' and amount:
|
||||
# Per 100 words
|
||||
return max(1, int(base_cost * (amount / 100)))
|
||||
elif operation_type == 'optimization' and amount:
|
||||
# Per 200 words
|
||||
return max(1, int(base_cost * (amount / 200)))
|
||||
elif operation_type == 'image_generation' and amount:
|
||||
# Per image
|
||||
return base_cost * amount
|
||||
elif operation_type == 'idea_generation' and amount:
|
||||
# Per idea
|
||||
return base_cost * amount
|
||||
|
||||
# Fixed cost operations
|
||||
return base_cost
|
||||
if not model:
|
||||
raise CreditCalculationError(f"Model {model_name} not found or inactive")
|
||||
|
||||
if model.credits_per_image is None:
|
||||
raise CreditCalculationError(
|
||||
f"Model {model_name} has no credits_per_image configured"
|
||||
)
|
||||
|
||||
credits = model.credits_per_image * num_images
|
||||
|
||||
logger.info(
|
||||
f"Calculated credits for {model_name}: "
|
||||
f"{num_images} images × {model.credits_per_image} = {credits} credits"
|
||||
)
|
||||
|
||||
return credits
|
||||
|
||||
except AIModelConfig.DoesNotExist:
|
||||
raise CreditCalculationError(f"Model {model_name} not found")
|
||||
|
||||
@staticmethod
def calculate_credits_from_tokens_by_model(model_name: str, total_tokens: int) -> int:
    """
    Calculate credits from token usage based on AIModelConfig.tokens_per_credit.

    This is the model-specific version that uses the model's configured rate.
    For operation-based calculation, use calculate_credits_from_tokens().

    Args:
        model_name: The AI model name (e.g., 'gpt-4o', 'claude-3-5-sonnet')
        total_tokens: Total tokens used (input + output)

    Returns:
        int: Credits required (minimum 1)

    Raises:
        CreditCalculationError: If the configured rate is invalid, or on any
            unexpected error while looking up the configuration.
    """
    from igny8_core.business.billing.models import AIModelConfig, BillingConfiguration

    try:
        model = AIModelConfig.objects.filter(
            model_name=model_name,
            is_active=True
        ).first()

        # Fetch the global config once; it supplies both the fallback
        # tokens_per_credit and the rounding mode.
        billing_config = BillingConfiguration.get_config()

        if model and model.tokens_per_credit:
            tokens_per_credit = model.tokens_per_credit
        else:
            # Model missing/inactive or without a rate: fall back to the
            # global default rate.
            tokens_per_credit = billing_config.default_tokens_per_credit
            logger.info(
                f"Model {model_name} has no tokens_per_credit, "
                f"using default: {tokens_per_credit}"
            )

        if tokens_per_credit <= 0:
            raise CreditCalculationError(
                f"Invalid tokens_per_credit for {model_name}: {tokens_per_credit}"
            )

        rounding_mode = billing_config.credit_rounding_mode
        credits_float = total_tokens / tokens_per_credit

        if rounding_mode == 'up':
            credits = math.ceil(credits_float)
        elif rounding_mode == 'down':
            credits = math.floor(credits_float)
        else:  # nearest
            credits = round(credits_float)

        # Never charge less than 1 credit.
        credits = max(credits, 1)

        logger.info(
            f"Calculated credits for {model_name}: "
            f"{total_tokens} tokens ÷ {tokens_per_credit} = {credits} credits"
        )

        return credits

    except CreditCalculationError:
        # Bug fix: the previous broad handler caught the error raised above
        # and re-wrapped it, losing the original message. Propagate as-is.
        raise
    except Exception as e:
        logger.error(f"Error calculating credits for {model_name}: {e}")
        raise CreditCalculationError(f"Error calculating credits: {e}")
|
||||
|
||||
@staticmethod
def calculate_credits_from_tokens(operation_type, tokens_input, tokens_output):
    """
    Calculate credits from actual token usage using configured ratio.
    This is the ONLY way credits are calculated in the system.

    Args:
        operation_type: Type of operation
        tokens_input: Input tokens used (None treated as 0)
        tokens_output: Output tokens used (None treated as 0)

    Returns:
        int: Credits to deduct

    Raises:
        CreditCalculationError: If configuration error
    """
    # NOTE: the module already does `import math`, `import logging` and
    # defines a module-level `logger`; the redundant function-local
    # imports and the shadowing local logger were removed.
    from igny8_core.business.billing.models import CreditCostConfig, BillingConfiguration

    # Get operation config (use global default if not found)
    config = CreditCostConfig.objects.filter(
        operation_type=operation_type,
        is_active=True
    ).first()

    if not config:
        # Use global billing config as fallback
        billing_config = BillingConfiguration.get_config()
        tokens_per_credit = billing_config.default_tokens_per_credit
        min_credits = 1
        logger.info(f"No config for {operation_type}, using default: {tokens_per_credit} tokens/credit")
    else:
        tokens_per_credit = config.tokens_per_credit
        min_credits = config.min_credits

    # Calculate total tokens (missing counts are treated as zero)
    total_tokens = (tokens_input or 0) + (tokens_output or 0)

    # Guard against a misconfigured (zero/negative) rate before dividing
    if tokens_per_credit <= 0:
        raise CreditCalculationError(f"Invalid tokens_per_credit: {tokens_per_credit}")

    credits_float = total_tokens / tokens_per_credit

    # Rounding mode comes from the global billing configuration
    billing_config = BillingConfiguration.get_config()
    rounding_mode = billing_config.credit_rounding_mode

    if rounding_mode == 'up':
        credits = math.ceil(credits_float)
    elif rounding_mode == 'down':
        credits = math.floor(credits_float)
    else:  # nearest
        credits = round(credits_float)

    # Never charge below the configured minimum
    credits = max(credits, min_credits)

    logger.info(
        f"Calculated credits for {operation_type}: "
        f"{total_tokens} tokens ({tokens_input} in, {tokens_output} out) "
        f"÷ {tokens_per_credit} = {credits} credits"
    )

    return credits
|
||||
|
||||
@staticmethod
|
||||
def check_credits(account, operation_type, estimated_amount=None):
|
||||
"""
|
||||
Check if account has sufficient credits for an operation.
|
||||
For token-based operations, this is an estimate check only.
|
||||
Actual deduction happens after AI call with real token usage.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
operation_type: Type of operation
|
||||
amount: Optional amount (word count, image count, etc.)
|
||||
estimated_amount: Optional estimated amount (for non-token operations)
|
||||
|
||||
Raises:
|
||||
InsufficientCreditsError: If account doesn't have enough credits
|
||||
"""
|
||||
required = CreditService.get_credit_cost(operation_type, amount)
|
||||
from igny8_core.business.billing.models import CreditCostConfig
|
||||
from igny8_core.business.billing.constants import CREDIT_COSTS
|
||||
|
||||
# Get operation config
|
||||
config = CreditCostConfig.objects.filter(
|
||||
operation_type=operation_type,
|
||||
is_active=True
|
||||
).first()
|
||||
|
||||
if config:
|
||||
# Use minimum credits as estimate for token-based operations
|
||||
required = config.min_credits
|
||||
else:
|
||||
# Fallback to constants
|
||||
required = CREDIT_COSTS.get(operation_type, 1)
|
||||
|
||||
if account.credits < required:
|
||||
raise InsufficientCreditsError(
|
||||
f"Insufficient credits. Required: {required}, Available: {account.credits}"
|
||||
@@ -99,21 +263,46 @@ class CreditService:
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def check_credits_legacy(account, required_credits):
|
||||
def check_credits_legacy(account, amount):
|
||||
"""
|
||||
Legacy method: Check if account has enough credits (for backward compatibility).
|
||||
Legacy method to check credits for a known amount.
|
||||
Used internally by deduct_credits.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
required_credits: Number of credits required
|
||||
amount: Required credits amount
|
||||
|
||||
Raises:
|
||||
InsufficientCreditsError: If account doesn't have enough credits
|
||||
"""
|
||||
if account.credits < required_credits:
|
||||
if account.credits < amount:
|
||||
raise InsufficientCreditsError(
|
||||
f"Insufficient credits. Required: {required_credits}, Available: {account.credits}"
|
||||
f"Insufficient credits. Required: {amount}, Available: {account.credits}"
|
||||
)
|
||||
return True
|
||||
|
||||
@staticmethod
def check_credits_for_tokens(account, operation_type, estimated_tokens_input, estimated_tokens_output):
    """
    Check that the account can cover an operation's estimated token usage.

    Args:
        account: Account instance
        operation_type: Type of operation
        estimated_tokens_input: Estimated input tokens
        estimated_tokens_output: Estimated output tokens

    Returns:
        True when the balance covers the estimated cost.

    Raises:
        InsufficientCreditsError: If account doesn't have enough credits
    """
    needed = CreditService.calculate_credits_from_tokens(
        operation_type,
        estimated_tokens_input,
        estimated_tokens_output,
    )
    if account.credits >= needed:
        return True
    raise InsufficientCreditsError(
        f"Insufficient credits. Required: {needed}, Available: {account.credits}"
    )
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
@@ -140,6 +329,9 @@ class CreditService:
|
||||
# Check sufficient credits (legacy: amount is already calculated)
|
||||
CreditService.check_credits_legacy(account, amount)
|
||||
|
||||
# Store previous balance for low credits check
|
||||
previous_balance = account.credits
|
||||
|
||||
# Deduct from account.credits
|
||||
account.credits -= amount
|
||||
account.save(update_fields=['credits'])
|
||||
@@ -168,48 +360,72 @@ class CreditService:
|
||||
metadata=metadata or {}
|
||||
)
|
||||
|
||||
# Check and send low credits warning if applicable
|
||||
_check_low_credits_warning(account, previous_balance)
|
||||
|
||||
return account.credits
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
def deduct_credits_for_operation(account, operation_type, amount=None, description=None, metadata=None, cost_usd=None, model_used=None, tokens_input=None, tokens_output=None, related_object_type=None, related_object_id=None):
|
||||
def deduct_credits_for_operation(
|
||||
account,
|
||||
operation_type,
|
||||
tokens_input,
|
||||
tokens_output,
|
||||
description=None,
|
||||
metadata=None,
|
||||
cost_usd=None,
|
||||
model_used=None,
|
||||
related_object_type=None,
|
||||
related_object_id=None
|
||||
):
|
||||
"""
|
||||
Deduct credits for an operation (convenience method that calculates cost automatically).
|
||||
Deduct credits for an operation based on actual token usage.
|
||||
This is the ONLY way to deduct credits in the token-based system.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
operation_type: Type of operation
|
||||
amount: Optional amount (word count, image count, etc.)
|
||||
tokens_input: REQUIRED - Actual input tokens used
|
||||
tokens_output: REQUIRED - Actual output tokens used
|
||||
description: Optional description (auto-generated if not provided)
|
||||
metadata: Optional metadata dict
|
||||
cost_usd: Optional cost in USD
|
||||
model_used: Optional AI model used
|
||||
tokens_input: Optional input tokens
|
||||
tokens_output: Optional output tokens
|
||||
related_object_type: Optional related object type
|
||||
related_object_id: Optional related object ID
|
||||
|
||||
Returns:
|
||||
int: New credit balance
|
||||
|
||||
Raises:
|
||||
ValueError: If tokens_input or tokens_output not provided
|
||||
"""
|
||||
# Calculate credit cost
|
||||
credits_required = CreditService.get_credit_cost(operation_type, amount)
|
||||
# Validate token inputs
|
||||
if tokens_input is None or tokens_output is None:
|
||||
raise ValueError(
|
||||
f"tokens_input and tokens_output are REQUIRED for credit deduction. "
|
||||
f"Got: tokens_input={tokens_input}, tokens_output={tokens_output}"
|
||||
)
|
||||
|
||||
# Calculate credits from actual token usage
|
||||
credits_required = CreditService.calculate_credits_from_tokens(
|
||||
operation_type, tokens_input, tokens_output
|
||||
)
|
||||
|
||||
# Check sufficient credits
|
||||
CreditService.check_credits(account, operation_type, amount)
|
||||
if account.credits < credits_required:
|
||||
raise InsufficientCreditsError(
|
||||
f"Insufficient credits. Required: {credits_required}, Available: {account.credits}"
|
||||
)
|
||||
|
||||
# Auto-generate description if not provided
|
||||
if not description:
|
||||
if operation_type == 'clustering':
|
||||
description = f"Clustering operation"
|
||||
elif operation_type == 'idea_generation':
|
||||
description = f"Generated {amount or 1} idea(s)"
|
||||
elif operation_type == 'content_generation':
|
||||
description = f"Generated content ({amount or 0} words)"
|
||||
elif operation_type == 'image_generation':
|
||||
description = f"Generated {amount or 1} image(s)"
|
||||
else:
|
||||
description = f"{operation_type} operation"
|
||||
total_tokens = tokens_input + tokens_output
|
||||
description = (
|
||||
f"{operation_type}: {total_tokens} tokens "
|
||||
f"({tokens_input} in, {tokens_output} out) = {credits_required} credits"
|
||||
)
|
||||
|
||||
return CreditService.deduct_credits(
|
||||
account=account,
|
||||
@@ -258,37 +474,54 @@ class CreditService:
|
||||
return account.credits
|
||||
|
||||
@staticmethod
|
||||
def calculate_credits_for_operation(operation_type, **kwargs):
|
||||
@transaction.atomic
|
||||
def deduct_credits_for_image(
|
||||
account,
|
||||
model_name: str,
|
||||
num_images: int = 1,
|
||||
description: str = None,
|
||||
metadata: dict = None,
|
||||
cost_usd: float = None,
|
||||
related_object_type: str = None,
|
||||
related_object_id: int = None
|
||||
):
|
||||
"""
|
||||
Calculate credits needed for an operation.
|
||||
Legacy method - use get_credit_cost() instead.
|
||||
Deduct credits for image generation based on model's credits_per_image.
|
||||
|
||||
Args:
|
||||
operation_type: Type of operation
|
||||
**kwargs: Operation-specific parameters
|
||||
|
||||
account: Account instance
|
||||
model_name: AI model used (e.g., 'dall-e-3', 'flux-1-1-pro')
|
||||
num_images: Number of images generated
|
||||
description: Optional description
|
||||
metadata: Optional metadata dict
|
||||
cost_usd: Optional cost in USD
|
||||
related_object_type: Optional related object type
|
||||
related_object_id: Optional related object ID
|
||||
|
||||
Returns:
|
||||
int: Number of credits required
|
||||
|
||||
Raises:
|
||||
CreditCalculationError: If calculation fails
|
||||
int: New credit balance
|
||||
"""
|
||||
# Map legacy operation types
|
||||
if operation_type == 'ideas':
|
||||
operation_type = 'idea_generation'
|
||||
elif operation_type == 'content':
|
||||
operation_type = 'content_generation'
|
||||
elif operation_type == 'images':
|
||||
operation_type = 'image_generation'
|
||||
credits_required = CreditService.calculate_credits_for_image(model_name, num_images)
|
||||
|
||||
# Extract amount from kwargs
|
||||
amount = None
|
||||
if 'word_count' in kwargs:
|
||||
amount = kwargs.get('word_count')
|
||||
elif 'image_count' in kwargs:
|
||||
amount = kwargs.get('image_count')
|
||||
elif 'idea_count' in kwargs:
|
||||
amount = kwargs.get('idea_count')
|
||||
if account.credits < credits_required:
|
||||
raise InsufficientCreditsError(
|
||||
f"Insufficient credits. Required: {credits_required}, Available: {account.credits}"
|
||||
)
|
||||
|
||||
return CreditService.get_credit_cost(operation_type, amount)
|
||||
if not description:
|
||||
description = f"Image generation: {num_images} images with {model_name} = {credits_required} credits"
|
||||
|
||||
return CreditService.deduct_credits(
|
||||
account=account,
|
||||
amount=credits_required,
|
||||
operation_type='image_generation',
|
||||
description=description,
|
||||
metadata=metadata,
|
||||
cost_usd=cost_usd,
|
||||
model_used=model_name,
|
||||
tokens_input=None,
|
||||
tokens_output=None,
|
||||
related_object_type=related_object_type,
|
||||
related_object_id=related_object_id
|
||||
)
|
||||
|
||||
|
||||
1172
backend/igny8_core/business/billing/services/email_service.py
Normal file
1172
backend/igny8_core/business/billing/services/email_service.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -14,23 +14,67 @@ from ....auth.models import Account, Subscription
|
||||
class InvoiceService:
|
||||
"""Service for managing invoices"""
|
||||
|
||||
@staticmethod
def get_pending_invoice(subscription: Subscription) -> Optional[Invoice]:
    """
    Return the most recent pending invoice for *subscription*, or None.

    Lets payment processing reuse an existing pending invoice instead of
    creating a duplicate.
    """
    pending = Invoice.objects.filter(subscription=subscription, status='pending')
    return pending.order_by('-created_at').first()
|
||||
|
||||
@staticmethod
def get_or_create_subscription_invoice(
    subscription: Subscription,
    billing_period_start: datetime,
    billing_period_end: datetime
) -> tuple[Invoice, bool]:
    """
    Return an ``(invoice, created)`` pair for the subscription.

    When a pending invoice already exists it is reused (``created=False``);
    otherwise a new invoice is created for the given billing period.
    """
    pending = InvoiceService.get_pending_invoice(subscription)
    if pending is not None:
        return pending, False

    new_invoice = InvoiceService.create_subscription_invoice(
        subscription=subscription,
        billing_period_start=billing_period_start,
        billing_period_end=billing_period_end,
    )
    return new_invoice, True
|
||||
|
||||
@staticmethod
|
||||
def generate_invoice_number(account: Account) -> str:
|
||||
"""
|
||||
Generate unique invoice number
|
||||
Format: INV-{ACCOUNT_ID}-{YEAR}{MONTH}-{COUNTER}
|
||||
Generate unique invoice number with atomic locking to prevent duplicates
|
||||
Format: INV-{YY}{MM}{COUNTER} (e.g., INV-26010001)
|
||||
"""
|
||||
from django.db import transaction
|
||||
|
||||
now = timezone.now()
|
||||
prefix = f"INV-{account.id}-{now.year}{now.month:02d}"
|
||||
prefix = f"INV-{now.year % 100:02d}{now.month:02d}"
|
||||
|
||||
# Get count of invoices for this account this month
|
||||
count = Invoice.objects.filter(
|
||||
account=account,
|
||||
created_at__year=now.year,
|
||||
created_at__month=now.month
|
||||
).count()
|
||||
|
||||
return f"{prefix}-{count + 1:04d}"
|
||||
# Use atomic transaction with SELECT FOR UPDATE to prevent race conditions
|
||||
with transaction.atomic():
|
||||
# Lock the invoice table for this month to get accurate count
|
||||
count = Invoice.objects.select_for_update().filter(
|
||||
created_at__year=now.year,
|
||||
created_at__month=now.month
|
||||
).count()
|
||||
|
||||
invoice_number = f"{prefix}{count + 1:04d}"
|
||||
|
||||
# Double-check uniqueness (should not happen with lock, but safety check)
|
||||
while Invoice.objects.filter(invoice_number=invoice_number).exists():
|
||||
count += 1
|
||||
invoice_number = f"{prefix}{count + 1:04d}"
|
||||
|
||||
return invoice_number
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
@@ -41,29 +85,70 @@ class InvoiceService:
|
||||
) -> Invoice:
|
||||
"""
|
||||
Create invoice for subscription billing period
|
||||
|
||||
SIMPLIFIED CURRENCY LOGIC:
|
||||
- ALL invoices are in USD (consistent for accounting)
|
||||
- PKR equivalent is calculated and stored in metadata for display purposes
|
||||
- Bank transfer users see PKR equivalent but invoice is technically USD
|
||||
"""
|
||||
account = subscription.account
|
||||
plan = subscription.plan
|
||||
|
||||
# Snapshot billing information for historical record
|
||||
billing_snapshot = {
|
||||
'email': account.billing_email or (account.owner.email if account.owner else ''),
|
||||
'address_line1': account.billing_address_line1,
|
||||
'address_line2': account.billing_address_line2,
|
||||
'city': account.billing_city,
|
||||
'state': account.billing_state,
|
||||
'postal_code': account.billing_postal_code,
|
||||
'country': account.billing_country,
|
||||
'tax_id': account.tax_id,
|
||||
'snapshot_date': timezone.now().isoformat()
|
||||
}
|
||||
|
||||
# For manual payments, use configurable grace period instead of billing_period_end
|
||||
from igny8_core.business.billing.config import INVOICE_DUE_DATE_OFFSET
|
||||
invoice_date = timezone.now().date()
|
||||
due_date = invoice_date + timedelta(days=INVOICE_DUE_DATE_OFFSET)
|
||||
|
||||
# ALWAYS use USD for invoices (simplified accounting)
|
||||
from igny8_core.business.billing.utils.currency import get_currency_for_country, convert_usd_to_local
|
||||
|
||||
currency = 'USD'
|
||||
usd_price = float(plan.price)
|
||||
|
||||
# Calculate local equivalent for display purposes (if applicable)
|
||||
local_currency = get_currency_for_country(account.billing_country) if account.billing_country else 'USD'
|
||||
local_equivalent = convert_usd_to_local(usd_price, account.billing_country) if local_currency != 'USD' else usd_price
|
||||
|
||||
invoice = Invoice.objects.create(
|
||||
account=account,
|
||||
subscription=subscription,
|
||||
subscription=subscription, # Set FK directly
|
||||
invoice_number=InvoiceService.generate_invoice_number(account),
|
||||
billing_email=account.billing_email or account.users.filter(role='owner').first().email,
|
||||
status='pending',
|
||||
currency='USD',
|
||||
invoice_date=timezone.now().date(),
|
||||
due_date=billing_period_end.date(),
|
||||
billing_period_start=billing_period_start,
|
||||
billing_period_end=billing_period_end
|
||||
currency=currency,
|
||||
invoice_date=invoice_date,
|
||||
due_date=due_date,
|
||||
metadata={
|
||||
'billing_snapshot': billing_snapshot,
|
||||
'billing_period_start': billing_period_start.isoformat(),
|
||||
'billing_period_end': billing_period_end.isoformat(),
|
||||
'subscription_id': subscription.id, # Keep in metadata for backward compatibility
|
||||
'usd_price': str(plan.price), # Store original USD price
|
||||
'local_currency': local_currency, # Store local currency code for display
|
||||
'local_equivalent': str(round(local_equivalent, 2)), # Store local equivalent for display
|
||||
'exchange_rate': str(local_equivalent / usd_price if usd_price > 0 else 1.0),
|
||||
'payment_method': account.payment_method
|
||||
}
|
||||
)
|
||||
|
||||
# Add line item for subscription
|
||||
# Add line item for subscription in USD
|
||||
invoice.add_line_item(
|
||||
description=f"{plan.name} Plan - {billing_period_start.strftime('%b %Y')}",
|
||||
quantity=1,
|
||||
unit_price=plan.price,
|
||||
amount=plan.price
|
||||
unit_price=Decimal(str(usd_price)),
|
||||
amount=Decimal(str(usd_price))
|
||||
)
|
||||
|
||||
invoice.calculate_totals()
|
||||
@@ -79,27 +164,49 @@ class InvoiceService:
|
||||
) -> Invoice:
|
||||
"""
|
||||
Create invoice for credit package purchase
|
||||
|
||||
SIMPLIFIED CURRENCY LOGIC:
|
||||
- ALL invoices are in USD (consistent for accounting)
|
||||
- PKR equivalent is calculated and stored in metadata for display purposes
|
||||
"""
|
||||
from igny8_core.business.billing.config import INVOICE_DUE_DATE_OFFSET
|
||||
invoice_date = timezone.now().date()
|
||||
|
||||
# ALWAYS use USD for invoices (simplified accounting)
|
||||
from igny8_core.business.billing.utils.currency import get_currency_for_country, convert_usd_to_local
|
||||
|
||||
currency = 'USD'
|
||||
usd_price = float(credit_package.price)
|
||||
|
||||
# Calculate local equivalent for display purposes (if applicable)
|
||||
local_currency = get_currency_for_country(account.billing_country) if account.billing_country else 'USD'
|
||||
local_equivalent = convert_usd_to_local(usd_price, account.billing_country) if local_currency != 'USD' else usd_price
|
||||
|
||||
invoice = Invoice.objects.create(
|
||||
account=account,
|
||||
invoice_number=InvoiceService.generate_invoice_number(account),
|
||||
billing_email=account.billing_email or account.users.filter(role='owner').first().email,
|
||||
status='pending',
|
||||
currency='USD',
|
||||
invoice_date=timezone.now().date(),
|
||||
due_date=timezone.now().date(),
|
||||
currency=currency,
|
||||
invoice_date=invoice_date,
|
||||
due_date=invoice_date + timedelta(days=INVOICE_DUE_DATE_OFFSET),
|
||||
metadata={
|
||||
'credit_package_id': credit_package.id,
|
||||
'credit_amount': credit_package.credits,
|
||||
'usd_price': str(credit_package.price), # Store original USD price
|
||||
'local_currency': local_currency, # Store local currency code for display
|
||||
'local_equivalent': str(round(local_equivalent, 2)), # Store local equivalent for display
|
||||
'exchange_rate': str(local_equivalent / usd_price if usd_price > 0 else 1.0),
|
||||
'payment_method': account.payment_method
|
||||
},
|
||||
)
|
||||
|
||||
# Add line item for credit package
|
||||
# Add line item for credit package in USD
|
||||
invoice.add_line_item(
|
||||
description=f"{credit_package.name} - {credit_package.credits:,} Credits",
|
||||
quantity=1,
|
||||
unit_price=credit_package.price,
|
||||
amount=credit_package.price
|
||||
unit_price=Decimal(str(usd_price)),
|
||||
amount=Decimal(str(usd_price))
|
||||
)
|
||||
|
||||
invoice.calculate_totals()
|
||||
@@ -159,10 +266,21 @@ class InvoiceService:
|
||||
transaction_id: Optional[str] = None
|
||||
) -> Invoice:
|
||||
"""
|
||||
Mark invoice as paid
|
||||
Mark invoice as paid and record payment details
|
||||
|
||||
Args:
|
||||
invoice: Invoice to mark as paid
|
||||
payment_method: Payment method used ('stripe', 'paypal', 'bank_transfer', etc.)
|
||||
transaction_id: External transaction ID (Stripe payment intent, PayPal capture ID, etc.)
|
||||
"""
|
||||
invoice.status = 'paid'
|
||||
invoice.paid_at = timezone.now()
|
||||
invoice.payment_method = payment_method
|
||||
|
||||
# For Stripe payments, store the transaction ID in stripe_invoice_id field
|
||||
if payment_method == 'stripe' and transaction_id:
|
||||
invoice.stripe_invoice_id = transaction_id
|
||||
|
||||
invoice.save()
|
||||
|
||||
return invoice
|
||||
@@ -186,43 +304,13 @@ class InvoiceService:
|
||||
@staticmethod
|
||||
def generate_pdf(invoice: Invoice) -> bytes:
|
||||
"""
|
||||
Generate PDF for invoice
|
||||
|
||||
TODO: Implement PDF generation using reportlab or weasyprint
|
||||
For now, return placeholder
|
||||
Generate professional PDF invoice using ReportLab
|
||||
"""
|
||||
from io import BytesIO
|
||||
from igny8_core.business.billing.services.pdf_service import InvoicePDFGenerator
|
||||
|
||||
# Placeholder - implement PDF generation
|
||||
buffer = BytesIO()
|
||||
|
||||
# Simple text representation for now
|
||||
content = f"""
|
||||
INVOICE #{invoice.invoice_number}
|
||||
|
||||
Bill To: {invoice.account.name}
|
||||
Email: {invoice.billing_email}
|
||||
|
||||
Date: {invoice.created_at.strftime('%Y-%m-%d')}
|
||||
Due Date: {invoice.due_date.strftime('%Y-%m-%d') if invoice.due_date else 'N/A'}
|
||||
|
||||
Line Items:
|
||||
"""
|
||||
for item in invoice.line_items:
|
||||
content += f" {item['description']} - ${item['amount']}\n"
|
||||
|
||||
content += f"""
|
||||
Subtotal: ${invoice.subtotal}
|
||||
Tax: ${invoice.tax_amount}
|
||||
Total: ${invoice.total_amount}
|
||||
|
||||
Status: {invoice.status.upper()}
|
||||
"""
|
||||
|
||||
buffer.write(content.encode('utf-8'))
|
||||
buffer.seek(0)
|
||||
|
||||
return buffer.getvalue()
|
||||
# Use the professional PDF generator
|
||||
pdf_buffer = InvoicePDFGenerator.generate_invoice_pdf(invoice)
|
||||
return pdf_buffer.getvalue()
|
||||
|
||||
@staticmethod
|
||||
def get_account_invoices(
|
||||
|
||||
330
backend/igny8_core/business/billing/services/limit_service.py
Normal file
330
backend/igny8_core/business/billing/services/limit_service.py
Normal file
@@ -0,0 +1,330 @@
|
||||
"""
|
||||
Limit Service for Plan Limit Enforcement
|
||||
Manages hard limits (sites, users, keywords) and monthly limits (ahrefs_queries)
|
||||
"""
|
||||
from django.db import transaction
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
|
||||
from igny8_core.auth.models import Account
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LimitExceededError(Exception):
|
||||
"""Base exception for limit exceeded errors"""
|
||||
pass
|
||||
|
||||
|
||||
class HardLimitExceededError(LimitExceededError):
|
||||
"""Raised when a hard limit (sites, users, keywords) is exceeded"""
|
||||
pass
|
||||
|
||||
|
||||
class MonthlyLimitExceededError(LimitExceededError):
|
||||
"""Raised when a monthly limit (ahrefs_queries) is exceeded"""
|
||||
pass
|
||||
|
||||
|
||||
class LimitService:
|
||||
"""Service for managing and enforcing plan limits"""
|
||||
|
||||
# Map limit types to model/field names
|
||||
# Simplified to only 3 hard limits: sites, users, keywords
|
||||
HARD_LIMIT_MAPPINGS = {
|
||||
'sites': {
|
||||
'model': 'igny8_core_auth.Site',
|
||||
'plan_field': 'max_sites',
|
||||
'display_name': 'Sites',
|
||||
'filter_field': 'account',
|
||||
},
|
||||
'users': {
|
||||
'model': 'igny8_core_auth.User',
|
||||
'plan_field': 'max_users',
|
||||
'display_name': 'Team Members',
|
||||
'filter_field': 'account',
|
||||
},
|
||||
'keywords': {
|
||||
'model': 'planner.Keywords',
|
||||
'plan_field': 'max_keywords',
|
||||
'display_name': 'Keywords',
|
||||
'filter_field': 'account',
|
||||
},
|
||||
}
|
||||
|
||||
# Simplified to only 1 monthly limit: ahrefs_queries
|
||||
# All other consumption is controlled by credits only
|
||||
MONTHLY_LIMIT_MAPPINGS = {
|
||||
'ahrefs_queries': {
|
||||
'plan_field': 'max_ahrefs_queries',
|
||||
'usage_field': 'usage_ahrefs_queries',
|
||||
'display_name': 'Keyword Research Queries',
|
||||
},
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def check_hard_limit(account: Account, limit_type: str, additional_count: int = 1) -> bool:
|
||||
"""
|
||||
Check if adding items would exceed hard limit.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
limit_type: Type of limit
|
||||
additional_count: Number of items to add
|
||||
|
||||
Returns:
|
||||
bool: True if within limit
|
||||
|
||||
Raises:
|
||||
HardLimitExceededError: If limit would be exceeded
|
||||
"""
|
||||
from django.apps import apps
|
||||
|
||||
if limit_type not in LimitService.HARD_LIMIT_MAPPINGS:
|
||||
raise ValueError(f"Invalid hard limit type: {limit_type}")
|
||||
|
||||
config = LimitService.HARD_LIMIT_MAPPINGS[limit_type]
|
||||
plan = account.plan
|
||||
|
||||
if not plan:
|
||||
raise ValueError("Account has no plan")
|
||||
|
||||
plan_limit = getattr(plan, config['plan_field'])
|
||||
model_path = config['model']
|
||||
app_label, model_name = model_path.split('.')
|
||||
Model = apps.get_model(app_label, model_name)
|
||||
|
||||
filter_field = config.get('filter_field', 'account')
|
||||
filter_kwargs = {filter_field: account}
|
||||
current_count = Model.objects.filter(**filter_kwargs).count()
|
||||
new_count = current_count + additional_count
|
||||
|
||||
logger.info(f"Hard limit check: {limit_type} - Current: {current_count}, Requested: {additional_count}, Limit: {plan_limit}")
|
||||
|
||||
if new_count > plan_limit:
|
||||
raise HardLimitExceededError(
|
||||
f"{config['display_name']} limit exceeded. "
|
||||
f"Current: {current_count}, Limit: {plan_limit}. "
|
||||
f"Upgrade your plan to increase this limit."
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def check_monthly_limit(account: Account, limit_type: str, amount: int = 1) -> bool:
|
||||
"""
|
||||
Check if operation would exceed monthly limit.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
limit_type: Type of limit
|
||||
amount: Amount to use
|
||||
|
||||
Returns:
|
||||
bool: True if within limit
|
||||
|
||||
Raises:
|
||||
MonthlyLimitExceededError: If limit would be exceeded
|
||||
"""
|
||||
if limit_type not in LimitService.MONTHLY_LIMIT_MAPPINGS:
|
||||
raise ValueError(f"Invalid monthly limit type: {limit_type}")
|
||||
|
||||
config = LimitService.MONTHLY_LIMIT_MAPPINGS[limit_type]
|
||||
plan = account.plan
|
||||
|
||||
if not plan:
|
||||
raise ValueError("Account has no plan")
|
||||
|
||||
plan_limit = getattr(plan, config['plan_field'])
|
||||
current_usage = getattr(account, config['usage_field'], 0)
|
||||
new_usage = current_usage + amount
|
||||
|
||||
logger.info(f"Monthly limit check: {limit_type} - Current: {current_usage}, Requested: {amount}, Limit: {plan_limit}")
|
||||
|
||||
if new_usage > plan_limit:
|
||||
period_end = account.usage_period_end or timezone.now().date()
|
||||
raise MonthlyLimitExceededError(
|
||||
f"{config['display_name']} limit exceeded. "
|
||||
f"Used: {current_usage}, Requested: {amount}, Limit: {plan_limit}. "
|
||||
f"Resets on {period_end.strftime('%B %d, %Y')}. "
|
||||
f"Upgrade your plan or wait for reset."
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
def increment_usage(account: Account, limit_type: str, amount: int = 1, metadata: dict = None) -> int:
|
||||
"""
|
||||
Increment monthly usage after successful operation.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
limit_type: Type of limit
|
||||
amount: Amount to increment
|
||||
metadata: Optional metadata
|
||||
|
||||
Returns:
|
||||
int: New usage amount
|
||||
"""
|
||||
if limit_type not in LimitService.MONTHLY_LIMIT_MAPPINGS:
|
||||
raise ValueError(f"Invalid monthly limit type: {limit_type}")
|
||||
|
||||
config = LimitService.MONTHLY_LIMIT_MAPPINGS[limit_type]
|
||||
usage_field = config['usage_field']
|
||||
|
||||
current_usage = getattr(account, usage_field, 0)
|
||||
new_usage = current_usage + amount
|
||||
setattr(account, usage_field, new_usage)
|
||||
account.save(update_fields=[usage_field, 'updated_at'])
|
||||
|
||||
logger.info(f"Incremented {limit_type} usage by {amount}. New total: {new_usage}")
|
||||
|
||||
return new_usage
|
||||
|
||||
@staticmethod
|
||||
def get_current_period(account: Account) -> tuple:
|
||||
"""
|
||||
Get current billing period start and end dates from account.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
|
||||
Returns:
|
||||
tuple: (period_start, period_end) as datetime objects
|
||||
"""
|
||||
if account.usage_period_start and account.usage_period_end:
|
||||
return account.usage_period_start, account.usage_period_end
|
||||
|
||||
subscription = getattr(account, 'subscription', None)
|
||||
|
||||
if subscription and hasattr(subscription, 'current_period_start'):
|
||||
period_start = subscription.current_period_start
|
||||
period_end = subscription.current_period_end
|
||||
else:
|
||||
now = timezone.now()
|
||||
period_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
|
||||
|
||||
if now.month == 12:
|
||||
next_month = now.replace(year=now.year + 1, month=1, day=1)
|
||||
else:
|
||||
next_month = now.replace(month=now.month + 1, day=1)
|
||||
|
||||
period_end = next_month - timedelta(days=1)
|
||||
period_end = period_end.replace(hour=23, minute=59, second=59)
|
||||
|
||||
account.usage_period_start = period_start
|
||||
account.usage_period_end = period_end
|
||||
account.save(update_fields=['usage_period_start', 'usage_period_end', 'updated_at'])
|
||||
|
||||
return period_start, period_end
|
||||
|
||||
@staticmethod
|
||||
def get_usage_summary(account: Account) -> dict:
|
||||
"""
|
||||
Get comprehensive usage summary for all limits.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
|
||||
Returns:
|
||||
dict: Usage summary with hard and monthly limits
|
||||
"""
|
||||
from django.apps import apps
|
||||
|
||||
plan = account.plan
|
||||
if not plan:
|
||||
return {'error': 'No plan assigned to account'}
|
||||
|
||||
period_start, period_end = LimitService.get_current_period(account)
|
||||
days_until_reset = (period_end.date() - timezone.now().date()).days if period_end else 0
|
||||
|
||||
summary = {
|
||||
'account_id': account.id,
|
||||
'account_name': account.name,
|
||||
'plan_name': plan.name,
|
||||
'period_start': period_start.isoformat() if period_start else None,
|
||||
'period_end': period_end.isoformat() if period_end else None,
|
||||
'days_until_reset': days_until_reset,
|
||||
'hard_limits': {},
|
||||
'monthly_limits': {},
|
||||
}
|
||||
|
||||
for limit_type, config in LimitService.HARD_LIMIT_MAPPINGS.items():
|
||||
model_path = config['model']
|
||||
app_label, model_name = model_path.split('.')
|
||||
Model = apps.get_model(app_label, model_name)
|
||||
|
||||
filter_field = config.get('filter_field', 'account')
|
||||
filter_kwargs = {filter_field: account}
|
||||
current_count = Model.objects.filter(**filter_kwargs).count()
|
||||
plan_limit = getattr(plan, config['plan_field'])
|
||||
|
||||
summary['hard_limits'][limit_type] = {
|
||||
'display_name': config['display_name'],
|
||||
'current': current_count,
|
||||
'limit': plan_limit,
|
||||
'remaining': max(0, plan_limit - current_count),
|
||||
'percentage_used': int((current_count / plan_limit) * 100) if plan_limit > 0 else 0,
|
||||
}
|
||||
|
||||
for limit_type, config in LimitService.MONTHLY_LIMIT_MAPPINGS.items():
|
||||
plan_limit = getattr(plan, config['plan_field'])
|
||||
current_usage = getattr(account, config['usage_field'], 0)
|
||||
|
||||
summary['monthly_limits'][limit_type] = {
|
||||
'display_name': config['display_name'],
|
||||
'current': current_usage,
|
||||
'limit': plan_limit,
|
||||
'remaining': max(0, plan_limit - current_usage),
|
||||
'percentage_used': int((current_usage / plan_limit) * 100) if plan_limit > 0 else 0,
|
||||
}
|
||||
|
||||
return summary
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
def reset_monthly_limits(account: Account) -> dict:
|
||||
"""
|
||||
Reset all monthly limits for an account.
|
||||
|
||||
Args:
|
||||
account: Account instance
|
||||
|
||||
Returns:
|
||||
dict: Summary of reset operation
|
||||
"""
|
||||
# Reset only ahrefs_queries (the only monthly limit now)
|
||||
account.usage_ahrefs_queries = 0
|
||||
|
||||
old_period_end = account.usage_period_end
|
||||
|
||||
now = timezone.now()
|
||||
new_period_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
|
||||
|
||||
if now.month == 12:
|
||||
next_month = now.replace(year=now.year + 1, month=1, day=1)
|
||||
else:
|
||||
next_month = now.replace(month=now.month + 1, day=1)
|
||||
|
||||
new_period_end = next_month - timedelta(days=1)
|
||||
new_period_end = new_period_end.replace(hour=23, minute=59, second=59)
|
||||
|
||||
account.usage_period_start = new_period_start
|
||||
account.usage_period_end = new_period_end
|
||||
|
||||
account.save(update_fields=[
|
||||
'usage_ahrefs_queries',
|
||||
'usage_period_start', 'usage_period_end', 'updated_at'
|
||||
])
|
||||
|
||||
logger.info(f"Reset monthly limits for account {account.id}")
|
||||
|
||||
return {
|
||||
'account_id': account.id,
|
||||
'old_period_end': old_period_end.isoformat() if old_period_end else None,
|
||||
'new_period_start': new_period_start.isoformat(),
|
||||
'new_period_end': new_period_end.isoformat(),
|
||||
'limits_reset': 1,
|
||||
}
|
||||
@@ -105,11 +105,15 @@ class PaymentService:
|
||||
) -> Payment:
|
||||
"""
|
||||
Mark payment as completed and update invoice
|
||||
For automatic payments (Stripe/PayPal), sets approved_at but leaves approved_by as None
|
||||
"""
|
||||
from .invoice_service import InvoiceService
|
||||
|
||||
payment.status = 'succeeded'
|
||||
payment.processed_at = timezone.now()
|
||||
# For automatic payments, set approved_at to indicate when payment was verified
|
||||
# approved_by stays None to indicate it was automated, not manual approval
|
||||
payment.approved_at = timezone.now()
|
||||
|
||||
if transaction_id:
|
||||
payment.transaction_reference = transaction_id
|
||||
@@ -182,6 +186,16 @@ class PaymentService:
|
||||
if payment.metadata.get('credit_package_id'):
|
||||
PaymentService._add_credits_for_payment(payment)
|
||||
|
||||
# If account is inactive/suspended/trial, activate it on successful payment
|
||||
try:
|
||||
account = payment.account
|
||||
if account and account.status != 'active':
|
||||
account.status = 'active'
|
||||
account.save(update_fields=['status', 'updated_at'])
|
||||
except Exception:
|
||||
# Do not block payment approval if status update fails
|
||||
pass
|
||||
|
||||
return payment
|
||||
|
||||
@staticmethod
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user