-
-
{scenario.name}
-
{scenario.description}
+ {/* Header */}
+
+
+
+
+
+
+
+
{scenario.name}
+
+ {scenario.status}
+
+
+
{scenario.description}
+
+ Region: {scenario.region}
+ •
+ Created: {new Date(scenario.created_at).toLocaleDateString()}
+
+
+
+
+
+
+
+ {scenario.status === 'draft' && (
+
+ )}
+ {scenario.status === 'running' && (
+
+ )}
-
- {scenario.status}
-
+ {/* Stats Cards */}
- Total Requests
-
-
- {metrics?.summary.total_requests || 0}
-
-
-
-
- Total Cost
+ Total Requests
- ${(metrics?.summary.total_cost_usd || 0).toFixed(6)}
+ {formatNumber(metrics?.summary.total_requests || 0)}
- SQS Blocks
+ Total Cost
- {metrics?.summary.sqs_blocks || 0}
+
+ {formatCurrency(metrics?.summary.total_cost_usd || 0)}
+
- LLM Tokens
+ SQS Blocks
- {metrics?.summary.llm_tokens || 0}
+
+ {formatNumber(metrics?.summary.sqs_blocks || 0)}
+
+
+
+
+
+ Lambda Invocations
+
+
+
+ {formatNumber(metrics?.summary.lambda_invocations || 0)}
+
+
+ {/* Tabs */}
+
+
+
+
+ Overview
+
+
+
+ Metrics
+
+
+
+ Analysis
+
+
+
+
+
+ {/* Cost Breakdown Chart */}
+ {metrics?.cost_breakdown && metrics.cost_breakdown.length > 0 && (
+
+ )}
+
+ {/* Summary Card */}
+
+
+ Additional Metrics
+ Detailed breakdown of scenario metrics
+
+
+
+ LLM Tokens
+ {formatNumber(metrics?.summary.llm_tokens || 0)}
+
+
+ PII Violations
+ {formatNumber(metrics?.summary.pii_violations || 0)}
+
+
+ Avg Cost per Request
+
+ {metrics?.summary.total_requests
+ ? formatCurrency(metrics.summary.total_cost_usd / metrics.summary.total_requests)
+ : '$0.0000'}
+
+
+
+ Status
+ {scenario.status}
+
+
+
+
+
+
+
+ {chartData.length > 0 ? (
+
+ ) : (
+
+
+ No time series data available yet
+
+
+ )}
+
+
+
+
+
+ Analysis
+ Advanced analysis and insights
+
+
+
+
+
Cost Efficiency
+
+ {metrics?.summary.total_requests
+ ? `Average cost per request: ${formatCurrency(
+ metrics.summary.total_cost_usd / metrics.summary.total_requests
+ )}`
+ : 'No request data available'}
+
+
+
+
PII Risk Assessment
+
+ {metrics?.summary.pii_violations
+ ? `${metrics.summary.pii_violations} potential PII violations detected`
+ : 'No PII violations detected'}
+
+
+
+
+
+
+
);
}
diff --git a/frontend/src/pages/ScenariosPage.tsx b/frontend/src/pages/ScenariosPage.tsx
index 7cda3fe..6d82511 100644
--- a/frontend/src/pages/ScenariosPage.tsx
+++ b/frontend/src/pages/ScenariosPage.tsx
@@ -1,11 +1,45 @@
import { useState } from 'react';
import { useNavigate } from 'react-router-dom';
-import { useScenarios, useStartScenario, useStopScenario, useDeleteScenario } from '@/hooks/useScenarios';
+import {
+ useScenarios,
+ useStartScenario,
+ useStopScenario,
+ useDeleteScenario
+} from '@/hooks/useScenarios';
import { Button } from '@/components/ui/button';
import { Badge } from '@/components/ui/badge';
-import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table';
-import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu';
-import { MoreHorizontal, Play, Square, Trash2 } from 'lucide-react';
+import { Checkbox } from '@/components/ui/checkbox';
+import {
+ Table,
+ TableBody,
+ TableCell,
+ TableHead,
+ TableHeader,
+ TableRow
+} from '@/components/ui/table';
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger
+} from '@/components/ui/dropdown-menu';
+import {
+ MoreHorizontal,
+ Play,
+ Square,
+ Trash2,
+ BarChart3,
+ X,
+ FileText,
+} from 'lucide-react';
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogFooter,
+ DialogHeader,
+ DialogTitle,
+} from '@/components/ui/dialog';
const statusColors = {
draft: 'secondary',
@@ -17,13 +51,76 @@ const statusColors = {
export function ScenariosPage() {
const navigate = useNavigate();
const { data: scenarios, isLoading } = useScenarios();
+ const [selectedScenarios, setSelectedScenarios] = useState
>(new Set());
+ const [showCompareModal, setShowCompareModal] = useState(false);
+ const startScenario = useStartScenario('');
+ const stopScenario = useStopScenario('');
+ const deleteScenario = useDeleteScenario();
+
+ const toggleScenario = (id: string, e: React.MouseEvent) => {
+ e.stopPropagation();
+ setSelectedScenarios((prev) => {
+ const next = new Set(prev);
+ if (next.has(id)) {
+ next.delete(id);
+ } else if (next.size < 4) {
+ next.add(id);
+ }
+ return next;
+ });
+ };
+
+ const toggleAll = () => {
+ if (selectedScenarios.size > 0) {
+ setSelectedScenarios(new Set());
+ } else if (scenarios?.items) {
+ const firstFour = scenarios.items.slice(0, 4).map((s) => s.id);
+ setSelectedScenarios(new Set(firstFour));
+ }
+ };
+
+ const clearSelection = () => {
+ setSelectedScenarios(new Set());
+ };
+
+ const handleCompare = () => {
+ setShowCompareModal(true);
+ };
+
+ const confirmCompare = () => {
+ const ids = Array.from(selectedScenarios);
+ navigate('/compare', { state: { scenarioIds: ids } });
+ };
+
+ const handleStart = (_id: string, e: React.MouseEvent) => {
+ e.stopPropagation();
+ startScenario.mutate();
+ };
+
+ const handleStop = (_id: string, e: React.MouseEvent) => {
+ e.stopPropagation();
+ stopScenario.mutate();
+ };
+
+ const handleDelete = (id: string, e: React.MouseEvent) => {
+ e.stopPropagation();
+ if (confirm('Are you sure you want to delete this scenario?')) {
+ deleteScenario.mutate(id);
+ }
+ };
+
+ const canCompare = selectedScenarios.size >= 2 && selectedScenarios.size <= 4;
+
if (isLoading) {
return Loading...
;
}
+ const selectedScenarioData = scenarios?.items.filter((s) => selectedScenarios.has(s.id));
+
return (
+ {/* Header */}
Scenarios
@@ -31,26 +128,84 @@ export function ScenariosPage() {
Manage your AWS cost simulation scenarios
+ {selectedScenarios.size > 0 && (
+
+
+ {selectedScenarios.size} selected
+
+
+
+
+ )}
+ {/* Selection Mode Indicator */}
+ {selectedScenarios.size > 0 && (
+
+
+ Comparison Mode: Select 2-4 scenarios
+
+
+ {selectedScenarioData?.map((s) => (
+
+ {s.name}
+ setSelectedScenarios((prev) => {
+ const next = new Set(prev);
+ next.delete(s.id);
+ return next;
+ })}
+ />
+
+ ))}
+
+
+ )}
+
+
+ 0 && selectedScenarios.size === (scenarios?.items.length || 0)}
+ onCheckedChange={toggleAll}
+ aria-label="Select all"
+ />
+
Name
Status
Region
Requests
Cost
- Actions
+ Actions
{scenarios?.items.map((scenario) => (
navigate(`/scenarios/${scenario.id}`)}
>
+ e.stopPropagation()}>
+ {}}
+ onClick={(e: React.MouseEvent) => toggleScenario(scenario.id, e)}
+ aria-label={`Select ${scenario.name}`}
+ />
+
{scenario.name}
@@ -58,39 +213,89 @@ export function ScenariosPage() {
{scenario.region}
- {scenario.total_requests}
+ {scenario.total_requests.toLocaleString()}
${scenario.total_cost_estimate.toFixed(6)}
e.stopPropagation()}>
-
-
-
-
-
- {scenario.status === 'draft' && (
-
-
- Start
+
+
+
+
+
+
+
+ {scenario.status === 'draft' && (
+ handleStart(scenario.id, e as React.MouseEvent)}>
+
+ Start
+
+ )}
+ {scenario.status === 'running' && (
+ handleStop(scenario.id, e as React.MouseEvent)}>
+
+ Stop
+
+ )}
+ handleDelete(scenario.id, e as React.MouseEvent)}
+ >
+
+ Delete
- )}
- {scenario.status === 'running' && (
-
-
- Stop
-
- )}
-
-
- Delete
-
-
-
+
+
+
))}
+
+ {/* Compare Confirmation Modal */}
+
);
}
diff --git a/frontend/src/providers/QueryProvider.tsx b/frontend/src/providers/QueryProvider.tsx
index bd1929f..fa00040 100644
--- a/frontend/src/providers/QueryProvider.tsx
+++ b/frontend/src/providers/QueryProvider.tsx
@@ -1,5 +1,5 @@
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
-import { ReactNode } from 'react';
+import type { ReactNode } from 'react';
const queryClient = new QueryClient({
defaultOptions: {
diff --git a/frontend/src/providers/ThemeProvider.tsx b/frontend/src/providers/ThemeProvider.tsx
new file mode 100644
index 0000000..3b52ecf
--- /dev/null
+++ b/frontend/src/providers/ThemeProvider.tsx
@@ -0,0 +1,80 @@
+import { createContext, useContext, useEffect, useState } from 'react';
+import type { ReactNode } from 'react';
+
+type Theme = 'dark' | 'light' | 'system';
+
+interface ThemeContextType {
+ theme: Theme;
+ setTheme: (theme: Theme) => void;
+ resolvedTheme: 'dark' | 'light';
+}
+
+const ThemeContext = createContext(undefined);
+
+const STORAGE_KEY = 'mockup-aws-theme';
+
+interface ThemeProviderProps {
+ children: ReactNode;
+ defaultTheme?: Theme;
+}
+
+export function ThemeProvider({ children, defaultTheme = 'system' }: ThemeProviderProps) {
+ const [theme, setThemeState] = useState(() => {
+ if (typeof window !== 'undefined') {
+ const stored = localStorage.getItem(STORAGE_KEY) as Theme;
+ return stored || defaultTheme;
+ }
+ return defaultTheme;
+ });
+
+ const [resolvedTheme, setResolvedTheme] = useState<'dark' | 'light'>('light');
+
+ useEffect(() => {
+ const root = window.document.documentElement;
+ const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)');
+
+ const applyTheme = () => {
+ let resolved: 'dark' | 'light';
+
+ if (theme === 'system') {
+ resolved = mediaQuery.matches ? 'dark' : 'light';
+ } else {
+ resolved = theme;
+ }
+
+ setResolvedTheme(resolved);
+
+ if (resolved === 'dark') {
+ root.classList.add('dark');
+ } else {
+ root.classList.remove('dark');
+ }
+ };
+
+ applyTheme();
+
+ if (theme === 'system') {
+ mediaQuery.addEventListener('change', applyTheme);
+ return () => mediaQuery.removeEventListener('change', applyTheme);
+ }
+ }, [theme]);
+
+ const setTheme = (newTheme: Theme) => {
+ setThemeState(newTheme);
+ localStorage.setItem(STORAGE_KEY, newTheme);
+ };
+
+ return (
+
+ {children}
+
+ );
+}
+
+export function useTheme() {
+ const context = useContext(ThemeContext);
+ if (context === undefined) {
+ throw new Error('useTheme must be used within a ThemeProvider');
+ }
+ return context;
+}
diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js
index fa318b9..cf8e372 100644
--- a/frontend/tailwind.config.js
+++ b/frontend/tailwind.config.js
@@ -1,6 +1,6 @@
/** @type {import('tailwindcss').Config} */
export default {
- darkMode: ["class"],
+ darkMode: 'class',
content: [
"./index.html",
"./src/**/*.{js,ts,jsx,tsx}",
diff --git a/frontend/tsconfig.app.json b/frontend/tsconfig.app.json
index f0d1913..d932263 100644
--- a/frontend/tsconfig.app.json
+++ b/frontend/tsconfig.app.json
@@ -6,6 +6,7 @@
"module": "esnext",
"types": ["vite/client"],
"skipLibCheck": true,
+ "ignoreDeprecations": "6.0",
/* Bundler mode */
"moduleResolution": "bundler",
diff --git a/prompt/prompt-v0.4.0-planning.md b/prompt/prompt-v0.4.0-planning.md
new file mode 100644
index 0000000..5c53631
--- /dev/null
+++ b/prompt/prompt-v0.4.0-planning.md
@@ -0,0 +1,483 @@
+# Prompt: Pianificazione v0.4.0 - Reports, Charts & Comparison
+
+> **Progetto:** mockupAWS - Backend Profiler & Cost Estimator
+> **Versione Target:** v0.4.0
+> **Focus:** Report Generation, Data Visualization, Scenario Comparison
+> **Stima Tempo:** 2-3 settimane
+> **Priorità:** P1 (High)
+
+---
+
+## 🎯 Obiettivi v0.4.0
+
+### Goals Principali
+1. **Report Generation** - Generazione report PDF e CSV professionali
+2. **Data Visualization** - Grafici interattivi con Recharts
+3. **Scenario Comparison** - Confronto side-by-side tra scenari multipli
+4. **Dark/Light Mode** - Toggle tema UI completo
+5. **Testing E2E** - Setup testing end-to-end con Playwright
+
+### Metriche di Successo
+- [ ] Report PDF generati in <3 secondi
+- [ ] CSV export funzionante con tutti i dati
+- [ ] 3+ tipi di grafici interattivi
+- [ ] Confronto 2-4 scenari simultaneamente
+- [ ] Code coverage >70%
+- [ ] Zero regressioni v0.3.0
+
+---
+
+## 📋 Feature Breakdown
+
+### 1. Report Generation System 📝
+
+#### Backend (BE-RPT-001 → BE-RPT-005)
+
+**BE-RPT-001: Report Service Implementation**
+- Implementare `ReportService` con metodi:
+ - `generate_pdf(scenario_id: UUID) -> Report`
+ - `generate_csv(scenario_id: UUID) -> Report`
+ - `compile_metrics(scenario_id: UUID) -> dict`
+- Librerie: `reportlab` (PDF), `pandas` (CSV)
+- Template PDF con logo, header, footer, pagine numerate
+- Includere:
+ - Summary scenario (nome, regione, periodo, stato)
+ - Cost breakdown per servizio (SQS, Lambda, Bedrock)
+ - Metriche aggregate (totali, medie, picchi)
+ - Top 10 logs più costosi
+ - PII violations summary
+ - Grafici embedded (se PDF lo supporta)
+
+**BE-RPT-002: Report Generation API**
+- Endpoint: `POST /api/v1/scenarios/{id}/reports`
+- Request body:
+ ```json
+ {
+ "format": "pdf" | "csv",
+ "include_logs": boolean,
+ "date_from": "ISO8601" | null,
+ "date_to": "ISO8601" | null,
+ "sections": ["summary", "costs", "metrics", "logs", "pii"]
+ }
+ ```
+- Response: `202 Accepted` con `report_id`
+- Background task per generazione (Celery oppure async FastAPI)
+- Progress tracking via `GET /api/v1/reports/{id}/status`
+
+**BE-RPT-003: Report Download API**
+- Endpoint: `GET /api/v1/reports/{id}/download`
+- Response: File stream con headers corretti
+- Supporto `Content-Disposition: attachment`
+- Mime types: `application/pdf`, `text/csv`
+- Rate limiting: 10 download/minuto
+
+**BE-RPT-004: Report Storage**
+- Tabella `reports` già esistente
+- Salvare file in filesystem (o S3 in futuro)
+- Path: `./storage/reports/{scenario_id}/{report_id}.{format}`
+- Cleanup automatico dopo 30 giorni (configurabile)
+- Max file size: 50MB
+
+**BE-RPT-005: Report Templates**
+- Template HTML per PDF (usare Jinja2 + WeasyPrint oppure ReportLab diretto)
+- Stile professionale coerente con brand
+- Header con logo mockupAWS
+- Colori coerenti (primario: #0066CC)
+- Font: Inter o Roboto
+- Tabelle formattate con zebra striping
+
+#### Frontend (FE-RPT-001 → FE-RPT-004)
+
+**FE-RPT-001: Report Generation UI**
+- Nuova pagina: `/scenarios/:id/reports`
+- Sezione "Generate Report" con form:
+ - Select formato (PDF/CSV toggle)
+ - Checkbox: include_logs, sections
+ - Date range picker (optional)
+ - Preview dati che saranno inclusi
+- Bottone "Generate" con loading state
+- Toast notification quando report pronto
+
+**FE-RPT-002: Reports List**
+- Tabella reports generati per scenario
+- Colonne: Data, Formato, Dimensione, Stato, Azioni
+- Azioni: Download, Delete, Rigenera
+- Badge stato: Pending, Processing, Completed, Failed
+- Sorting per data (default: newest first)
+- Pagination se necessario
+
+**FE-RPT-003: Report Download Handler**
+- Download file con nome appropriato: `{scenario_name}_YYYY-MM-DD.{format}`
+- Axios con `responseType: 'blob'`
+- Creare ObjectURL per trigger download
+- Cleanup dopo download
+- Error handling con toast
+
+**FE-RPT-004: Report Preview**
+- Preview CSV in tabella (primi 100 record)
+- Info box con summary prima di generare
+- Stima dimensione file
+- Costo stimato basato su metriche
+
+---
+
+### 2. Data Visualization 📊
+
+#### Frontend (FE-VIZ-001 → FE-VIZ-006)
+
+**FE-VIZ-001: Recharts Integration**
+- Installare: `recharts`, `date-fns`
+- Setup tema coerente con Tailwind/shadcn
+- Color palette per grafici (primario, secondario, accenti)
+- Responsive containers
+
+**FE-VIZ-002: Cost Breakdown Chart**
+- Tipo: Pie Chart o Donut Chart
+- Dati: Costo per servizio (SQS, Lambda, Bedrock)
+- Percentuali visualizzate
+- Legend interattiva (toggle servizi)
+- Tooltip con valori esatti ($)
+- Posizione: Dashboard e Scenario Detail
+
+**FE-VIZ-003: Time Series Chart**
+- Tipo: Area Chart o Line Chart
+- Dati: Metriche nel tempo (requests, costi cumulativi)
+- X-axis: Timestamp
+- Y-axis: Valore (count o $)
+- Multi-line per diversi tipi di metriche
+- Zoom e pan (se supportato da Recharts)
+- Posizione: Scenario Detail (tab "Metrics")
+
+**FE-VIZ-004: Comparison Bar Chart**
+- Tipo: Grouped Bar Chart
+- Dati: Confronto metriche tra scenari
+- X-axis: Nome scenario
+- Y-axis: Valore metrica
+- Selettore metrica: Costo totale, Requests, SQS blocks, Tokens
+- Posizione: Compare Page
+
+**FE-VIZ-005: Metrics Distribution Chart**
+- Tipo: Histogram o Box Plot (se Recharts supporta)
+- Dati: Distribuzione dimensioni log, tempi risposta
+- Posizione: Scenario Detail (tab "Analysis")
+
+**FE-VIZ-006: Dashboard Overview Charts**
+- Mini charts nella lista scenari (sparklines)
+- Ultimi 7 giorni di attività
+- Quick stats con trend indicator (↑ ↓)
+
+---
+
+### 3. Scenario Comparison 🔍
+
+#### Backend (BE-CMP-001 → BE-CMP-003)
+
+**BE-CMP-001: Comparison API**
+- Endpoint: `POST /api/v1/scenarios/compare`
+- Request body:
+ ```json
+ {
+ "scenario_ids": ["uuid1", "uuid2", "uuid3"],
+ "metrics": ["total_cost", "total_requests", "sqs_blocks", "tokens"]
+ }
+ ```
+- Response:
+ ```json
+ {
+ "scenarios": [...],
+ "comparison": {
+ "total_cost": { "baseline": 100, "variance": [0, +15%, -20%] },
+ "metrics": [...]
+ }
+ }
+ ```
+- Max 4 scenari per confronto
+- Validazione: tutti scenari esistono e user ha accesso
+
+**BE-CMP-002: Delta Calculation**
+- Calcolare variazione percentuale vs baseline (primo scenario)
+- Evidenziare miglioramenti/peggioramenti
+- Ordinare scenari per costo totale
+- Export comparison come CSV/PDF
+
+**BE-CMP-003: Comparison Cache**
+- Cache risultati per 5 minuti (in-memory)
+- Cache key: hash degli scenario_ids ordinati
+
+#### Frontend (FE-CMP-001 → FE-CMP-004)
+
+**FE-CMP-001: Comparison Selection UI**
+- Checkbox multi-selezione nella lista scenari
+- Bottone "Compare Selected" (enabled quando 2-4 selezionati)
+- Modal confirmation con lista scenari
+- Visualizzazione "Comparison Mode" indicator
+
+**FE-CMP-002: Compare Page**
+- Nuova route: `/compare`
+- Layout side-by-side (2 colonne per 2 scenari, 4 per 4 scenari)
+- Responsive: su mobile diventa scroll orizzontale
+- Header con nome scenario, regione, stato
+- Summary cards affiancate
+
+**FE-CMP-003: Comparison Tables**
+- Tabella dettagliata con metriche affiancate
+- Color coding: verde (migliore), rosso (peggiore), grigio (neutro)
+- Delta column con trend arrow
+- Export comparison button
+
+**FE-CMP-004: Visual Comparison**
+- Grouped bar chart per confronto visivo
+- Highlight scenario selezionato
+- Toggle metriche da confrontare
+
+---
+
+### 4. Dark/Light Mode Toggle 🌓
+
+#### Frontend (FE-THM-001 → FE-THM-004)
+
+**FE-THM-001: Theme Provider Setup**
+- Theme context o Zustand store
+- Persistenza in localStorage
+- Default: system preference (media query)
+- Toggle button in Header
+
+**FE-THM-002: Tailwind Dark Mode Configuration**
+- Aggiornare `tailwind.config.js`:
+ ```js
+ darkMode: 'class'
+ ```
+- Wrapper component con `dark` class sul root
+- Transition smooth tra temi
+
+**FE-THM-003: Component Theme Support**
+- Verificare tutti i componenti shadcn/ui supportino dark mode
+- Aggiornare classi custom per dark variant:
+ - `bg-white` → `bg-white dark:bg-gray-900`
+ - `text-gray-900` → `text-gray-900 dark:text-white`
+ - Bordi, shadow, hover states
+
+**FE-THM-004: Chart Theming**
+- Recharts tema dark (colori assi, grid, tooltip)
+- Colori serie dati visibili su entrambi i temi
+- Background chart trasparente o temizzato
+
+---
+
+### 5. Testing E2E Setup 🧪
+
+#### QA (QA-E2E-001 → QA-E2E-004)
+
+**QA-E2E-001: Playwright Setup**
+- Installare: `@playwright/test`
+- Configurare `playwright.config.ts`
+- Scripts: `test:e2e`, `test:e2e:ui`, `test:e2e:debug`
+- Setup CI (GitHub Actions oppure locale)
+
+**QA-E2E-002: Test Scenarios**
+- Test: Creazione scenario completo
+- Test: Ingestione log e verifica metriche
+- Test: Generazione e download report
+- Test: Navigazione tra pagine
+- Test: Responsive design (mobile viewport)
+
+**QA-E2E-003: Test Data**
+- Fixtures per scenari di test
+- Seed database per test
+- Cleanup dopo ogni test
+- Parallel execution config
+
+**QA-E2E-004: Visual Regression**
+- Screenshot testing per UI critica
+- Baseline images in repo
+- Fallimento test se diff > threshold
+
+---
+
+## 🎨 UI/UX Requirements
+
+### Design Principles
+- **Consistency**: Usare stessi pattern v0.3.0
+- **Feedback**: Loading states, toast notifications, progress indicators
+- **Accessibility**: WCAG 2.1 AA compliance
+- **Mobile**: Responsive design per tutte le feature
+
+### Componenti UI da Aggiungere
+- `DateRangePicker` - Per filtro report
+- `FileDownload` - Componente download con progress
+- `ComparisonCard` - Card per confronto scenari
+- `ChartContainer` - Wrapper responsive per Recharts
+- `ThemeToggle` - Toggle dark/light mode
+
+### Animazioni
+- Page transitions (React Router + Framer Motion opzionale)
+- Chart animations (Recharts built-in)
+- Toast slide-in
+- Loading skeletons
+
+---
+
+## 🏗️ Technical Architecture
+
+### Backend Changes
+```
+src/
+├── api/v1/
+│ └── reports.py # NUOVO: Report endpoints
+├── services/
+│ └── report_service.py # NUOVO: PDF/CSV generation
+├── core/
+│ └── storage.py # NUOVO: File storage abstraction
+└── tasks/ # NUOVO: Background tasks
+ └── report_tasks.py
+```
+
+### Frontend Changes
+```
+frontend/src/
+├── pages/
+│ ├── Reports.tsx # NUOVO: Reports management
+│ └── Compare.tsx # NUOVO: Scenario comparison
+├── components/
+│ ├── charts/ # NUOVO: Chart components
+│ │ ├── CostBreakdown.tsx
+│ │ ├── TimeSeries.tsx
+│ │ └── ComparisonChart.tsx
+│ ├── reports/ # NUOVO: Report components
+│ │ ├── ReportGenerator.tsx
+│ │ └── ReportList.tsx
+│ └── ui/
+│ └── theme-toggle.tsx # NUOVO
+├── hooks/
+│ ├── useReports.ts # NUOVO
+│ └── useComparison.ts # NUOVO
+└── lib/
+ └── theme.ts # NUOVO: Theme utilities
+```
+
+---
+
+## 📅 Timeline Suggerita (2-3 settimane)
+
+### Week 1: Foundation & Reports
+- **Giorno 1-2**: BE-RPT-001, BE-RPT-002 (Report service e API)
+- **Giorno 3**: BE-RPT-003, FE-RPT-001, FE-RPT-002 (Download e UI)
+- **Giorno 4**: BE-RPT-004, BE-RPT-005 (Storage e templates)
+- **Giorno 5**: Testing reports, bug fixing
+
+### Week 2: Charts & Comparison
+- **Giorno 6-7**: FE-VIZ-001 → FE-VIZ-004 (Recharts integration)
+- **Giorno 8**: BE-CMP-001, BE-CMP-002 (Comparison API)
+- **Giorno 9**: FE-CMP-001 → FE-CMP-004 (Comparison UI)
+- **Giorno 10**: FE-VIZ-005, FE-VIZ-006 (Additional charts)
+
+### Week 3: Polish & Testing
+- **Giorno 11-12**: FE-THM-001 → FE-THM-004 (Dark mode)
+- **Giorno 13**: QA-E2E-001 → QA-E2E-004 (Testing setup)
+- **Giorno 14**: Bug fixing, performance optimization, documentation
+- **Giorno 15**: Final review, demo, release v0.4.0
+
+---
+
+## ✅ Acceptance Criteria
+
+### Report Generation
+- [ ] PDF generato correttamente con tutte le sezioni richieste
+- [ ] CSV contiene tutti i log e metriche in formato tabellare
+- [ ] Download funziona su Chrome, Firefox, Safari
+- [ ] File size < 50MB per scenari grandi
+- [ ] Report deleted dopo 30 giorni (cleanup)
+
+### Charts
+- [ ] Tutti i grafici sono responsive (resize corretto)
+- [ ] Tooltip mostra dati corretti
+- [ ] Animazioni smooth (no jank)
+- [ ] Funzionano in entrambi i temi (dark/light)
+- [ ] Performance: <100ms per renderizzare
+
+### Comparison
+- [ ] Confronto 2-4 scenari simultaneamente
+- [ ] Variazioni percentuali calcolate correttamente
+- [ ] UI responsive su mobile
+- [ ] Export comparison disponibile
+- [ ] Color coding intuitivo
+
+### Dark Mode
+- [ ] Toggle funziona istantaneamente
+- [ ] Persistenza dopo refresh
+- [ ] Tutti i componenti visibili in entrambi i temi
+- [ ] Charts adeguatamente temizzati
+- [ ] Nessun contrasto illeggibile
+
+### Testing
+- [ ] E2E tests passano in CI
+- [ ] Coverage >70% (backend)
+- [ ] Visual regression baseline stabilita
+- [ ] Zero regressioni v0.3.0
+- [ ] Documentazione testing aggiornata
+
+---
+
+## 🚧 Rischi e Mitigazioni
+
+| Rischio | Probabilità | Impatto | Mitigazione |
+|---------|-------------|---------|-------------|
+| ReportLab complesso | Media | Alto | Usare WeasyPrint (HTML→PDF) come alternativa |
+| Performance charts con molti dati | Media | Medio | Virtualization, data sampling, pagination |
+| Dark mode inconsistente | Bassa | Medio | Audit visivo completo, design tokens |
+| E2E tests flaky | Media | Medio | Retry logic, deterministic selectors, wait conditions |
+| Scope creep | Alta | Medio | Strict deadline, MVP first, nice-to-have in backlog |
+
+---
+
+## 📝 Notes per Implementazione
+
+### Libraries Consigliate
+```bash
+# Backend
+pip install reportlab pandas xlsxwriter # Reports
+pip install celery redis # Background tasks (optional)
+
+# Frontend
+npm install recharts date-fns # Charts
+npm install @playwright/test # E2E testing
+npm install zustand # State management (optional, for theme)
+```
+
+### Pattern da Seguire
+- **Report Generation**: Async task con status polling
+- **Charts**: Container/Presentational pattern
+- **Comparison**: Derive state, non duplicare dati
+- **Theme**: CSS variables + Tailwind dark mode
+
+### Performance Considerations
+- Lazy load chart components
+- Debounce resize handlers
+- Virtualize long lists (reports)
+- Cache comparison results
+- Optimize re-renders (React.memo)
+
+---
+
+## 🎯 Definition of Done
+
+- [ ] Tutti i task P1 completati
+- [ ] Code review passato
+- [ ] Tests passanti (unit + integration + e2e)
+- [ ] Documentation aggiornata (README, API docs)
+- [ ] Demo funzionante
+- [ ] CHANGELOG.md aggiornato
+- [ ] Tag v0.4.0 creato
+- [ ] Deploy su staging verificato
+
+---
+
+**Assegnato a:** @frontend-dev (lead), @backend-dev (supporto API), @qa-engineer (testing)
+**Reviewer:** @spec-architect
+**Deadline:** 3 settimane dalla data di inizio
+**Dependencies:** v0.3.0 completata (✅)
+
+---
+
+*Prompt generato per pianificazione v0.4.0*
+*Data: 2026-04-07*
diff --git a/pyproject.toml b/pyproject.toml
index 29c2028..c1040ce 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,6 +13,9 @@ dependencies = [
"pydantic-settings>=2.13.1",
"tiktoken>=0.6.0",
"uvicorn>=0.29.0",
+ "reportlab>=4.0.0",
+ "pandas>=2.0.0",
+ "slowapi>=0.1.9",
]
[dependency-groups]
diff --git a/src/api/v1/__init__.py b/src/api/v1/__init__.py
index 598ea5b..ed4a292 100644
--- a/src/api/v1/__init__.py
+++ b/src/api/v1/__init__.py
@@ -5,8 +5,13 @@ from fastapi import APIRouter
from src.api.v1.scenarios import router as scenarios_router
from src.api.v1.ingest import router as ingest_router
from src.api.v1.metrics import router as metrics_router
+from src.api.v1.reports import scenario_reports_router, reports_router
api_router = APIRouter()
api_router.include_router(scenarios_router, prefix="/scenarios", tags=["scenarios"])
api_router.include_router(ingest_router, tags=["ingest"])
api_router.include_router(metrics_router, prefix="/scenarios", tags=["metrics"])
+api_router.include_router(
+ scenario_reports_router, prefix="/scenarios", tags=["reports"]
+)
+api_router.include_router(reports_router, prefix="/reports", tags=["reports"])
diff --git a/src/api/v1/reports.py b/src/api/v1/reports.py
new file mode 100644
index 0000000..994e132
--- /dev/null
+++ b/src/api/v1/reports.py
@@ -0,0 +1,349 @@
+"""Report API endpoints."""
+
+from datetime import datetime
+from pathlib import Path
+from uuid import UUID
+
+from fastapi import (
+ APIRouter,
+ Depends,
+ Query,
+ status,
+ BackgroundTasks,
+ Request,
+)
+from fastapi.responses import FileResponse
+from sqlalchemy.ext.asyncio import AsyncSession
+from slowapi import Limiter
+from slowapi.util import get_remote_address
+
+from src.api.deps import get_db
+from src.core.config import settings
+from src.core.exceptions import NotFoundException, ValidationException
+from src.repositories.scenario import scenario_repository
+from src.repositories.report import report_repository
+from src.schemas.report import (
+ ReportCreateRequest,
+ ReportResponse,
+ ReportList,
+ ReportStatus,
+ ReportStatusResponse,
+ ReportGenerateResponse,
+ ReportFormat,
+)
+from src.services.report_service import report_service
+
+# Separate routers for different route groups
+scenario_reports_router = APIRouter()
+reports_router = APIRouter()
+
+# In-memory store for report generation status (use Redis in production)
+_report_status_store: dict[UUID, dict] = {}
+
+# Rate limiter for downloads
+limiter = Limiter(key_func=get_remote_address)
+
+
def _update_report_status(
    report_id: UUID,
    status: ReportStatus,
    progress: int = 0,
    message: str | None = None,
    file_path: str | None = None,
    file_size_bytes: int | None = None,
) -> None:
    """Record the current generation state of a report in the in-memory store.

    Overwrites any previous entry for ``report_id``.

    Args:
        report_id: Identifier of the report being generated.
        status: Current lifecycle status.
        progress: Completion percentage (0-100).
        message: Optional human-readable progress or error message.
        file_path: Path of the generated file, once known.
        file_size_bytes: Size of the generated file, once known.

    Note:
        Fixed implicit-Optional annotations (``str = None`` is invalid under
        PEP 484 strict checking; parameters that default to None are now
        ``... | None``).
    """
    _report_status_store[report_id] = {
        "status": status,
        "progress": progress,
        "message": message,
        "file_path": file_path,
        "file_size_bytes": file_size_bytes,
        # Only terminal states get a completion timestamp.
        "completed_at": datetime.now()
        if status in (ReportStatus.COMPLETED, ReportStatus.FAILED)
        else None,
    }
+
+
async def _generate_report_task(
    db: AsyncSession,
    scenario_id: UUID,
    report_id: UUID,
    request_data: ReportCreateRequest,
):
    """Background task that renders a scenario report to disk.

    Dispatches on the requested format (PDF vs CSV), publishing coarse
    progress updates (10% -> 30% -> 100%) to the in-memory status store at
    each phase. On any exception the report is marked FAILED with the error
    message instead of letting the exception escape the task.

    NOTE(review): ``db`` is the request-scoped session captured by the
    endpoint; this task runs after the response is sent, when that session
    may already be closed — confirm the session lifecycle, or open a fresh
    session inside the task.
    """
    try:
        _update_report_status(
            report_id,
            ReportStatus.PROCESSING,
            progress=10,
            message="Compiling metrics...",
        )

        if request_data.format == ReportFormat.PDF:
            _update_report_status(
                report_id,
                ReportStatus.PROCESSING,
                progress=30,
                message="Generating PDF...",
            )
            file_path = await report_service.generate_pdf(
                db=db,
                scenario_id=scenario_id,
                report_id=report_id,
                include_sections=[s.value for s in request_data.sections],
                date_from=request_data.date_from,
                date_to=request_data.date_to,
            )
        else:  # CSV
            _update_report_status(
                report_id,
                ReportStatus.PROCESSING,
                progress=30,
                message="Generating CSV...",
            )
            file_path = await report_service.generate_csv(
                db=db,
                scenario_id=scenario_id,
                report_id=report_id,
                include_logs=request_data.include_logs,
                date_from=request_data.date_from,
                date_to=request_data.date_to,
            )

        # Persist the final artifact size on the report row.
        file_size = file_path.stat().st_size
        await report_repository.update_file_size(db, report_id, file_size)

        _update_report_status(
            report_id,
            ReportStatus.COMPLETED,
            progress=100,
            message="Report generation completed",
            file_path=str(file_path),
            file_size_bytes=file_size,
        )
    except Exception as e:
        # Surface the failure via the status store; never re-raise from a
        # fire-and-forget background task.
        _update_report_status(
            report_id,
            ReportStatus.FAILED,
            progress=0,
            message=f"Report generation failed: {str(e)}",
        )
+
+
+# Scenario-scoped routes (prefixed with /scenarios)
@scenario_reports_router.post(
    "/{scenario_id}/reports",
    response_model=ReportGenerateResponse,
    status_code=status.HTTP_202_ACCEPTED,
)
async def create_report(
    scenario_id: UUID,
    request_data: ReportCreateRequest,
    background_tasks: BackgroundTasks,
    db: AsyncSession = Depends(get_db),
):
    """Generate a report for a scenario.

    Validates the scenario, persists a Report row, then schedules the actual
    file generation as a background task. Returns 202 Accepted with the new
    report_id; poll GET /reports/{id}/status for progress.

    Raises:
        NotFoundException: If the scenario does not exist.
    """
    # Local import so the module-level import block stays untouched.
    from uuid import uuid4

    # Validate scenario exists
    scenario = await scenario_repository.get(db, scenario_id)
    if not scenario:
        raise NotFoundException("Scenario")

    # BUGFIX: previously the id was built with
    # UUID(int=datetime.now().timestamp()), which raises TypeError (the
    # `int` argument requires an int, timestamp() returns a float) and would
    # collide for requests within the same second. Use a random UUID.
    report_id = uuid4()
    await report_repository.create(
        db,
        obj_in={
            "id": report_id,
            "scenario_id": scenario_id,
            "format": request_data.format.value,
            "file_path": str(
                report_service._get_file_path(
                    scenario_id, report_id, request_data.format.value
                )
            ),
            "generated_by": "api",
            "extra_data": {
                "include_logs": request_data.include_logs,
                "sections": [s.value for s in request_data.sections],
                "date_from": request_data.date_from.isoformat()
                if request_data.date_from
                else None,
                "date_to": request_data.date_to.isoformat()
                if request_data.date_to
                else None,
            },
        },
    )

    # Publish the queued state before the task starts so an immediate
    # status poll does not 404 in the store.
    _update_report_status(
        report_id,
        ReportStatus.PENDING,
        progress=0,
        message="Report queued for generation",
    )

    # NOTE(review): the request-scoped `db` session is handed to a task that
    # runs after the response is sent; it may be closed by then — confirm,
    # or create a dedicated session inside the task.
    background_tasks.add_task(
        _generate_report_task,
        db,
        scenario_id,
        report_id,
        request_data,
    )

    return ReportGenerateResponse(
        report_id=report_id,
        status=ReportStatus.PENDING,
        message="Report generation started. Check status at /reports/{id}/status",
    )
+
+
@scenario_reports_router.get(
    "/{scenario_id}/reports",
    response_model=ReportList,
)
async def list_reports(
    scenario_id: UUID,
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(
        settings.default_page_size,
        ge=1,
        le=settings.max_page_size,
        description="Items per page",
    ),
    db: AsyncSession = Depends(get_db),
):
    """Return one page of the reports generated for a scenario."""
    # A missing scenario is a 404, not an empty list.
    if await scenario_repository.get(db, scenario_id) is None:
        raise NotFoundException("Scenario")

    # Translate 1-based page number into a row offset.
    offset = (page - 1) * page_size
    rows = await report_repository.get_by_scenario(
        db, scenario_id, skip=offset, limit=page_size
    )
    total = await report_repository.count_by_scenario(db, scenario_id)

    return ReportList(
        items=[ReportResponse.model_validate(row) for row in rows],
        total=total,
        page=page,
        page_size=page_size,
    )
+
+
+# Report-scoped routes (prefixed with /reports)
@reports_router.get(
    "/{report_id}/status",
    response_model=ReportStatusResponse,
)
async def get_report_status(
    report_id: UUID,
    db: AsyncSession = Depends(get_db),
):
    """Get the status of a report generation.

    Falls back to fields persisted on the Report row when the in-memory
    store has no live entry (e.g. nothing recorded yet in this process).
    """
    report = await report_repository.get(db, report_id)
    if report is None:
        raise NotFoundException("Report")

    # Live progress recorded by the generation task, if any.
    live = _report_status_store.get(report_id, {})

    return ReportStatusResponse(
        report_id=report_id,
        status=live.get("status", ReportStatus.PENDING),
        progress=live.get("progress", 0),
        message=live.get("message"),
        file_path=live.get("file_path") or report.file_path,
        file_size_bytes=live.get("file_size_bytes") or report.file_size_bytes,
        created_at=report.created_at,
        completed_at=live.get("completed_at"),
    )
+
+
@reports_router.get(
    "/{report_id}/download",
    responses={
        200: {
            "description": "Report file download",
            "content": {
                "application/pdf": {},
                "text/csv": {},
            },
        },
    },
)
@limiter.limit(f"{settings.reports_rate_limit_per_minute}/minute")
async def download_report(
    request: Request,
    report_id: UUID,
    db: AsyncSession = Depends(get_db),
):
    """Download a generated report file.

    Rate limited per client IP (configured downloads/minute).

    Raises:
        NotFoundException: Report row or file on disk is missing.
        ValidationException: Generation has not completed yet.
    """
    report = await report_repository.get(db, report_id)
    if not report:
        raise NotFoundException("Report")

    # Only completed reports are downloadable.
    # NOTE(review): completion is tracked only in the in-memory store, so
    # downloads of previously generated reports fail after a process
    # restart — consider persisting a status column instead.
    status_info = _report_status_store.get(report_id, {})
    if status_info.get("status") != ReportStatus.COMPLETED:
        raise ValidationException("Report is not ready for download yet")

    file_path = Path(report.file_path)
    if not file_path.exists():
        raise NotFoundException("Report file")

    # Determine media type from the stored format.
    media_type = "application/pdf" if report.format == "pdf" else "text/csv"

    # Build a friendly download name; fall back to the report id if the
    # parent scenario was deleted in the meantime (previously this crashed
    # with AttributeError on `scenario.name`).
    scenario = await scenario_repository.get(db, report.scenario_id)
    base_name = scenario.name if scenario else str(report_id)
    filename = f"{base_name}_{datetime.now().strftime('%Y-%m-%d')}.{report.format}"

    # BUGFIX: the previous code passed a garbled hard-coded
    # Content-Disposition header alongside `filename=`. FileResponse already
    # emits a correct `attachment` Content-Disposition from `filename`, so
    # the conflicting manual header is dropped.
    return FileResponse(
        path=file_path,
        media_type=media_type,
        filename=filename,
    )
+
+
@reports_router.delete(
    "/{report_id}",
    status_code=status.HTTP_204_NO_CONTENT,
)
async def delete_report(
    report_id: UUID,
    db: AsyncSession = Depends(get_db),
):
    """Delete a report and its associated file."""
    report = await report_repository.get(db, report_id)
    if report is None:
        raise NotFoundException("Report")

    # Remove the on-disk artifact first, then the database row.
    artifact = Path(report.file_path)
    if artifact.exists():
        artifact.unlink()

    await report_repository.delete(db, id=report_id)

    # Drop any in-memory generation status left over for this report.
    _report_status_store.pop(report_id, None)

    return None
diff --git a/src/core/config.py b/src/core/config.py
index 036c79a..9e3b99a 100644
--- a/src/core/config.py
+++ b/src/core/config.py
@@ -18,6 +18,12 @@ class Settings(BaseSettings):
default_page_size: int = 20
max_page_size: int = 100
+ # Report Storage
+ reports_storage_path: str = "./storage/reports"
+ reports_max_file_size_mb: int = 50
+ reports_cleanup_days: int = 30
+ reports_rate_limit_per_minute: int = 10
+
class Config:
env_file = ".env"
case_sensitive = False
diff --git a/src/repositories/__init__.py b/src/repositories/__init__.py
index 659264e..6aebf75 100644
--- a/src/repositories/__init__.py
+++ b/src/repositories/__init__.py
@@ -6,10 +6,16 @@ from src.repositories.scenario import (
scenario_repository,
ScenarioStatus,
)
+from src.repositories.report import (
+ ReportRepository,
+ report_repository,
+)
__all__ = [
"BaseRepository",
"ScenarioRepository",
"scenario_repository",
"ScenarioStatus",
+ "ReportRepository",
+ "report_repository",
]
diff --git a/src/repositories/report.py b/src/repositories/report.py
new file mode 100644
index 0000000..02b7c66
--- /dev/null
+++ b/src/repositories/report.py
@@ -0,0 +1,54 @@
+"""Report repository with specific methods."""
+
+from typing import Optional, List
+from uuid import UUID
+from datetime import datetime
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy import select, update, desc
+
+from src.models.report import Report
+from src.repositories.base import BaseRepository
+
+
+class ReportRepository(BaseRepository[Report]):
+ """Repository for Report model with specific methods."""
+
+ def __init__(self):
+ super().__init__(Report)
+
+ async def get_by_scenario(
+ self, db: AsyncSession, scenario_id: UUID, skip: int = 0, limit: int = 100
+ ) -> List[Report]:
+ """Get reports for a specific scenario."""
+ query = (
+ select(Report)
+ .where(Report.scenario_id == scenario_id)
+ .order_by(desc(Report.created_at))
+ .offset(skip)
+ .limit(limit)
+ )
+ result = await db.execute(query)
+ return result.scalars().all()
+
+ async def count_by_scenario(self, db: AsyncSession, scenario_id: UUID) -> int:
+ """Count reports for a specific scenario without loading every row."""
+ from sqlalchemy import func  # local import; COUNT(*) is executed in the database
+ result = await db.execute(select(func.count()).select_from(Report).where(Report.scenario_id == scenario_id))
+ return result.scalar_one()
+
+ async def update_file_size(
+ self, db: AsyncSession, report_id: UUID, file_size_bytes: int
+ ) -> Optional[Report]:
+ """Update report file size."""
+ result = await db.execute(
+ update(Report)
+ .where(Report.id == report_id)
+ .values(file_size_bytes=file_size_bytes)
+ .returning(Report)
+ )
+ await db.commit()
+ return result.scalar_one_or_none()
+
+
+# Singleton instance
+report_repository = ReportRepository()
diff --git a/src/schemas/__init__.py b/src/schemas/__init__.py
index 060a91e..1200e31 100644
--- a/src/schemas/__init__.py
+++ b/src/schemas/__init__.py
@@ -15,6 +15,16 @@ from src.schemas.metric import (
MetricsResponse,
)
from src.schemas.common import PaginatedResponse
+from src.schemas.report import (
+ ReportFormat,
+ ReportSection,
+ ReportStatus,
+ ReportCreateRequest,
+ ReportResponse,
+ ReportStatusResponse,
+ ReportList,
+ ReportGenerateResponse,
+)
__all__ = [
"ScenarioBase",
@@ -29,4 +39,12 @@ __all__ = [
"TimeseriesPoint",
"MetricsResponse",
"PaginatedResponse",
+ "ReportFormat",
+ "ReportSection",
+ "ReportStatus",
+ "ReportCreateRequest",
+ "ReportResponse",
+ "ReportStatusResponse",
+ "ReportList",
+ "ReportGenerateResponse",
]
diff --git a/src/schemas/report.py b/src/schemas/report.py
new file mode 100644
index 0000000..b5b75a2
--- /dev/null
+++ b/src/schemas/report.py
@@ -0,0 +1,95 @@
+"""Report schemas."""
+
+from datetime import datetime
+from typing import Optional, List
+from uuid import UUID
+from pydantic import BaseModel, Field, ConfigDict
+from enum import Enum
+
+
+class ReportFormat(str, Enum):
+ """Report format enum."""
+
+ PDF = "pdf"
+ CSV = "csv"
+
+
+class ReportSection(str, Enum):
+ """Report section enum."""
+
+ SUMMARY = "summary"
+ COSTS = "costs"
+ METRICS = "metrics"
+ LOGS = "logs"
+ PII = "pii"
+
+
+class ReportStatus(str, Enum):
+ """Report generation status enum."""
+
+ PENDING = "pending"
+ PROCESSING = "processing"
+ COMPLETED = "completed"
+ FAILED = "failed"
+
+
+class ReportCreateRequest(BaseModel):
+ """Schema for report generation request."""
+
+ format: ReportFormat = Field(..., description="Report format (pdf or csv)")
+ include_logs: bool = Field(
+ default=True, description="Include individual log entries"
+ )
+ date_from: Optional[datetime] = Field(None, description="Start date filter")
+ date_to: Optional[datetime] = Field(None, description="End date filter")
+ sections: List[ReportSection] = Field(
+ default=[ReportSection.SUMMARY, ReportSection.COSTS, ReportSection.METRICS, ReportSection.LOGS, ReportSection.PII],
+ description="Sections to include in PDF report",
+ )
+
+
+class ReportResponse(BaseModel):
+ """Schema for report response."""
+
+ model_config = ConfigDict(from_attributes=True)
+
+ id: UUID
+ scenario_id: UUID
+ format: ReportFormat
+ file_path: str
+ file_size_bytes: Optional[int] = None
+ generated_by: Optional[str] = None
+ created_at: datetime
+ updated_at: datetime
+
+
+class ReportStatusResponse(BaseModel):
+ """Schema for report status response."""
+
+ report_id: UUID
+ status: ReportStatus
+ progress: int = Field(
+ default=0, ge=0, le=100, description="Generation progress percentage"
+ )
+ message: Optional[str] = None
+ file_path: Optional[str] = None
+ file_size_bytes: Optional[int] = None
+ created_at: Optional[datetime] = None
+ completed_at: Optional[datetime] = None
+
+
+class ReportList(BaseModel):
+ """Schema for list of reports."""
+
+ items: List[ReportResponse]
+ total: int
+ page: int
+ page_size: int
+
+
+class ReportGenerateResponse(BaseModel):
+ """Schema for report generation accepted response."""
+
+ report_id: UUID
+ status: ReportStatus
+ message: str
diff --git a/src/services/__init__.py b/src/services/__init__.py
index d8f69c7..79bf09a 100644
--- a/src/services/__init__.py
+++ b/src/services/__init__.py
@@ -3,6 +3,7 @@
from src.services.pii_detector import PIIDetector, pii_detector, PIIDetectionResult
from src.services.cost_calculator import CostCalculator, cost_calculator
from src.services.ingest_service import IngestService, ingest_service
+from src.services.report_service import ReportService, report_service
__all__ = [
"PIIDetector",
@@ -12,4 +13,6 @@ __all__ = [
"cost_calculator",
"IngestService",
"ingest_service",
+ "ReportService",
+ "report_service",
]
diff --git a/src/services/report_service.py b/src/services/report_service.py
new file mode 100644
index 0000000..359eb46
--- /dev/null
+++ b/src/services/report_service.py
@@ -0,0 +1,621 @@
+"""Report generation service."""
+
+import os
+import uuid
+from datetime import datetime, timedelta
+from decimal import Decimal
+from pathlib import Path
+from typing import Optional, List, Dict, Any
+from uuid import UUID
+
+import pandas as pd
+from reportlab.lib import colors
+from reportlab.lib.pagesizes import letter, A4
+from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
+from reportlab.lib.units import inch
+from reportlab.platypus import (
+ SimpleDocTemplate,
+ Paragraph,
+ Spacer,
+ Table,
+ TableStyle,
+ PageBreak,
+)
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy import select, func, desc
+
+from src.core.config import settings
+from src.core.exceptions import NotFoundException, ValidationException
+from src.models.report import Report
+from src.models.scenario import Scenario
+from src.models.scenario_log import ScenarioLog
+from src.models.scenario_metric import ScenarioMetric
+
+
+class ReportStatus:
+ """Report generation status constants."""
+
+ PENDING = "pending"
+ PROCESSING = "processing"
+ COMPLETED = "completed"
+ FAILED = "failed"
+
+
+class ReportService:
+ """Service for generating scenario reports in PDF and CSV formats."""
+
+ def __init__(self):
+ self.storage_path = Path(settings.reports_storage_path)
+ self.storage_path.mkdir(parents=True, exist_ok=True)
+ self.max_file_size_mb = settings.reports_max_file_size_mb
+
+ def _get_scenario_path(self, scenario_id: UUID) -> Path:
+ """Get storage path for a scenario's reports."""
+ path = self.storage_path / str(scenario_id)
+ path.mkdir(parents=True, exist_ok=True)
+ return path
+
+ def _get_file_path(self, scenario_id: UUID, report_id: UUID, format: str) -> Path:
+ """Get file path for a report."""
+ return self._get_scenario_path(scenario_id) / f"{report_id}.{format}"
+
+ async def compile_metrics(
+ self,
+ db: AsyncSession,
+ scenario_id: UUID,
+ date_from: Optional[datetime] = None,
+ date_to: Optional[datetime] = None,
+ ) -> Dict[str, Any]:
+ """Compile all metrics for a scenario.
+
+ Args:
+ db: Database session
+ scenario_id: Scenario UUID
+ date_from: Optional start date filter
+ date_to: Optional end date filter
+
+ Returns:
+ Dictionary containing all compiled metrics
+ """
+ # Get scenario
+ scenario = await db.get(Scenario, scenario_id)
+ if not scenario:
+ raise NotFoundException("Scenario")
+
+ # Base queries
+ logs_query = select(ScenarioLog).where(ScenarioLog.scenario_id == scenario_id)
+ metrics_query = select(ScenarioMetric).where(
+ ScenarioMetric.scenario_id == scenario_id
+ )
+
+ # Apply date filters
+ if date_from:
+ logs_query = logs_query.where(ScenarioLog.received_at >= date_from)
+ metrics_query = metrics_query.where(ScenarioMetric.timestamp >= date_from)
+ if date_to:
+ logs_query = logs_query.where(ScenarioLog.received_at <= date_to)
+ metrics_query = metrics_query.where(ScenarioMetric.timestamp <= date_to)
+
+ # Execute queries
+ logs_result = await db.execute(logs_query)
+ logs = logs_result.scalars().all()
+
+ metrics_result = await db.execute(metrics_query)
+ metrics = metrics_result.scalars().all()
+
+ # Compile metrics
+ total_logs = len(logs)
+ total_size_bytes = sum(log.size_bytes for log in logs)
+ logs_with_pii = sum(1 for log in logs if log.has_pii)
+ total_tokens = sum(log.token_count for log in logs)
+ total_sqs_blocks = sum(log.sqs_blocks for log in logs)
+
+ # Cost breakdown by metric type
+ cost_breakdown = {}
+ for metric in metrics:
+ if metric.metric_type not in cost_breakdown:
+ cost_breakdown[metric.metric_type] = Decimal("0")
+ cost_breakdown[metric.metric_type] += metric.value
+
+ # Top 10 most expensive logs (by size)
+ top_logs_query = (
+ select(ScenarioLog)
+ .where(ScenarioLog.scenario_id == scenario_id)
+ .order_by(desc(ScenarioLog.size_bytes))
+ .limit(10)
+ )
+ if date_from:
+ top_logs_query = top_logs_query.where(ScenarioLog.received_at >= date_from)
+ if date_to:
+ top_logs_query = top_logs_query.where(ScenarioLog.received_at <= date_to)
+
+ top_logs_result = await db.execute(top_logs_query)
+ top_logs = top_logs_result.scalars().all()
+
+ # Get unique sources
+ sources_query = (
+ select(ScenarioLog.source, func.count(ScenarioLog.id).label("count"))
+ .where(ScenarioLog.scenario_id == scenario_id)
+ .group_by(ScenarioLog.source)
+ )
+ if date_from:
+ sources_query = sources_query.where(ScenarioLog.received_at >= date_from)
+ if date_to:
+ sources_query = sources_query.where(ScenarioLog.received_at <= date_to)
+
+ sources_result = await db.execute(sources_query)
+ sources = {row.source: row.count for row in sources_result.all()}
+
+ return {
+ "scenario": {
+ "id": str(scenario.id),
+ "name": scenario.name,
+ "description": scenario.description,
+ "region": scenario.region,
+ "status": scenario.status,
+ "created_at": scenario.created_at.isoformat()
+ if scenario.created_at
+ else None,
+ "started_at": scenario.started_at.isoformat()
+ if scenario.started_at
+ else None,
+ "completed_at": scenario.completed_at.isoformat()
+ if scenario.completed_at
+ else None,
+ "total_cost_estimate": float(scenario.total_cost_estimate),
+ },
+ "summary": {
+ "total_logs": total_logs,
+ "total_size_bytes": total_size_bytes,
+ "total_size_mb": round(total_size_bytes / (1024 * 1024), 2),
+ "logs_with_pii": logs_with_pii,
+ "total_tokens": total_tokens,
+ "total_sqs_blocks": total_sqs_blocks,
+ "date_range": {
+ "from": date_from.isoformat() if date_from else None,
+ "to": date_to.isoformat() if date_to else None,
+ },
+ },
+ "cost_breakdown": {k: float(v) for k, v in cost_breakdown.items()},
+ "sources": sources,
+ "top_logs": [
+ {
+ "id": str(log.id),
+ "received_at": log.received_at.isoformat()
+ if log.received_at
+ else None,
+ "source": log.source,
+ "size_bytes": log.size_bytes,
+ "size_kb": round(log.size_bytes / 1024, 2),
+ "has_pii": log.has_pii,
+ "token_count": log.token_count,
+ "sqs_blocks": log.sqs_blocks,
+ "message_preview": log.message_preview,
+ }
+ for log in top_logs
+ ],
+ }
+
+ async def generate_pdf(
+ self,
+ db: AsyncSession,
+ scenario_id: UUID,
+ report_id: UUID,
+ include_sections: Optional[List[str]] = None,
+ date_from: Optional[datetime] = None,
+ date_to: Optional[datetime] = None,
+ ) -> Path:
+ """Generate a PDF report for a scenario.
+
+ Args:
+ db: Database session
+ scenario_id: Scenario UUID
+ report_id: Report UUID
+ include_sections: List of sections to include (default: all)
+ date_from: Optional start date filter
+ date_to: Optional end date filter
+
+ Returns:
+ Path to the generated PDF file
+ """
+ include_sections = include_sections or [
+ "summary",
+ "costs",
+ "metrics",
+ "logs",
+ "pii",
+ ]
+
+ # Compile metrics
+ metrics = await self.compile_metrics(db, scenario_id, date_from, date_to)
+
+ # Get file path
+ file_path = self._get_file_path(scenario_id, report_id, "pdf")
+
+ # Create PDF
+ doc = SimpleDocTemplate(
+ str(file_path),
+ pagesize=A4,
+ rightMargin=72,
+ leftMargin=72,
+ topMargin=72,
+ bottomMargin=18,
+ )
+
+ # Container for elements
+ elements = []
+ styles = getSampleStyleSheet()
+
+ # Custom styles
+ title_style = ParagraphStyle(
+ "CustomTitle",
+ parent=styles["Heading1"],
+ fontSize=24,
+ spaceAfter=30,
+ textColor=colors.HexColor("#0066CC"),
+ )
+
+ heading_style = ParagraphStyle(
+ "CustomHeading",
+ parent=styles["Heading2"],
+ fontSize=14,
+ spaceAfter=12,
+ textColor=colors.HexColor("#0066CC"),
+ )
+
+ # Header / Title
+ elements.append(Paragraph("mockupAWS Report", title_style))
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Report metadata
+ elements.append(
+ Paragraph(
+ f"Scenario: {metrics['scenario']['name']}", styles["Normal"]
+ )
+ )
+ elements.append(
+ Paragraph(
+ f"Region: {metrics['scenario']['region']}", styles["Normal"]
+ )
+ )
+ elements.append(
+ Paragraph(
+ f"Status: {metrics['scenario']['status']}", styles["Normal"]
+ )
+ )
+ elements.append(
+ Paragraph(
+ f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+ styles["Normal"],
+ )
+ )
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Summary Section
+ if "summary" in include_sections:
+ elements.append(Paragraph("Scenario Summary", heading_style))
+ summary_data = [
+ ["Metric", "Value"],
+ ["Total Logs", str(metrics["summary"]["total_logs"])],
+ ["Total Size", f"{metrics['summary']['total_size_mb']} MB"],
+ ["Total Tokens", str(metrics["summary"]["total_tokens"])],
+ ["SQS Blocks", str(metrics["summary"]["total_sqs_blocks"])],
+ ]
+
+ summary_table = Table(summary_data, colWidths=[2.5 * inch, 2.5 * inch])
+ summary_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#0066CC")),
+ ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
+ ("FONTSIZE", (0, 0), (-1, 0), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
+ ("BACKGROUND", (0, 1), (-1, -1), colors.beige),
+ ("GRID", (0, 0), (-1, -1), 1, colors.black),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [colors.white, colors.lightgrey],
+ ),
+ ]
+ )
+ )
+ elements.append(summary_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Cost Breakdown Section
+ if "costs" in include_sections and metrics["cost_breakdown"]:
+ elements.append(Paragraph("Cost Breakdown", heading_style))
+ cost_data = [["Service", "Cost (USD)"]]
+ for service, cost in metrics["cost_breakdown"].items():
+ cost_data.append([service.capitalize(), f"${cost:.6f}"])
+ cost_data.append(
+ [
+ "Total Estimated",
+ f"${metrics['scenario']['total_cost_estimate']:.6f}",
+ ]
+ )
+
+ cost_table = Table(cost_data, colWidths=[2.5 * inch, 2.5 * inch])
+ cost_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#0066CC")),
+ ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
+ ("FONTSIZE", (0, 0), (-1, 0), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
+ ("GRID", (0, 0), (-1, -1), 1, colors.black),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [colors.white, colors.lightgrey],
+ ),
+ ("FONTNAME", (0, -1), (-1, -1), "Helvetica-Bold"),
+ ("BACKGROUND", (0, -1), (-1, -1), colors.lightblue),
+ ]
+ )
+ )
+ elements.append(cost_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # PII Summary Section
+ if "pii" in include_sections:
+ elements.append(Paragraph("PII Summary", heading_style))
+ pii_data = [
+ ["Metric", "Value"],
+ ["Logs with PII", str(metrics["summary"]["logs_with_pii"])],
+ [
+ "PII Percentage",
+ f"{(metrics['summary']['logs_with_pii'] / metrics['summary']['total_logs'] * 100) if metrics['summary']['total_logs'] > 0 else 0:.1f}%",
+ ],
+ ]
+
+ pii_table = Table(pii_data, colWidths=[2.5 * inch, 2.5 * inch])
+ pii_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#0066CC")),
+ ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
+ ("FONTSIZE", (0, 0), (-1, 0), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
+ ("GRID", (0, 0), (-1, -1), 1, colors.black),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [colors.white, colors.lightgrey],
+ ),
+ ]
+ )
+ )
+ elements.append(pii_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Sources Section
+ if "metrics" in include_sections and metrics["sources"]:
+ elements.append(PageBreak())
+ elements.append(Paragraph("Log Sources", heading_style))
+ source_data = [["Source", "Count"]]
+ for source, count in metrics["sources"].items():
+ source_data.append([source, str(count)])
+
+ source_table = Table(source_data, colWidths=[2.5 * inch, 2.5 * inch])
+ source_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#0066CC")),
+ ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
+ ("FONTSIZE", (0, 0), (-1, 0), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
+ ("GRID", (0, 0), (-1, -1), 1, colors.black),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [colors.white, colors.lightgrey],
+ ),
+ ]
+ )
+ )
+ elements.append(source_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Top Logs Section
+ if "logs" in include_sections and metrics["top_logs"]:
+ elements.append(PageBreak())
+ elements.append(Paragraph("Top 10 Largest Logs", heading_style))
+
+ log_data = [["Source", "Size (KB)", "Tokens", "PII"]]
+ for log in metrics["top_logs"]:
+ log_data.append(
+ [
+ log["source"][:20],
+ f"{log['size_kb']:.2f}",
+ str(log["token_count"]),
+ "Yes" if log["has_pii"] else "No",
+ ]
+ )
+
+ log_table = Table(
+ log_data, colWidths=[2 * inch, 1.2 * inch, 1.2 * inch, 0.8 * inch]
+ )
+ log_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#0066CC")),
+ ("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
+ ("FONTSIZE", (0, 0), (-1, 0), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 12),
+ ("GRID", (0, 0), (-1, -1), 1, colors.black),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [colors.white, colors.lightgrey],
+ ),
+ ("FONTSIZE", (0, 1), (-1, -1), 9),
+ ]
+ )
+ )
+ elements.append(log_table)
+
+ # Footer
+ def add_page_number(canvas, doc):
+ """Add page number to footer."""
+ canvas.saveState()
+ canvas.setFont("Helvetica", 9)
+ canvas.setFillColor(colors.grey)
+ page_num_text = f"Page {doc.page}"
+ canvas.drawRightString(7.5 * inch, 0.5 * inch, page_num_text)
+ canvas.restoreState()
+
+ # Build PDF
+ doc.build(elements, onFirstPage=add_page_number, onLaterPages=add_page_number)
+
+ # Check file size
+ file_size_mb = file_path.stat().st_size / (1024 * 1024)
+ if file_size_mb > self.max_file_size_mb:
+ file_path.unlink()
+ raise ValidationException(
+ f"Generated file exceeds maximum size of {self.max_file_size_mb}MB"
+ )
+
+ return file_path
+
+ async def generate_csv(
+ self,
+ db: AsyncSession,
+ scenario_id: UUID,
+ report_id: UUID,
+ include_logs: bool = True,
+ date_from: Optional[datetime] = None,
+ date_to: Optional[datetime] = None,
+ ) -> Path:
+ """Generate a CSV report for a scenario.
+
+ Args:
+ db: Database session
+ scenario_id: Scenario UUID
+ report_id: Report UUID
+ include_logs: Whether to include individual log entries
+ date_from: Optional start date filter
+ date_to: Optional end date filter
+
+ Returns:
+ Path to the generated CSV file
+ """
+ # Get file path
+ file_path = self._get_file_path(scenario_id, report_id, "csv")
+
+ # Compile metrics
+ metrics = await self.compile_metrics(db, scenario_id, date_from, date_to)
+
+ # Create CSV data
+ if include_logs:
+ # Get all logs for CSV
+ logs_query = select(ScenarioLog).where(
+ ScenarioLog.scenario_id == scenario_id
+ )
+ if date_from:
+ logs_query = logs_query.where(ScenarioLog.received_at >= date_from)
+ if date_to:
+ logs_query = logs_query.where(ScenarioLog.received_at <= date_to)
+
+ logs_result = await db.execute(logs_query)
+ logs = logs_result.scalars().all()
+
+ # Convert to DataFrame
+ logs_data = []
+ for log in logs:
+ logs_data.append(
+ {
+ "log_id": str(log.id),
+ "scenario_id": str(scenario_id),
+ "received_at": log.received_at,
+ "source": log.source,
+ "size_bytes": log.size_bytes,
+ "size_kb": round(log.size_bytes / 1024, 2),
+ "has_pii": log.has_pii,
+ "token_count": log.token_count,
+ "sqs_blocks": log.sqs_blocks,
+ "message_preview": log.message_preview,
+ }
+ )
+
+ df = pd.DataFrame(logs_data)
+ df.to_csv(file_path, index=False)
+ else:
+ # Summary only
+ summary_data = {
+ "scenario_id": [str(scenario_id)],
+ "scenario_name": [metrics["scenario"]["name"]],
+ "region": [metrics["scenario"]["region"]],
+ "status": [metrics["scenario"]["status"]],
+ "total_logs": [metrics["summary"]["total_logs"]],
+ "total_size_mb": [metrics["summary"]["total_size_mb"]],
+ "total_tokens": [metrics["summary"]["total_tokens"]],
+ "total_sqs_blocks": [metrics["summary"]["total_sqs_blocks"]],
+ "logs_with_pii": [metrics["summary"]["logs_with_pii"]],
+ "total_cost_estimate": [metrics["scenario"]["total_cost_estimate"]],
+ }
+
+ # Add cost breakdown
+ for service, cost in metrics["cost_breakdown"].items():
+ summary_data[f"cost_{service}"] = [cost]
+
+ df = pd.DataFrame(summary_data)
+ df.to_csv(file_path, index=False)
+
+ # Check file size
+ file_size_mb = file_path.stat().st_size / (1024 * 1024)
+ if file_size_mb > self.max_file_size_mb:
+ file_path.unlink()
+ raise ValidationException(
+ f"Generated file exceeds maximum size of {self.max_file_size_mb}MB"
+ )
+
+ return file_path
+
+ async def cleanup_old_reports(self, max_age_days: int = 30) -> int:
+ """Clean up reports older than specified days.
+
+ Args:
+ max_age_days: Maximum age of reports in days
+
+ Returns:
+ Number of files deleted
+ """
+ cutoff_date = datetime.now() - timedelta(days=max_age_days)
+ deleted_count = 0
+
+ if self.storage_path.exists():
+ for scenario_dir in self.storage_path.iterdir():
+ if scenario_dir.is_dir():
+ for file_path in scenario_dir.iterdir():
+ if file_path.is_file():
+ file_stat = file_path.stat()
+ file_mtime = datetime.fromtimestamp(file_stat.st_mtime)
+ if file_mtime < cutoff_date:
+ file_path.unlink()
+ deleted_count += 1
+ # Remove empty directories
+ if not any(scenario_dir.iterdir()):
+ scenario_dir.rmdir()
+
+ return deleted_count
+
+
+# Singleton instance
+report_service = ReportService()