Files
mockupAWS/frontend/e2e/ingest-logs.spec.ts
Luca Sacchi Ricciardi a5fc85897b
Some checks failed
E2E Tests / Run E2E Tests (push) Has been cancelled
E2E Tests / Visual Regression Tests (push) Has been cancelled
E2E Tests / Smoke Tests (push) Has been cancelled
feat: implement v0.4.0 - Reports, Charts, Comparison, Dark Mode, E2E Testing
Backend (@backend-dev):
- Add ReportService with PDF/CSV generation (reportlab, pandas)
- Implement Report API endpoints (POST, GET, DELETE, download)
- Add ReportRepository and schemas
- Configure storage with auto-cleanup (30 days)
- Rate limiting: 10 downloads/minute
- Professional PDF templates with charts support

Frontend (@frontend-dev):
- Integrate Recharts for data visualization
- Add CostBreakdown, TimeSeries, ComparisonBar charts
- Implement scenario comparison page with multi-select
- Add dark/light mode toggle with ThemeProvider
- Create Reports page with generation form and list
- Add new UI components: checkbox, dialog, tabs, label, skeleton
- Implement useComparison and useReports hooks

QA (@qa-engineer):
- Setup Playwright E2E testing framework
- Create 7 test spec files with 94 test cases
- Add visual regression testing with baselines
- Configure multi-browser testing (Chrome, Firefox, WebKit)
- Add mobile responsive tests
- Create test fixtures and helpers
- Setup GitHub Actions CI workflow

Documentation (@spec-architect):
- Create detailed kanban-v0.4.0.md with 27 tasks
- Update progress.md with v0.4.0 tracking
- Create v0.4.0 planning prompt

Features:
- PDF/CSV Report Generation
- Interactive Charts (Pie, Area, Bar)
- Scenario Comparison (2-4 scenarios)
- Dark/Light Mode Toggle
- E2E Test Suite (94 tests)

Dependencies added:
- Backend: reportlab, pandas, slowapi
- Frontend: recharts, date-fns, @radix-ui/react-checkbox/dialog/tabs
- Testing: @playwright/test

27 tasks completed, 100% v0.4.0 implementation
2026-04-07 16:11:47 +02:00

252 lines
7.9 KiB
TypeScript

/**
* E2E Test: Log Ingestion and Metrics
*
* Tests for:
* - Start a scenario
* - Send test logs via API
* - Verify metrics update
* - Check PII detection
*/
import { test, expect } from '@playwright/test';
import {
navigateTo,
waitForLoading,
createScenarioViaAPI,
deleteScenarioViaAPI,
startScenarioViaAPI,
stopScenarioViaAPI,
sendTestLogs,
generateTestScenarioName,
} from './utils/test-helpers';
import { testLogs, logsWithPII, highVolumeLogs } from './fixtures/test-logs';
import { newScenarioData } from './fixtures/test-scenarios';
// Human-readable name reused for every scenario created in beforeEach below.
const testScenarioName = generateTestScenarioName('Ingest Test');
// ID of the scenario created for the currently running test; reset to null by afterEach cleanup.
let createdScenarioId: string | null = null;
test.describe('Log Ingestion', () => {
  test.beforeEach(async ({ request }) => {
    // Create a fresh scenario for each test so tests do not share backend state.
    const scenario = await createScenarioViaAPI(request, {
      ...newScenarioData,
      name: testScenarioName,
    });
    createdScenarioId = scenario.id;
  });

  test.afterEach(async ({ request }) => {
    // Cleanup: stop (if running) and delete the scenario created in beforeEach.
    if (createdScenarioId) {
      try {
        await stopScenarioViaAPI(request, createdScenarioId);
      } catch {
        // Scenario might not be running; stopping is best-effort.
      }
      await deleteScenarioViaAPI(request, createdScenarioId);
      createdScenarioId = null;
    }
  });

  test('should start scenario successfully', async ({ page }) => {
    // Navigate to the scenario detail page.
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // A freshly created scenario shows the 'draft' status badge.
    await expect(page.locator('span').filter({ hasText: 'draft' }).first()).toBeVisible();
  });

  test('should ingest logs and update metrics', async ({ page, request }) => {
    // Start the scenario and push test logs through the ingest API.
    await startScenarioViaAPI(request, createdScenarioId!);
    await sendTestLogs(request, createdScenarioId!, testLogs);
    // Give the backend a moment to process the logs.
    await page.waitForTimeout(2000);

    // Navigate to scenario detail and verify metrics.
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // Wait out the metrics polling interval, then reload to pick up fresh values.
    await page.waitForTimeout(6000);
    await page.reload();
    await waitForLoading(page);

    // The Total Requests metric card should render after ingestion.
    // (Same locator pattern as the Total Cost assertion in the cost-breakdown test.)
    const totalRequests = page.locator('div', {
      has: page.locator('text=Total Requests'),
    }).locator('div.text-2xl');
    await expect(totalRequests.first()).toBeVisible();

    // Scenario should now report 'running'.
    await expect(page.locator('span').filter({ hasText: 'running' }).first()).toBeVisible();
  });

  test('should detect PII in logs', async ({ page, request }) => {
    // Start the scenario, then send logs containing PII.
    await startScenarioViaAPI(request, createdScenarioId!);
    await sendTestLogs(request, createdScenarioId!, logsWithPII);
    // Wait for processing.
    await page.waitForTimeout(2000);
    // The dashboard surfaces PII findings.
    await navigateTo(page, '/');
    await waitForLoading(page);
    // Verify the PII Violations card is visible.
    await expect(page.getByText('PII Violations')).toBeVisible();
  });

  test('should handle high volume log ingestion', async ({ page, request }) => {
    // Start the scenario, then send a burst of 50 logs.
    await startScenarioViaAPI(request, createdScenarioId!);
    await sendTestLogs(request, createdScenarioId!, highVolumeLogs.slice(0, 50));
    // Wait for processing.
    await page.waitForTimeout(3000);
    // Navigate to scenario detail.
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // The page should still render its heading — i.e. the scenario stayed stable under load.
    await expect(page.getByRole('heading', { name: testScenarioName })).toBeVisible();
  });

  test('should stop scenario and update status', async ({ page, request }) => {
    // Start the scenario and open its detail page.
    await startScenarioViaAPI(request, createdScenarioId!);
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // Verify running status.
    await expect(page.locator('span').filter({ hasText: 'running' }).first()).toBeVisible();
    // Stop the scenario via the API, then refresh the page.
    await stopScenarioViaAPI(request, createdScenarioId!);
    await page.reload();
    await waitForLoading(page);
    // After stopping, status should be one of the terminal states.
    const statusElement = page.locator('span').filter({ hasText: /completed|stopped|archived/ }).first();
    await expect(statusElement).toBeVisible();
  });

  test('should update cost breakdown with different services', async ({ page, request }) => {
    // Start the scenario.
    await startScenarioViaAPI(request, createdScenarioId!);
    // Send logs spanning several services so the cost breakdown has multiple buckets.
    const serviceLogs = [
      ...testLogs.filter(log => log.service === 'lambda'),
      ...testLogs.filter(log => log.service === 'sqs'),
      ...testLogs.filter(log => log.service === 'bedrock'),
    ];
    await sendTestLogs(request, createdScenarioId!, serviceLogs);
    // Wait for processing.
    await page.waitForTimeout(2000);
    // Navigate to scenario detail.
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // Wait out the metrics polling interval and reload.
    await page.waitForTimeout(6000);
    await page.reload();
    await waitForLoading(page);
    // Verify the Total Cost metric card is rendered.
    const totalCost = page.locator('div', {
      has: page.locator('text=Total Cost')
    }).locator('div.text-2xl');
    await expect(totalCost).toBeVisible();
  });

  test('should handle log ingestion errors gracefully', async ({ page, request }) => {
    // Sending logs to a non-existent scenario must fail with 404, not crash the backend.
    const response = await request.post(
      `http://localhost:8000/api/v1/scenarios/non-existent-id/ingest`,
      { data: { logs: testLogs.slice(0, 1) } }
    );
    // Should return 404.
    expect(response.status()).toBe(404);
  });

  test('should persist metrics after page refresh', async ({ page, request }) => {
    // Start scenario and ingest logs.
    await startScenarioViaAPI(request, createdScenarioId!);
    await sendTestLogs(request, createdScenarioId!, testLogs);
    // Wait for processing.
    await page.waitForTimeout(3000);
    // Navigate to scenario detail.
    await navigateTo(page, `/scenarios/${createdScenarioId}`);
    await waitForLoading(page);
    // Wait for metrics, then refresh the page.
    await page.waitForTimeout(6000);
    await page.reload();
    await waitForLoading(page);
    // All four metric cards must survive a refresh (metrics are persisted, not in-memory only).
    await expect(page.getByText('Total Requests')).toBeVisible();
    await expect(page.getByText('Total Cost')).toBeVisible();
    await expect(page.getByText('SQS Blocks')).toBeVisible();
    await expect(page.getByText('LLM Tokens')).toBeVisible();
  });
});
test.describe('Log Ingestion - Dashboard Metrics', () => {
test('should update dashboard stats after log ingestion', async ({ page, request }) => {
// Create and start a scenario
const scenario = await createScenarioViaAPI(request, {
...newScenarioData,
name: generateTestScenarioName('Dashboard Test'),
});
createdScenarioId = scenario.id;
await startScenarioViaAPI(request, createdScenarioId);
// Navigate to dashboard before ingestion
await navigateTo(page, '/');
await waitForLoading(page);
// Get initial running count
const runningCard = page.locator('div').filter({ hasText: 'Running' }).first();
await expect(runningCard).toBeVisible();
// Send logs
await sendTestLogs(request, createdScenarioId, testLogs);
// Refresh dashboard
await page.reload();
await waitForLoading(page);
// Verify dashboard still loads correctly
await expect(page.getByText('Total Scenarios')).toBeVisible();
await expect(page.getByText('Running')).toBeVisible();
await expect(page.getByText('Total Cost')).toBeVisible();
await expect(page.getByText('PII Violations')).toBeVisible();
});
});