Synthetic Monitoring
Automated testing for consistent, proactive performance monitoring.
RUM vs Synthetic
| Aspect | RUM (Real User) | Synthetic (Lab) |
|---|---|---|
| Data source | Actual users | Controlled tests |
| Variability | High | Low |
| Coverage | What users do | What you test |
| Issues | Finds unknown | Validates known |
| Timing | Reactive | Proactive |
Use both: Synthetic catches regressions before users see them; RUM shows the real-world impact.
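One practical detail when running both: keep synthetic runs out of your RUM data, so lab traffic does not skew field percentiles. A minimal sketch, assuming your RUM beacon is sent from your own code; the `/rum` endpoint is hypothetical, and the user-agent substrings are the ones Lighthouse and headless Chrome typically report:

// rum-beacon.ts (illustrative) — skip reporting when a known synthetic agent drives the page
const SYNTHETIC_UA = /Chrome-Lighthouse|HeadlessChrome/;

export function reportWebVital(name: string, value: number): void {
  if (SYNTHETIC_UA.test(navigator.userAgent)) return; // don't pollute RUM with lab runs
  navigator.sendBeacon('/rum', JSON.stringify({ name, value }));
}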
Lighthouse CI
Setup
npm install -D @lhci/cli
// lighthouserc.json
{
  "ci": {
    "collect": {
      "url": [
        "http://localhost:3000/",
        "http://localhost:3000/products",
        "http://localhost:3000/checkout"
      ],
      "numberOfRuns": 3
    },
    "assert": {
      "preset": "lighthouse:recommended",
      "assertions": {
        "categories:performance": ["error", { "minScore": 0.9 }],
        "categories:accessibility": ["error", { "minScore": 0.9 }],
        "first-contentful-paint": ["error", { "maxNumericValue": 2000 }],
        "largest-contentful-paint": ["error", { "maxNumericValue": 2500 }],
        "interactive": ["error", { "maxNumericValue": 3500 }],
        "cumulative-layout-shift": ["error", { "maxNumericValue": 0.1 }]
      }
    },
    "upload": {
      "target": "temporary-public-storage"
    }
  }
}
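The same metrics can also be collected programmatically when a run needs custom logic around it (warming caches, picking URLs dynamically). A minimal sketch using the lighthouse and chrome-launcher packages, which are assumed to be installed separately and are not part of @lhci/cli:

// scripts/run-lighthouse.ts (illustrative, ESM)
import lighthouse from 'lighthouse';
import * as chromeLauncher from 'chrome-launcher';

const chrome = await chromeLauncher.launch({ chromeFlags: ['--headless'] });
const result = await lighthouse('http://localhost:3000/', {
  port: chrome.port,
  onlyCategories: ['performance'],
});

// lhr is the Lighthouse result object; audits are keyed by audit id
const lcpMs = result?.lhr.audits['largest-contentful-paint'].numericValue;
console.log('LCP (ms):', lcpMs);

await chrome.kill();

For CI, `lhci autorun` against the config above stays the simplest path; a script like this is mostly useful for one-off investigations.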
GitHub Actions
# .github/workflows/lighthouse.yml
name: Lighthouse CI
on: [push]

jobs:
  lighthouse:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: '20'
      - name: Install & Build
        run: |
          npm ci
          npm run build
      - name: Start server
        run: |
          npm run start &
          npx wait-on http://localhost:3000
      - name: Run Lighthouse
        run: |
          npm install -g @lhci/cli
          lhci autorun
        env:
          LHCI_GITHUB_APP_TOKEN: ${{ secrets.LHCI_GITHUB_APP_TOKEN }}
Playwright Performance Testing
Setup
npm install -D @playwright/test
// tests/performance.spec.ts
import { test, expect, type Page } from '@playwright/test';

test.describe('Performance Tests', () => {
  test('homepage loads within budget', async ({ page }) => {
    // Start tracing
    await page.context().tracing.start({ screenshots: true, snapshots: true });

    const startTime = Date.now();
    await page.goto('/');
    await page.waitForLoadState('networkidle');
    const loadTime = Date.now() - startTime;

    // Assert load time
    expect(loadTime).toBeLessThan(3000);

    // Get Core Web Vitals: one observer per entry type, then report after a fixed window
    const vitals = await page.evaluate(() => {
      return new Promise<{ lcp: number | null; cls: number }>((resolve) => {
        let lcp: number | null = null;
        let cls = 0;

        new PerformanceObserver((list) => {
          const entries = list.getEntries();
          lcp = entries[entries.length - 1]?.startTime ?? lcp;
        }).observe({ type: 'largest-contentful-paint', buffered: true });

        new PerformanceObserver((list) => {
          for (const entry of list.getEntries() as any[]) {
            if (!entry.hadRecentInput) cls += entry.value;
          }
        }).observe({ type: 'layout-shift', buffered: true });

        // Give buffered entries time to flush, then report
        setTimeout(() => resolve({ lcp, cls }), 5000);
      });
    });
    console.log('Vitals:', vitals);

    // Stop tracing
    await page.context().tracing.stop({ path: 'trace.zip' });
  });

  test('checkout flow performance', async ({ page }) => {
    const metrics: Record<string, number> = {};

    // Navigate through checkout
    await page.goto('/products/1');
    metrics.productPageLoad = await measurePageLoad(page);

    await page.click('[data-testid="add-to-cart"]');
    await page.goto('/cart');
    metrics.cartPageLoad = await measurePageLoad(page);

    await page.click('[data-testid="checkout"]');
    await page.waitForLoadState('load'); // wait for the navigation the click triggers
    metrics.checkoutPageLoad = await measurePageLoad(page);

    // Assert all pages load within budget
    expect(metrics.productPageLoad).toBeLessThan(2000);
    expect(metrics.cartPageLoad).toBeLessThan(1500);
    expect(metrics.checkoutPageLoad).toBeLessThan(2000);
  });
});

async function measurePageLoad(page: Page): Promise<number> {
  return page.evaluate(() => {
    const nav = performance.getEntriesByType('navigation')[0] as PerformanceNavigationTiming;
    return nav.loadEventEnd - nav.fetchStart;
  });
}
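When you need counters beyond what the browser Performance APIs expose (JS heap size, layout and script time), Playwright can open a raw Chrome DevTools Protocol session. A small sketch, Chromium only; the spec file name is illustrative:

// tests/cdp-metrics.spec.ts (illustrative, Chromium only)
import { test } from '@playwright/test';

test('collect CDP runtime metrics', async ({ page }) => {
  const client = await page.context().newCDPSession(page);
  await client.send('Performance.enable');

  await page.goto('/');

  // Returns counters such as JSHeapUsedSize, LayoutDuration, ScriptDuration
  const { metrics } = await client.send('Performance.getMetrics');
  console.log(metrics);
});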
Performance Trace Collection
// playwright.config.ts
import { defineConfig, devices } from '@playwright/test';

export default defineConfig({
  use: {
    trace: 'retain-on-failure',
    video: 'retain-on-failure',
  },
  projects: [
    {
      name: 'Desktop Chrome',
      use: {
        browserName: 'chromium',
        viewport: { width: 1280, height: 720 },
      },
    },
    {
      name: 'Mobile Chrome',
      use: {
        browserName: 'chromium',
        ...devices['Pixel 5'],
      },
    },
    {
      name: 'Slow 3G',
      use: {
        browserName: 'chromium',
        // Forces the reported effective connection type; it does not throttle
        // actual throughput (see the CDP-based throttling sketch below)
        launchOptions: {
          args: ['--force-effective-connection-type=3g'],
        },
      },
    },
  ],
});
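For real latency and throughput throttling in the Chromium projects, a CDP session works. A sketch under assumed values; the numbers below approximate a slow-3G-like profile and are not an official preset:

// tests/slow-network.spec.ts (illustrative, Chromium only)
import { test } from '@playwright/test';

test('homepage on a throttled connection', async ({ page }) => {
  const client = await page.context().newCDPSession(page);
  await client.send('Network.emulateNetworkConditions', {
    offline: false,
    latency: 400,                          // round-trip time in ms
    downloadThroughput: (500 * 1024) / 8,  // ~500 kbit/s, in bytes per second
    uploadThroughput: (500 * 1024) / 8,
  });

  await page.goto('/');
});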
Scheduled Monitoring
Checkly (or similar)
// __checks__/homepage.check.ts
// Sketch of a Checkly construct; class and option names follow the Checkly CLI
// constructs API and may differ in other providers or CLI versions.
import { BrowserCheck, Frequency } from 'checkly/constructs';

new BrowserCheck('homepage-check', {
  name: 'Homepage Performance',
  frequency: Frequency.EVERY_5M, // every 5 minutes
  locations: ['us-east-1', 'eu-west-1'],
  runtimeId: '2024.02',
  code: {
    // The Playwright script lives in its own spec file
    entrypoint: './homepage.spec.ts',
  },
  // alertChannels: [emailOncall], // wire up an AlertChannel construct for on-call email
});

// __checks__/homepage.spec.ts
import { test, expect } from '@playwright/test';

test('homepage responds and loads within budget', async ({ page }) => {
  const response = await page.goto('https://yoursite.com');
  expect(response?.status()).toBe(200);

  const timing = await page.evaluate(() => {
    const nav = performance.getEntriesByType('navigation')[0] as PerformanceNavigationTiming;
    return nav.loadEventEnd - nav.fetchStart;
  });
  expect(timing).toBeLessThan(3000);
});
Metrics to Track
| Metric | Threshold | Priority |
|---|---|---|
| LCP | <2.5s | P0 |
| TTI | <3.5s | P0 |
| Total Blocking Time | <200ms | P0 |
| CLS | <0.1 | P0 |
| Speed Index | <3.4s | P1 |
| Time to First Byte | <600ms | P1 |
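These thresholds tend to drift when copy-pasted into several tools; keeping them in one module that both the Playwright tests and any reporting scripts import avoids that. The module and field names below are illustrative:

// perf-budgets.ts (illustrative)
export const PERF_BUDGETS = {
  lcpMs: 2500,        // P0
  ttiMs: 3500,        // P0
  tbtMs: 200,         // P0
  cls: 0.1,           // P0
  speedIndexMs: 3400, // P1
  ttfbMs: 600,        // P1
} as const;

In a test this reads as, e.g., expect(vitals.lcp).toBeLessThan(PERF_BUDGETS.lcpMs).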
CI/CD Integration Pattern
# Performance gate in CI
performance:
  runs-on: ubuntu-latest
  steps:
    - name: Run Lighthouse
      run: lhci autorun
    - name: Run Playwright
      run: npx playwright test tests/performance/
    - name: Upload results
      run: |
        # Send to your observability platform
        curl -X POST \
          -H "Authorization: Bearer $TOKEN" \
          -d @lighthouse-results.json \
          https://api.yourplatform.com/synthetic-results
Anti-Patterns
- Only running in CI (miss production issues)
- Testing only happy path
- Ignoring mobile/slow network
- No alerting on failures
- Not tracking trends over time
- Missing real user comparison
Related Skills
- See skills/core-web-vitals for RUM comparison
- See skills/bundle-performance for what to optimize
- See skills/route-transition-tracking for SPA performance
References
- references/performance.md - Performance budgets
- references/platforms/*.md - Vendor integrations