nginx-configuration

@aj-geddes/useful-ai-prompts

SKILL.md

name: nginx-configuration
description: Configure Nginx web server for high-performance reverse proxy, load balancing, SSL/TLS, caching, and API gateway functionality.

Nginx Configuration

Overview

Master Nginx configuration for production-grade web servers, reverse proxies, load balancing, SSL termination, caching, and API gateway patterns with advanced performance tuning.

When to Use

  • Reverse proxy setup
  • Load balancing between backend services
  • SSL/TLS termination
  • HTTP/2 and gRPC support (see the sketch after this list)
  • Caching and compression
  • Rate limiting and DDoS protection
  • URL rewriting and routing (see the sketch after this list)
  • API gateway functionality
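
Two of the items above, gRPC proxying and URL rewriting, are not covered by the full examples below. A minimal sketch of both, assuming a hypothetical gRPC service at grpc1.internal:50051 and a legacy path that should map onto the /api/ prefix:

# gRPC backends use grpc_pass instead of proxy_pass; the listener must have HTTP/2 enabled
location /grpc/ {
    grpc_pass grpc://grpc1.internal:50051;
    grpc_set_header Host $host;
}

# Rewrite a legacy URL onto the current API prefix before location matching continues
location /legacy/ {
    rewrite ^/legacy/(.*)$ /api/$1 last;
}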

Implementation Examples

1. Production Nginx Configuration

# /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
worker_rlimit_nofile 65535;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 4096;
    use epoll;
    multi_accept on;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Logging
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    log_format upstream_time '$remote_addr - $remote_user [$time_local] '
                            '"$request" $status $body_bytes_sent '
                            '"$http_referer" "$http_user_agent" '
                            'rt=$request_time uct="$upstream_connect_time" '
                            'uht="$upstream_header_time" urt="$upstream_response_time"';

    access_log /var/log/nginx/access.log upstream_time buffer=32k flush=5s;

    # Performance optimizations
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    client_max_body_size 20M;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml text/javascript
               application/json application/javascript application/xml+rss
               application/rss+xml application/atom+xml image/svg+xml;
    gzip_disable "msie6";

    # Rate limiting
    limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=api:10m rate=100r/m;
    limit_conn_zone $binary_remote_addr zone=connections:10m;

    # Upstream servers
    upstream backend {
        least_conn;
        server backend1.internal:8080 weight=5 max_fails=3 fail_timeout=30s;
        server backend2.internal:8080 weight=5 max_fails=3 fail_timeout=30s;
        server backend3.internal:8080 weight=3 max_fails=3 fail_timeout=30s;
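        # Idle keepalive connections to the upstream, per worker; only effective when the
        # proxied location uses proxy_http_version 1.1 and clears the Connection header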
        keepalive 32;
    }

    upstream api_backend {
        least_conn;
        server api1.internal:3000;
        server api2.internal:3000;
        server api3.internal:3000;
        keepalive 64;
    }

    # Caching
    proxy_cache_path /var/cache/nginx/general levels=1:2 keys_zone=general_cache:10m max_size=1g inactive=60m use_temp_path=off;
    proxy_cache_path /var/cache/nginx/api levels=1:2 keys_zone=api_cache:10m max_size=500m inactive=30m use_temp_path=off;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
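
The upstream blocks above combine weighted least_conn balancing with passive health checks (max_fails / fail_timeout). When a node must be drained or held as a spare, the standard server parameters cover that as well; a minimal sketch with hypothetical host names:

upstream backend_with_spare {
    least_conn;
    server backend1.internal:8080 max_fails=3 fail_timeout=30s;
    server backend2.internal:8080 down;       # temporarily removed from rotation
    server backup1.internal:8080 backup;      # receives traffic only when the primary servers are unavailable
    keepalive 32;
}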

2. HTTPS Server with Load Balancing

# /etc/nginx/sites-available/myapp
server {
    listen 80;
    server_name myapp.com www.myapp.com;
    # $host preserves the name the client actually requested (apex or www)
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl http2;
    server_name myapp.com www.myapp.com;

    # SSL Configuration
    ssl_certificate /etc/ssl/certs/myapp.com.crt;
    ssl_certificate_key /etc/ssl/private/myapp.com.key;
    ssl_trusted_certificate /etc/ssl/certs/ca-bundle.crt;

    # SSL Security
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    ssl_stapling on;
    ssl_stapling_verify on;
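    # Note: OCSP stapling also requires a resolver directive (e.g. "resolver 1.1.1.1;",
    # an assumed DNS server) so Nginx can reach the CA's OCSP responder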

    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;

    # Root and logging
    root /var/www/myapp;
    access_log /var/log/nginx/myapp.access.log upstream_time;
    error_log /var/log/nginx/myapp.error.log warn;

    # Rate limiting
    limit_req zone=general burst=20 nodelay;
    limit_conn connections 10;

    # Proxy settings
    location / {
        limit_req zone=general burst=20 nodelay;

        proxy_pass http://backend;
        proxy_http_version 1.1;

        # Headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";

        # Timeouts
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Caching
        proxy_cache general_cache;
        proxy_cache_valid 200 60m;
        proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
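        # Caveat: declaring add_header inside this location stops inheritance of the
        # server-level add_header directives (the security headers above); repeat them
        # here if proxied responses must also carry them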
        add_header X-Cache-Status $upstream_cache_status;
    }

    # API endpoint with different caching
    location /api/ {
        limit_req zone=api burst=10 nodelay;

        proxy_pass http://api_backend;
        proxy_http_version 1.1;

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Cache only GET requests
        proxy_cache api_cache;
        proxy_cache_methods GET HEAD;
        proxy_cache_valid 200 30m;
        proxy_cache_key "$scheme$request_method$host$request_uri";

        # Don't cache if authenticated
        proxy_no_cache $http_authorization;
    }

    # Static files with long caching
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
        expires 365d;
        add_header Cache-Control "public, immutable";
        access_log off;
    }

    # Health check endpoint
    location /health {
        access_log off;
        default_type text/plain;
        return 200 "healthy\n";
    }

    # Metrics endpoint
    location /metrics {
        deny all;
    }
}

3. Nginx Configuration Script

#!/bin/bash
# nginx-deploy.sh - Deploy and validate Nginx configuration

set -euo pipefail

echo "Deploying Nginx configuration..."

# Test configuration
echo "Testing Nginx configuration..."
nginx -t

# Check if running
if pgrep -x nginx > /dev/null; then
    echo "Reloading Nginx..."
    systemctl reload nginx
else
    echo "Starting Nginx..."
    systemctl start nginx
fi

# Verify
echo "Verifying deployment..."
sleep 2

# Check service status
if systemctl is-active --quiet nginx; then
    echo "Nginx is running"
else
    echo "ERROR: Nginx failed to start"
    systemctl status nginx
    exit 1
fi

# Test connectivity
echo "Testing endpoints..."
curl -fsk https://localhost/health || echo "Warning: Health check failed"

# Log status
echo "Nginx configuration deployed successfully"
journalctl -u nginx -n 20 --no-pager

4. Nginx Monitoring Configuration

# /etc/nginx/conf.d/monitoring.conf
server {
    listen 127.0.0.1:8080;
    server_name localhost;

    # Stub status for monitoring
    location /nginx_status {
        stub_status on;
        access_log off;
        allow 127.0.0.1;
        allow ::1;
        deny all;
    }

    # Prometheus metrics
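    # Assumes a separate exporter (for example nginx-prometheus-exporter) is listening on 127.0.0.1:8081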
    location /metrics {
        access_log off;
        proxy_pass http://127.0.0.1:8081/metrics;
        allow 127.0.0.1;
        allow ::1;
        deny all;
    }
}
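
For reference, stub_status returns a short plain-text report of roughly this form (the numbers are illustrative):

Active connections: 3
server accepts handled requests
 118 118 342
Reading: 0 Writing: 1 Waiting: 2

accepts, handled, and requests are cumulative counters; Reading, Writing, and Waiting describe the current connection states.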

Best Practices

✅ DO

  • Use HTTP/2 for performance
  • Enable SSL/TLS with strong ciphers
  • Implement proper caching strategies
  • Use upstream connection pooling
  • Monitor with stub_status or prometheus
  • Rate limit to prevent abuse
  • Add security headers
  • Use least_conn load balancing
  • Keep error logs separate from access logs

❌ DON'T

  • Disable gzip compression
  • Use weak SSL ciphers
  • Cache authenticated responses
  • Allow direct access to backends
  • Ignore upstream health checks
  • Mix HTTP and HTTPS without redirect
  • Use default error pages in production (see the sketch after this list)
  • Cache sensitive user data
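
On the default error pages point flagged above, a minimal sketch, assuming the custom pages live in a hypothetical /var/www/error_pages directory:

error_page 404 /404.html;
error_page 500 502 503 504 /50x.html;

location = /50x.html {
    root /var/www/error_pages;   # hypothetical directory holding the custom pages
    internal;
}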

Common Commands

nginx -t                              # Test configuration
systemctl reload nginx                # Reload without dropping connections
systemctl restart nginx               # Full restart
tail -f /var/log/nginx/access.log     # Monitor the access log
curl localhost:8080/nginx_status      # Check stub_status (see monitoring config above)

Resources