The Complete NGINX on Ubuntu Series: Part 16 – Performance Tuning and Optimization

Welcome to Part 16 of our comprehensive NGINX on Ubuntu series! In this installment, we dive deep into performance tuning and optimization techniques that maximize throughput, minimize latency, and make full use of your server's resources.

Performance Optimization Fundamentals

NGINX performance optimization involves tuning multiple layers, including worker processes, connection handling, memory usage, file I/O, and system-level settings, to achieve maximum efficiency.

graph TD
    A[Performance Optimization] --> B[Worker Process Tuning]
    A --> C[Connection Optimization]
    A --> D[Memory Management]
    A --> E[I/O Optimization]
    
    B --> F[Worker Processes<br/>Worker Connections<br/>CPU Affinity]
    C --> G[Keep-Alive Settings<br/>Connection Pooling<br/>Timeout Management]
    D --> H[Buffer Sizes<br/>Shared Memory<br/>Cache Zones]
    E --> I[File Descriptors<br/>sendfile Settings<br/>Disk I/O]
    
    J[Performance Metrics] --> K[Requests per Second]
    J --> L[Response Time]
    J --> M[Throughput]
    J --> N[Resource Utilization]
    
    style A fill:#e1f5fe
    style J fill:#e8f5e8
    style F fill:#fff3e0
    style G fill:#e3f2fd
    style H fill:#e8f5e8
    style I fill:#fff3e0
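
Before changing anything, it helps to baseline the current setup so each tweak can be measured against a known starting point. A quick snapshot, assuming a default Ubuntu package install of NGINX:

# Baseline the current setup before tuning
nproc                                                      # Available CPU cores
grep -E 'worker_(processes|connections)' /etc/nginx/nginx.conf
sudo grep "open files" /proc/$(cat /run/nginx.pid)/limits  # Effective fd limit of the master process
curl -s -o /dev/null -w "TTFB: %{time_starttransfer}s\n" http://localhost/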

Core Performance Configuration

# Optimize main NGINX configuration
sudo nano /etc/nginx/nginx.conf
# High-performance NGINX configuration
user www-data;

# Worker process optimization
worker_processes auto;  # Auto-detect CPU cores
worker_cpu_affinity auto;  # Bind workers to CPU cores
worker_rlimit_nofile 65535;  # File descriptor limit per worker

pid /var/run/nginx.pid;

# Events block - connection handling optimization
events {
    # Connection method optimization
    use epoll;  # Linux-specific efficient method
    
    # Worker connections
    worker_connections 4096;  # Connections per worker
    
    # Connection optimization
    multi_accept on;  # Accept multiple connections at once
    accept_mutex off;  # Disable accept mutex for better performance
}

http {
    # Basic settings
    sendfile on;  # Efficient file transfer
    tcp_nopush on;  # Send headers in one packet
    tcp_nodelay on;  # Don't buffer small packets
    
    # Connection keep-alive optimization
    keepalive_timeout 65;
    keepalive_requests 1000;  # Requests per keep-alive connection
    
    # Client settings optimization
    client_max_body_size 64m;
    client_body_buffer_size 1m;
    client_header_buffer_size 4k;
    large_client_header_buffers 8 8k;
    client_body_timeout 60s;
    client_header_timeout 60s;
    send_timeout 60s;
    
    # Hash table optimizations
    server_names_hash_bucket_size 128;
    server_names_hash_max_size 2048;
    types_hash_max_size 4096;
    types_hash_bucket_size 128;
    
    # File descriptor optimization
    open_file_cache max=10000 inactive=60s;
    open_file_cache_valid 120s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;
    
    # MIME types
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    
    # Logging optimization
    log_format optimized '$remote_addr - [$time_local] '
                        '"$request" $status $body_bytes_sent '
                        'rt=$request_time urt="$upstream_response_time"';
    
    access_log /var/log/nginx/access.log optimized buffer=32k flush=5s;
    error_log /var/log/nginx/error.log warn;
    
    # Gzip compression optimization
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_comp_level 6;
    gzip_proxied any;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;
    
    # SSL optimization
    ssl_session_cache shared:SSL:50m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;
    
    # Include other configurations
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
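
After reloading with the tuned configuration, it is worth confirming that the worker count and file-descriptor limits actually took effect. A quick sanity check (the patterns below are illustrative, not exhaustive):

# Verify the tuned settings are live
sudo nginx -t && sudo systemctl reload nginx
ps -eo pid,psr,cmd | grep "[n]ginx: worker"            # One worker per core, pinned by CPU affinity
for pid in $(pgrep -f "nginx: worker"); do
    sudo grep "Max open files" /proc/$pid/limits       # Should match worker_rlimit_nofile (65535)
done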

Worker Process Optimization

# Create worker optimization script
sudo nano /usr/local/bin/nginx-optimize.sh
#!/bin/bash

# NGINX Performance Optimizer
CPU_CORES=$(nproc)
TOTAL_RAM=$(free -m | awk 'NR==2{print $2}')

optimize_workers() {
    echo "=== NGINX Worker Optimization ==="
    echo "CPU Cores: $CPU_CORES"
    echo "Total RAM: ${TOTAL_RAM}MB"
    echo
    
    # Calculate optimal worker processes
    if [ "$CPU_CORES" -le 2 ]; then
        WORKER_PROCESSES=$CPU_CORES
    elif [ "$CPU_CORES" -le 8 ]; then
        WORKER_PROCESSES=$CPU_CORES
    else
        WORKER_PROCESSES=8  # Cap at 8 for most workloads
    fi
    
    # Calculate worker connections
    if [ "$TOTAL_RAM" -lt 1024 ]; then
        WORKER_CONNECTIONS=1024
    elif [ "$TOTAL_RAM" -lt 2048 ]; then
        WORKER_CONNECTIONS=2048
    elif [ "$TOTAL_RAM" -lt 4096 ]; then
        WORKER_CONNECTIONS=4096
    else
        WORKER_CONNECTIONS=8192
    fi
    
    # File descriptor limit
    FILE_DESCRIPTOR_LIMIT=$((WORKER_PROCESSES * WORKER_CONNECTIONS * 2))
    
    echo "Recommended Configuration:"
    echo "worker_processes: $WORKER_PROCESSES"
    echo "worker_connections: $WORKER_CONNECTIONS"
    echo "worker_rlimit_nofile: $FILE_DESCRIPTOR_LIMIT"
    echo
    
    # Generate configuration
    cat > /tmp/nginx_optimization.conf << EOF
# Auto-generated optimization
worker_processes $WORKER_PROCESSES;
worker_cpu_affinity auto;
worker_rlimit_nofile $FILE_DESCRIPTOR_LIMIT;

events {
    use epoll;
    worker_connections $WORKER_CONNECTIONS;
    multi_accept on;
    accept_mutex off;
}
EOF
    
    echo "Configuration saved to: /tmp/nginx_optimization.conf"
}

optimize_system() {
    echo "=== System Optimization ==="
    
    # System limits
    cat > /tmp/nginx_limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
www-data soft nofile 65535
www-data hard nofile 65535
EOF
    
    echo "System limits: /tmp/nginx_limits.conf"
    
    # Kernel parameters
    cat > /tmp/nginx_sysctl.conf << EOF
net.core.somaxconn = 32768
net.core.netdev_max_backlog = 32768
net.ipv4.tcp_max_syn_backlog = 65536
net.ipv4.tcp_fin_timeout = 10
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_tw_reuse = 1
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
fs.file-max = 2097152
EOF
    
    echo "Kernel parameters: /tmp/nginx_sysctl.conf"
}

case "${1:-worker}" in
    worker)
        optimize_workers
        ;;
    system)
        optimize_system
        ;;
    all)
        optimize_workers
        optimize_system
        ;;
    *)
        echo "Usage: $0 {worker|system|all}"
        ;;
esac

# Make executable: sudo chmod +x /usr/local/bin/nginx-optimize.sh
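
On a hypothetical 4-core host with 8 GB of RAM, the worker report would look roughly like this (your values will differ):

/usr/local/bin/nginx-optimize.sh worker
# === NGINX Worker Optimization ===
# CPU Cores: 4
# Total RAM: 7976MB
#
# Recommended Configuration:
# worker_processes: 4
# worker_connections: 8192
# worker_rlimit_nofile: 65536
#
# Configuration saved to: /tmp/nginx_optimization.conf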

High-Performance Virtual Host

# Create high-performance virtual host
sudo nano /etc/nginx/sites-available/performance.example.com
# High-performance virtual host
upstream backend_fast {
    least_conn;
    
    server 192.168.1.10:8080 weight=3 max_fails=2 fail_timeout=5s;
    server 192.168.1.11:8080 weight=3 max_fails=2 fail_timeout=5s;
    server 192.168.1.12:8080 weight=2 max_fails=2 fail_timeout=5s;
    
    # Connection pooling
    keepalive 32;
    keepalive_requests 1000;
    keepalive_timeout 60s;
}

# Cache zone
proxy_cache_path /var/cache/nginx/performance
                 levels=2:2
                 keys_zone=perf_cache:100m
                 max_size=2g
                 inactive=60m
                 use_temp_path=off;

server {
    listen 80 reuseport;  # Enable port reuse
    server_name performance.example.com;
    
    # Performance logging
    access_log /var/log/nginx/performance.log optimized buffer=64k flush=5s;
    error_log /var/log/nginx/performance-error.log warn;
    
    # Static content optimization
    location ~* \.(jpg|jpeg|png|gif|css|js|woff|woff2)$ {
        root /var/www/performance/public_html;
        
        # Performance settings
        expires 1y;
        add_header Cache-Control "public, immutable";
        
        # File serving optimization
        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;
        
        # Disable logging for static files
        access_log off;
        
        # File cache
        open_file_cache max=10000 inactive=120s;
        open_file_cache_valid 120s;
        open_file_cache_min_uses 2;
    }
    
    # Dynamic content with caching
    location / {
        proxy_pass http://backend_fast;
        
        # Caching
        proxy_cache perf_cache;
        proxy_cache_valid 200 302 10m;
        proxy_cache_valid 404 1m;
        proxy_cache_use_stale error timeout updating;
        proxy_cache_revalidate on;
        proxy_cache_lock on;
        
        # Optimized proxy settings
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        
        # Fast timeouts
        proxy_connect_timeout 3s;
        proxy_send_timeout 10s;
        proxy_read_timeout 10s;
        
        # Buffer optimization
        proxy_buffering on;
        proxy_buffer_size 8k;
        proxy_buffers 16 8k;
        proxy_busy_buffers_size 16k;
        
        # Upstream optimization
        proxy_next_upstream error timeout http_500 http_502;
        proxy_next_upstream_tries 2;
        proxy_next_upstream_timeout 5s;
        
        add_header X-Cache-Status $upstream_cache_status always;
    }
    
    # API with minimal caching
    location /api/ {
        proxy_pass http://backend_fast/api/;
        
        # Minimal caching
        proxy_cache perf_cache;
        proxy_cache_valid 200 30s;
        proxy_cache_bypass $http_authorization;
        
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        
        # Fast API timeouts
        proxy_connect_timeout 1s;
        proxy_send_timeout 5s;
        proxy_read_timeout 5s;
        
        # Small buffers
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
        
        add_header X-Cache-Status $upstream_cache_status always;
    }
    
    # Health check
    location = /health {
        access_log off;
        default_type text/plain;
        return 200 "OK\n";
    }
    
    # Status endpoint
    location = /nginx-status {
        stub_status on;
        access_log off;
        allow 127.0.0.1;
        allow 192.168.1.0/24;
        deny all;
    }
}
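
Once the site is enabled and reloaded, a couple of curl requests confirm that the cache zones behave as configured (the hostname assumes a matching DNS or /etc/hosts entry):

# First request should report MISS, a repeat within 10 minutes HIT
curl -sI http://performance.example.com/ | grep -i x-cache-status
curl -sI http://performance.example.com/ | grep -i x-cache-status

# Requests carrying an Authorization header bypass the API cache
curl -sI -H "Authorization: Bearer test" http://performance.example.com/api/ | grep -i x-cache-status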

Performance Monitoring

graph TD
    A[Performance Monitoring] --> B[Real-time Metrics]
    A --> C[Load Testing]
    A --> D[Resource Monitoring]
    
    B --> E[RPS Monitoring<br/>Response Times<br/>Error Rates]
    C --> F[ApacheBench<br/>wrk<br/>Load Testing]
    D --> G[CPU Usage<br/>Memory Usage<br/>Network I/O]
    
    H[Performance Tools] --> I[Built-in Status]
    H --> J[Third-party Tools]
    H --> K[Custom Scripts]
    
    style A fill:#e1f5fe
    style H fill:#e8f5e8
    style E fill:#fff3e0
    style F fill:#e3f2fd
    style G fill:#e8f5e8

# Create performance monitor script
sudo nano /usr/local/bin/nginx-perf-monitor.sh
#!/bin/bash

# NGINX Performance Monitor
ACCESS_LOG="/var/log/nginx/access.log"

show_performance() {
    echo "=== NGINX Performance Statistics ==="
    echo "Generated: $(date)"
    echo
    
    # NGINX status
    echo "--- NGINX Status ---"
    if curl -s "http://127.0.0.1/nginx-status" 2>/dev/null; then
        echo
    else
        echo "Status endpoint not available"
    fi
    
    # System resources
    echo "--- System Resources ---"
    echo "CPU Usage: $(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | sed 's/%us,//')"
    echo "Memory: $(free -h | grep Mem | awk '{printf "%s / %s", $3, $2}')"
    echo "Load: $(uptime | awk -F'load average:' '{print $2}')"
    
    # Network connections
    echo
    echo "--- Connections ---"
    echo "Total: $(ss -tun | grep ':80\|:443' | wc -l)"
    echo "Established: $(ss -tun | grep ESTAB | grep ':80\|:443' | wc -l)"
    
    # Request rate
    if [ -f "$ACCESS_LOG" ]; then
        echo
        echo "--- Request Rate ---"
        local current_minute=$(date '+%d/%b/%Y:%H:%M')
        local requests=$(grep "$current_minute" "$ACCESS_LOG" | wc -l)
        echo "Current minute: $requests requests"
        
        # Response time analysis
        echo
        echo "--- Response Times (Last 100) ---"
        if tail -100 "$ACCESS_LOG" | grep -q 'rt='; then
            tail -100 "$ACCESS_LOG" | grep 'rt=' | awk -F'rt=' '{print $2}' | awk '{print $1}' | awk '
            {
                sum += $1; count++;
                if ($1 > max) max = $1;
                if (min == 0 || $1 < min) min = $1;
            }
            END {
                if (count > 0) {
                    printf "Avg: %.3fs, Min: %.3fs, Max: %.3fs\n", sum/count, min, max;
                }
            }'
        fi
    fi
    
    # Worker processes
    echo
    echo "--- Workers ---"
    local workers=$(pgrep -cf "nginx: worker")  # -f matches the full command line
    echo "Active workers: $workers"
}

benchmark() {
    local url="${1:-http://localhost}"
    local requests="${2:-1000}"
    local concurrency="${3:-10}"
    
    echo "=== Benchmark Test ==="
    echo "URL: $url"
    echo "Requests: $requests, Concurrency: $concurrency"
    echo
    
    if command -v ab >/dev/null 2>&1; then
        ab -n "$requests" -c "$concurrency" "$url" | grep -E "(Requests per second|Time per request)"
    else
        echo "Apache Bench (ab) not available"
    fi
    
    # Single request timing
    echo
    echo "--- Single Request ---"
    curl -w "Total: %{time_total}s, Connect: %{time_connect}s, TTFB: %{time_starttransfer}s\n" -o /dev/null -s "$url"
}

case "${1:-stats}" in
    stats)
        show_performance
        ;;
    benchmark)
        benchmark "$2" "$3" "$4"
        ;;
    watch)
        while true; do
            clear
            show_performance
            echo "Press Ctrl+C to exit..."
            sleep 5
        done
        ;;
    *)
        echo "Usage: $0 {stats|benchmark [url] [requests] [concurrency]|watch}"
        ;;
esac

# Make executable: sudo chmod +x /usr/local/bin/nginx-perf-monitor.sh
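
Typical invocations look like this; note that ab, which the benchmark subcommand wraps, requires the URL to include a path (a trailing slash is enough):

# One-off snapshot of status, resources, and request rates
/usr/local/bin/nginx-perf-monitor.sh stats

# 2000 requests at concurrency 20 against the local server
/usr/local/bin/nginx-perf-monitor.sh benchmark http://localhost/ 2000 20

# Continuously refreshed view (Ctrl+C to exit)
/usr/local/bin/nginx-perf-monitor.sh watch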

SSL Performance Optimization

# Create SSL performance configuration
sudo nano /etc/nginx/snippets/ssl-performance.conf
# SSL/TLS performance optimization

# SSL session optimization
ssl_session_cache shared:SSL:100m;
ssl_session_timeout 24h;
ssl_session_tickets off;

# SSL buffer optimization
ssl_buffer_size 4k;  # Smaller buffer for faster response

# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 10s;

# Modern SSL for performance
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_prefer_server_ciphers off;

# Performance headers
add_header Strict-Transport-Security "max-age=63072000" always;
add_header X-Frame-Options "SAMEORIGIN" always;
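
To apply the snippet, include it from any TLS-enabled server block alongside your certificate directives. A minimal sketch, assuming the backend_fast upstream from the virtual host above; the certificate paths are placeholders for your own files:

server {
    listen 443 ssl http2;
    server_name performance.example.com;

    # Placeholder certificate paths - substitute your own
    ssl_certificate /etc/ssl/certs/performance.example.com.crt;
    ssl_certificate_key /etc/ssl/private/performance.example.com.key;

    include /etc/nginx/snippets/ssl-performance.conf;

    location / {
        proxy_pass http://backend_fast;
    }
}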

Testing Performance

# Performance testing and optimization

# 1. Apply optimizations
/usr/local/bin/nginx-optimize.sh all

# 2. Apply system limits
sudo cp /tmp/nginx_limits.conf /etc/security/limits.d/nginx.conf
sudo cp /tmp/nginx_sysctl.conf /etc/sysctl.d/nginx.conf
sudo sysctl --system  # Loads settings from /etc/sysctl.d/

# 3. Create cache directory
sudo mkdir -p /var/cache/nginx/performance
sudo chown -R www-data:www-data /var/cache/nginx

# 4. Enable performance site
sudo ln -s /etc/nginx/sites-available/performance.example.com /etc/nginx/sites-enabled/

# 5. Test configuration
sudo nginx -t

# 6. Reload NGINX
sudo systemctl reload nginx

# 7. Monitor performance
/usr/local/bin/nginx-perf-monitor.sh stats

# 8. Run benchmark
/usr/local/bin/nginx-perf-monitor.sh benchmark http://performance.example.com/ 5000 50

# 9. Watch real-time stats
/usr/local/bin/nginx-perf-monitor.sh watch

# 10. Install performance tools (run this before step 8 if ab is not already installed)
sudo apt install apache2-utils  # For ab (Apache Bench)

# Optional: Install wrk for advanced testing
# git clone https://github.com/wg/wrk.git
# cd wrk && make && sudo cp wrk /usr/local/bin/
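
If you do build wrk, a short run against the tuned site might look like this (thread count, connections, and duration are just starting points):

# 30-second load test: 4 threads, 100 open connections, with latency distribution
wrk -t4 -c100 -d30s --latency http://performance.example.com/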

What’s Next?

Excellent! You’ve implemented comprehensive performance optimizations that significantly improve NGINX throughput and reduce latency. Your server now handles high-traffic loads efficiently with optimized resource utilization.

Coming up in Part 17: NGINX Troubleshooting and Debugging

This is Part 16 of our 22-part NGINX series. Your server is now performance-optimized for maximum efficiency! Next, we’ll master troubleshooting techniques. Questions? Share them in the comments!
