Welcome to Part 6 of our comprehensive NGINX on Ubuntu series! We’ve secured our server with SSL certificates. Now it’s time to explore one of NGINX’s most powerful features: reverse proxying. Learn how to use NGINX as a gateway to backend applications, APIs, and microservices.
Understanding Reverse Proxy
A reverse proxy sits between clients and backend servers, forwarding client requests to appropriate backend services and returning responses. Unlike a forward proxy that acts on behalf of clients, a reverse proxy acts on behalf of servers.
graph TD A[Client Browsers] --> B[NGINX Reverse Proxy :80/443] B --> C{Route Based On} C -->|/api/*| D[Backend API Server :3000] C -->|/app/*| E[Node.js Application :3001] C -->|/blog/*| F[WordPress :8080] C -->|Static Files| G[Local File System] D --> H[Database] E --> I[Redis Cache] F --> J[MySQL Database] K[Benefits] --> L[Load Balancing] K --> M[SSL Termination] K --> N[Caching] K --> O[Security] style B fill:#e1f5fe style C fill:#fff3e0 style D fill:#e8f5e8 style E fill:#e8f5e8 style F fill:#e8f5e8
Basic Reverse Proxy Configuration
Let’s start with a simple reverse proxy setup to forward requests to a backend application.
# Create a simple Node.js backend for testing
sudo mkdir -p /var/www/backend
sudo nano /var/www/backend/app.js
// Simple Node.js backend application
const http = require('http');
const os = require('os');

// Port comes from the environment, falling back to 3000.
const port = process.env.PORT || 3000;

// Build the JSON payload describing this server and the incoming request,
// so the tutorial reader can see exactly which headers NGINX forwarded.
function describeRequest(req) {
  return {
    message: 'Hello from Backend Server',
    server: os.hostname(),
    port: process.env.PORT || 3000,
    timestamp: new Date().toISOString(),
    path: req.url,
    method: req.method,
    headers: req.headers
  };
}

const server = http.createServer((req, res) => {
  // Every request gets a 200 with a pretty-printed JSON echo of itself.
  res.writeHead(200, {'Content-Type': 'application/json'});
  res.end(JSON.stringify(describeRequest(req), null, 2));
});

server.listen(port, () => {
  console.log(`Backend server running on port ${port}`);
});
# Install Node.js if not already installed
# sudo apt install nodejs npm -y
# cd /var/www/backend && node app.js &
Basic Reverse Proxy Virtual Host
# Create reverse proxy configuration
sudo nano /etc/nginx/sites-available/api.example.com
# Basic reverse proxy: every request to api.example.com is forwarded
# to the Node.js backend on 127.0.0.1:3000.
server {
listen 80;
server_name api.example.com;
# Proxy all requests to backend
location / {
proxy_pass http://127.0.0.1:3000;
# HTTP/1.1 toward the backend enables keepalive and WebSocket upgrades
proxy_http_version 1.1;
# Forward the client's Upgrade header so WebSocket handshakes pass through.
# NOTE(review): Connection is hard-coded to 'upgrade', so it is sent even on
# plain HTTP requests; the canonical pattern maps $http_upgrade to the
# Connection value in the http block — confirm your backend tolerates this.
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
# Preserve the original Host and the real client address for the backend
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Never answer an upgrade request from the proxy cache
proxy_cache_bypass $http_upgrade;
# Timeouts
# Connect / send / read timeouts toward the backend, each 60 seconds
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# Health check endpoint
# access_log off keeps frequent probe traffic out of the access log
location /health {
access_log off;
proxy_pass http://127.0.0.1:3000/health;
}
}
Advanced Proxy Configuration
Path-Based Routing
# Multi-service reverse proxy
sudo nano /etc/nginx/sites-available/services.example.com
# Path-based routing: one virtual host fronting several backend services,
# selected by URL prefix.
server {
listen 80;
server_name services.example.com;
# API service
location /api/ {
# The trailing slash in the proxy_pass URI replaces the matched /api/
# prefix, so the backend sees paths without it
proxy_pass http://127.0.0.1:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# proxy_redirect off disables rewriting of Location/Refresh headers from
# the backend (it does NOT strip the /api prefix — the trailing slash
# on proxy_pass above does that)
proxy_redirect off;
}
# Frontend application
location /app/ {
proxy_pass http://127.0.0.1:3001/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Static admin panel
# No trailing-slash rewrite here: /admin/ is forwarded to the backend's
# /admin/ path unchanged
location /admin/ {
proxy_pass http://127.0.0.1:8080/admin/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Admin-specific headers
# NOTE(review): $remote_user is only populated when auth_basic (or another
# auth module) is active on this location — confirm auth is configured
proxy_set_header X-Admin-User $remote_user;
}
# WebSocket support
location /ws/ {
proxy_pass http://127.0.0.1:3002/;
# WebSocket handshakes require HTTP/1.1 and the Upgrade/Connection pair
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket specific timeouts
# 24h read timeout keeps long-lived idle WebSocket connections open
proxy_read_timeout 86400;
}
# Serve static files directly
# alias maps /static/foo -> /var/www/services/static/foo without proxying
location /static/ {
alias /var/www/services/static/;
expires 1y;
add_header Cache-Control "public, immutable";
}
}
Load Balancing with Upstream Servers
Distribute traffic across multiple backend servers for scalability and reliability.
# Load balanced configuration
sudo nano /etc/nginx/sites-available/balanced.example.com
# Define upstream server pools
# Weighted pool for the API: requests go to the server with the fewest
# active connections, biased by weight.
upstream backend_api {
# Load balancing methods
least_conn; # or ip_hash, hash, random
# Backend servers
# weight biases selection; after max_fails failures within fail_timeout
# the server is marked unavailable for fail_timeout seconds
server 127.0.0.1:3000 weight=3 max_fails=3 fail_timeout=30s;
server 127.0.0.1:3001 weight=2 max_fails=3 fail_timeout=30s;
# backup: only receives traffic when all primary servers are down
server 127.0.0.1:3002 weight=1 max_fails=3 fail_timeout=30s backup;
# Health checks (NGINX Plus feature, alternative shown below)
# health_check interval=5s passes=2 fails=3;
# Keep alive connections
# Up to 32 idle upstream connections cached per worker; requires
# proxy_http_version 1.1 and an empty Connection header in the location
keepalive 32;
}
# Equal-weight pool for the web application (default round-robin).
upstream backend_web {
# Simple round-robin
server 127.0.0.1:8001;
server 127.0.0.1:8002;
server 127.0.0.1:8003;
# NOTE: keepalive only takes effect if the proxying location sets
# proxy_http_version 1.1 and clears the Connection header
keepalive 16;
}
# Load-balanced virtual host: /api/ goes to the weighted backend_api pool,
# everything else to the round-robin backend_web pool.
server {
    listen 80;
    server_name balanced.example.com;

    # API endpoints - load balanced
    location /api/ {
        # Trailing slash strips the matched /api/ prefix before forwarding
        proxy_pass http://backend_api/;
        # HTTP/1.1 with an empty Connection header is required for the
        # upstream keepalive pool to actually be used
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # Load balancer failover: retry the next upstream on errors,
        # timeouts, malformed responses, or 5xx — at most 3 attempts
        # within a 10 second budget
        proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
        proxy_next_upstream_tries 3;
        proxy_next_upstream_timeout 10s;
    }

    # Web application - load balanced
    location / {
        proxy_pass http://backend_web;
        # FIX: without HTTP/1.1 and a cleared Connection header, the
        # "keepalive 16" setting on backend_web was never exercised —
        # every request opened a fresh upstream connection
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Health check endpoint answered by NGINX itself (no upstream involved)
    location /nginx-health {
        access_log off;
        # FIX: default_type sets the response Content-Type for "return";
        # the previous "add_header Content-Type" appended a second
        # Content-Type header alongside the default one
        default_type text/plain;
        return 200 "healthy\n";
    }
}
Caching with Reverse Proxy
graph TD A[Client Request] --> B[NGINX Proxy] B --> C{Cache Hit?} C -->|Yes| D[Serve from Cache] C -->|No| E[Forward to Backend] E --> F[Backend Response] F --> G[Store in Cache] G --> H[Return to Client] D --> H I[Cache Management] --> J[Cache Keys] I --> K[TTL Settings] I --> L[Cache Purging] I --> M[Cache Zones] style B fill:#e1f5fe style C fill:#fff3e0 style D fill:#e8f5e8 style G fill:#e8f5e8
# Configure proxy caching
sudo nano /etc/nginx/nginx.conf
# Add to http block in nginx.conf
# Cache zone definitions — these proxy_cache_path directives belong in the
# http block of nginx.conf, not inside a server block.
http {
# Proxy cache configuration
# levels=1:2 spreads cached files over a two-level directory tree;
# keys_zone reserves 10 MB of shared memory for cache keys;
# entries unused for "inactive" are evicted regardless of freshness;
# use_temp_path=off writes responses directly into the cache directory
proxy_cache_path /var/cache/nginx/proxy
levels=1:2
keys_zone=api_cache:10m
max_size=1g
inactive=60m
use_temp_path=off;
# Separate, larger zone for static assets with a longer idle lifetime
proxy_cache_path /var/cache/nginx/static
levels=1:2
keys_zone=static_cache:10m
max_size=2g
inactive=24h
use_temp_path=off;
# Cache directory
# Create the directories and hand ownership to the nginx worker user first:
# sudo mkdir -p /var/cache/nginx/proxy
# sudo mkdir -p /var/cache/nginx/static
# sudo chown -R www-data:www-data /var/cache/nginx
}
# Cached reverse proxy configuration
sudo nano /etc/nginx/sites-available/cached.example.com
# Caching reverse proxy: API responses cached briefly, static assets for a day.
server {
listen 80;
server_name cached.example.com;
# API with caching
location /api/ {
proxy_pass http://127.0.0.1:3000/;
# Cache configuration
# 200/302 cached 10 minutes, 404s only 1 minute
proxy_cache api_cache;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
# Serve stale entries while the backend is erroring or being refreshed
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
# Revalidate expired entries with conditional requests (If-Modified-Since)
proxy_cache_revalidate on;
# Only one request at a time populates a given cache entry
proxy_cache_lock on;
# Cache key
proxy_cache_key "$scheme$request_method$host$request_uri";
# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Add cache status header (HIT/MISS/BYPASS/...) so clients can observe caching
add_header X-Cache-Status $upstream_cache_status;
# Bypass cache for certain conditions
# Requests carrying Pragma or Authorization headers skip the cache and
# their responses are not stored
proxy_cache_bypass $http_pragma $http_authorization;
proxy_no_cache $http_pragma $http_authorization;
}
# Static content with long cache
location /assets/ {
proxy_pass http://127.0.0.1:3000/assets/;
proxy_cache static_cache;
proxy_cache_valid 200 24h;
proxy_cache_use_stale error timeout updating;
proxy_set_header Host $host;
add_header X-Cache-Status $upstream_cache_status;
expires 1y;
add_header Cache-Control "public, immutable";
}
# Cache purge endpoint (for authorized users)
# Restricted to requests originating from this machine
location /cache-purge {
allow 127.0.0.1;
deny all;
# NOTE(review): proxy_cache_purge is NOT available in stock nginx on
# Ubuntu — it requires NGINX Plus or the third-party ngx_cache_purge
# module; "nginx -t" will fail on this line without one of them installed
proxy_cache_purge api_cache "$scheme$request_method$host$request_uri";
}
}
Security and Rate Limiting
# Security-focused reverse proxy
sudo nano /etc/nginx/sites-available/secure-proxy.example.com
# Rate limiting zones (add to nginx.conf http block)
# Shared-memory zones for rate and connection limiting — these must live in
# the http block of nginx.conf.
http {
# Rate limiting
# $binary_remote_addr keys the zones by client IP in compact binary form;
# a 10m zone holds on the order of 160k distinct addresses
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
# Much stricter budget for authentication attempts
limit_req_zone $binary_remote_addr zone=login_limit:10m rate=1r/s;
# Zone for limiting concurrent connections per client IP
limit_conn_zone $binary_remote_addr zone=conn_limit:10m;
}
# Security-hardened reverse proxy: rate limits, header hiding, IP allow-lists.
server {
listen 80;
server_name secure-proxy.example.com;
# API with rate limiting
location /api/ {
# Rate limiting
# burst=20 queues short spikes; nodelay serves queued requests immediately
limit_req zone=api_limit burst=20 nodelay;
# At most 10 simultaneous connections per client IP
limit_conn conn_limit 10;
# Security headers
# Strip backend fingerprinting headers from responses
proxy_hide_header X-Powered-By;
proxy_hide_header Server;
# Forward to backend
proxy_pass http://127.0.0.1:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# NOTE(review): proxy_ssl_* directives only apply when proxying to an
# https:// upstream; this location proxies plain http, so they are inert
# here. If you switch to an HTTPS backend, prefer proxy_ssl_verify on
# with proxy_ssl_trusted_certificate — "off" skips certificate checks.
proxy_ssl_verify off;
proxy_ssl_session_reuse on;
}
# Login endpoint with stricter limits
# Longest-prefix match wins, so this overrides /api/ for /api/login
location /api/login {
limit_req zone=login_limit burst=5 nodelay;
proxy_pass http://127.0.0.1:3000/login;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Block certain endpoints
location /api/admin {
# Allow only specific IPs
# Private LAN ranges only; everyone else gets 403
allow 192.168.1.0/24;
allow 10.0.0.0/8;
deny all;
proxy_pass http://127.0.0.1:3000/admin;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
Monitoring and Logging
# Enable detailed proxy logging
sudo nano /etc/nginx/sites-available/monitored-proxy.example.com
# Custom log format for proxy monitoring
# Custom access-log format adding upstream address, upstream response time,
# total request time, and cache status to the standard combined fields.
# log_format is an http-context directive; placing it at the top of a
# sites-available file works because those files are included inside the
# http block of nginx.conf.
log_format proxy_log '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'upstream: $upstream_addr '
'response_time: $upstream_response_time '
'request_time: $request_time '
'cache: $upstream_cache_status';
# Reverse proxy with detailed per-request logging using the proxy_log format.
server {
listen 80;
server_name monitored-proxy.example.com;
# Dedicated log files so proxy traffic can be analyzed separately
access_log /var/log/nginx/proxy.access.log proxy_log;
error_log /var/log/nginx/proxy.error.log;
location / {
proxy_pass http://127.0.0.1:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Response headers for monitoring
# Expose cache status and backend latency to clients/monitoring tools
add_header X-Proxy-Cache $upstream_cache_status;
add_header X-Response-Time $upstream_response_time;
}
}
Testing and Troubleshooting
# Enable and test configurations
sudo ln -s /etc/nginx/sites-available/api.example.com /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
# Test basic proxy
curl -H "Host: api.example.com" http://localhost/
# Test with headers
curl -H "Host: api.example.com" -H "X-Test: value" http://localhost/
# Test load balancing
for i in {1..10}; do curl -H "Host: balanced.example.com" http://localhost/; done
# Monitor proxy performance
sudo tail -f /var/log/nginx/proxy.access.log
# Check upstream server health
curl -I http://127.0.0.1:3000/health
# Cache statistics
find /var/cache/nginx -type f | wc -l
What’s Next?
Excellent! You’ve mastered NGINX reverse proxy configuration. You can now route traffic to backend applications, implement load balancing, add caching, and secure your proxy setup.
Coming up in Part 7: NGINX Load Balancing Strategies and Health Checks
References
This is Part 6 of our 22-part NGINX series. You’re now running sophisticated proxy setups! Next, we’ll dive deeper into load balancing strategies. Questions about reverse proxy? Share them in the comments!