The Complete NGINX on Ubuntu Series: Part 13 – Microservices Architecture and API Gateway

Welcome to Part 13 of our comprehensive NGINX on Ubuntu series! We’ll transform NGINX into a powerful API Gateway for microservices architecture, implementing service routing, load balancing, and centralized management.

Microservices and API Gateway Fundamentals

An API Gateway serves as the single entry point for microservices, handling routing, authentication, rate limiting, and cross-cutting concerns while enabling service independence and scalability.

graph TD
    A[Client Applications] --> B[NGINX API Gateway]
    
    B --> C[Authentication Service]
    B --> D[User Service]
    B --> E[Order Service]
    B --> F[Payment Service]
    B --> G[Notification Service]
    
    H[API Gateway Features] --> I[Service Discovery]
    H --> J[Load Balancing]
    H --> K[Rate Limiting]
    H --> L[Authentication]
    H --> M[Request Transform]
    H --> N[Circuit Breaking]
    
    style B fill:#e1f5fe
    style H fill:#e8f5e8
    style C fill:#e3f2fd
    style D fill:#e3f2fd
    style E fill:#e3f2fd
    style F fill:#e3f2fd
    style G fill:#e3f2fd

Basic API Gateway Setup

# Configure microservice upstreams in the main NGINX configuration
sudo nano /etc/nginx/nginx.conf

# Add the following to the http block
http {
    # Microservice upstreams
    upstream auth_service {
        server 127.0.0.1:3001;
        server 127.0.0.1:3002;
        keepalive 32;
    }
    
    upstream user_service {
        server 127.0.0.1:4001;
        server 127.0.0.1:4002;
        keepalive 32;
    }
    
    upstream order_service {
        server 127.0.0.1:5001;
        server 127.0.0.1:5002;
        keepalive 32;
    }
    
    upstream payment_service {
        server 127.0.0.1:6001;
        server 127.0.0.1:6002;
        keepalive 32;
    }
    
    # API Gateway rate limiting
    limit_req_zone $binary_remote_addr zone=api_global:10m rate=100r/s;
    limit_req_zone $binary_remote_addr zone=api_auth:10m rate=10r/s;
    limit_req_zone $http_x_api_key zone=api_key_limit:10m rate=1000r/s;
}
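
The upstream blocks above assume services are already listening on those ports. If you want to smoke-test the gateway before the real microservices exist, a rough sketch using Python's built-in HTTP server can stand in for them (these are hypothetical placeholders, not real services):

# Start throwaway placeholder backends on the upstream ports so that a
# curl smoke test through the gateway has something to answer.
# Replace these with your actual microservices.
for port in 3001 3002 4001 4002 5001 5002 6001 6002; do
    python3 -m http.server "$port" --bind 127.0.0.1 >/dev/null 2>&1 &
    echo "placeholder backend on 127.0.0.1:$port (pid $!)"
done

# Stop them again later with: kill $(jobs -p)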

API Gateway Configuration

# Create API Gateway virtual host
sudo nano /etc/nginx/sites-available/api-gateway.example.com
server {
    listen 80;
    server_name api-gateway.example.com;
    
    # Global API settings
    client_max_body_size 10m;
    client_body_timeout 30s;
    
    # CORS headers
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
    add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-API-Key" always;
    
    # Handle preflight requests ("add_header" is not valid inside a server-level
    # "if" block, so the CORS headers above, declared with "always", also cover
    # this 204 response)
    if ($request_method = 'OPTIONS') {
        return 204;
    }
    
    # Global rate limiting
    limit_req zone=api_global burst=200 nodelay;
    
    # API Gateway health check
    location /health {
        access_log off;
        default_type application/json;
        return 200 '{"status":"healthy","timestamp":"$time_iso8601","gateway":"nginx"}';
    }
    
    # Authentication service
    location /api/v1/auth/ {
        limit_req zone=api_auth burst=20 nodelay;
        
        proxy_pass http://auth_service/;
        include /etc/nginx/snippets/api-proxy.conf;
        
        proxy_set_header X-Service "auth";
        access_log /var/log/nginx/api-auth.log combined;
    }
    
    # User management service
    location /api/v1/users/ {
        auth_request /api/internal/auth;
        # Capture the user ID returned by the auth subrequest
        auth_request_set $auth_user_id $upstream_http_x_user_id;
        
        proxy_pass http://user_service/;
        include /etc/nginx/snippets/api-proxy.conf;
        
        proxy_set_header X-Service "users";
        proxy_set_header X-User-ID $auth_user_id;
        
        access_log /var/log/nginx/api-users.log combined;
    }
    
    # Order management service
    location /api/v1/orders/ {
        auth_request /api/internal/auth;
        auth_request_set $auth_user_id $upstream_http_x_user_id;
        
        proxy_pass http://order_service/;
        include /etc/nginx/snippets/api-proxy.conf;
        
        proxy_set_header X-Service "orders";
        proxy_set_header X-User-ID $auth_user_id;
        
        access_log /var/log/nginx/api-orders.log combined;
    }
    
    # Payment service with enhanced security
    location /api/v1/payments/ {
        auth_request /api/internal/auth;
        auth_request_set $auth_user_id $upstream_http_x_user_id;
        limit_req zone=api_auth burst=10 nodelay;
        
        proxy_pass http://payment_service/;
        include /etc/nginx/snippets/api-proxy.conf;
        
        proxy_set_header X-Service "payments";
        proxy_set_header X-User-ID $auth_user_id;
        proxy_set_header X-Payment-Gateway "true";
        
        access_log /var/log/nginx/api-payments.log combined;
    }
    
    # Internal authentication endpoint
    location = /api/internal/auth {
        internal;
        proxy_pass http://auth_service/validate;
        proxy_pass_request_body off;
        proxy_set_header Content-Length "";
        proxy_set_header X-Original-URI $request_uri;
        proxy_set_header X-Original-Method $request_method;
        proxy_set_header Authorization $http_authorization;
        proxy_set_header X-API-Key $http_x_api_key;
    }
    
    # Default error response
    location / {
        default_type application/json;
        return 404 '{"error":"endpoint_not_found","message":"API endpoint not found"}';
    }
}
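
A couple of quick checks (assuming the site is enabled and apache2-utils is installed for ab) confirm the preflight handling and the stricter auth rate limit:

# Preflight check: expect a 204 with the CORS headers declared at server level
curl -i -X OPTIONS \
     -H "Host: api-gateway.example.com" \
     -H "Origin: https://app.example.com" \
     -H "Access-Control-Request-Method: POST" \
     http://localhost/api/v1/users/

# Rate-limit check against the auth zone (10 r/s, burst 20): with enough
# concurrency, some responses should come back 503 (limit_req's default status)
ab -n 200 -c 20 -H "Host: api-gateway.example.com" http://localhost/api/v1/auth/login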

API Proxy Configuration Snippet

# Create reusable API proxy configuration
sudo nano /etc/nginx/snippets/api-proxy.conf
# Common API proxy settings
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Request-ID $request_id;

# API Gateway specific headers
proxy_set_header X-Gateway "nginx";
proxy_set_header X-Gateway-Version "1.0";
proxy_set_header X-Client-IP $remote_addr;

# Timeouts for API calls
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;

# Buffering settings
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
proxy_busy_buffers_size 8k;

# Error handling
proxy_next_upstream error timeout http_500 http_502 http_503 http_504;
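
To verify these headers actually reach a backend, one debugging trick is to listen on one of the auth-service ports with netcat and dump whatever the gateway forwards. This is a sketch that assumes Ubuntu's openbsd netcat and that nothing else is bound to port 3001; the other upstream server may log a connect error before the retry lands on the listener:

# Capture one proxied request to inspect the headers added by api-proxy.conf
nc -l 127.0.0.1 3001 > /tmp/gateway-request.txt &
NC_PID=$!

# nc never sends a response, so give curl a short timeout
curl -s --max-time 5 -o /dev/null -X POST \
     -H "Host: api-gateway.example.com" \
     http://localhost/api/v1/auth/login

kill "$NC_PID" 2>/dev/null

# Expect X-Request-ID, X-Real-IP, X-Gateway, and X-Service in the dump
cat /tmp/gateway-request.txt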

Advanced API Gateway with Dynamic Routing

# Create advanced API Gateway
sudo nano /etc/nginx/sites-available/advanced-api.example.com
# Dynamic service mapping
map $request_uri $service_name {
    ~^/api/v1/auth/(.*)$ "auth";
    ~^/api/v1/users/(.*)$ "users";
    ~^/api/v1/orders/(.*)$ "orders";
    ~^/api/v1/payments/(.*)$ "payments";
    default "unknown";
}

# API versioning support
map $request_uri $api_version {
    ~^/api/v1/ "v1";
    ~^/api/v2/ "v2";
    default "v1";
}

# Enhanced logging with service information
# (log_format is only valid at the http level, so it sits outside the server block)
log_format api_log '$remote_addr - $remote_user [$time_local] '
                   '"$request" $status $body_bytes_sent '
                   'service="$service_name" version="$api_version" '
                   'rt=$request_time urt="$upstream_response_time"';

server {
    listen 80;
    server_name advanced-api.example.com;
    
    access_log /var/log/nginx/api-gateway.log api_log;
    
    # API Gateway info endpoint
    location /api/gateway/info {
        default_type application/json;
        return 200 '{"gateway":"nginx","version":"1.0","services":["auth","users","orders","payments"],"timestamp":"$time_iso8601"}';
    }
    
    # Dynamic service routing
    location ~ ^/api/(v[0-9]+)/([^/]+)/(.*)$ {
        set $version $1;
        set $service $2;
        set $path $3;
        
        limit_req zone=api_global burst=100 nodelay;
        
        # Authentication ("auth_request" cannot be used inside an "if" block, so
        # every request goes through the internal auth location, which
        # short-circuits for public auth-service paths, as shown below)
        auth_request /api/internal/auth;
        auth_request_set $auth_user_id $upstream_http_x_user_id;
        
        # Route to appropriate service
        if ($service = "auth") {
            proxy_pass http://auth_service/$path$is_args$args;
        }
        if ($service = "users") {
            proxy_pass http://user_service/$path$is_args$args;
        }
        if ($service = "orders") {
            proxy_pass http://order_service/$path$is_args$args;
        }
        if ($service = "payments") {
            proxy_pass http://payment_service/$path$is_args$args;
        }
        
        include /etc/nginx/snippets/api-proxy.conf;
        
        # Add service metadata headers
        proxy_set_header X-Service-Name $service;
        proxy_set_header X-API-Version $version;
        proxy_set_header X-User-ID $auth_user_id;
        
        # Error handling
        error_page 502 503 504 = @service_error;
    }
    
    # Service error handler
    location @service_error {
        default_type application/json;
        return 503 '{"error":"service_unavailable","service":"$service_name","timestamp":"$time_iso8601"}';
    }
    
    # Internal auth service
    location = /api/internal/auth {
        internal;
        
        # Public auth-service endpoints skip token validation
        if ($service_name = "auth") {
            return 200;
        }
        
        proxy_pass http://auth_service/internal/validate;
        proxy_pass_request_body off;
        proxy_set_header Content-Length "";
        proxy_set_header X-Original-URI $request_uri;
        proxy_set_header Authorization $http_authorization;
        proxy_set_header X-API-Key $http_x_api_key;
}
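
A few hypothetical requests show the regex location splitting version, service, and path, and the routing decision shows up in the api_log entries:

# Each call should be routed by the captured service name and logged with
# service="..." and version="..." fields in /var/log/nginx/api-gateway.log
curl -s -H "Host: advanced-api.example.com" http://localhost/api/gateway/info
curl -s -X POST -H "Host: advanced-api.example.com" http://localhost/api/v1/auth/login
curl -s -H "Host: advanced-api.example.com" \
     -H "Authorization: Bearer test-token" \
     http://localhost/api/v1/orders/12345

# Watch the enhanced log format in action
sudo tail -n 5 /var/log/nginx/api-gateway.log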

Circuit Breaker Pattern

graph TD
    A[API Request] --> B[Circuit Breaker]
    B --> C{Circuit State}
    
    C -->|Closed| D[Forward to Service]
    C -->|Open| E[Return Fallback]
    C -->|Half-Open| F[Test Request]
    
    D --> G{Success?}
    G -->|Yes| H[Reset Failures]
    G -->|No| I[Count Failure]
    
    I --> J{Threshold?}
    J -->|Yes| K[Open Circuit]
    J -->|No| L[Continue]
    
    style B fill:#e1f5fe
    style C fill:#fff3e0
    style E fill:#ffebee
    style K fill:#ffebee
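
Open-source NGINX has no dedicated circuit-breaker module, but the pattern can be approximated with tight timeouts, error_page fallbacks, proxy_next_upstream retries, and passive health checks (max_fails and fail_timeout on upstream servers); the configuration below uses the first three.
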
# Create resilient API Gateway
sudo nano /etc/nginx/sites-available/resilient-api.example.com
server {
    listen 80;
    server_name resilient-api.example.com;
    
    # Main API routing with resilience
    location /api/v1/users/ {
        proxy_pass http://user_service/;
        
        # Fallback to cache on error
        error_page 502 503 504 = @user_fallback;
        
        include /etc/nginx/snippets/api-proxy.conf;
        
        # Quick failure detection
        proxy_connect_timeout 5s;
        proxy_send_timeout 10s;
        proxy_read_timeout 10s;
    }
    
    # Fallback handler
    location @user_fallback {
        internal;
        
        # Try cached data for GET requests
        # (proxy_pass must not carry a URI part inside "if" or named locations,
        # and the user_cache_service upstream must be defined in the http block)
        if ($request_method = GET) {
            proxy_pass http://user_cache_service;
            add_header X-Served-By "cache" always;
            add_header X-Fallback "true" always;
        }
        
        # Error for write operations
        if ($request_method ~ ^(POST|PUT|DELETE)$) {
            return 503 '{"error":"service_unavailable","retry_after":30}';
        }
    }
    
    # Retry logic
    location /api/v1/orders/ {
        proxy_pass http://order_service/;
        
        # Multiple upstream attempts
        proxy_next_upstream error timeout http_500 http_502 http_503;
        proxy_next_upstream_tries 3;
        proxy_next_upstream_timeout 30s;
        
        include /etc/nginx/snippets/api-proxy.conf;
        
        # $upstream_addr lists every upstream server tried for the request
        add_header X-Upstream-Addr $upstream_addr always;
    }
}
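
To see the fallback path in action, stop the user-service backends and compare a read with a write. This sketch assumes the resilient site is enabled, a user_cache_service upstream has been defined, and the backends run under systemd; the unit names below are placeholders for however your services are actually managed:

# Simulate a user-service outage (replace the unit names with your own)
sudo systemctl stop user-service@1 user-service@2

# GET requests should be answered by the cache tier with the fallback headers
curl -si -H "Host: resilient-api.example.com" http://localhost/api/v1/users/42 \
    | grep -Ei "^(HTTP|X-Served-By|X-Fallback)"

# Writes should fail fast with the JSON 503 body
curl -si -X POST -H "Host: resilient-api.example.com" \
     -d '{"name":"test"}' http://localhost/api/v1/users/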

API Gateway Monitoring

# Create API monitoring script
sudo nano /usr/local/bin/api-monitor.sh
#!/bin/bash

# API Gateway Monitor
ACCESS_LOG="/var/log/nginx/access.log"

monitor_api() {
    echo "=== API Gateway Monitor ==="
    echo "Generated: $(date)"
    echo
    
    # API request stats
    local hour_ago=$(date -d '1 hour ago' '+%d/%b/%Y:%H')
    local current_hour=$(date '+%d/%b/%Y:%H')
    
    local total_requests=$(grep -E "$hour_ago|$current_hour" "$ACCESS_LOG" 2>/dev/null | grep "/api/" | wc -l)
    echo "Total API requests: $total_requests"
    
    # Requests by service
    echo
    echo "--- Requests by Service ---"
    grep -E "$hour_ago|$current_hour" "$ACCESS_LOG" 2>/dev/null | grep "/api/" | awk '{print $7}' | sed 's|/api/v[0-9]*/||' | sed 's|/.*||' | sort | uniq -c | sort -nr
    
    # Response codes
    echo
    echo "--- Response Codes ---"
    grep -E "$hour_ago|$current_hour" "$ACCESS_LOG" 2>/dev/null | grep "/api/" | awk '{print $9}' | sort | uniq -c | sort -nr
    
    # Service health
    echo
    echo "--- Service Health ---"
    local services=("auth:3001" "users:4001" "orders:5001" "payments:6001")
    
    for service in "${services[@]}"; do
        local name=$(echo "$service" | cut -d: -f1)
        local port=$(echo "$service" | cut -d: -f2)
        
        if curl -s --max-time 3 "http://127.0.0.1:$port/health" >/dev/null 2>&1; then
            echo "✅ $name service - healthy"
        else
            echo "❌ $name service - unhealthy"
        fi
    done
}

case "${1:-monitor}" in
    monitor)
        monitor_api
        ;;
    watch)
        while true; do
            clear
            monitor_api
            echo "Press Ctrl+C to exit..."
            sleep 10
        done
        ;;
    *)
        echo "Usage: $0 {monitor|watch}"
        ;;
esac

# Make executable: sudo chmod +x /usr/local/bin/api-monitor.sh
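
To run the monitor unattended, a simple cron drop-in works (the snapshot log path is just an example):

# Run the API monitor every 15 minutes and keep a rolling snapshot log
echo "*/15 * * * * root /usr/local/bin/api-monitor.sh monitor >> /var/log/api-monitor.log 2>&1" \
    | sudo tee /etc/cron.d/api-monitor

sudo chmod 644 /etc/cron.d/api-monitor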

Testing API Gateway

# Enable API Gateway sites
sudo ln -s /etc/nginx/sites-available/api-gateway.example.com /etc/nginx/sites-enabled/
sudo ln -s /etc/nginx/sites-available/advanced-api.example.com /etc/nginx/sites-enabled/

# Test configuration
sudo nginx -t

# Reload NGINX
sudo systemctl reload nginx

# Test API Gateway endpoints
curl -H "Host: api-gateway.example.com" http://localhost/health
curl -H "Host: api-gateway.example.com" http://localhost/api/v1/auth/login -X POST

# Test with API key
curl -H "Host: advanced-api.example.com" -H "X-API-Key: test-key" http://localhost/api/v1/users/

# Monitor API Gateway
/usr/local/bin/api-monitor.sh

# Load test API
ab -n 100 -c 10 -H "Host: api-gateway.example.com" http://localhost/api/v1/auth/status

What’s Next?

Excellent! You’ve built a comprehensive API Gateway with NGINX that handles microservices routing, authentication, rate limiting, and resilience patterns. Your architecture now supports scalable, distributed applications.

Coming up in Part 14: NGINX Content Delivery Network (CDN) Setup

This is Part 13 of our 22-part NGINX series. Your server is now a powerful microservices API Gateway! Next, we’ll set up CDN capabilities. Questions? Share them in the comments!
