Skip to content

Instantly share code, notes, and snippets.

@shivkumarsingh7
Forked from nateware/nginx.conf
Created January 26, 2019 21:56
Show Gist options
  • Save shivkumarsingh7/f4f5437399adf2a6607bdaf571875f16 to your computer and use it in GitHub Desktop.
Save shivkumarsingh7/f4f5437399adf2a6607bdaf571875f16 to your computer and use it in GitHub Desktop.

Revisions

  1. @nateware nateware revised this gist Jan 11, 2013. 1 changed file with 83 additions and 0 deletions.
    83 changes: 83 additions & 0 deletions nginx2.conf
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,83 @@
    # This number should be, at maximum, the number of CPU cores on your system.
    # (since nginx doesn't benefit from more than one worker per CPU.)
    worker_processes 24;

    # Number of file descriptors used for Nginx. This is set in the OS with 'ulimit -n 200000'
    # or using /etc/security/limits.conf
    worker_rlimit_nofile 200000;


    # only log critical errors
    # (fixed: the original line had no terminating semicolon, which is a
    # configuration parse error in nginx.)
    error_log /var/log/nginx/error.log crit;


    # NOTE(review): worker_connections, use and multi_accept are only valid
    # inside an events { } context — wrapped here so the file actually loads.
    events {
        # Determines how many clients will be served by each worker process.
        # (Max clients = worker_connections * worker_processes)
        # "Max clients" is also limited by the number of socket connections available on the system (~64k)
        worker_connections 4000;

        # essential for linux, optimized to serve many clients with each thread
        use epoll;

        # Accept as many connections as possible, after nginx gets notification about a new connection.
        # May flood worker_connections, if that option is set too low.
        multi_accept on;
    }


    # NOTE(review): everything below belongs to the http { } context —
    # wrapped here so the file actually loads. All values are unchanged.
    http {
        # Caches information about open FDs, frequently accessed files.
        # Changing this setting, in my environment, brought performance up from 560k req/sec, to 904k req/sec.
        # I recommend using some variant of these options, though not the specific values listed below.
        open_file_cache max=200000 inactive=20s;
        open_file_cache_valid 30s;
        open_file_cache_min_uses 2;
        open_file_cache_errors on;


        # Buffer log writes to speed up IO, or disable them altogether
        #access_log /var/log/nginx/access.log main buffer=16k;
        access_log off;


        # Sendfile copies data between one FD and other from within the kernel.
        # More efficient than read() + write(), since the requires transferring data to and from the user space.
        sendfile on;


        # Tcp_nopush causes nginx to attempt to send its HTTP response head in one packet,
        # instead of using partial frames. This is useful for prepending headers before calling sendfile,
        # or for throughput optimization.
        tcp_nopush on;


        # don't buffer data-sends (disable Nagle algorithm). Good for sending frequent small bursts of data in real time.
        tcp_nodelay on;


        # Timeout for keep-alive connections. Server will close connections after this time.
        keepalive_timeout 30;


        # Number of requests a client can make over the keep-alive connection. This is set high for testing.
        keepalive_requests 100000;


        # allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
        reset_timedout_connection on;


        # send the client a "request timed out" if the body is not loaded by this time. Default 60.
        client_body_timeout 10;


        # If the client stops reading data, free up the stale client connection after this much time. Default 60.
        send_timeout 2;


        # Compression. Reduces the amount of data that needs to be transferred over the network
        gzip on;
        gzip_min_length 10240;
        gzip_proxied expired no-cache no-store private auth;
        gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
        gzip_disable "MSIE [1-6]\.";
    }
  2. @nateware nateware revised this gist Oct 31, 2012. 1 changed file with 10 additions and 5 deletions.
    15 changes: 10 additions & 5 deletions nginx.conf
    Original file line number Diff line number Diff line change
    @@ -51,11 +51,16 @@ http {
    # http://brainspl.at/nginx.conf.txt - Ezra's complete config
    # http://wiki.codemongers.com/NginxVirtualHostExample
    #
    # This redirects to haproxy (a single process), which then rebalances
    # to web backend. This is because nginx's load balancing is limited.
    # See haproxy.conf
    # List upstream app servers that render dynamic content. These are
    # typically on the same server as nginx. These will either be multiple
    # ports (processes), or a single port if the app server has its own
    # master/slave process model.
    upstream app_servers {
    server 127.0.0.1:8080;
    server 127.0.0.1:8000;
    server 127.0.0.1:8001;
    server 127.0.0.1:8002;
    server 127.0.0.1:8003;
    server 127.0.0.1:8004;
    }

    # HTTP configuration
    @@ -150,4 +155,4 @@ http {
    root /var/www/html;
    }
    }
    }
    }
  3. @nateware nateware created this gist Oct 31, 2012.
    153 changes: 153 additions & 0 deletions nginx.conf
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,153 @@
    #
    # Sample nginx.conf optimized for EC2 c1.medium to xlarge instances.
    # Also look at the haproxy.conf file for how the backend is balanced.
    #

    user "nginx" "nginx";
    worker_processes 10;

    error_log /var/log/nginx_error.log info;

    pid /var/run/nginx.pid;

    events {
        worker_connections 1024;
    }

    http {
        # Mime types path needs to be absolute as of nginx 0.7.x from 0.6.x
        include /usr/local/nginx/conf/mime.types;

        # Tune the appropriate default for your system accordingly. Only used if mime types fail.
        #default_type text/html;
        default_type application/octet-stream;

        # These are good default values.
        tcp_nopush on;
        tcp_nodelay off;

        sendfile on;
        keepalive_timeout 30;

        log_format main '$remote_addr - $remote_user [$time_local] $status '
                        '"$request" $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for" ($request_time)';

        # Output compression with gzip
        gzip on;
        gzip_http_version 1.1;
        gzip_vary on;
        gzip_comp_level 6;
        gzip_proxied any;
        gzip_types text/plain image/png image/gif image/jpeg text/html text/css application/json application/x-javascript application/xml application/xml+rss text/javascript;
        gzip_buffers 16 8k;
        # Disable gzip for certain browsers.
        # (fixed: the original used a typographic opening quote, had no closing
        # quote or proper terminator — a parse error — and left the dot
        # unescaped so it matched any character instead of a literal ".")
        gzip_disable "MSIE [1-6]\.(?!.*SV1)";

        #
        # Virtualhost server definition for backend cluster
        #
        # This is a combination of two different references:
        # http://brainspl.at/nginx.conf.txt - Ezra's complete config
        # http://wiki.codemongers.com/NginxVirtualHostExample
        #
        # This redirects to haproxy (a single process), which then rebalances
        # to web backend. This is because nginx's load balancing is limited.
        # See haproxy.conf
        upstream app_servers {
            server 127.0.0.1:8080;
        }

        # HTTP configuration
        server {
            listen 80 default sndbuf=16k rcvbuf=8k backlog=1024;
            server_name www.yourdomainhere.com;

            # Apache DocumentRoot equivalent
            root /var/www/html;

            access_log /var/log/nginx_access.log main;

            client_body_temp_path /tmp/nginx_client_data 1 2;
            fastcgi_temp_path /tmp/nginx_fastcgi_data;
            proxy_temp_path /tmp/nginx_proxy_data;

            # Taken from nginx wiki. Qualified thru load testing
            proxy_connect_timeout 90;
            proxy_send_timeout 90;
            proxy_read_timeout 90;
            proxy_buffer_size 4k;
            proxy_buffers 4 32k;
            proxy_busy_buffers_size 64k;
            proxy_temp_file_write_size 64k;

            location / {
                # needed to forward user's IP address to backend
                proxy_set_header X-Real-IP $remote_addr;

                # needed for HTTPS
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header Host $http_host;
                proxy_redirect off;
                proxy_max_temp_file_size 0;

                # Use this variable to key off whether we pass requests to backend,
                # or serve them directly via nginx. By default everything gets
                # passed thru, and we only serve specific resources directly.
                set $send_to_app "yes";

                # Handle all images and assets explicitly. Faster than fs check every time.
                if ($request_uri ~ "images/|img/|javascripts/|js/|stylesheets/|css/") {
                    set $send_to_app "no";
                    break;
                }

                # If the file exists as a static file serve it directly, without
                # running all the other rewrite tests on it.
                if (-f $request_filename) {
                    set $send_to_app "no";
                    break;
                }

                # Check for index.html for directory index
                # If it's there on the filesystem, then rewrite the url to add
                # /index.html to the end of it and serve it directly.
                if (-f $request_filename/index.html) {
                    set $send_to_app "no";
                    rewrite (.*) $1/index.html break;
                }

                # This is the meat of web app page caching.
                # It adds .html to the end of the url and then checks the filesystem for
                # that file. If it exists, then we rewrite the url to have explicit .html
                # on the end and then send it on its way to the next config rule.
                # If there is no file on the fs then it sets all the necessary headers
                # proxies to our backend.
                if (-f $request_filename.html) {
                    set $send_to_app "no";
                    rewrite (.*) $1.html break;
                }

                # Check our state to make sure we're forwarding it back
                if ($send_to_app = "yes") {
                    proxy_pass http://app_servers;
                }

                # File uploads
                client_max_body_size 10m;
            }

            # Large content
            # (fixed: the original prefix was "download" without a leading
            # slash; request URIs always begin with "/", so that location
            # could never match.)
            location ^~ /download {
                client_body_buffer_size 1024k;
            }

            # redirect server error pages to the static page /50x.html
            error_page 403 /403.html;
            error_page 404 /404.html;
            error_page 500 502 503 504 /500.html;
            location = /500.html {
                root /var/www/html;
            }
        }
    }