# https://www.nginx.com/blog/tuning-nginx/

worker_connections 1024;

# --- Connection limiting ----------------------------------------------------
# Limit the number of connections NGINX allows, for example from a single
# client IP address. Setting these helps prevent individual clients from
# opening too many connections and consuming too many resources.
# FIX: limit_conn_zone declarations (http context) moved above the server
# block that references them.
limit_conn_zone $binary_remote_addr zone=perip:10m;
limit_conn_zone $server_name        zone=perserver:10m;

# Status code returned when a connection limit is reached.
limit_conn_status 503;

server {
    # When several limit_conn directives are specified, every configured
    # limit applies.
    limit_conn perip     10;
    limit_conn perserver 100;
}

# --- Bandwidth limiting -----------------------------------------------------
# Limits the amount of bandwidth allowed for a client on a single connection.
# Setting it can prevent the system from being overloaded by certain clients
# and helps ensure all clients receive good quality of service.
# FIX: the original listed a bare "limit_rate" with no value and no
# semicolon, which is a syntax error; an explicit example rate is used here.
limit_rate 500k;

# --- Request-rate limiting --------------------------------------------------
# Limit the rate of requests processed by NGINX. As with limit_rate, this
# prevents overload by certain clients and can improve security — e.g. on
# login pages, a rate adequate for a human but too slow for programs (such as
# bots in a DDoS attack).
#
# If the request rate exceeds the rate configured for a zone, processing is
# delayed so requests are handled at the defined rate. Excessive requests are
# delayed until their number exceeds the maximum burst size, at which point
# the request is terminated with error 503 (Service Temporarily Unavailable).
# By default the maximum burst size is zero.
#
# Syntax: limit_req zone=name [burst=number] [nodelay];
limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s;   # or r/m

limit_req_status 503;

server {
    # FIX: "location" requires a URI argument; the original bare
    # "location { ... }" is a syntax error.
    location / {
        limit_req zone=one burst=5;
    }
}

# --- Upstream limits --------------------------------------------------------
# FIX: max_conns is a parameter of the "server" directive inside an
# "upstream" block (as the original comment itself states), not a standalone
# directive; and "queue" is only valid in the upstream context.
upstream backend {
    # Maximum number of simultaneous connections accepted by an upstream
    # server; helps prevent upstream servers from being overloaded.
    # The default is 0, meaning no limit.
    server 127.0.0.1:8080 max_conns=100;

    # If max_conns is set for any upstream server, "queue" governs what
    # happens when a request cannot be processed because all servers have
    # reached their max_conns limit: how many requests to queue and for how
    # long. Without this directive, no queuing occurs.
    # Syntax: queue number [timeout=time];  default timeout is 60 seconds.
    # NOTE(review): queue is an NGINX Plus (commercial) directive — confirm
    # your build supports it.
    queue 100 timeout=60;
}

# --- Compression ------------------------------------------------------------
# Skip compressing responses smaller than 10 KiB — they gain little.
gzip_min_length 10240;
# Old MSIE versions mishandle gzip responses.
gzip_disable "MSIE [1-6]\.";

# --- File-handle caching ----------------------------------------------------
# If you serve a large number of static files you will benefit from keeping
# file handles to requested files open — this avoids reopening them later.
# NOTE: only enable this if you are NOT editing the files while serving them.
# Because file accesses are cached, any 404s will be cached too; similarly,
# file sizes are cached, and if you change them your served content will be
# out of date.
# Keep up to 2000 file handles open; drop entries unused for 20s, revalidate
# cached entries every 60s, cache only files requested at least 5 times, and
# do not cache lookup errors (so transient 404s are not sticky).
open_file_cache          max=2000 inactive=20s;
open_file_cache_valid    60s;
open_file_cache_min_uses 5;
open_file_cache_errors   off;

# --- FastCGI buffering ------------------------------------------------------
# FIX: "location" requires a URI argument; the original bare
# "location { ... }" is a syntax error.
location ~ \.php$ {
    fastcgi_buffer_size          128k;
    fastcgi_buffers              256 16k;
    fastcgi_busy_buffers_size    256k;
    fastcgi_temp_file_write_size 256k;
}

http {
    # from http://stale.coffee/ec2-micro-instance-adventure-nginx-php-mysql.html
    # FIX: the original referenced zone "CACHE", but the only keys_zone
    # declared below is "WORDPRESS" — the zone names must match or nginx
    # fails to start.
    fastcgi_cache         WORDPRESS;
    fastcgi_cache_methods GET HEAD;
    fastcgi_cache_valid   200 1m;
    fastcgi_cache_bypass  $no_cache;
    fastcgi_no_cache      $no_cache;

    # Move the next 3 lines to /etc/nginx/nginx.conf if you want to use
    # fastcgi_cache across many sites.
    fastcgi_cache_path /var/run/nginx-cache levels=1:2 keys_zone=WORDPRESS:500m inactive=60m;
    fastcgi_cache_key "$scheme$request_method$host$request_uri";
    fastcgi_cache_use_stale error timeout invalid_header http_500;
}

server {
    # fastcgi_cache start
    set $no_cache 0;

    # POST requests and URLs with a query string should always go to PHP.
    if ($request_method = POST) {
        set $no_cache 1;
    }
    if ($query_string != "") {
        set $no_cache 1;
    }

    # Don't cache URIs containing the following segments.
    # FIX: dots escaped so "." matches a literal dot instead of any
    # character (e.g. "index.php" previously also matched "indexXphp").
    if ($request_uri ~* "(/wp-admin/|/xmlrpc\.php|/wp-(app|cron|login|register|mail)\.php|wp-.*\.php|/feed/|index\.php|wp-comments-popup\.php|wp-links-opml\.php|wp-locations\.php|sitemap(_index)?\.xml|[a-z0-9_-]+-sitemap([0-9]+)?\.xml)") {
        set $no_cache 1;
    }

    # Don't use the cache for logged-in users or recent commenters.
    if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_no_cache|wordpress_logged_in") {
        set $no_cache 1;
    }
}

# Pass all .php files onto a php-fpm/php-fcgi server.
# Hand .php requests (including PATH_INFO style "/script.php/extra") to the
# FastCGI backend named "php".
location ~ [^/]\.php(/|$) {
    # Split "/script.php/extra" into script name and trailing path info.
    fastcgi_split_path_info ^(.+?\.php)(/.*)$;

    # Refuse to proxy requests for scripts that do not exist on disk — a
    # robust guard against the PATH_INFO security issue that works with
    # "cgi.fix_pathinfo = 1" in /etc/php.ini (the default).
    if (!-f $document_root$fastcgi_script_name) {
        return 404;
    }

    include fastcgi.conf;
    fastcgi_index index.php;
    # fastcgi_intercept_errors on;
    fastcgi_pass php;

    # Options for the fcgi cache, if enabled:
    # fastcgi_cache_bypass $no_cache;
    # fastcgi_no_cache $no_cache;
    # fastcgi_cache WORDPRESS;
    # fastcgi_cache_valid 200 60m;
}

# Invalidate cached pages via /purge/<uri>.
location ~ /purge(/.*) {
    # Uncomment the following two lines to allow purging only from the
    # web server itself.
    #allow 127.0.0.1;
    #deny all;

    # NOTE(review): fastcgi_cache_purge is provided by the third-party
    # ngx_cache_purge module — confirm it is compiled into this nginx build.
    fastcgi_cache_purge WORDPRESS "$scheme$request_method$host$1";
}