# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes 4; #auto;
worker_cpu_affinity 0001 0010 0100 1000;
worker_priority -5;
worker_rlimit_nofile 40000; #100000
lock_file /run/lock/nginx.lock;

# [ debug | info | notice | warn | error | crit ]
error_log /var/log/nginx/error.log warn;

pid /run/nginx.pid;

events {
    worker_connections 8192; #4096; #1024;

    # You can see the available event models and how to activate
    # them at ./configure -> http://wiki.nginx.org/NginxOptimizations
    #
    # use [ kqueue | rtsig | epoll | /dev/poll | select | poll ];
    use epoll;

    # multi_accept tries to accept() as many connections as
    # possible after nginx gets notification about a new connection.
    # default: off
    multi_accept on;

    # If a worker process does not hold the accept mutex, it will
    # try to acquire it again after at least this delay.
    # default: 500ms
    accept_mutex_delay 50ms;
}

http {
    ## Hostname Options
    server_name_in_redirect on;
    # The size of the hash tables used for server names.
    server_names_hash_bucket_size 64;
    server_names_hash_max_size 512;

    ## Size Limits
    # Maximum body size accepted from a client (e.g. POST uploads)
    client_max_body_size 8M;
    client_header_buffer_size 1M;
    client_body_buffer_size 32k;
    large_client_header_buffers 8 32k;

    ## Timeouts
    # request timed out -- default 60
    client_body_timeout 3m;
    client_header_timeout 3m;
    # if the client stops responding, free up memory -- default 60
    send_timeout 3m;
    # expires 24h;

    # The first parameter assigns the timeout for keep-alive connections
    # with the client. The server will close connections after this time.
    #
    # The optional second parameter assigns the time value in the header
    # "Keep-Alive: timeout=time" of the response. This header can convince
    # some browsers to close the connection, so that the server does not
    # have to. Without this parameter, nginx does not send a Keep-Alive
    # header (though this is not what makes a connection "keep-alive").
    #
    # The two parameters can differ from each other.
    #
    # Notes on how browsers handle the Keep-Alive header:
    #
    #   MSIE and Opera ignore the "Keep-Alive: timeout=" header.
    #   MSIE keeps the connection alive for about 60-65 seconds, then sends a TCP RST.
    #   Opera keeps the connection alive for a long time.
    #   Mozilla keeps the connection alive for N plus about 1-10 seconds.
    #   Konqueror keeps the connection alive for about N seconds.
    #
    # The server will close the connection after this time.
    # default: keepalive_timeout 75
    keepalive_timeout 65; #75 20;

    # number of requests a client can make over one keep-alive connection
    #keepalive_requests 100;

    # lingering_time 30;
    lingering_timeout 10;

    # Reset timed-out (lingering) connections and free the memory held by
    # non-responding clients. Helps deflect some DoS attempts.
    reset_timedout_connection on;

    ##
    # Basic Settings
    ##
    charset utf-8;
    index index.php index.htm index.html iredirect.php;
    ignore_invalid_headers on;
    #limit_zone normal $binary_remote_addr 5m;
    #limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s;
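
    ## A commented sketch of how a vhost could apply request limiting once the
    ## limit_req_zone "one" above is enabled. The server_name, location and
    ## burst value are illustrative placeholders, not taken from this file.
    #server {
    #    listen 80;
    #    server_name example.com;
    #    location /login/ {
    #        limit_req zone=one burst=5 nodelay;
    #    }
    #}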

    recursive_error_pages on;
    types_hash_max_size 2048;

    # Buffers used for sending the response body to the client
    output_buffers 10 128k;
    # Postpone sending data until at least this many bytes are ready to be sent
    postpone_output 1500;
    # Limit the amount of data transferred in a single sendfile() call
    sendfile_max_chunk 128k;

    ## Hide the Nginx version number.
    server_tokens off;

    # to reduce I/O load on HDDs we can disable access logging
    # access_log off;

    # Tell Nginx not to send out partial frames; this increases throughput
    # since TCP frames are filled up before being sent out. (adds TCP_CORK)
    # Headers are sent in one piece, which is better than sending them one by one.
    tcp_nopush on;

    # Tell Nginx to enable the Nagle buffering algorithm for TCP packets, which
    # collates several smaller packets together into one larger packet, thus saving
    # bandwidth at the cost of a nearly imperceptible increase to latency. (removes TCP_NODELAY)
    # (Setting this to "on" instead would disable that buffering, which suits
    # small data bursts sent in real time.)
    tcp_nodelay off;

    # send_lowat 12000;

    ## Proxy Options
    # set_real_ip_from 127.0.0.1;
    # real_ip_header X-Forwarded-For;

    # Use the sendfile() syscall to speed up I/O operations and speed up
    # static file serving: it copies data between one FD and another from
    # within the kernel, which is faster than read() + write().
    sendfile on;

    ## Use an SSL/TLS cache for SSL session resumption. This needs to be
    ## here (in this context) for session resumption to work. See this
    ## thread on the Nginx mailing list:
    ## http://nginx.org/pipermail/nginx/2010-November/023736.html.
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    ## Enable clickjacking protection in modern browsers. Available in
    ## IE8 also. See
    ## https://developer.mozilla.org/en/The_X-FRAME-OPTIONS_response_header
    add_header X-Frame-Options SAMEORIGIN;

    # When serving user-supplied content, include an X-Content-Type-Options: nosniff
    # header along with the Content-Type: header to disable content-type sniffing
    # in some browsers.
    # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
    # currently supported in IE 8+ http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx
    # http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx
    # 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020
    add_header X-Content-Type-Options nosniff;

    # This header enables the cross-site scripting (XSS) filter built into most recent
    # web browsers. It is usually enabled by default anyway, so the role of this header
    # is to re-enable the filter for this particular website if it was disabled by the user.
    # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
    add_header X-XSS-Protection "1; mode=block";

    # With Content Security Policy (CSP) enabled (and a browser that supports it,
    # http://caniuse.com/#feat=contentsecuritypolicy), you can tell the browser that
    # it may only download content from the domains you explicitly allow.
    # http://www.html5rocks.com/en/tutorials/security/content-security-policy/
    # https://www.owasp.org/index.php/Content_Security_Policy
    # I need to change our application code so we can increase security by dropping
    # the 'unsafe-inline' and 'unsafe-eval' directives for CSS and JS
    # (if you have inline CSS or JS, you will need to keep them).
    # more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful
    #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'";
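
    ## A commented sketch of a TLS-enabled vhost that would pick up the shared
    ## ssl_session_cache and cipher settings above. The certificate paths and
    ## server_name are placeholders, not taken from this file. Note that the
    ## add_header directives above are inherited only by servers/locations that
    ## define no add_header of their own.
    #server {
    #    listen 443 ssl;
    #    server_name example.com;
    #    ssl_certificate     /etc/nginx/ssl/example.com.crt;
    #    ssl_certificate_key /etc/nginx/ssl/example.com.key;
    #    root /var/www/example.com;
    #}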

    ## Define a zone for limiting the number of simultaneous
    ## connections nginx accepts. 1m means 32000 simultaneous
    ## sessions. We need to define for each server the limit_conn
    ## value referring to this or other zones.
    ## ** This syntax requires nginx version >= 1.1.8.
    ## ** Cf. http://nginx.org/en/CHANGES. If using an older version,
    ## ** use the limit_zone directive (see the Basic Settings above)
    ## ** instead, and comment this one out.
    #limit_conn_zone $binary_remote_addr zone=arbeit:10m;

    #include /etc/nginx/mime.types;
    include mime.types;
    default_type application/octet-stream;

    # Update charset_types to match the updated mime.types
    charset_types text/xml text/plain text/vnd.wap.wml application/x-javascript application/rss+xml text/css application/javascript application/json;

    # Take the real client IP from X-Forwarded-For when the request comes from the local proxy.
    set_real_ip_from 127.0.0.1;
    real_ip_header X-Forwarded-For;

    ## Log formats
    # log format for main
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    # log format for downloads
    log_format download '$remote_addr - $remote_user [$time_local] '
                        '"$request" $status $bytes_sent '
                        '"$http_referer" "$http_user_agent" '
                        '"$http_range" "$sent_http_content_range"';

    ## Logfile Options
    access_log /var/log/nginx/access.log main;

    # Caches information about open FDs and frequently accessed files.
    # Can boost performance, but you need to test these values.
    # (The *_valid/_min_uses/_errors settings below only take effect once
    # open_file_cache itself is enabled.)
    # open_file_cache max=1000 inactive=150s;
    open_file_cache_valid 160s;
    open_file_cache_min_uses 5;
    open_file_cache_errors on;

    # fastcgi cache for nginx
    fastcgi_cache_path /var/cache/nginxfastcgi levels=1:2 keys_zone=fastcgicache:60m inactive=60m max_size=1024m;
    fastcgi_cache_key $scheme$request_method$host$request_uri;
    log_format cache '$remote_addr - $remote_user [$time_local] "$request" '
                     '$status $upstream_cache_status $body_bytes_sent "$http_referer" '
                     '"$http_user_agent" "$http_x_forwarded_for"';
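
    ## A commented sketch of how a vhost could use the "cache" log format above
    ## and expose $upstream_cache_status for debugging. The log path and the
    ## X-Cache-Status header name are just conventions, not defined in this file.
    #server {
    #    listen 80;
    #    server_name example.com;
    #    access_log /var/log/nginx/cache.log cache;
    #    add_header X-Cache-Status $upstream_cache_status;
    #}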

    # note: HTTP headers can also be used to form the cache key, e.g.
    #fastcgi_cache_key $scheme$request_method$host$request_uri$http_x_custom_header;

    fastcgi_cache_lock on;
    fastcgi_cache_use_stale error timeout invalid_header updating http_500;
    fastcgi_cache_valid 5m;
    fastcgi_ignore_headers Cache-Control Expires Set-Cookie;

    ## Proxy and Cache Server
    ## Use fastcgi_cache or proxy_cache
    #proxy_redirect off;
    #proxy_set_header Host $host;
    #proxy_set_header X-Real-IP $remote_addr;
    #proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    # caching options
    #proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my-cache:8m max_size=1000m inactive=600m;
    #proxy_temp_path /var/cache/tmp;
    #proxy_cache_methods GET HEAD POST;

    include conf.d/gzip.conf;

    #------- pagespeed --------
    #pagespeed on;
    #pagespeed FileCachePath "/var/ngx_pagespeed_cache";
    #pagespeed FileCacheSizeKb 102400;
    #pagespeed FileCacheCleanIntervalMs 3600000;
    #pagespeed FileCacheInodeLimit 500000;
    #pagespeed FetchWithGzip on;
    #pagespeed EnableFilters combine_css,combine_javascript;

    ## FastCGI Options
    ## http://wiki.nginx.org/HttpFastcgiModule#fastcgi_buffers

    ## Determines whether the current request to the FastCGI server must be
    ## aborted when the client aborts its request to nginx.
    #fastcgi_ignore_client_abort on;

    ## Sets the number and the size of the buffers into which the reply from
    ## the FastCGI backend is read.
    #fastcgi_buffers 8 16k;
    #fastcgi_buffer_size 128k;        # 128k + 8 * 16k = 256k buffered per request
    #fastcgi_busy_buffers_size 256k;

    ## disable buffering to disk if replies start to exceed your fastcgi buffers
    #fastcgi_max_temp_file_size 0;

    #fastcgi_index index.php;
    #fastcgi_connect_timeout 120;
    ## allow 4 hrs - pass timeout responsibility to upstream
    #fastcgi_read_timeout 14400;
    #fastcgi_send_timeout 120;

    ## Determines whether 4xx and 5xx responses from the backend are passed
    ## through to the client or intercepted so nginx can answer with error_page.
    fastcgi_intercept_errors on;

    ## When set to on, nginx instructs a FastCGI server to keep connections open.
    #fastcgi_keep_conn on;

    ##
    # If HTTPS, then set a variable so it can be passed along.
    ##
    map $scheme $server_https {
        default off;
        https   on;
    }

    upstream php-fpm {
        # this should match the value of the "listen" directive in the php-fpm pool
        #server unix:/tmp/php-fpm.sock;
        server 127.0.0.1:9000;
    }

    # Put the IP of your varnish/proxy here
    set_real_ip_from 127.0.0.1;
    # Put the header that your varnish/proxy sets
    real_ip_header X-Forwarded-For;

    # Virtual Host Configs
    #include /etc/nginx/sites-enabled/*.conf;

    # Include blacklist for bad bot and referer blocking.
    #include /etc/nginx/blacklist.conf;

    # Virtual Host
    include /etc/nginx/conf.d/*.conf;
}
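
## A commented sketch of what a per-site file included from conf.d above might
## contain, wiring together the php-fpm upstream, the fastcgicache zone and the
## $server_https map defined in this file. The server_name, paths and cache
## validity shown here are placeholders, not taken from this configuration.
#server {
#    listen 80;
#    server_name example.com;
#    root /var/www/example.com;
#
#    location ~ \.php$ {
#        include fastcgi_params;
#        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
#        fastcgi_param HTTPS $server_https;
#        fastcgi_pass php-fpm;
#
#        fastcgi_cache fastcgicache;
#        fastcgi_cache_valid 200 5m;
#    }
#}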