# Run worker processes as user www-data.
#user www-data;

# This number should be, at maximum, the number of CPU cores on your system.
# (since nginx doesn't benefit from more than one worker per CPU.)
worker_processes 4;

# Number of file descriptors used for Nginx. This is set in the OS with 'ulimit -n 200000'
# or using /etc/security/limits.conf
worker_rlimit_nofile 200000;

# Log at the 'info' level; raise this to 'crit' to only log critical errors.
error_log /var/log/nginx/error.log info;

# PID file location.
pid /var/run/nginx.pid;

# Determines how many clients will be served by each worker process.
# (Max clients = worker_connections * worker_processes)
# "Max clients" is also limited by the number of socket connections available on the system (~64k)
events {
    worker_connections 2048;

    # Essential on Linux: epoll is optimized to serve many clients with each thread.
    use epoll;

    # Accept as many connections as possible after nginx gets notification about a new connection.
    # May flood worker_connections if that option is set too low.
    multi_accept on;
}

http {
    # Caches information about open FDs and frequently accessed files.
    # Changing this setting, in my environment, brought performance up from 560k req/sec to 904k req/sec.
    # I recommend using some variant of these options, though not the specific values listed below.
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    # Don't show the nginx version in headers and error pages.
    server_tokens off;

    # Maximum request body size, plus body and FastCGI buffering.
    client_max_body_size 50m;
    client_body_buffer_size 32k;
    #fastcgi_buffers 8 16k;
    #fastcgi_buffer_size 32k;
    fastcgi_buffer_size 16k;
    fastcgi_buffers 4 16k;

    # Sendfile copies data between one FD and another from within the kernel.
    # More efficient than read() + write(), since that requires transferring data to and from user space.
    sendfile on;

    # tcp_nopush causes nginx to attempt to send its HTTP response head in one packet,
    # instead of using partial frames. This is useful for prepending headers before calling sendfile,
    # or for throughput optimization.
    tcp_nopush on;

    # Don't buffer data-sends (disable Nagle's algorithm). Good for sending frequent small bursts of data in real time.
    tcp_nodelay on;

    # Timeout for keep-alive connections. The server will close connections after this time.
    keepalive_timeout 300;

    # Maximum size of the hash tables used for MIME types.
    types_hash_max_size 2048;

    # Number of requests a client can make over one keep-alive connection. This is set high for testing.
    keepalive_requests 1000;

    # Allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
    reset_timedout_connection on;

    # Send the client a "request timed out" if the body is not loaded by this time. Default 60.
    client_body_timeout 15;

    # If the client stops reading data, free up the stale client connection after this much time. Default 60.
    send_timeout 15;

    # Reserve 10MB under the name 'uploads' for the upload-progress bar.
    upload_progress uploads 10m;
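
    # A minimal sketch of how the 'uploads' zone is typically wired up in a vhost,
    # assuming the third-party nginx-upload-progress module (which provides the
    # upload_progress/track_uploads/report_uploads directives) is compiled in.
    # The location names below are placeholders, not part of this config:
    #
    #   location /upload {
    #       track_uploads uploads 30s;   # keep progress info for 30s after the upload finishes
    #   }
    #   location ^~ /progress {
    #       report_uploads uploads;      # progress endpoint polled by the client-side progress bar
    #   }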
    # Compression. Reduces the amount of data that needs to be transferred over the network.
    gzip on;
    gzip_min_length 1400;
    gzip_buffers 16 8k;
    gzip_comp_level 6;
    gzip_proxied any;
    gzip_vary on;
    gzip_types application/atom+xml application/javascript application/json application/rss+xml
               application/x-font-ttf application/x-javascript application/xml font/opentype
               image/svg+xml image/x-icon text/css text/plain;
    gzip_disable "MSIE [1-6]\.";

    proxy_buffering off;

    include /etc/nginx/mime.types;
    access_log /var/log/nginx/access.log;
    default_type application/octet-stream;

    log_format main   '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent"';

    log_format imglol '127.0.0.1 - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent"';

    include /etc/nginx/conf.d/*.conf;
    #include /etc/nginx/sites-available/dev-001;
    include /etc/nginx/sites-enabled/*;

    # Config inspired by pomf.se
}
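
# A minimal sketch of a vhost that could live in /etc/nginx/sites-enabled/ and use the
# settings above; server_name, root and the log path are placeholders, not part of this config:
#
#   server {
#       listen 80;
#       server_name example.com;
#       root /var/www/example;
#
#       # Use the 'main' log_format defined in the http block above.
#       access_log /var/log/nginx/example.access.log main;
#   }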