I'm trying to optimize a VPS (virtual private server) that's running Nginx + PHP-FPM + MariaDB for a website that has a lot of traffic.
Machine specs are:
8GB RAM
8 CPU cores
1x30GB SSD
Machine uses CENTOS 7.1
Here's my nginx.conf:
# Server globals
user nginx;
worker_processes 8;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
# Worker config
events {
worker_connections 1024;
use epoll;
}
http {
# Main settings
sendfile on;
tcp_nopush on;
tcp_nodelay on;
client_header_timeout 1m;
client_body_timeout 1m;
client_header_buffer_size 2k;
client_body_buffer_size 256k;
client_max_body_size 256m;
large_client_header_buffers 4 8k;
send_timeout 30;
keepalive_timeout 60 60;
reset_timedout_connection on;
server_tokens off;
server_name_in_redirect off;
server_names_hash_max_size 512;
server_names_hash_bucket_size 512;
# Log format
log_format main '$remote_addr - $remote_user [$time_local] $request '
'"$status" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format bytes '$body_bytes_sent';
#access_log /var/log/nginx/access.log main;
access_log off;
# Mime settings
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Compression
gzip on;
gzip_comp_level 9;
gzip_min_length 512;
gzip_buffers 8 64k;
gzip_types text/plain text/css text/javascript
application/x-javascript application/javascript;
gzip_proxied any;
# Proxy settings
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_header Set-Cookie;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
# SSL PCI Compliance
ssl_session_cache shared:SSL:10m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
# Error pages
error_page 403 /error/403.html;
error_page 404 /error/404.html;
error_page 502 503 504 /error/50x.html;
# Cache
proxy_cache_path /var/cache/nginx levels=2 keys_zone=cache:10m inactive=60m max_size=512m;
proxy_temp_path /var/cache/nginx/temp;
proxy_cache_key "$host$request_uri $cookie_user";
proxy_ignore_headers Expires Cache-Control;
proxy_cache_use_stale error timeout invalid_header http_502;
proxy_cache_valid any 3d;
map $http_cookie $no_cache {
default 0;
~SESS 1;
~wordpress_logged_in 1;
}
# Wildcard include
include /etc/nginx/conf.d/*.conf;
}
Also here's my domainname.conf:
[domainname.tld]
listen = 127.0.0.1:9003
listen.allowed_clients = 127.0.0.1
user = admin
group = admin
pm = ondemand
pm.max_children = 70
pm.start_servers = 3
pm.min_spare_servers = 3
pm.max_spare_servers = 10
env[HOSTNAME] = $HOSTNAME
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /tmp
env[TMPDIR] = /tmp
env[TEMP] = /tmp
And here's the main www.conf:
[www]
listen = 127.0.0.1:9000
listen.allowed_clients = 127.0.0.1
user = apache
group = apache
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 3
pm.max_spare_servers = 35
And finally here's the my.cnf:
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
symbolic-links=0
skip-external-locking
key_buffer_size = 512M
max_allowed_packet = 128M
table_open_cache = 1024
sort_buffer_size = 8M
read_buffer_size = 8M
read_rnd_buffer_size = 256M
myisam_sort_buffer_size = 128M
thread_cache_size = 8
query_cache_size= 512M
thread_concurrency = 32
#innodb_use_native_aio = 0
innodb_file_per_table
max_connections=500
max_user_connections=250
wait_timeout=5
interactive_timeout=50
long_query_time=10
#slow_query_log=1
#slow_query_log_file=/var/log/mysql-slow-queries.log
[mysqld_safe]
log-error=/var/log/mariadb/mariadb.log
pid-file=/var/run/mariadb/mariadb.pid
#
# include all files from the config directory
#
!includedir /etc/my.cnf.d
The website is working okay, but only because caching is enabled (page cache); when I turned the caching off for a while under heavy load, the site returned error 500.
Also here's the memory usage:
#free -g
total used free shared buff/cache available
Mem: 7 2 2 0 3 4
Swap: 0 0 0
Any help is much appreciated, thank you.
pm.max_children -- decrease the value unless you expect lots of really small pages
query_cache_size= 512M -- Inefficient. No more than 50M
innodb_buffer_pool_size = 4G -- see below
You really should be using InnoDB, not MyISAM.
If 10K hits/day leads to 80K database hits/day, that is only 1/second, which is trivial.
Related
We are performing a load test using locust(1000 Users) on a webpage of our application.
Instance type: t3a.medium
The instance is running behind a load balancer. And we are using RDS Aurora Database which peaks at around 70% CPU utilization. EC2 instance metrics are healthy. EDIT: Instance memory consumption is within 800 MB out of available 4 GB
There are multiple 502 Server error: Bad Gateway and sometimes 500 and 520 errors as well.
Error 1:
2020/10/08 16:58:21 [error] 4344#4344: *41841 connect() to unix:/var/run/php/php7.2-fpm.sock failed (11: Resource temporarily unavailable) while connecting to upstream, client: <PublicIP>, server: <Domain name>, request: "GET <webpage> HTTP/1.1", upstream: "fastcgi://unix:/var/run/php/php7.2-fpm.sock:", host: "<Domain name>"
Error 2(Alert):
2020/10/08 19:15:11 [alert] 9109#9109: *105735 socket() failed (24: Too many open files) while connecting to upstream, client: <PublicIP>, server: <Domain name>, request: "GET <webpage> HTTP/1.1", upstream: "fastcgi://unix:/var/run/php/php7.2-fpm.sock:", host: "<Domain name>"
Listing down configuration files:
Nginx Configuration
server {
listen 80;
listen [::]:80;
root /var/www/####;
index index.php;
access_log /var/log/nginx/###access.log;
error_log /var/log/nginx/####error.log ;
server_name #####;
client_max_body_size 100M;
autoindex off;
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location ~ \.php$ {
include fastcgi_params;
fastcgi_intercept_errors on;
fastcgi_index index.php;
fastcgi_pass unix:/var/run/php/php7.2-fpm.sock;
fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
}
}
/etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 8096;
multi_accept on;
use epoll;
epoll_events 512;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 2;
gzip_min_length 1000;
gzip_types text/xml text/css;
gzip_http_version 1.1;
gzip_vary on;
gzip_disable "MSIE [4-6] \.";
include /etc/nginx/conf.d/*.conf;
}
/etc/php/7.2/fpm/php-fpm.conf
emergency_restart_threshold 10
emergency_restart_interval 1m
process_control_timeout 10s
Php-fpm Important Parameters:
user = www-data
group = www-data
listen = /run/php/php7.2-fpm.sock
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
pm = static
pm.max_children = 300
/etc/security/limits.conf
nginx soft nofile 30000
nginx hard nofile 50000
/etc/sysctl.conf
net.nf_conntrack_max = 131072
net.core.somaxconn = 131072
net.core.netdev_max_backlog = 65535
kernel.msgmnb = 131072
kernel.msgmax = 131072
fs.file-max = 131072
What are we missing? Can anyone please point us in the right direction?
So we were able to resolve this issue. The problem was that php-fpm did not have sufficient access to system resources. You may need to change the values according to your hardware specifications.
So, our final configuration looks like this:
In /etc/security/limits.conf, add following lines:
nginx soft nofile 10000
nginx hard nofile 30000
root soft nofile 10000
root hard nofile 30000
www-data soft nofile 10000
www-data hard nofile 30000
In /etc/sysctl.conf, add following values
net.nf_conntrack_max = 231072
net.core.somaxconn = 231072
net.core.netdev_max_backlog = 65535
kernel.msgmnb = 231072
kernel.msgmax = 231072
fs.file-max = 70000
In /etc/nginx/nginx.conf, change or add so finally it should have these values(kindly change them according to your use case and server capacity):
worker_processes auto;
worker_rlimit_nofile 30000;
events {
worker_connections 8096;
multi_accept on;
use epoll;
epoll_events 512;
}
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 2;
gzip_min_length 1000;
gzip_types text/xml text/css;
gzip_http_version 1.1;
gzip_vary on;
gzip_disable "MSIE [4-6] \.";
In /etc/php/7.2/fpm/php-fpm.conf , change values to look like this:
emergency_restart_threshold = 10
emergency_restart_interval = 1m
process_control_timeout = 10s
rlimit_files = 10000
In /etc/php/7.2/fpm/pool.d/www.conf , change values to look like this:
user = www-data
group = www-data
listen.backlog = 4096
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
pm = static
pm.max_children = 1000
I'm getting a 502 Bad Gateway error on one of my Magento 2 sites.
Here are my configuration files.
nginx.conf configuration file
user nginx;
worker_processes 4;
worker_rlimit_nofile 100000;
pid /var/run/nginx.pid;
events {
use epoll;
# worker_connections 1024;
worker_connections 10240;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log off;
error_log /var/log/nginx/error.log warn;
rewrite_log on;
access_log /var/log/nginx/access.log main buffer=32k flush=300;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
autoindex off;
server_tokens off;
port_in_redirect off;
open_file_cache max=10000 inactive=5m;
open_file_cache_valid 2m;
open_file_cache_min_uses 1;
open_file_cache_errors on;
types_hash_max_size 4096;
client_header_buffer_size 16k;
large_client_header_buffers 4 32k;
fastcgi_send_timeout 3600;
fastcgi_read_timeout 3600;
fastcgi_buffers 8 256k;
fastcgi_buffer_size 256k;
fastcgi_connect_timeout 3600;
############
client_max_body_size 1024M;
client_body_buffer_size 128k;
server_names_hash_max_size 1024;
client_body_timeout 300;
client_header_timeout 300;
keepalive_timeout 600;
keepalive_requests 100000;
send_timeout 60;
server_names_hash_bucket_size 128;
gzip on;
gzip_comp_level 6;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 1100;
gzip_buffers 16 8k;
gzip_types any;
gzip_types text/plain text/css application/octet-stream application/json application/x-javascript application/javascript text/xml application/xml application/xml+rss text/javascript text/x-javascript font/ttf application/font-woff font/opentype application/vnd.ms-fontobject image/svg+xml;
gzip_disable "msie6";
gzip_vary on;
include /etc/nginx/conf.d/*.conf;
}
php-fpm configure file
[domain.com.com]
listen = /var/run/php/domain.com-fpm.sock
listen.allowed_clients = 127.0.0.1
listen.owner = nginx
listen.group = nginx
user = domain_live
group = domain_live
; Choose how the process manager will control the number of child processes.
pm = dynamic
pm.max_children = 150
pm.start_servers = 60
pm.min_spare_servers = 50
pm.max_spare_servers = 90
pm.max_requests = 500
pm.status_path = /status
slowlog = /var/www/www.domain.com.com/logs/php-fpm-www-slow.log
rlimit_core = unlimited
php_admin_value[error_log] = /var/www/www.domain.com.com/logs/php-fpm-www-error.log
php_admin_flag[log_errors] = on
; Set session path to a directory owned by process user
php_value[session.save_handler] = files
php_value[session.save_path] = /var/www/www.domain.com.com/application/var/session
php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
Getting below error logs
php-fpm error logs
WARNING: [pool domain.com] child 4314 exited on signal 11 (SIGSEGV - core dumped) after 4556.304032 seconds from start
NOTICE: [pool domain.com] child 6422 started
nginx error logs
2018/05/18 13:28:02 [error] 4247#4247: *1460 recv() failed (104: Connection reset by peer) while reading response header from upstream, client: 1.2.3.4, server: www.domain.com, request: "GET /checkout/cart/ HTTP/2.0", upstream: "fastcgi://unix:/var/run/php/domain.com-fpm.sock:", host: "www.domain.com.com", referrer: "https://www.domain.com.com/customer/account/login/"
Please help resolve this issue. I have already tried multiple approaches, but none of them have worked.
I have two apps, one running in docker deployed in cloud - A - and the other - B - on a dedicated server. Both are written in PHP and using nginx server. App B is doing PUT requests with bigger payloads (let's think about 1M). The problem is that what gets to PHP in app A is truncated to approximately 8k (8209 bytes to be exact). This causes json_decode to fail decoding the request body and the whole request fails.
I have googled and checked configs for quite a long time already but cannot find the issue.
This is my nginx.conf for app A (running in docker in cloud):
worker_processes auto;
user www-data;
pid /var/run/nginx.pid;
error_log /var/log/nginx/error.log warn;
error_log /var/log/nginx.error.log notice;
error_log /var/log/nginx.error.log info;
events {
worker_connections 1024;
multi_accept on;
use epoll;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 0;
client_header_timeout 60;
client_body_timeout 60;
client_body_buffer_size 10m;
client_max_body_size 100m;
server_tokens off;
reset_timedout_connection on;
send_timeout 60;
include /etc/nginx/mime.types;
default_type text/html;
charset UTF-8;
large_client_header_buffers 4 16k;
fastcgi_buffer_size 128k;
fastcgi_buffers 256 256k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
# cache informations about file descriptors, frequently accessed files
# can boost performance, but you need to test those values
open_file_cache max=65000 inactive=20s;
open_file_cache_valid 60s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
log_format timed '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent $request_time "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log timed;
upstream backend {
server 127.0.0.1:9000;
}
include conf.d/*;
}
This is my site's conf:
server {
listen 80 default_server;
root /var/www/appB/public;
index index.php;
location = /.well-known/schema-discovery {
add_header Content-Type application/json;
return 200 '{}';
}
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location ~* \.(gif|jpg|png|js)$ {
expires 30d;
}
location ~ \.php$ {
try_files $uri =404;
fastcgi_split_path_info ^(.+\.php)(.*)$;
fastcgi_pass 127.0.0.1:9000;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
include /etc/nginx/fastcgi_params;
}
}
This is the www.conf (for php-fpm):
[www]
user = www-data
group = www-data
listen = 127.0.0.1:9000
listen.allowed_clients = 127.0.0.1
pm = dynamic
pm.max_children = 150
pm.start_servers = 30
pm.min_spare_servers = 10
pm.max_spare_servers = 50
pm.process_idle_timeout = 60s
pm.max_requests = 5000
pm.status_path = /status
ping.path = /ping
slowlog = /var/log/fpm/slow.log
request_slowlog_timeout = 60s
request_terminate_timeout = 300s
catch_workers_output = yes
access.log = /var/log/fpm/access.log
php_flag[display_errors] = off
php_flag[html_errors] = off
php_admin_value[error_log] = /var/log/fpm/php_error.log
php_admin_flag[log_errors] = on
php_admin_value[memory_limit] = 1024M
php_admin_value[upload_max_filesize] = 100M
php_admin_value[post_max_size] = 100M
According to the logs, neither nginx nor php-fpm complains about anything (there are no errors in the logs).
Does anybody have an idea what might be wrong?
Thanks a lot in advance!
We run a few high volume websites which together generate around 5 million pageviews per day. We have the most overkill servers as we anticipate growth but we are having reports of a few active users saying the site is sometimes slow on the first pageview. I've seen this myself every once in a while where the first pageview will take 3-5 seconds then it's instant after that for the rest of the day. This has happened to me maybe twice in the last 24 hours so not enough to figure out what's happening. Every page on our site uses PHP but one of the times it happened to me it was on a PHP page that doesn't have any database calls which makes me think the issue is limited to NGINX, PHP-FPM or network settings.
We have 3 NGINX servers running behind a load balancer. Our database is separate on a cluster. I included our configuration files for nginx and php-fpm as well as our current RAM usage and PHP-FPM status. This is based on middle of the day (average traffic for us). Please take a look and let me know if you see any red flags in my setup or have any suggestions to optimize further.
Specs for each NGINX Server:
OS: CentOS 7
RAM: 128GB
CPU: 32 cores (2.4Ghz each)
Drives: 2xSSD on RAID 1
RAM Usage (free -g)
total used free shared buff/cache available
Mem: 125 15 10 3 100 103
Swap: 15 0 15
PHP-FPM status (IE: http://server1_ip/status)
pool: www
process manager: dynamic
start time: 03/Mar/2016:03:42:49 -0800
start since: 1171262
accepted conn: 69827961
listen queue: 0
max listen queue: 0
listen queue len: 0
idle processes: 1670
active processes: 1
total processes: 1671
max active processes: 440
max children reached: 0
slow requests: 0
php-fpm config file:
[www]
user = nginx
group = nginx
listen = /var/opt/remi/php70/run/php-fpm/php-fpm.sock
listen.owner = nginx
listen.group = nginx
listen.mode = 0660
listen.allowed_clients = 127.0.0.1
pm = dynamic
pm.max_children = 6000
pm.start_servers = 1600
pm.min_spare_servers = 1500
pm.max_spare_servers = 2000
pm.max_requests = 1000
pm.status_path = /status
slowlog = /var/opt/remi/php70/log/php-fpm/www-slow.log
php_admin_value[error_log] = /var/opt/remi/php70/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path] = /var/opt/remi/php70/lib/php/session
php_value[soap.wsdl_cache_dir] = /var/opt/remi/php70/lib/php/wsdlcache
nginx config file:
user nginx;
worker_processes 32;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
worker_connections 1000;
multi_accept on;
use epoll;
}
http {
log_format main '$remote_addr - $remote_user [$time_iso8601] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 10 10;
send_timeout 60;
types_hash_max_size 2048;
client_max_body_size 50M;
client_body_buffer_size 5m;
client_body_timeout 60;
client_header_timeout 60;
fastcgi_buffers 256 16k;
fastcgi_buffer_size 128k;
fastcgi_connect_timeout 60s;
fastcgi_send_timeout 60s;
fastcgi_read_timeout 60s;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
reset_timedout_connection on;
server_names_hash_bucket_size 100;
#compression
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/javascript application/xml;
gzip_disable "MSIE [1-6]\.";
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name domain1.com;
root /folderpath;
location / {
index index.php;
}
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
#server status
location /server-status {
stub_status on;
access_log off;
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
}
location = /status {
access_log off;
allow 127.0.0.1;
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
fastcgi_pass unix:/var/opt/remi/php70/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~ \.php$ {
try_files $uri =404;
fastcgi_pass unix:/var/opt/remi/php70/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
UPDATE:
I installed opcache as per the suggestion below. Not sure if it fixes the issue. Here are my settings
opcache.enable=1
opcache.memory_consumption=1024
opcache.interned_strings_buffer=64
opcache.max_accelerated_files=32531
opcache.max_wasted_percentage=10
2 minor tips:
if you use opcache, monitor it to check if its configuration (especially memory size) is ok, and avoid OOM reset, you can use https://github.com/rlerdorf/opcache-status (a single php page)
increase pm.max_requests to keep using same processes
I am doing load testing on an Nginx Server and I am having an issue where my CPU hits 100% but only 50% of my ram is being utilized. The server is this:
2 vCPU
2 GB of RAM
40GB SSD Drive
Rackspace High Performance Server
This is my Nginx Config
worker_processes 2;
error_log /var/log/nginx/error.log crit;
pid /var/run/nginx.pid;
events {
worker_connections 1524;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log /var/log/nginx/access.log main;
access_log off;
# Sendfile copies data between one FD and other from within the kernel.
# More efficient than read() + write(), since the requires transferring data to and from the user space.
sendfile on;
# Tcp_nopush causes nginx to attempt to send its HTTP response head in one packet,
# instead of using partial frames. This is useful for prepending headers before calling sendfile,
# or for throughput optimization.
tcp_nopush on;
# don't buffer data-sends (disable Nagle algorithm). Good for sending frequent small bursts of data in real time.
tcp_nodelay on;
# allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
reset_timedout_connection on;
#keepalive_timeout 0;
keepalive_timeout 65;
# send the client a "request timed out" if the body is not loaded by this time. Default 60.
client_body_timeout 10;
# If the client stops reading data, free up the stale client connection after this much time. Default 60.
send_timeout 2;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
gzip on;
server_tokens off;
client_max_body_size 20m;
client_body_buffer_size 128k;
client_max_body_size 20m;
client_body_buffer_size 128k;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
# Load config files from the /etc/nginx/conf.d directory
# The default server is in conf.d/default.conf
fastcgi_cache_path /var/cache/nginx levels=1:2 keys_zone=microcache:10m max_size=1000m inactive=60m;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
Kernal additions in /etc/sysctl.conf
# Increase system IP port limits to allow for more connections
net.ipv4.ip_local_port_range = 2000 65000
net.ipv4.tcp_window_scaling = 1
# number of packets to keep in backlog before the kernel starts dropping them
net.ipv4.tcp_max_syn_backlog = 3240000
# increase socket listen backlog
net.core.somaxconn = 3240000
net.ipv4.tcp_max_tw_buckets = 1440000
# Increase TCP buffer sizes
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_congestion_control = cubic
Example VHost Config with PHP-FPM
server {
listen 80;
server_name www.example.com;
location / {
root /data/sites/example.com/public_html;
index index.php index.html index.htm;
try_files $uri $uri/ /index.php?rt=$uri&$args;
}
location ~ \.php {
root /data/sites/example.com/public_html;
fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_index index.php;
fastcgi_param PATH_INFO $fastcgi_script_name;
fastcgi_param ENV production;
include fastcgi_params;
}
}
The server can handle about 60 active user connections clicking around, or about 300 requests per second. Is the fact that it maxes out the CPU while not fully utilizing RAM a bad thing? Can I optimize this further?