I'm getting a 502 Bad Gateway error on one of my Magento 2 sites.
Here are my configuration files.
nginx.conf configuration file
user nginx;
worker_processes 4;
worker_rlimit_nofile 100000;
pid /var/run/nginx.pid;
events {
use epoll;
# worker_connections 1024;
worker_connections 10240;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log off;
error_log /var/log/nginx/error.log warn;
rewrite_log on;
access_log /var/log/nginx/access.log main buffer=32k flush=300;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
autoindex off;
server_tokens off;
port_in_redirect off;
open_file_cache max=10000 inactive=5m;
open_file_cache_valid 2m;
open_file_cache_min_uses 1;
open_file_cache_errors on;
types_hash_max_size 4096;
client_header_buffer_size 16k;
large_client_header_buffers 4 32k;
fastcgi_send_timeout 3600;
fastcgi_read_timeout 3600;
fastcgi_buffers 8 256k;
fastcgi_buffer_size 256k;
fastcgi_connect_timeout 3600;
############
client_max_body_size 1024M;
client_body_buffer_size 128k;
server_names_hash_max_size 1024;
client_body_timeout 300;
client_header_timeout 300;
keepalive_timeout 600;
keepalive_requests 100000;
send_timeout 60;
server_names_hash_bucket_size 128;
gzip on;
gzip_comp_level 6;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 1100;
gzip_buffers 16 8k;
gzip_types any;
gzip_types text/plain text/css application/octet-stream application/json application/x-javascript application/javascript text/xml application/xml application/xml+rss text/javascript text/x-javascript font/ttf application/font-woff font/opentype application/vnd.ms-fontobject image/svg+xml;
gzip_disable "msie6";
gzip_vary on;
include /etc/nginx/conf.d/*.conf;
}
php-fpm pool configuration file
[domain.com.com]
listen = /var/run/php/domain.com-fpm.sock
listen.allowed_clients = 127.0.0.1
listen.owner = nginx
listen.group = nginx
user = domain_live
group = domain_live
; Choose how the process manager will control the number of child processes.
pm = dynamic
pm.max_children = 150
pm.start_servers = 60
pm.min_spare_servers = 50
pm.max_spare_servers = 90
pm.max_requests = 500
pm.status_path = /status
slowlog = /var/www/www.domain.com.com/logs/php-fpm-www-slow.log
rlimit_core = unlimited
php_admin_value[error_log] = /var/www/www.domain.com.com/logs/php-fpm-www-error.log
php_admin_flag[log_errors] = on
; Set session path to a directory owned by process user
php_value[session.save_handler] = files
php_value[session.save_path] = /var/www/www.domain.com.com/application/var/session
php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
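Before looking at the logs, it can be worth confirming that the pool actually answers on the socket nginx points at. A minimal sketch, assuming the cgi-fcgi client (from the fcgi/libfcgi package) is installed, and using the socket and pm.status_path from the pool above:
# Check that the socket exists and is owned by nginx, as configured above
ls -l /var/run/php/domain.com-fpm.sock
# Query the FPM status page directly over FastCGI, bypassing nginx
SCRIPT_NAME=/status SCRIPT_FILENAME=/status REQUEST_METHOD=GET \
  cgi-fcgi -bind -connect /var/run/php/domain.com-fpm.sock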
I'm getting the error logs below.
php-fpm error logs
WARNING: [pool domain.com] child 4314 exited on signal 11 (SIGSEGV - core dumped) after 4556.304032 seconds from start
NOTICE: [pool domain.com] child 6422 started
nginx error logs
2018/05/18 13:28:02 [error] 4247#4247: *1460 recv() failed (104: Connection reset by peer) while reading response header from upstream, client: 1.2.3.4, server: www.domain.com, request: "GET /checkout/cart/ HTTP/2.0", upstream: "fastcgi://unix:/var/run/php/domain.com-fpm.sock:", host: "www.domain.com.com", referrer: "https://www.domain.com.com/customer/account/login/"
Please help me resolve this issue. I've already tried multiple ways to resolve it but haven't had any success.
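Since the pool already sets rlimit_core = unlimited, one way to get more information is to inspect the core dump left by the segfaulting child. A rough sketch, assuming gdb is installed and kernel.core_pattern points somewhere writable (the /tmp path is only an example):
# Send core dumps to a predictable location (example path)
sysctl -w kernel.core_pattern=/tmp/core.%e.%p
# After the next "exited on signal 11" entry, open the dump with gdb;
# the binary may be named php-fpm7.0, php-fpm7.1, etc. on your system
gdb /usr/sbin/php-fpm /tmp/core.php-fpm.<pid>
(gdb) bt full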
Related
We are performing a load test using Locust (1,000 users) on a page of our application.
Instance type: t3a.medium
The instance runs behind a load balancer, and we are using an RDS Aurora database which peaks at around 70% CPU utilization. EC2 instance metrics are healthy. EDIT: instance memory consumption stays within 800 MB of the available 4 GB.
We are seeing multiple 502 Bad Gateway errors, and sometimes 500 and 520 errors as well.
Error 1:
2020/10/08 16:58:21 [error] 4344#4344: *41841 connect() to unix:/var/run/php/php7.2-fpm.sock failed (11: Resource temporarily unavailable) while connecting to upstream, client: <PublicIP>, server: <Domain name>, request: "GET <webpage> HTTP/1.1", upstream: "fastcgi://unix:/var/run/php/php7.2-fpm.sock:", host: "<Domain name>"
Error 2 (alert):
2020/10/08 19:15:11 [alert] 9109#9109: *105735 socket() failed (24: Too many open files) while connecting to upstream, client: <PublicIP>, server: <Domain name>, request: "GET <webpage> HTTP/1.1", upstream: "fastcgi://unix:/var/run/php/php7.2-fpm.sock:", host: "<Domain name>"
The configuration files are listed below:
Nginx Configuration
server {
listen 80;
listen [::]:80;
root /var/www/####;
index index.php;
access_log /var/log/nginx/###access.log;
error_log /var/log/nginx/####error.log ;
server_name #####;
client_max_body_size 100M;
autoindex off;
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location ~ \.php$ {
include fastcgi_params;
fastcgi_intercept_errors on;
fastcgi_index index.php;
fastcgi_pass unix:/var/run/php/php7.2-fpm.sock;
fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
}
}
/etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 8096;
multi_accept on;
use epoll;
epoll_events 512;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 2;
gzip_min_length 1000;
gzip_types text/xml text/css;
gzip_http_version 1.1;
gzip_vary on;
gzip_disable "MSIE [4-6] \.";
include /etc/nginx/conf.d/*.conf;
}
/etc/php/7.2/fpm/php-fpm.conf
emergency_restart_threshold = 10
emergency_restart_interval = 1m
process_control_timeout = 10s
Important PHP-FPM pool parameters:
user = www-data
group = www-data
listen = /run/php/php7.2-fpm.sock
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
pm = static
pm.max_children = 300
/etc/security/limits.conf
nginx soft nofile 30000
nginx hard nofile 50000
/etc/sysctl.conf
net.nf_conntrack_max = 131072
net.core.somaxconn = 131072
net.core.netdev_max_backlog = 65535
kernel.msgmnb = 131072
kernel.msgmax = 131072
fs.file-max = 131072
What are we missing? Can anyone please point us in the right direction?
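One thing worth checking while the test runs is whether the limits the workers actually inherit match /etc/security/limits.conf (services started by systemd do not always pick those up) and how many descriptors are really in use. A rough sketch, assuming the nginx workers run as nginx and the FPM children as www-data:
# Effective open-file limit of a running nginx worker
cat /proc/$(pgrep -o -u nginx nginx)/limits | grep 'open files'
# Descriptors actually in use by that worker during the test
ls /proc/$(pgrep -o -u nginx nginx)/fd | wc -l
# Same check for a php-fpm child
cat /proc/$(pgrep -o -u www-data php-fpm)/limits | grep 'open files'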
We were able to resolve this issue. The problem was that PHP-FPM did not have access to enough system resources (primarily open file descriptors). You may need to change the values below according to your hardware specifications.
Our final configuration looks like this:
In /etc/security/limits.conf, add the following lines:
nginx soft nofile 10000
nginx hard nofile 30000
root soft nofile 10000
root hard nofile 30000
www-data soft nofile 10000
www-data hard nofile 30000
In /etc/sysctl.conf, add the following values:
net.nf_conntrack_max = 231072
net.core.somaxconn = 231072
net.core.netdev_max_backlog = 65535
kernel.msgmnb = 231072
kernel.msgmax = 231072
fs.file-max = 70000
In /etc/nginx/nginx.conf, change or add directives so that it ends up with these values (adjust them according to your use case and server capacity):
worker_processes auto;
worker_rlimit_nofile 30000;
events {
worker_connections 8096;
multi_accept on;
use epoll;
epoll_events 512;
}
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 2;
gzip_min_length 1000;
gzip_types text/xml text/css;
gzip_http_version 1.1;
gzip_vary on;
gzip_disable "MSIE [4-6] .";
In /etc/php/7.2/fpm/php-fpm.conf, change the values to look like this:
emergency_restart_threshold = 10
emergency_restart_interval = 1m
process_control_timeout = 10s
rlimit_files = 10000
In /etc/php/7.2/fpm/pool.d/www.conf, change the values to look like this:
user = www-data
group = www-data
listen.backlog = 4096
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
pm = static
pm.max_children = 1000
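After these edits, the kernel settings have to be reloaded and the services restarted, and it is worth verifying that the new descriptor limits really apply to the running workers. A short sketch (service names are the Ubuntu/Debian ones and may differ):
sysctl -p                              # reload /etc/sysctl.conf
systemctl restart php7.2-fpm nginx     # pick up rlimit_files and worker_rlimit_nofile
# Confirm the limits the workers actually run with
cat /proc/$(pgrep -o -u www-data php-fpm)/limits | grep 'open files'
cat /proc/$(pgrep -o -u nginx nginx)/limits | grep 'open files'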
I'm picking up and rejuvenating an old dormant project and seeing a persistent Nginx 502 Bad Gateway on my Apache/Nginx development server. As this was originally developed by someone else, I'm struggling to find the answer.
Trying to view example.com/test gives the error. Any ideas for what I can check, please?
Nginx error.log:
2017/09/07 18:20:31 [error] 11911#0: *311 connect() failed (111: Connection refused) while connecting to upstream, client: xx.my.ip.xx, server: www.example.com, request: "GET /test HTTP/1.1", upstream: "http://127.0.0.1:8000/test", host: "www.example.com"
2017/09/07 18:20:42 [info] 11911#0: *312 client closed connection while SSL handshaking, client: xx.my.ip.xx, server: 0.0.0.0:443
Nginx config:
user nobody;
# no need for more workers in the proxy mode
worker_processes 4;
error_log /var/log/nginx/error.log info;
worker_rlimit_nofile 20480;
events {
worker_connections 5120; # increase for busier servers
use epoll; # you should use epoll here for Linux kernels 2.6.x
}
http {
# custom start
proxy_buffering off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
# proxy_http_version appeared in nginx 1.1.4
proxy_http_version 1.1;
upstream thedevelopment {
server 127.0.0.1:8000;
}
server {
listen 80;
server_name www.example.com domain.com;
return 301 https://www.example.com$request_uri;
}
server {
listen 443;
ssl on;
ssl_certificate /etc/nginx/ssl/domain_com.crt;
ssl_certificate_key /etc/nginx/ssl/domain.key;
keepalive_timeout 70;
server_name www.example.com;
location / {
proxy_pass http://xx.xx.xx.130:8080;
}
location /test {
proxy_pass http://thedevelopment;
}
}
# custom end
server_name_in_redirect off;
server_names_hash_max_size 10240;
server_names_hash_bucket_size 1024;
include mime.types;
default_type application/octet-stream;
server_tokens off;
# remove/comment out disable_symlinks if_not_owner; if you get a Permission denied error
# disable_symlinks if_not_owner;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 5;
gzip on;
gzip_vary on;
gzip_disable "MSIE [1-6]\.";
gzip_proxied any;
gzip_http_version 1.1;
gzip_min_length 1000;
gzip_comp_level 6;
gzip_buffers 16 8k;
# You can remove image/png image/x-icon image/gif image/jpeg if you have slow CPU
gzip_types text/plain text/xml text/css application/x-javascript application/xml image/png image/x-icon image/gif image/jpeg application/javascript application/xml+rss text/javascript application/atom+xml;
ignore_invalid_headers on;
client_header_timeout 3m;
client_body_timeout 3m;
send_timeout 3m;
reset_timedout_connection on;
connection_pool_size 256;
client_header_buffer_size 256k;
large_client_header_buffers 4 256k;
client_max_body_size 200M;
client_body_buffer_size 128k;
request_pool_size 32k;
output_buffers 4 32k;
postpone_output 1460;
proxy_temp_path /tmp/nginx_proxy/;
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=microcache:5m max_size=1000m;
client_body_in_file_only on;
log_format bytes_log "$msec $bytes_sent .";
log_format custom_microcache '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" nocache:$no_cache';
include "/etc/nginx/vhosts/*";
}
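The 111: Connection refused entries mean nginx reached the TCP layer but nothing was accepting connections on 127.0.0.1:8000, i.e. the backend behind the thedevelopment upstream is not running or is listening somewhere else. A quick check, assuming iproute2 and curl are available:
# Is anything listening on the upstream port?
ss -tlnp | grep ':8000'
# If something is, hit it directly and compare with what nginx returns
curl -I http://127.0.0.1:8000/test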
I'm trying to optimize a VPS that's running Nginx + PHP-FPM + MariaDB for a website that gets a lot of traffic.
Machine specs are:
8GB RAM
8 CPU cores
1x30GB SSD
The machine runs CentOS 7.1.
Here's my nginx.conf:
# Server globals
user nginx;
worker_processes 8;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
# Worker config
events {
worker_connections 1024;
use epoll;
}
http {
# Main settings
sendfile on;
tcp_nopush on;
tcp_nodelay on;
client_header_timeout 1m;
client_body_timeout 1m;
client_header_buffer_size 2k;
client_body_buffer_size 256k;
client_max_body_size 256m;
large_client_header_buffers 4 8k;
send_timeout 30;
keepalive_timeout 60 60;
reset_timedout_connection on;
server_tokens off;
server_name_in_redirect off;
server_names_hash_max_size 512;
server_names_hash_bucket_size 512;
# Log format
log_format main '$remote_addr - $remote_user [$time_local] $request '
'"$status" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format bytes '$body_bytes_sent';
#access_log /var/log/nginx/access.log main;
access_log off;
# Mime settings
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Compression
gzip on;
gzip_comp_level 9;
gzip_min_length 512;
gzip_buffers 8 64k;
gzip_types text/plain text/css text/javascript
application/x-javascript application/javascript;
gzip_proxied any;
# Proxy settings
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_header Set-Cookie;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
# SSL PCI Compliance
ssl_session_cache shared:SSL:10m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
# Error pages
error_page 403 /error/403.html;
error_page 404 /error/404.html;
error_page 502 503 504 /error/50x.html;
# Cache
proxy_cache_path /var/cache/nginx levels=2 keys_zone=cache:10m inactive=60m max_size=512m;
proxy_temp_path /var/cache/nginx/temp;
proxy_cache_key "$host$request_uri $cookie_user";
proxy_ignore_headers Expires Cache-Control;
proxy_cache_use_stale error timeout invalid_header http_502;
proxy_cache_valid any 3d;
map $http_cookie $no_cache {
default 0;
~SESS 1;
~wordpress_logged_in 1;
}
# Wildcard include
include /etc/nginx/conf.d/*.conf;
}
Also here's my domainname.conf:
[domainname.tld]
listen = 127.0.0.1:9003
listen.allowed_clients = 127.0.0.1
user = admin
group = admin
pm = ondemand
pm.max_children = 70
pm.start_servers = 3
pm.min_spare_servers = 3
pm.max_spare_servers = 10
env[HOSTNAME] = $HOSTNAME
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /tmp
env[TMPDIR] = /tmp
env[TEMP] = /tmp
And here's the main www.conf:
[www]
listen = 127.0.0.1:9000
listen.allowed_clients = 127.0.0.1
user = apache
group = apache
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 3
pm.max_spare_servers = 35
And finally here's the my.cnf:
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
symbolic-links=0
skip-external-locking
key_buffer_size = 512M
max_allowed_packet = 128M
table_open_cache = 1024
sort_buffer_size = 8M
read_buffer_size = 8M
read_rnd_buffer_size = 256M
myisam_sort_buffer_size = 128M
thread_cache_size = 8
query_cache_size= 512M
thread_concurrency = 32
#innodb_use_native_aio = 0
innodb_file_per_table
max_connections=500
max_user_connections=250
wait_timeout=5
interactive_timeout=50
long_query_time=10
#slow_query_log=1
#slow_query_log_file=/var/log/mysql-slow-queries.log
[mysqld_safe]
log-error=/var/log/mariadb/mariadb.log
pid-file=/var/run/mariadb/mariadb.pid
#
# include all files from the config directory
#
!includedir /etc/my.cnf.d
The website works well enough because page caching is enabled; when I turned caching off for a while under heavy load, I got 500 errors.
Also here's the memory usage:
#free -g
total used free shared buff/cache available
Mem: 7 2 2 0 3 4
Swap: 0 0 0
Any help is much appreciated, thank you.
pm.max_children -- decrease the value unless you expect lots of really small pages
query_cache_size= 512M -- Inefficient. No more than 50M
innodb_buffer_pool_size = 4G -- see below
You really should be using InnoDB, not MyISAM.
If 10K hits/day leads to 80K database hits/day, that is only 1/second, which is trivial.
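Putting those points together, a my.cnf sketch for this 8 GB box might look like the following; the numbers are illustrative and assume the tables have been (or will be) converted to InnoDB:
[mysqld]
query_cache_size        = 50M   # large query caches spend more time invalidating than serving
innodb_buffer_pool_size = 4G    # roughly half of RAM once MyISAM buffers shrink
innodb_file_per_table   = 1
key_buffer_size         = 32M   # only used by MyISAM; shrink after converting tables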
We run a few high-volume websites which together generate around 5 million pageviews per day. We have fairly overkill servers because we anticipate growth, but we are getting reports from a few active users that the site is sometimes slow on the first pageview. I've seen this myself every once in a while: the first pageview takes 3-5 seconds, then it's instant after that for the rest of the day. It has happened to me maybe twice in the last 24 hours, so not often enough to figure out what's going on. Every page on our site uses PHP, but one of the times it happened to me it was on a PHP page that doesn't make any database calls, which makes me think the issue is limited to NGINX, PHP-FPM, or network settings.
We have 3 NGINX servers running behind a load balancer. Our database is on a separate cluster. I have included our configuration files for nginx and php-fpm as well as our current RAM usage and PHP-FPM status. This is based on the middle of the day (average traffic for us). Please take a look and let me know if you see any red flags in my setup or have any suggestions to optimize further.
Specs for each NGINX Server:
OS: CentOS 7
RAM: 128GB
CPU: 32 cores (2.4Ghz each)
Drives: 2xSSD on RAID 1
RAM Usage (free -g)
total used free shared buff/cache available
Mem: 125 15 10 3 100 103
Swap: 15 0 15
PHP-FPM status (i.e. http://server1_ip/status)
pool: www
process manager: dynamic
start time: 03/Mar/2016:03:42:49 -0800
start since: 1171262
accepted conn: 69827961
listen queue: 0
max listen queue: 0
listen queue len: 0
idle processes: 1670
active processes: 1
total processes: 1671
max active processes: 440
max children reached: 0
slow requests: 0
php-fpm config file:
[www]
user = nginx
group = nginx
listen = /var/opt/remi/php70/run/php-fpm/php-fpm.sock
listen.owner = nginx
listen.group = nginx
listen.mode = 0660
listen.allowed_clients = 127.0.0.1
pm = dynamic
pm.max_children = 6000
pm.start_servers = 1600
pm.min_spare_servers = 1500
pm.max_spare_servers = 2000
pm.max_requests = 1000
pm.status_path = /status
slowlog = /var/opt/remi/php70/log/php-fpm/www-slow.log
php_admin_value[error_log] = /var/opt/remi/php70/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path] = /var/opt/remi/php70/lib/php/session
php_value[soap.wsdl_cache_dir] = /var/opt/remi/php70/lib/php/wsdlcache
nginx config file:
user nginx;
worker_processes 32;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
worker_connections 1000;
multi_accept on;
use epoll;
}
http {
log_format main '$remote_addr - $remote_user [$time_iso8601] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 10 10;
send_timeout 60;
types_hash_max_size 2048;
client_max_body_size 50M;
client_body_buffer_size 5m;
client_body_timeout 60;
client_header_timeout 60;
fastcgi_buffers 256 16k;
fastcgi_buffer_size 128k;
fastcgi_connect_timeout 60s;
fastcgi_send_timeout 60s;
fastcgi_read_timeout 60s;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
reset_timedout_connection on;
server_names_hash_bucket_size 100;
#compression
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/javascript application/xml;
gzip_disable "MSIE [1-6]\.";
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name domain1.com;
root /folderpath;
location / {
index index.php;
}
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
#server status
location /server-status {
stub_status on;
access_log off;
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
}
location = /status {
access_log off;
allow 127.0.0.1;
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
fastcgi_pass unix:/var/opt/remi/php70/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~ \.php$ {
try_files $uri =404;
fastcgi_pass unix:/var/opt/remi/php70/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
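Because the slowdown is intermittent, it can help to log per-request upstream timings so a slow first hit can be attributed to PHP-FPM, the network, or nginx itself. A sketch of an extra log format (the name and path are arbitrary, and $upstream_connect_time needs a reasonably recent nginx) that could be added to the http block:
log_format timing '$remote_addr [$time_iso8601] "$request" $status '
                  'rt=$request_time urt=$upstream_response_time uct=$upstream_connect_time';
access_log /var/log/nginx/timing.log timing;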
UPDATE:
I installed OPcache as per the suggestion below. I'm not sure if it fixes the issue. Here are my settings:
opcache.enable=1
opcache.memory_consumption=1024
opcache.interned_strings_buffer=64
opcache.max_accelerated_files=32531
opcache.max_wasted_percentage=10
Two minor tips:
If you use OPcache, monitor it to check that its configuration (especially the memory size) is OK and to avoid out-of-memory restarts; you can use https://github.com/rlerdorf/opcache-status (a single PHP page).
Increase pm.max_requests to keep reusing the same processes.
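For the first tip, a tiny status script is enough to watch memory use and the hit rate; the file name below is only an example, and it should be served through PHP-FPM rather than run from the CLI so it reports the pool's OPcache (remove it again afterwards):
<?php
// opcache-check.php: minimal OPcache health readout (illustrative only)
$s = opcache_get_status(false);
printf("used: %.1f MB, free: %.1f MB, wasted: %.1f%%, hit rate: %.2f%%\n",
    $s['memory_usage']['used_memory'] / 1048576,
    $s['memory_usage']['free_memory'] / 1048576,
    $s['memory_usage']['current_wasted_percentage'],
    $s['opcache_statistics']['opcache_hit_rate']);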
I am doing load testing on an Nginx server and I am having an issue where my CPU hits 100% but only 50% of my RAM is being utilized. The server is:
2 vCPU
2 GB of RAM
40GB SSD Drive
Rackspace High Performance Server
This is my Nginx Config
worker_processes 2;
error_log /var/log/nginx/error.log crit;
pid /var/run/nginx.pid;
events {
worker_connections 1524;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log /var/log/nginx/access.log main;
access_log off;
# Sendfile copies data between one FD and other from within the kernel.
# More efficient than read() + write(), since those require transferring data to and from user space.
sendfile on;
# Tcp_nopush causes nginx to attempt to send its HTTP response head in one packet,
# instead of using partial frames. This is useful for prepending headers before calling sendfile,
# or for throughput optimization.
tcp_nopush on;
# don't buffer data-sends (disable Nagle algorithm). Good for sending frequent small bursts of data in real time.
tcp_nodelay on;
# allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
reset_timedout_connection on;
#keepalive_timeout 0;
keepalive_timeout 65;
# send the client a "request timed out" if the body is not loaded by this time. Default 60.
client_body_timeout 10;
# If the client stops reading data, free up the stale client connection after this much time. Default 60.
send_timeout 2;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
gzip on;
server_tokens off;
client_max_body_size 20m;
client_body_buffer_size 128k;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
# Load config files from the /etc/nginx/conf.d directory
# The default server is in conf.d/default.conf
fastcgi_cache_path /var/cache/nginx levels=1:2 keys_zone=microcache:10m max_size=1000m inactive=60m;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
Kernel additions in /etc/sysctl.conf
# Increase system IP port limits to allow for more connections
net.ipv4.ip_local_port_range = 2000 65000
net.ipv4.tcp_window_scaling = 1
# number of packets to keep in backlog before the kernel starts dropping them
net.ipv4.tcp_max_syn_backlog = 3240000
# increase socket listen backlog
net.core.somaxconn = 3240000
net.ipv4.tcp_max_tw_buckets = 1440000
# Increase TCP buffer sizes
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_congestion_control = cubic
Example VHost Config with PHP-FPM
server {
listen 80;
server_name www.example.com;
location / {
root /data/sites/example.com/public_html;
index index.php index.html index.htm;
try_files $uri $uri/ /index.php?rt=$uri&$args;
}
location ~ \.php {
root /data/sites/example.com/public_html;
fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_index index.php;
fastcgi_param PATH_INFO $fastcgi_script_name;
fastcgi_param ENV production;
include fastcgi_params;
}
}
The server can handle about 60 active SBU connections clicking around, or about 300 requests per second. Is the fact that it is not fully utilizing RAM while maxing out the CPU a bad thing? Can I optimize this further?
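One possibility, since CPU rather than RAM is the bottleneck: the fastcgi_cache_path zone named microcache defined in nginx.conf above is never referenced by the PHP location, so every request hits PHP-FPM. A sketch of wiring up short-TTL FastCGI micro-caching (the cache key and the 1s TTL are illustrative, and it is only safe for responses that do not vary per user):
location ~ \.php {
    root /data/sites/example.com/public_html;
    include fastcgi_params;
    fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
    fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock;

    # serve repeated identical requests from the shared-memory zone for one second
    fastcgi_cache microcache;
    fastcgi_cache_key $scheme$host$request_uri$request_method;
    fastcgi_cache_valid 200 301 302 1s;
    fastcgi_cache_use_stale updating error timeout;
}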