Three Recipes for Tuning Proxy Server Kernel Parameters
$ vi /etc/sysctl.conf

# Maximum number of file handles the whole system may hold open;
# every socket also consumes a file descriptor.
fs.file-max = 1491124

# Maximum number of TIME_WAIT sockets held simultaneously; short-lived
# HTTP connections generate many of these.
net.ipv4.tcp_max_tw_buckets = 7000

# Do not accept source-routed packets.
net.ipv4.conf.default.accept_source_route = 0

# Shorten the FIN-WAIT-2 timeout from 60s to 30s. (Despite common folklore,
# this does not shorten TIME_WAIT itself, which Linux fixes at 60s.)
net.ipv4.tcp_fin_timeout = 30

# Allow ports held by TIME_WAIT sockets to be reused immediately for new
# outbound connections.
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1

# Disable TCP timestamps. Note the conflict: tw_reuse and tw_recycle both
# depend on timestamps (RFC 1323), so setting this to 0 effectively disables
# them. tw_recycle is generally discouraged anyway: combined with timestamps
# (which are on by default), it makes a server drop connections from clients
# behind NAT, and this is reproducible. (tw_recycle was removed entirely in
# Linux 4.12.)
net.ipv4.tcp_timestamps = 0

# Enable SYN cookies to mitigate some SYN-flood DoS attacks.
net.ipv4.tcp_syncookies = 1

# Fewer keepalive probes, so dead long-lived connections are released sooner.
net.ipv4.tcp_keepalive_probes = 3

# Shorter interval between keepalive probes, same goal as above.
net.ipv4.tcp_keepalive_intvl = 15

# Larger backlogs, so the system can hold more sockets that have not yet
# completed the TCP three-way handshake.
net.ipv4.tcp_max_syn_backlog = 8388608
# Ditto, for packets queued on the network device input side.
net.core.netdev_max_backlog = 8388608
# Ditto, for completed connections awaiting accept().
net.core.somaxconn = 8388608

# Kernel default.
net.ipv4.tcp_keepalive_time = 7200

# Disable window scaling (windows larger than 64 KB, as used on long fat
# pipes) to save some processing; note this caps throughput on high-latency links.
net.ipv4.tcp_window_scaling = 0

# Disable SACK, a TCP optimization for lossy links (e.g. Wi-Fi/3G) where bit
# errors get mistaken for congestion; wired links arguably do not need it.
net.ipv4.tcp_sack = 0

# Widen the local port range available to applications.
net.ipv4.ip_local_port_range = 1024 65000

# Increase TCP buffer sizes
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_congestion_control = cubic

$ vi /etc/security/limits.conf

# Raise the per-process open-file limit.
* hard nofile 65535
* soft nofile 65535
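Once the files are saved, the kernel settings can be loaded and spot-checked from a shell before touching nginx. A minimal sketch using standard sysctl/ss/ulimit invocations (the TIME_WAIT count is approximate, since wc also counts ss's header line):

$ sysctl -p                        # load /etc/sysctl.conf into the running kernel
$ sysctl net.ipv4.tcp_fin_timeout  # verify a single key took effect
$ ss -tan state time-wait | wc -l  # rough count of sockets in TIME_WAIT
$ ulimit -n                        # fd limit; log in again for limits.conf to apply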
$ vi /etc/nginx/nginx.conf

# This number should be, at maximum, the number of CPU cores on your system.
worker_processes 8;

# Number of file descriptors used for nginx. This is set in the OS with
# 'ulimit -n 200000' or via /etc/security/limits.conf.
worker_rlimit_nofile 200000;

# Only log critical errors.
error_log /var/log/nginx/error.log crit;

events {
    # Determines how many clients will be served by each worker process.
    # (Max clients = worker_connections * worker_processes.) "Max clients"
    # is also limited by the number of socket connections available on the
    # system (~64k).
    worker_connections 4000;

    # Essential on Linux: epoll is optimized to serve many clients per worker.
    use epoll;

    # Accept as many connections as possible after nginx is notified about a
    # new connection. May exhaust worker_connections if set too low.
    multi_accept on;
}

http {
    # Cache information about open FDs and frequently accessed files.
    # In the original author's environment, this change took performance from
    # 560k req/sec to 904k req/sec. Some variant of these options is
    # recommended, though not necessarily the specific values listed below.
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    # Buffer log writes to speed up IO, or disable access logging altogether.
    #access_log /var/log/nginx/access.log main buffer=16k;
    access_log off;

    # sendfile copies data between FDs inside the kernel: more efficient than
    # read() + write(), which require transferring data to and from user space.
    sendfile on;

    # tcp_nopush makes nginx attempt to send its HTTP response headers in one
    # packet instead of partial frames. Useful for prepending headers before
    # calling sendfile, and for throughput optimization.
    tcp_nopush on;

    # Don't buffer data sends (disable Nagle's algorithm). Good for sending
    # frequent small bursts of data in real time.
    tcp_nodelay on;

    # Timeout for keep-alive connections; the server closes them after this time.
    keepalive_timeout 30;

    # Number of requests a client can make over one keep-alive connection.
    # Set high here for testing.
    keepalive_requests 100000;

    # Allow the server to close the connection after a client stops
    # responding; frees socket-associated memory.
    reset_timedout_connection on;

    # Send the client a "request timed out" if the body is not received by
    # this time. Default 60.
    client_body_timeout 10;

    # If the client stops reading data, free the stale connection after this
    # much time. Default 60.
    send_timeout 2;

    # Compression: reduces the amount of data transferred over the network.
    gzip on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
    gzip_disable "MSIE [1-6]\.";
}
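A config this size is easy to typo, so validate it and reload gracefully rather than restarting; the worker's effective fd limit can also be read back from /proc. A sketch using nginx's standard CLI (the pgrep pattern assumes the default "nginx: worker" process titles):

$ nginx -t && nginx -s reload      # parse-check, then graceful reload
$ grep "open files" /proc/$(pgrep -f "nginx: worker" | head -n1)/limits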
$ vi /etc/sysctl.conf

# Enable SYN cookies: when the SYN queue overflows, fall back to cookies.
# Guards against modest SYN-flood attacks. Default 0 (off).
net.ipv4.tcp_syncookies = 1

# Allow TIME-WAIT sockets to be reused for new TCP connections. Default 0 (off).
net.ipv4.tcp_tw_reuse = 1

# Fast recycling of TIME-WAIT sockets. Default 0 (off); see the NAT caveat
# in the first recipe.
net.ipv4.tcp_tw_recycle = 1

# Shorten the system default FIN-WAIT-2 timeout.
net.ipv4.tcp_fin_timeout = 30

# How long a connection must sit idle before TCP starts sending keepalive
# probes. Default is 2 hours; lowered here to 20 minutes.
net.ipv4.tcp_keepalive_time = 1200

# Port range used for outbound connections. The default (32768 to 61000) is
# fairly small; widened to 10000 to 65000. (Do not set the lower bound too
# low, or you may collide with ports that local services depend on!)
net.ipv4.ip_local_port_range = 10000 65000

# Length of the SYN queue. Raised from the default 1024 to 8192 so more
# half-open connections can be held while awaiting the handshake.
net.ipv4.tcp_max_syn_backlog = 8192

# Maximum number of TIME_WAIT sockets held at once; beyond this they are
# destroyed immediately and a warning is printed.
net.ipv4.tcp_max_tw_buckets = 5000

# increase TCP max buffer size settable using setsockopt()
net.core.rmem_max = 67108864
net.core.wmem_max = 67108864
# increase Linux autotuning TCP buffer limit
net.ipv4.tcp_rmem = 4096 87380 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864
# increase the length of the processor input queue
net.core.netdev_max_backlog = 250000
# recommended for hosts with jumbo frames enabled
net.ipv4.tcp_mtu_probing = 1
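Most of this recipe targets TIME_WAIT pressure, so it helps to look at the actual distribution of TCP states before and after applying it. A sketch using ss; any individual key can also be flipped at runtime with sysctl -w instead of editing the file:

$ ss -tan | awk 'NR>1 {print $1}' | sort | uniq -c | sort -rn   # connections per TCP state
$ sysctl -w net.ipv4.tcp_fin_timeout=30                         # apply one key without a reboot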
$ vi /etc/sysctl.conf

net.core.wmem_max = 12582912
net.core.rmem_max = 12582912
net.ipv4.tcp_rmem = 10240 87380 12582912
net.ipv4.tcp_wmem = 10240 87380 12582912
net.ipv4.ip_local_port_range = 18000 65535
# Expire conntrack entries for TIME_WAIT connections after 1 second.
# (Legacy ip_conntrack key name; newer kernels expose this as
# net.netfilter.nf_conntrack_tcp_timeout_time_wait.)
net.ipv4.netfilter.ip_conntrack_tcp_timeout_time_wait = 1
net.ipv4.tcp_window_scaling = 1
# Note: many kernels cap backlog-related values well below numbers this large.
net.ipv4.tcp_max_syn_backlog = 3240000
net.core.somaxconn = 3240000
net.ipv4.tcp_max_tw_buckets = 1440000
net.ipv4.tcp_congestion_control = cubic
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 15
# Fewer SYN/SYN-ACK retries: give up on unresponsive peers sooner.
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_synack_retries = 2
# See the NAT caveat on tcp_tw_recycle in the first recipe.
net.ipv4.tcp_tw_recycle = 1
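Because this recipe also tightens the conntrack TIME_WAIT timeout, it is worth watching conntrack table occupancy so legitimate connections are not evicted. A sketch against the modern nf_conntrack keys (assumes the nf_conntrack module is loaded; exact key paths vary across kernel versions):

$ sysctl net.netfilter.nf_conntrack_count net.netfilter.nf_conntrack_max
$ sysctl -w net.netfilter.nf_conntrack_tcp_timeout_time_wait=1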
via iGFW http://ift.tt/2AoKMPm