2.6.1 High-performance load-balancing cluster with LVS + Keepalived + Nginx + Tomcat
I. Set up the VIP
1. Manually add the VIP
1) Run ifconfig to find the currently active NIC, e.g. eth0.
2) Add the VIP by running:
ifconfig eth0:1 192.168.1.22 broadcast 192.168.1.255 netmask 255.255.255.0 up
Here eth0:1 is the alias interface the VIP is bound to, 192.168.1.22 is the VIP itself, the broadcast address is 192.168.1.255, the netmask is 255.255.255.0, and up brings the alias up immediately.
3) Run ifconfig again to check that the VIP is active.
4) Test it: ping 192.168.1.22
5) Add the command to /etc/rc.local so the VIP is set automatically at boot (see the sketch below).
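A minimal sketch of that rc.local entry, assuming the same interface (eth0) and addresses as above; adjust them to your own environment:
#append the ifconfig command so the VIP is re-created at boot
echo "ifconfig eth0:1 192.168.1.22 broadcast 192.168.1.255 netmask 255.255.255.0 up" >> /etc/rc.local
#on some distributions (e.g. CentOS 7) the file must also be executable for it to run at boot
chmod +x /etc/rc.d/rc.local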
II. Build and install Nginx
Preparation
Building Nginx from source requires the compiler toolchain first, and then the Nginx dependencies:
yum -y install gcc gcc-c++ autoconf automake make
yum -y install zlib zlib-devel openssl openssl-devel pcre pcre-devel
Download Nginx
Fetch Nginx from the official site:
wget http://nginx.org/download/nginx-1.12.2.tar.gz
Extract the archive (here into /root/lua/nginx-1.12.2):
tar -zxvf nginx-1.12.2.tar.gz
Install
Step one is configuration, step two is compiling and installing.
Configure Nginx
#enter the extracted nginx directory
cd nginx-1.12.2
#configure nginx
#--prefix sets the install directory
#/usr/local/nginx-lb is the install directory; it must not be the same directory the source was extracted to
#http_stub_status_module: status monitoring
#http_ssl_module: HTTPS support
#stream (--with-stream): adds the stream module so plain TCP can be proxied as well
#http_gzip_static_module: gzip compression
#http_sub_module: response substitution
./configure --prefix=/usr/local/nginx-lb \
    --with-http_stub_status_module \
    --with-http_ssl_module \
    --with-stream \
    --with-http_gzip_static_module \
    --with-http_sub_module
Compile and install
#compile and install nginx
make && make install
Verify
After a successful install, four directories (conf, html, logs, sbin) are created under the directory given by --prefix, i.e. /usr/local/nginx-lb.
#start the nginx service
/usr/local/nginx-lb/sbin/nginx
#stop the service
/usr/local/nginx-lb/sbin/nginx -s stop
#reload the service (re-reads the configuration)
/usr/local/nginx-lb/sbin/nginx -s reload
#check that the nginx processes are running
ps -ef | grep nginx
#check that nginx answers (replace the ip with your own server ip)
curl 192.168.1.100
#check the listening port
netstat -ano | grep 80
#check the nginx version and build options
./sbin/nginx -V
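As a quick sanity check (not one of the original steps), nginx -V prints the configure arguments on stderr, so the compiled-in modules can be confirmed like this:
#nginx -V writes to stderr, hence the 2>&1
/usr/local/nginx-lb/sbin/nginx -V 2>&1 | grep -o -e with-stream -e with-http_stub_status_module -e with-http_ssl_module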
III. Install JDK 1.8 and Tomcat
Omitted…
IV. Configure Nginx and Tomcat for load balancing / reverse proxying
Edit nginx/conf/nginx.conf
cd /usr/local/nginx-lb/conf/
vi nginx.conf
worker_processes 1;
events {
    #event model and connection limit
    use epoll;
    worker_connections 1024;
}
#http server; its reverse-proxy features provide the load balancing
http {
    include mime.types;
    default_type application/octet-stream;
    #proxy settings
    include proxy.conf;
    #request buffers
    server_names_hash_bucket_size 128;
    client_header_buffer_size 32K;
    large_client_header_buffers 4 32k;
    # client_max_body_size 8m;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    #keepalive_timeout 0;
    keepalive_timeout 65;
    #enable gzip compression
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_http_version 1.1;
    gzip_comp_level 2;
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;
    #the tomcat backends; more tomcat addresses can be added here
    upstream tomcat_pool {
        server 192.168.1.101:8080 weight=4 max_fails=2 fail_timeout=30s;
        server 192.168.1.102:8080 weight=4 max_fails=2 fail_timeout=30s;
    }
    server {
        listen 80;
        server_name localhost;
        location / {
            root html;
            index index.jsp index.html index.htm;
        }
        #all JSP pages are handed to tomcat
        location ~ \.(jsp|jspx|dp)$ {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_pass http://tomcat_pool;
        }
        #static files are served directly, without going through tomcat
        location ~ .*\.(htm|html|gif|jpg|jpeg|png|bmp|swf|ico|rar|zip|txt|flv|mid|doc|ppt|pdf|xls|mp3|wma)$ {
            expires 30d;
        }
        location ~ .*\.(js|css)$ {
            expires 1h;
        }
        #access log location
        #access_log logs/study.log main;
        # redirect server error pages to the static page /50x.html
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
}
Add proxy.conf
vim /usr/local/nginx-lb/conf/proxy.conf
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
Verify that the configuration is correct
/usr/local/nginx-lb/sbin/nginx -t
nginx: the configuration file /usr/local/nginx-lb/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx-lb/conf/nginx.conf test is successful
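Because the build above enabled --with-http_stub_status_module, a small status endpoint can optionally be added inside the server block and activated with nginx -s reload; this is only a sketch, and the /nginx_status path is an arbitrary choice:
#optional: put inside the server { } block of nginx.conf
location = /nginx_status {
    stub_status on;      #reports active connections, accepts, handled, requests
    access_log off;
    allow 127.0.0.1;     #assumption: restrict to localhost, widen to your admin network if needed
    deny all;
}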
Quick test
Make sure that each of the two Nginx servers can reach both of the proxied Tomcats.
curl 192.168.1.101
OK, at this point Nginx and Tomcat are done. (Note: every Nginx node gets exactly the same configuration as above.)
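To actually watch requests being spread over the two Tomcats, a simple loop against one Nginx node is enough; this sketch assumes the Nginx IP 192.168.1.100 from earlier and a test page (for example an index.jsp that prints its own host name) deployed on both Tomcats:
#20 requests; if each Tomcat identifies itself, the responses should alternate according to the weights
for i in $(seq 1 20); do curl -s http://192.168.1.100/index.jsp; echo; done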
V. Install LVS + Keepalived
LVS master
Install ipvsadm and keepalived
yum -y install ipvsadm keepalived
Enable IP forwarding
Enable it permanently:
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
Reload so that the setting takes effect immediately:
sysctl -p
Check:
sysctl -a | grep "ip_forward"
net.ipv4.ip_forward = 1
net.ipv4.ip_forward_use_pmtu = 0
Use keepalived to manage LVS (master)
vim /etc/keepalived/keepalived.conf
global_defs {
    router_id master_210
}
vrrp_instance aiyou {
    state MASTER             #on the backup server change MASTER to BACKUP
    interface eth0           #the server's NIC name; check it with ip addr
    virtual_router_id 100    #must be identical on master and slave
    priority 100             #decides which server becomes master; change 100 to 90 on the backup
    advert_int 1             #interval in seconds between sync checks between the master and backup load balancers
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    #list of virtual IP addresses, i.e. the VIPs; add more lines here if there are several
    virtual_ipaddress {
        192.168.146.200
    }
}
virtual_server 192.168.146.200 80 {
    delay_loop 3             #query realserver state every 3 seconds
    lb_algo wrr              #weighted round-robin scheduling
    lb_kind DR               #DR mode (Direct Routing)
    # persistence_timeout 60 #connections from the same IP go to the same realserver for 60 seconds
    protocol TCP             #use TCP to check realserver state
    real_server 192.168.146.139 80 {
        weight 1             #weight (a higher weight handles more requests)
        TCP_CHECK {
            connect_timeout 3    #time out after 3 seconds with no response
            nb_get_retry 3       #number of retries
            delay_before_retry 3 #delay between retries
            connect_port 80      #port to check
        }
    }
    real_server 192.168.146.140 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
Restart keepalived
systemctl restart keepalived
Check the status
systemctl status keepalived
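If keepalived started cleanly, the VIP 192.168.146.200 should now sit on eth0 of the master and the LVS table should list both real servers; a quick check:
#the VIP should appear as a secondary address on eth0
ip addr show eth0 | grep 192.168.146.200
#the virtual service and both real servers should be listed, with Route as the forwarding method (DR)
ipvsadm -ln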
LVS slave
Install ipvsadm and keepalived
yum -y install ipvsadm keepalived
Enable IP forwarding
Enable it permanently:
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
Reload so that the setting takes effect immediately:
sysctl -p
Check:
sysctl -a | grep "ip_forward"
net.ipv4.ip_forward = 1
net.ipv4.ip_forward_use_pmtu = 0
Use keepalived to manage LVS (slave)
vim /etc/keepalived/keepalived.conf
global_defs {
    router_id slave_211
}
vrrp_instance aiyou {
    state BACKUP             #this node is the backup
    interface eth0
    virtual_router_id 100    #must be identical on master and slave
    priority 90              #decides which server becomes master; lower than the master's 100, so this node stays BACKUP
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.146.200
    }
}
virtual_server 192.168.146.200 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    # persistence_timeout 50
    protocol TCP
    real_server 192.168.146.139 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.146.140 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
Restart keepalived
systemctl restart keepalived
Check the status
systemctl status keepalived
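On the slave, keepalived runs in BACKUP state, so as long as the master is alive the VIP should not be present here; the same check is expected to return nothing:
#no output expected while the master holds the VIP
ip addr show eth0 | grep 192.168.146.200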
nginx 1
The following script has to run on every Nginx node (the LVS-DR real servers):
vi /etc/init.d/realserver
#!/bin/bash
#
# Script to start LVS DR real server.
# description: LVS DR real server
#
. /etc/rc.d/init.d/functions
VIP=192.168.146.200          # change this to your own VIP (the one configured in keepalived)
host=`/bin/hostname`
case "$1" in
start)
    # Start LVS-DR real server on this machine.
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/ifconfig lo:0 $VIP broadcast $VIP netmask 255.255.255.255 up
    /sbin/route add -host $VIP dev lo:0
    ;;
stop)
    # Stop LVS-DR real server loopback device(s).
    /sbin/ifconfig lo:0 down
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    ;;
status)
    # Status of LVS-DR real server.
    islothere=`/sbin/ifconfig lo:0 | grep $VIP`
    isrothere=`netstat -rn | grep "lo:0" | grep $VIP`
    if [ ! "$islothere" -o ! "$isrothere" ]; then
        # Either the route or the lo:0 device was not found.
        echo "LVS-DR real server Stopped."
    else
        echo "LVS-DR real server Running."
    fi
    ;;
*)
    # Invalid entry.
    echo "$0: Usage: $0 {start|status|stop}"
    exit 1
    ;;
esac
Make the script executable and start it:
chmod 755 /etc/init.d/realserver
service realserver start
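A quick way to confirm the script did its job on this real server (the expected values follow directly from the script above):
#the VIP should be bound to lo:0 with a /32 netmask
ifconfig lo:0
#arp_ignore should be 1 and arp_announce should be 2, so the real server does not answer ARP for the VIP
cat /proc/sys/net/ipv4/conf/all/arp_ignore /proc/sys/net/ipv4/conf/all/arp_announce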
nginx 2 (every Nginx node uses exactly the same steps and content)
Create the same /etc/init.d/realserver script on this node and run the same chmod and service realserver start commands as on nginx 1.
Tomcat runs multiple instances on a single machine (rather than multiple virtual hosts); its deployment is omitted here.
Test
Verify on the master:
ipvsadm -ln
Verify on the slave:
ipvsadm -ln
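Finally, a simple failover test, sketched under the assumption that a client outside the cluster can reach the VIP 192.168.146.200:
#from a client: the request is answered through LVS -> Nginx -> Tomcat
curl http://192.168.146.200/
#on the master: simulate a failure
systemctl stop keepalived
#on the slave: within a few seconds the VIP should move over
ip addr show eth0 | grep 192.168.146.200
#from the client again: the service should still answer, now through the slave
curl http://192.168.146.200/
#on the master: recover; with the higher priority it takes the VIP back
systemctl start keepalived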