- Lab: keepalived for master/master high availability of nginx reverse proxies
- Preparing the RS servers
- keepalived and nginx proxy configuration
- HA Cluster configuration prerequisites
- nginx reverse proxy configuration
- High availability configuration on ka1 and ka2
- Starting the services and testing
Overview of this section:
keepalived is built on LVS: LVS support is built in, providing the functionality of the ipvsadm command and making the LVS director itself highly available. For other applications, keepalived instead offers a script interface. For example, nginx acting as a reverse proxy has no high availability of its own; keepalived's script mechanism can be used to provide it.
keepalived makes LVS highly available by means of the VRRP protocol (borrowing how routers fail over). A single LVS director doing the scheduling is a single point of failure (SPOF), so a second director is added as a master/backup pair: when the master fails, the backup takes over. Roles are decided by priority, with the higher-priority node automatically becoming master. VRRP runs in either preemptive or non-preemptive mode, which decides who is master after the failed node is repaired. In the default preemptive mode, the repaired node, having the higher priority, takes the master role back.
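As a sketch of non-preemptive mode (not used in this lab): keepalived's nopreempt option stops a recovered, higher-priority node from taking the MASTER role back, and it requires state BACKUP on every node of the instance:
vrrp_instance VI_1 {
    state BACKUP       # nopreempt requires initial state BACKUP, even on the preferred node
    nopreempt          # a repaired node keeps the BACKUP role instead of preempting
    priority 100
    ...
}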
keepalived also exposes a script-call interface: it periodically runs a script and uses the result to influence cluster state, and this is how services such as nginx and haproxy are supported. The built-in LVS functionality was written by keepalived's own developers; for other services, such as nginx as a reverse proxy, there is no built-in support, but keepalived can invoke user-defined scripts. We write the script ourselves and have keepalived call it to achieve high availability; the full configuration used in this lab appears below.
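For example, the health check used later in this lab is built on signal 0, which tests whether a process exists without actually signaling it; this can be tried interactively:
[root@ka1 ~ ]#killall -0 nginx; echo $?    # prints 0 if an nginx process exists, 1 otherwise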
Lab: keepalived for master/master high availability of nginx reverse proxies
Preparing the RS servers
[root@cos27 ~ ]#hostnamectl set-hostname rs1
[root@cos37 ~ ]#hostnamectl set-hostname rs2
[root@cos27 ~ ]#exit
[root@cos37 ~ ]#exit
[root@rs1 ~ ]#yum install httpd -y
[root@rs2 ~ ]#yum install httpd -y
[root@rs1 ~ ]#systemctl start httpd
[root@rs2 ~ ]#systemctl start httpd
[root@rs1 ~ ]#echo rs1:192.168.31.27 > /var/www/html/index.html
[root@rs2 ~ ]#echo rs2:192.168.31.37 > /var/www/html/index.html
[root@rs1 ~ ]#curl 192.168.31.27
rs1:192.168.31.27
[root@rs2 ~ ]#curl 192.168.31.37
rs2:192.168.31.37
[root@rs1 ~ ]#ip a a 192.168.31.47/24 dev eth0
[root@rs2 ~ ]#ip a a 192.168.31.57/24 dev eth0
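Note that addresses added with `ip a a` (short for `ip address add`) do not persist across reboots, which is acceptable for this lab. To verify that both addresses are active:
[root@rs1 ~ ]#ip a show dev eth0    # should list both 192.168.31.27 and 192.168.31.47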
[root@rs1 ~ ]#vim /etc/httpd/conf.d/vhosts.conf
<VirtualHost 192.168.31.47:80>
    DocumentRoot /data/website2/
    <Directory /data/website2>
        Require all granted
    </Directory>
</VirtualHost>
[root@rs1 ~ ]#mkdir /data/website2 -p
[root@rs1 ~ ]#echo app.dhy.com:rs1 > /data/website2/index.html
[root@rs1 ~ ]#systemctl restart httpd
[root@rs1 ~ ]#curl 192.168.31.47
app.dhy.com:rs1
[root@rs2 ~ ]#vim /etc/httpd/conf.d/vhosts.conf
<VirtualHost 192.168.31.57:80>
    DocumentRoot /data/website2/
    <Directory /data/website2>
        Require all granted
    </Directory>
</VirtualHost>
[root@rs2 ~ ]#mkdir /data/website2 -p
[root@rs2 ~ ]#echo app.dhy.com:rs2 > /data/website2/index.html
[root@rs2 ~ ]#systemctl restart httpd
[root@rs2 ~ ]#curl 192.168.31.57
app.dhy.com:rs2
keepalived and nginx proxy configuration
HA Cluster configuration prerequisites:
(1) Time must be synchronized across all nodes (see the sketch after this list)
ntp, chrony
(2) Make sure iptables and SELinux do not get in the way (see the sketch after this list)
(3) Nodes can reach one another by hostname (not strictly required by KA)
Using the /etc/hosts file is recommended
(4) root on each node can reach the others over ssh with key-based authentication (not strictly required by KA)
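A minimal sketch for items (1) and (2), assuming chronyd for time sync and firewalld as the firewall service (adjust to your environment); repeat on ka2 and the RS nodes:
[root@ka1 ~ ]#yum install chrony -y
[root@ka1 ~ ]#systemctl enable --now chronyd
[root@ka1 ~ ]#chronyc sources        # confirm at least one reachable time source
[root@ka1 ~ ]#systemctl stop firewalld
[root@ka1 ~ ]#setenforce 0           # SELinux permissive for the duration of the lab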
[root@ka1 ~ ]#yum install keepalived
[root@ka2 ~ ]#yum install keepalived
[root@ka1 ~ ]#yum install nginx -y
[root@ka2 ~ ]#yum install nginx -y
[root@cos7 ~ ]#hostnamectl set-hostname ka1
[root@cos17 ~ ]#hostnamectl set-hostname ka2
[root@cos7 ~ ]#exit
[root@cos17 ~ ]#exit
[root@ka1 ~ ]#cd /etc/keepalived/
[root@ka1 ~ ]#ssh-keygen
[root@ka1 ~ ]#ssh-copy-id 192.168.31.17
[root@ka2 ~ ]#ssh-keygen
[root@ka2 ~ ]#ssh-copy-id 192.168.31.7
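To confirm that key-based login works in both directions (an optional check):
[root@ka1 ~ ]#ssh 192.168.31.17 hostname    # should print ka2 with no password prompt
[root@ka2 ~ ]#ssh 192.168.31.7 hostname     # should print ka1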
[root@ka1 keepalived ]#vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ka1
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.31.17 ka2
[root@ka2 etc ]#vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ka2
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.31.7 ka1
nginx reverse proxy configuration
[root@ka1 ~ ]#vim /etc/nginx/nginx.conf
#add or modify the following in the http block
http {
    upstream websrvs {            # backend pool for www.dhy.com (reverse proxy)
        server 192.168.31.27:80;
        server 192.168.31.37:80;
    }
    upstream websrvs2 {           # backend pool for app.dhy.com
        server 192.168.31.47:80;
        server 192.168.31.57:80;
    }
    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name www.dhy.com;
        location / {
            proxy_pass http://websrvs;
        }
    }
    server {
        listen 80;
        server_name app.dhy.com;
        location / {
            proxy_pass http://websrvs2;
        }
    }
}
[root@ka2 ~ ]#vim /etc/nginx/nginx.conf
Same configuration as on ka1.
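Before bringing keepalived into the picture, it is worth validating the nginx configuration syntax on both nodes:
[root@ka1 ~ ]#nginx -t
[root@ka2 ~ ]#nginx -t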
High availability configuration on ka1 and ka2
[root@ka1 ~ ]#vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from ka@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id ka1                      # set to ka2 on ka2
    vrrp_mcast_group4 230.10.10.10     # dedicated multicast group for VRRP advertisements
}
vrrp_script chk_nginx {
    script "/usr/bin/killall -0 nginx &> /dev/null"    # exit 0 while an nginx process exists
    interval 1
    weight -30
    # fall 2
    # rise 1
}
vrrp_instance VI_1 {
    state MASTER                       # BACKUP on ka2
    interface eth1
    virtual_router_id 50
    priority 100                       # 80 on ka2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.18.0.100/16
    }
    track_script {
        chk_nginx
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
vrrp_instance VI_2 {
    state BACKUP                       # MASTER on ka2
    interface eth1
    virtual_router_id 60
    priority 80                        # 100 on ka2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 654321
    }
    virtual_ipaddress {
        172.18.0.200/16
    }
    track_script {
        chk_nginx
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
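How the failover arithmetic works: while chk_nginx fails, the node's priority is reduced by 30, so in VI_1 ka1 drops from 100 to 70, below ka2's 80, and ka2 takes over the 172.18.0.100 VIP; VI_2 is the mirror image, protecting 172.18.0.200. Once the check passes again the original priority is restored, and in the default preemptive mode the VIP moves back.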
[root@ka1 ~ ]#vim /etc/keepalived/notify.sh
#!/bin/bash
#
contact='root@localhost'

notify() {
    mailsubject="$(hostname) to be $1, vip floating"
    mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    echo "$mailbody" | mail -s "$mailsubject" $contact
}

case $1 in
master)
    notify master
    systemctl start nginx      # make sure nginx is running on the new master
    ;;
backup)
    notify backup
    systemctl restart nginx    # bring nginx back up on a node demoted by a failed check
    ;;
fault)
    notify fault
    ;;
*)
    echo "Usage: $(basename $0) {master|backup|fault}"
    exit 1
    ;;
esac
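The transcript does not show it, but notify.sh must be executable, and it must exist on ka2 as well; assuming ka2 is reachable at 192.168.31.17:
[root@ka1 ~ ]#chmod +x /etc/keepalived/notify.sh
[root@ka1 ~ ]#scp -p /etc/keepalived/notify.sh 192.168.31.17:/etc/keepalived/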
Starting the services and testing
[root@ka1 ~ ]#systemctl start keepalived.service nginx.service
[root@ka2 ~ ]#systemctl start keepalived.service nginx.service
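At this point each node should hold its own VIP (172.18.0.100 on ka1, 172.18.0.200 on ka2); a quick check:
[root@ka1 ~ ]#ip a show dev eth1 | grep 172.18
[root@ka2 ~ ]#ip a show dev eth1 | grep 172.18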
[root@client ~ ]#vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 client
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.18.0.100 www.dhy.com
172.18.0.200 app.dhy.com
[root@client ~ ]#curl www.dhy.com
rs1:192.168.31.27
[root@client ~ ]#curl www.dhy.com
rs2:192.168.31.37
[root@client ~ ]#for i in {1..100};do curl www.dhy.com;curl app.dhy.com;sleep 2;done
rs2:192.168.31.37
app.dhy.com:rs1
rs1:192.168.31.27
app.dhy.com:rs2
rs2:192.168.31.37
app.dhy.com:rs2
curl: (7) couldn't connect to host    # at this moment nginx was killed on ka1: [root@ka1 ~ ]#killall nginx
app.dhy.com:rs1
rs1:192.168.31.27
app.dhy.com:rs2
rs2:192.168.31.37
app.dhy.com:rs1
rs1:192.168.31.27
app.dhy.com:rs1
rs2:192.168.31.37
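The single failed request corresponds to the window between killing nginx on ka1 and the VIPs failing over to ka2; notify.sh then restarts nginx on the demoted node and, with preemption, the VIPs return to their original owners. To watch the VIPs move during such a test, one can run on either node:
[root@ka1 ~ ]#watch -n1 'ip a show dev eth1 | grep 172.18'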