I. Environment Preparation
| Hostname | Host IP | Virtual IP (VIP) | Added virtual NIC |
| --- | --- | --- | --- |
| lvs | 192.168.19.133 | 192.168.19.125 | ens33:1 |
| nginx01 | 192.168.19.136 | 192.168.19.125 | lo:1 |
| nginx02 | 192.168.19.134 | 192.168.19.125 | lo:1 |
II. Environment Configuration
1. Set the hostnames
# On the LVS host, set the hostname to lvs
hostnamectl set-hostname lvs
bash
# On Nginx01, set the hostname to nginx01
hostnamectl set-hostname nginx01
bash
# On Nginx02, set the hostname to nginx02
hostnamectl set-hostname nginx02
bash
2. Stop and disable the NetworkManager service (run on all nodes)
# Check the NetworkManager service status
systemctl status NetworkManager
# Stop the NetworkManager service
systemctl stop NetworkManager
# Disable the NetworkManager service
systemctl disable NetworkManager
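With NetworkManager out of the picture, the legacy network service (initscripts) is what applies the ifcfg-* files edited below, so it is worth confirming it is enabled. A minimal check, assuming a CentOS 7 style system where that service is installed:
# The classic network service must be enabled for the ifcfg-* files to be applied at boot
systemctl enable network
systemctl status network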
III. Configure the LVS node (run on lvs)
1. Inspect the ens33 NIC
[root@lvs network-scripts]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.19.133 netmask 255.255.255.0 broadcast 192.168.19.255
inet6 fe80::a88d:1e8a:a1be:a113 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:0a:49:e3 txqueuelen 1000 (Ethernet)
RX packets 610 bytes 55060 (53.7 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 495 bytes 47774 (46.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
# The lvs VM has only one physical NIC, ens33
[root@lvs ~]# cd /etc/sysconfig/network-scripts/
[root@lvs network-scripts]# ls
ifcfg-ens33 ifdown-eth ifdown-post ifdown-Team ifup-aliases ifup-ipv6 ifup-post ifup-Team init.ipv6-global
ifcfg-lo ifdown-ippp ifdown-ppp ifdown-TeamPort ifup-bnep ifup-isdn ifup-ppp ifup-TeamPort network-functions
ifdown ifdown-ipv6 ifdown-routes ifdown-tunnel ifup-eth ifup-plip ifup-routes ifup-tunnel network-functions-ipv6
ifdown-bnep ifdown-isdn ifdown-sit ifup ifup-ippp ifup-plusb ifup-sit ifup-wireless
# Show the ens33 configuration
[root@lvs network-scripts]# cat ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="dhcp"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="73bd64fd-36f3-421e-bd63-94600586369a"
DEVICE="ens33"
ONBOOT="yes"
2. Modify the ens33 configuration (pin a static IP address so it no longer changes)
[root@lvs network-scripts]# vi ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
IPADDR=192.168.19.133
NETMASK=255.255.255.0
GATEWAY=192.168.19.2
DNS1=192.168.19.2
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="73bd64fd-36f3-421e-bd63-94600586369a"
DEVICE="ens33"
ONBOOT="yes"
3. Add the ens33:1 sub-interface (the external interface that carries the virtual IP, i.e. the VIP)
[root@lvs network-scripts]# vi ifcfg-ens33:1
BOOTPROTO="static"
IPADDR=192.168.19.125
NETMASK=255.255.255.0
DEVICE="ens33:1"
ONBOOT="yes"
4. Restart the network
# Check the network service status
[root@lvs network-scripts]# systemctl status network
# Restart the network service
[root@lvs network-scripts]# systemctl restart network
# Check the status again after the restart
[root@lvs network-scripts]# systemctl status network
5. Verify that the new ens33:1 interface is up
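A quick way to confirm the alias came up after the restart:
# The VIP 192.168.19.125 should now show on ens33:1
ifconfig ens33:1
# or look for the labeled secondary address on ens33
ip addr show ens33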
6. Install ipvsadm
yum install ipvsadm -y
[root@lvs ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
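The empty table above simply shows that ipvsadm can talk to the kernel's IPVS code. If you want to double-check, the ip_vs module should be loaded once ipvsadm has been run at least once:
# ip_vs should appear in the module list
lsmod | grep ip_vs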
IV. Configure the Nginx nodes (run on nginx01 and nginx02)
1. Inspect the ens33 NIC (run on nginx01 and nginx02)
[root@nginx01 ~]# cd /etc/sysconfig/network-scripts
[root@nginx01 network-scripts]# ls
ifcfg-ens33 ifdown-eth ifdown-post ifdown-Team ifup-aliases ifup-ipv6 ifup-post ifup-Team init.ipv6-global
ifcfg-lo ifdown-ippp ifdown-ppp ifdown-TeamPort ifup-bnep ifup-isdn ifup-ppp ifup-TeamPort network-functions
ifdown ifdown-ipv6 ifdown-routes ifdown-tunnel ifup-eth ifup-plip ifup-routes ifup-tunnel network-functions-ipv6
ifdown-bnep ifdown-isdn ifdown-sit ifup ifup-ippp ifup-plusb ifup-sit ifup-wireless
# Show the ens33 configuration
[root@nginx01 network-scripts]# cat ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="dhcp"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="73bd64fd-36f3-421e-bd63-94600586369a"
DEVICE="ens33"
ONBOOT="yes"
2. Modify the ens33 configuration (on nginx01 and nginx02, pin a static IP so it no longer changes; nginx01 is shown below, and when editing nginx02 be sure to set IPADDR=192.168.19.134)
[root@nginx01 network-scripts]# pwd
/etc/sysconfig/network-scripts
[root@nginx01 network-scripts]# vi ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static" # 修改此处为静态地址,不在用dhcp模式随机分配IP地址
IPADDR=192.168.19.136 # 设定,指定IP地址
NETMASK=255.255.255.0
GATEWAY=192.168.19.2 # 设置网关
DNS1=192.168.19.2
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="73bd64fd-36f3-421e-bd63-94600586369a"
DEVICE="ens33"
ONBOOT="yes"
3. Add and configure the lo:1 loopback alias (on nginx01 and nginx02; this binds the VIP internally, so it is only used for returning data to clients and cannot be used from outside to reach the real server directly)
[root@nginx01 ~]# cd /etc/sysconfig/network-scripts/
[root@nginx01 network-scripts]# ls
ifcfg-ens33 ifdown-eth ifdown-post ifdown-Team ifup-aliases ifup-ipv6 ifup-post ifup-Team init.ipv6-global
ifcfg-lo ifdown-ippp ifdown-ppp ifdown-TeamPort ifup-bnep ifup-isdn ifup-ppp ifup-TeamPort network-functions
ifdown ifdown-ipv6 ifdown-routes ifdown-tunnel ifup-eth ifup-plip ifup-routes ifup-tunnel network-functions-ipv6
ifdown-bnep ifdown-isdn ifdown-sit ifup ifup-ippp ifup-plusb ifup-sit ifup-wireless
# Copy the loopback configuration file to create the lo:1 alias
[root@nginx01 network-scripts]# cp ifcfg-lo ifcfg-lo:1
# Edit the lo:1 configuration
[root@nginx01 network-scripts]# vim ifcfg-lo:1
DEVICE=lo:1
IPADDR=192.168.19.125
NETMASK=255.255.255.255
NETWORK=127.0.0.0
# If you're having problems with gated making 127.0.0.0/8 a martian,
# you can change this to something else (255.255.255.255, for example)
BROADCAST=127.255.255.255
ONBOOT=yes
NAME=loopback
4. Restart the network (run on nginx01 and nginx02)
# Check the network service status
[root@nginx01 network-scripts]# systemctl status network
# Restart the network service
[root@nginx01 network-scripts]# systemctl restart network
# Check the status again after the restart
[root@nginx01 network-scripts]# systemctl status network
5. Verify that the new lo:1 alias is up (run on nginx01 and nginx02; nginx01 is shown below)
[root@nginx01 ~]# ip a
V. ARP response level and announcement behavior (run on nginx01 and nginx02)
1. arp_ignore: ARP response level (how incoming ARP requests are handled)
0: respond as long as the requested IP is configured on any interface of this host
1: respond only if the requested IP is configured on the interface the request arrived on
2. arp_announce: ARP announcement behavior (how the host advertises its addresses)
0: announce any local address on any interface
1: try to avoid announcing addresses that do not match the sending interface's network
2: only announce addresses configured on the sending interface
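Before changing anything in the next step, you can read the kernel's current values (the defaults are normally 0 for both parameters):
# Show the current ARP behavior
sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
sysctl net.ipv4.conf.lo.arp_ignore net.ipv4.conf.lo.arp_announce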
3. Modify the sysctl configuration (run on both nginx01 and nginx02)
# Append the settings to /etc/sysctl.conf (the heredoc below does this in one step; editing the file with vi works just as well)
cat >> /etc/sysctl.conf << EOF
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.default.arp_ignore = 1
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
EOF
# Reload the configuration so it takes effect
[root@nginx01 network-scripts]# sysctl -p
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.default.arp_ignore = 1
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
4. Add a host route for the VIP (run on both nginx01 and nginx02; nginx01 is shown below)
[root@nginx01 ~]# route add -host 192.168.19.125 dev lo:1
# Route the VIP to the loopback alias so the real server treats 192.168.19.125 as a local address and replies from it
[root@nginx01 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.19.2 0.0.0.0 UG 100 0 0 ens33
192.168.19.0 0.0.0.0 255.255.255.0 U 100 0 0 ens33
192.168.19.125 0.0.0.0 255.255.255.255 UH 0 0 0 lo
# Make the route persistent across reboots
[root@nginx01 ~]# echo "route add -host 192.168.19.125 dev lo:1" >> /etc/rc.local
[root@nginx01 ~]# cat /etc/rc.local
#!/bin/bash
touch /var/lock/subsys/local
route add -host 192.168.19.125 dev lo:1
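One caveat, assuming a stock CentOS 7 install: /etc/rc.local (a symlink to /etc/rc.d/rc.local) is not executable by default, so the line above will not run at boot until the script is made executable:
# Allow rc.local to run at boot
chmod +x /etc/rc.d/rc.local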
VI. Configure the cluster rules with ipvsadm (run only on lvs)
6.1 Create the virtual service that clients will access (creating the virtual service also brings the cluster it manages into existence)
[root@lvs ~]# ipvsadm -A -t 192.168.19.125:80 -s rr -p 5
- -A: add a virtual service (the entry clients connect to)
- -t: TCP service, given as VIP:port
- -s: scheduling algorithm; rr means round robin
- -p: persistence timeout, in seconds
- -g: gatewaying, i.e. direct routing (the default forwarding method, used when adding real servers with -a below)
[root@lvs ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.19.125:80 rr persistent 5
6.2 Add the two real servers (nginx01, nginx02) to the cluster managed by the virtual service
# Add the two real servers (nginx01, nginx02) to the cluster
# VIP:192.168.19.125
# nginx01:192.168.19.136
# nginx02:192.168.19.134
[root@lvs ~]# ipvsadm -a -t 192.168.19.125:80 -r 192.168.19.136:80 -g
[root@lvs ~]# ipvsadm -a -t 192.168.19.125:80 -r 192.168.19.134:80 -g
# Print the current forwarding rules (-S only prints them; see the note after the output for keeping them across a reboot)
[root@lvs ~]# ipvsadm -S
-A -t lvs:http -s rr -p 5
-a -t lvs:http -r 192.168.19.134:http -g -w 1
-a -t lvs:http -r 192.168.19.136:http -g -w 1
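To actually keep the rules across a reboot, one common approach (assuming the stock CentOS 7 ipvsadm package, whose ipvsadm.service restores /etc/sysconfig/ipvsadm at boot) is:
# Save the rules to the file the ipvsadm service loads at startup
ipvsadm -S -n > /etc/sysconfig/ipvsadm
systemctl enable ipvsadm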
# List the cluster rules
[root@lvs ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.19.125:80 rr persistent 5
-> 192.168.19.134:80 Route 1 0 0
-> 192.168.19.136:80 Route 1 0 0
# Show cluster statistics
[root@lvs ~]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes
-> RemoteAddress:Port
TCP 192.168.19.125:80 0 0 0 0 0
-> 192.168.19.134:80 0 0 0 0 0
-> 192.168.19.136:80 0 0 0 0 0
VII. Install nginx
1. Install nginx from the repositories (run on nginx01 and nginx02)
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0
# Install the EPEL repository first; nginx cannot be installed without it
yum -y install epel-release
# Install nginx
yum -y install nginx
2. Replace the default index page on nginx01 (run on nginx01)
[root@nginx01 ~]# echo "<h1>nginx01:192.168.19.136</h1>" > /usr/share/nginx/html/index.html
3. Replace the default index page on nginx02 (run on nginx02)
[root@nginx02 ~]# echo "<h1>nginx02:192.168.19.134</h1>" > /usr/share/nginx/html/index.html
4. Start nginx (run on nginx01 and nginx02)
systemctl start nginx
systemctl enable nginx
5. Test nginx
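A quick check that each backend serves its own page, run from the lvs host (or either nginx node) against the real IPs:
# Should print the nginx01 and nginx02 pages written above
curl -s http://192.168.19.136/
curl -s http://192.168.19.134/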
VIII. LVS-DR round-robin access test
1. With nginx01 and nginx02 running, open the VIP 192.168.19.125 in a browser and observe which backend answers
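The same test from the command line, run from a separate client on the 192.168.19.0/24 network (not from the director or the real servers, since they hold the VIP themselves). With the 5-second persistence timeout below, requests inside one window stick to the same backend, and the backend should change once the window expires:
# Wait longer than the persistence timeout between requests to see the backends alternate
for i in $(seq 1 6); do curl -s http://192.168.19.125/; sleep 6; done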
2. Change the persistence timeout to 5 s (the system default is 300 s)
The persistence timeout is intended for applications that need to keep state, such as some HTTP applications, FTP, or SSL. Within the configured window, repeated requests from the same client IP are assigned by IPVS to the same real server.
# Change the persistence timeout
[root@lvs network-scripts]# ipvsadm -E -t 192.168.19.125:80 -s rr -p 5
# List the cluster rules
[root@lvs network-scripts]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.19.125:80 rr persistent 5
-> 192.168.19.134:80 Route 1 0 0
-> 192.168.19.136:80 Route 1 0 0
# Show cluster statistics
[root@lvs network-scripts]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes
-> RemoteAddress:Port
TCP 192.168.19.125:80 120 887 0 147864 0
-> 192.168.19.134:80 56 435 0 78554 0
-> 192.168.19.136:80 64 452 0 69310 0
3. Set the expiry times for tcp, tcpfin and udp connections
# Set the connection timeouts
[root@lvs network-scripts]# ipvsadm --set 1 1 1
[root@lvs network-scripts]# ipvsadm -Lnc
IPVS connection entries
pro expire state source virtual destination
TCP 00:52 NONE 192.168.19.1:0 192.168.19.125:80 192.168.19.134:80
4. Open the VIP 192.168.19.125 in a browser and refresh the page every 5 seconds or so; the responses alternate between nginx01 and nginx02, which proves the setup works. Congratulations!
5. Other useful commands
# Show persistent connections
ipvsadm -Ln --persistent-conn
# Show connection entries with their expiry times, source IPs and destination IPs
ipvsadm -Lnc
# Set the tcp/tcpfin/udp connection timeouts (not to be confused with the persistence timeout set with -p)
ipvsadm --set 1 1 1
# Show the configured timeouts
ipvsadm -Ln --timeout
--persistent (-p): persistence timeout, for applications that need to keep state, such as some HTTP applications, FTP, or SSL. Within this window, requests from the same client IP (CIP) are sent by IPVS to the same real server.
--set (tcp tcpfin udp): timeouts for individual connections. Taking TCP as an example, an established connection carries many packets; if two consecutive packets arrive within the timeout they are forwarded to the same real server, but if the gap exceeds the timeout the scheduler may pick a different real server according to the scheduling algorithm, and the connection can break. IPVS uses the client IP (CIP) to decide whether packets belong to the same connection.