k8s多节点

一: master 02 部署
#Environment initialization for the new master02 node
#Stop the firewall now and keep it disabled across reboots
systemctl disable  firewalld --now
systemctl status firewalld

#Disable SELinux immediately and persistently
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config 

#Turn off swap (kubelet refuses to run with swap on) and comment
#out the swap entry in fstab so it stays off after reboot
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab 

hostnamectl  set-hostname master02

#Static name resolution for every cluster node
# NOTE(review): later commands address master02 as 192.168.23.10, not
# 192.168.23.106 — confirm which address is correct and make them agree.
cat >> /etc/hosts <<EOF
192.168.23.103 master01
192.168.23.104 node01
192.168.23.105 node02
192.168.23.106 master02
EOF

#Push the updated hosts file to master01 as well
scp /etc/hosts 192.168.23.103:/etc/hosts

#Copy the certificates and the master component config files
#from master01 over to the new master02 node
# NOTE(review): destination 192.168.23.10 does not match the /etc/hosts
# entry for master02 (192.168.23.106) — verify the target address.
[root@master01 ~]# scp -r /opt/etcd/ 192.168.23.10:/opt/
[root@master01 ~]# scp -r /opt/kubernetes/ 192.168.23.10:/opt/
[root@master01 system]# cd /usr/lib/systemd/system
[root@master01 system]# scp {kube-apiserver,kube-controller-manager,kube-scheduler}.service 192.168.23.10:`pwd`

#On master02, change the kube-apiserver listen/advertise addresses
#to master02's own IP
[root@master02 ~]# vim /opt/kubernetes/cfg/kube-apiserver
#Change the following entries
# NOTE(review): 192.168.23.10 disagrees with the /etc/hosts entry above
# (192.168.23.106 master02) — confirm master02's real address.
.....
--bind-address=192.168.23.10 \
.....
--advertise-address=192.168.23.10 \
....

k8s多节点(接单节点继续)_负载均衡


#Enable and start all control-plane services on master02
[root@master02 ~]# systemctl  enable --now kube-apiserver.service kube-controller-manager.service kube-scheduler.service 

#Put the k8s binaries on PATH, then confirm the node list is visible
[root@master02 ~]# ln -s /opt/kubernetes/bin/* /usr/local/bin/
[root@master02 ~]# kubectl get nodes
NAME             STATUS   ROLES    AGE   VERSION
192.168.23.104   Ready    <none>   22h   v1.12.3
192.168.23.105   Ready    <none>   21h   v1.12.3
[root@master02 ~]# kubectl get nodes -o wide

#此时,在master02节点上查到的node节点状态,仅是从etcd查询到的信息。而此时node节点实际并未与master02节点建立通信连接,因此需要使用一个VIP把node节点与master节点关联起来

二: 部署两台负载均衡器
#Disable firewalld and SELinux on both load balancers
systemctl  disable --now firewalld
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config

#Configure the nginx yum repository on both load balancers, then install
#nginx. The quoted 'EOF' delimiter keeps $basearch literal so yum — not
#the shell — expands it. (Fixed repo name typo: "reoo" -> "repo".)
cat > /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
EOF
yum -y install nginx

#Edit the nginx config to add a layer-4 (stream) reverse proxy
#in front of both kube-apiservers
vim /etc/nginx/nginx.conf
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main; 
    
    # Backend pool: the two kube-apiserver instances
    # NOTE(review): 192.168.23.10 vs the /etc/hosts entry 192.168.23.106
    # for master02 — confirm the address before relying on failover.
    upstream k8s-apiserver {
        server 192.168.23.10:6443;
        server 192.168.23.103:6443;
   }    
    # Listen on 6443 and pass TCP straight through to the pool
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
   }    
   
}  

http {
......

k8s多节点(接单节点继续)_ide_02


#Validate the config, then start nginx and confirm it is listening
nginx -t 

systemctl  enable --now nginx
systemctl  status nginx

netstat -natp | grep nginx

k8s多节点(接单节点继续)_nginx_03


#Install and configure keepalived; the VIP is 192.168.23.18
yum -y install keepalived.x86_64 
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER    #on the backup node use NGINX_BACKUP
}
#Health-check script that keepalived runs periodically
vrrp_script check_nginx {
     script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER    #role; on the backup node use BACKUP
    interface ens33
    virtual_router_id 51
    priority 100     #on the backup node use a lower value, e.g. 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.23.18/24   #the VIP
    }
    track_script {
        check_nginx
  }
}


systemctl enable --now keepalived 
#On the active load balancer, `ip a` should now show the VIP
ip a | grep 192.168.23.18


#nginx health-check script used by keepalived's vrrp_script: when nginx
#is no longer running, stop keepalived so the VIP fails over to the
#backup load balancer.
vim /etc/nginx/check_nginx.sh
#!/bin/bash
# pgrep counts processes named exactly "nginx" directly, replacing the
# fragile `ps -ef | grep nginx | egrep -cv "grep|$$"` pipeline (which
# can miscount because of the grep processes themselves).
count=$(pgrep -c -x nginx)

if [ "$count" -eq 0 ]; then
     systemctl stop keepalived
fi

chmod  +x /etc/nginx/check_nginx.sh

k8s多节点(接单节点继续)_负载均衡_04

k8s多节点(接单节点继续)_nginx_05


#On every node, point bootstrap.kubeconfig, kubelet.kubeconfig and
#kube-proxy.kubeconfig at the VIP instead of a single master
cd /opt/kubernetes/cfg/

vim bootstrap.kubeconfig 
server: https://192.168.23.18:6443

vim kubelet.kubeconfig 
server: https://192.168.23.18:6443

vim kube-proxy.kubeconfig 
server: https://192.168.23.18:6443


#Restart kubelet and kube-proxy to pick up the new server address
systemctl  restart kubelet.service  kube-proxy.service

k8s多节点(接单节点继续)_sed_06

#On master01, grant the cluster-admin role to user system:anonymous
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous

#Inspect the pod: its IP is 172.17.4.2, scheduled on node 192.168.23.104
[root@master01 system]# kubectl  get pods -o wide
NAME                          READY   STATUS    RESTARTS   AGE    IP           NODE             NOMINATED NODE
nginx-test-7dc4f9dcc9-4dxnr   1/1     Running   0          105m   172.17.4.2   192.168.23.104   <none>


#From a node, curl the pod's IP to generate traffic
 curl 172.17.4.2

#Back on the master, check the pod's nginx access log
kubectl  logs nginx-test-7dc4f9dcc9-4dxnr

k8s多节点(接单节点继续)_sed_07

k8s多节点(接单节点继续)_vim_08

k8s多节点(接单节点继续)_vim_09


三: 部署dashboard
#软件包 链接https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dashboard
#在master01上,将软件包Dashboard.zip上传到/opt目录下
[root@master01 opt]# ls Dashboard.zip 
Dashboard.zip

#创建dashborad 工作目录
 mkdir /opt/k8s/dashborad

#解压Dashboard.zip,解压到dashborad的工作目录,一共有7个文件
unzip Dashboard.zip  -d /opt/k8s/dashborad/


k8s多节点(接单节点继续)_负载均衡_10


[root@master01 opt]# cd /opt/k8s/dashborad/
[root@master01 dashborad]#vim k8s-admin.yaml

# ServiceAccount for the dashboard plus a ClusterRoleBinding granting
# it the built-in cluster-admin role
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io


#Create every resource from the yaml manifests with `kubectl create -f`.
#Iterate with a shell glob instead of parsing `ls` output, and quote the
#variable so unusual filenames cannot be word-split.
for i in *yaml
do 
kubectl create -f "$i"
done


#Inspect the created resources in the kube-system namespace
kubectl get role,rolebinding -n kube-system
kubectl get secret -n kube-system
kubectl get configmap  -n kube-system
kubectl get serviceaccount,deployment  -n kube-system
kubectl get pods,svc  -n kube-system

k8s多节点(接单节点继续)_ide_11

k8s多节点(接单节点继续)_ide_12


#Access test: Firefox can open the dashboard directly, but Chrome will
#not connect without a TLS certificate, so configure one.
 cd /opt/k8s/dashborad/
 vim dashboard-controller.yaml 
 
 46           # PLATFORM-SPECIFIC ARGS HERE
 47           - --auto-generate-certificates
 #Add the following two lines to point at the private key and certificate
 48           - --tls-key-file=dashboard-key.pem
 49           - --tls-cert-file=dashboard.pem


#Make the cert-generation helper executable, then run it
#(fixed: the original chmod targeted "/etc/nginx/check_nginx.s" — the
#file listed just above and executed below is dashboard-cert.sh)
ls dashboard-cert.sh 
chmod  +x dashboard-cert.sh

./dashboard-cert.sh /opt/k8s/k8s-cert/
ls *.pem

k8s多节点(接单节点继续)_负载均衡_13

k8s多节点(接单节点继续)_vim_14


#Redeploy. If `kubectl apply` does not take effect, delete the resource
#first and then apply again.
kubectl  apply -f dashboard-controller.yaml

#The pod may be rescheduled to another node, so look up the node IP and
#service port again
kubectl  get pods,svc -n kube-system -o wide

k8s多节点(接单节点继续)_ide_15


#List secrets to find the token name (dashboard-admin-token-xxxxxx)
kubectl  get secrets -n kube-system

#Print the token used to log in to the dashboard
kubectl  describe  secrets dashboard-admin-token-66rnj -n kube-system

k8s多节点(接单节点继续)_ide_16


k8s多节点(接单节点继续)_负载均衡_17

k8s多节点(接单节点继续)_sed_18

k8s多节点(接单节点继续)_ide_19

k8s多节点(接单节点继续)_vim_20

k8s多节点(接单节点继续)_vim_21

k8s多节点(接单节点继续)_sed_22

k8s多节点(接单节点继续)_nginx_23

k8s多节点(接单节点继续)_sed_24

k8s多节点(接单节点继续)_nginx_25

k8s多节点(接单节点继续)_vim_26

k8s多节点(接单节点继续)_sed_27

k8s多节点(接单节点继续)_nginx_28

k8s多节点(接单节点继续)_nginx_29