Building a K8s Cluster on Virtual Machines: The Complete Process
Environment Preparation
Prepare four machines.
1. Set the hostname
$ hostnamectl set-hostname <hostname>
Hostname | IP | Role
--- | --- | ---
k8s-master-1 | 192.168.137.11 | master
k8s-master-2 | 192.168.137.12 | master
k8s-worker-1 | 192.168.137.13 | worker
k8s-worker-2 | 192.168.137.14 | worker
2. Configure /etc/hosts
This must be configured on all nodes.
$ cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.137.11 k8s-master-1
192.168.137.12 k8s-master-2
192.168.137.13 k8s-worker-1
192.168.137.14 k8s-worker-2
3. Install dependency packages
# Update packages
$ yum update
# Install dependencies
$ yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
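The list above includes ipvsadm and ipset; if you intend to run kube-proxy in IPVS mode, the corresponding kernel modules also need to be loaded. A minimal, optional sketch (module names assume a CentOS 7 kernel; skip it if you stay with the default iptables mode):
$ cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
$ chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
# Confirm the modules are loaded
$ lsmod | grep -e ip_vs -e nf_conntrack_ipv4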
4. Disable the firewall and swap, and reset iptables
# Disable the firewall
$ systemctl stop firewalld && systemctl disable firewalld
# Reset iptables
$ iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
# Turn off swap
$ swapoff -a
# Permanently disable swap
$ sed -i 's/.*swap.*/#&/' /etc/fstab
# Disable SELinux (setenforce only takes effect until the next reboot)
$ setenforce 0
# Stop dnsmasq (otherwise Docker containers may fail to resolve domain names; a freshly created VM may not have this service, in which case you can skip this step)
$ service dnsmasq stop && systemctl disable dnsmasq
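A quick sanity check after this step; all of these are standard commands, and since setenforce 0 does not survive a reboot you may also want to set SELINUX=permissive in /etc/selinux/config:
$ systemctl is-active firewalld        # should print "inactive"
$ free -m                              # the Swap line should show 0 after swapoff
$ getenforce                           # should print "Permissive"
# Make the SELinux change persistent across reboots
$ sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config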
5. Kernel parameter settings
# Create the configuration file
$ cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
EOF
# Apply the file
$ sysctl -p /etc/sysctl.d/kubernetes.conf
# If the command above fails (usually because the br_netfilter module is not loaded), run the following instead
$ cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
$ modprobe br_netfilter
$ lsmod | grep br_netfilter
$ sysctl --system
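modprobe only loads br_netfilter for the current boot. To have it loaded automatically after a reboot, a small optional sketch using systemd's modules-load.d mechanism (available on CentOS 7):
$ cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF
# Verify the bridge-related parameters took effect
$ sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward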
6. Time synchronization
$ yum install ntpdate -y
$ ntpdate time.windows.com
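ntpdate performs a one-off synchronization. To keep the clocks aligned over time you can run chronyd/ntpd as a service, or schedule ntpdate periodically; a minimal cron-based sketch (assumes crond is running and time.windows.com stays reachable):
$ (crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -
$ crontab -l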
Install Docker
Install Docker:
$ wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
$ yum -y install docker-ce-18.06.3.ce-3.el7
$ systemctl enable docker && systemctl start docker
$ docker version
Configure a Docker registry mirror (accelerator):
$ sudo mkdir -p /etc/docker
$ sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "insecure-registries": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
# The "insecure-registries" entry must be included, otherwise docker may hit an x509 certificate error when pulling images. (JSON does not allow comments, so keep the file itself free of them.)
$ sudo systemctl daemon-reload
$ sudo systemctl restart docker
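After the restart it is worth confirming that the daemon picked up the systemd cgroup driver (it has to match the --cgroup-driver value configured for the kubelet later) and the registry mirror:
$ docker info | grep -i "cgroup driver"
Cgroup Driver: systemd
$ docker info | grep -A1 "Registry Mirrors"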
Install the Required Tools
Tool overview
kubeadm: the command used to bootstrap the cluster
kubelet: the component that runs on every machine in the cluster and manages the lifecycle of pods and containers
kubectl: the cluster management CLI (optional; it only needs to be installed on the node(s) you manage the cluster from)
Configure the yum repository
$ cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install the tools
- List the available versions
$ yum list kubeadm --showduplicates | sort -r
- Install
$ yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# Edit /etc/sysconfig/kubelet and set the kubelet cgroup driver to systemd (the KUBELET_EXTRA_ARGS line below is file content, not a shell command)
$ vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
$ systemctl enable kubelet
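A quick check that the three components are installed at the expected version; note that the kubelet service will keep crash-looping until kubeadm init or kubeadm join runs, which is normal at this stage:
$ kubeadm version -o short
$ kubelet --version
$ kubectl version --client --short
$ systemctl status kubelet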
Build the Highly Available Cluster
Install keepalived
# Install on both master nodes
$ yum install -y keepalived
$ mkdir -p /etc/keepalived
# Configuration file on the Master1 node
$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.137.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER # primary node
interface ens33 # NIC to bind to
garp_master_delay 10
smtp_alert
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.137.9 # virtual IP
}
}
# Configuration file on the Master2 node
$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.137.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state BACKUP # backup node
interface ens33 # NIC to bind to
garp_master_delay 10
smtp_alert
virtual_router_id 51
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.137.9 # virtual IP
}
}
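With both configuration files in place, keepalived still has to be enabled and started on each master node before the virtual IP appears:
$ systemctl enable keepalived && systemctl start keepalived
$ systemctl status keepalived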
# Finally, verify with ip a; you can also reach the virtual IP from another machine, and shut down the master nodes one at a time to test failover
$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:ab:31:93 brd ff:ff:ff:ff:ff:ff
inet 192.168.137.11/24 brd 192.168.137.255 scope global noprefixroute dynamic ens33
valid_lft 1408sec preferred_lft 1408sec
inet 192.168.137.9/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::2560:46fc:50a5:1a0f/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:66:bf:3f:5f brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
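A simple failover test (the interface name ens33 and the VIP 192.168.137.9 follow the configuration above): stop keepalived on Master1, confirm the VIP moves to Master2, then restore it.
# On Master1
$ systemctl stop keepalived
# On Master2: the VIP should now be attached to ens33
$ ip a show ens33 | grep 192.168.137.9
# From any other machine on the same network
$ ping -c 3 192.168.137.9
# On Master1: restore the original state
$ systemctl start keepalived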
Deploy the Master Node
# We create the cluster using a configuration file
$ kubeadm init --config kubeadm-config.yaml
# The contents of kubeadm-config.yaml are as follows:
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
controlPlaneEndpoint: 192.168.137.9:6443
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  # Pod CIDR; this must match the CIDR configured for the CNI plugin (Calico) later on.
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.1.0.0/16
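Optionally, the control-plane images can be pulled ahead of time with the same configuration file; this speeds up kubeadm init and surfaces any registry problems early:
$ kubeadm config images pull --config kubeadm-config.yaml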
# After it completes successfully, the output includes the join commands for additional master nodes and for worker nodes, as shown below
# 1. Join command for an additional master node:
$ kubeadm join 192.168.137.9:6443 --token tg30fs.d2jl43tyszt5m5ir \
--discovery-token-ca-cert-hash sha256:62a3588b53e051cea36db43c55534a792e1abc1ac4d2e1772a6ef86829c42112 \
--control-plane
# 2. Join command for a worker node
$ kubeadm join 192.168.137.9:6443 --token tg30fs.d2jl43tyszt5m5ir \
--discovery-token-ca-cert-hash sha256:62a3588b53e051cea36db43c55534a792e1abc1ac4d2e1772a6ef86829c42112
# Run the following commands on Master1
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Verify the result
$ kubectl get nodes
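If the join token has expired (tokens are valid for 24 hours by default), or a control-plane join fails because the certificates are missing on the new master, the join commands can be regenerated on Master1. A sketch using standard kubeadm subcommands:
# Print a fresh worker join command
$ kubeadm token create --print-join-command
# Re-upload the control-plane certificates and print the certificate key;
# append it to the master join command as --certificate-key <key>
$ kubeadm init phase upload-certs --upload-certs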
Join Master2 to the Cluster
# Run the join command
$ kubeadm join 192.168.137.9:6443 --token tg30fs.d2jl43tyszt5m5ir \
--discovery-token-ca-cert-hash sha256:62a3588b53e051cea36db43c55534a792e1abc1ac4d2e1772a6ef86829c42112 \
--control-plane
# Run the following commands on Master2
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Verify the result
$ kubectl get nodes
Both master nodes have now joined successfully, and kubectl get nodes lists both of them.
To add the worker nodes to the cluster, run the following command on each worker node:
$ kubeadm join 192.168.137.9:6443 --token tg30fs.d2jl43tyszt5m5ir \
--discovery-token-ca-cert-hash sha256:62a3588b53e051cea36db43c55534a792e1abc1ac4d2e1772a6ef86829c42112
Install the CNI Plugin
At this point kubectl get nodes shows every node as NotReady, because no CNI plugin is installed yet; deploy the CNI network plugin from a master node.
The mainstream CNI plugins today are Calico and Flannel; here we use Calico.
Download calico.yaml from the URL below.
A tutorial for reference: https://www.jianshu.com/p/2c436a0ffe7f
https://docs.projectcalico.org/v3.14/manifests/calico.yaml
# Modify the pod CIDR in calico.yaml.
Change the pod CIDR in calico.yaml to the subnet given to kubeadm init (the podSubnet in kubeadm-config.yaml, i.e. the value of --pod-network-cidr), which is 10.244.0.0/16 here.
Open the file with vim, search for 192, and edit the section as marked below:
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
  value: "true"
# After the change, run the following command to deploy Calico
$ kubectl apply -f calico.yaml
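The Calico pods take a little while to become Running; once they do, the nodes should flip from NotReady to Ready:
$ kubectl get pods -n kube-system | grep calico
$ kubectl get nodes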
K8s Commands
Namespace commands
1.4 Create a namespace
$ kubectl create ns dev
1.5 Delete a namespace
$ kubectl delete ns dev
1.6 Configuration approach: ns-dev.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
Create: kubectl create -f ns-dev.yaml
Delete: kubectl delete -f ns-dev.yaml
Pod commands
2.2 Create and run
In older versions of kubectl there was no command that ran a standalone pod; kubectl run created it through a pod controller (a Deployment). Since v1.18, kubectl run creates a bare Pod directly.
# Command format: kubectl run <pod-name> [flags]
# --image: the image to run in the pod
# --port: the port the pod listens on
# --namespace: the namespace the pod runs in
kubectl run nginx --image=nginx:1.9.13 --port=80 --namespace dev
2.3 View
kubectl get pod nginx -n dev -o wide # basic pod information
kubectl describe pod nginx -n dev # detailed pod information
2.4 Access
kubectl get pod nginx -o wide -n dev # get the pod's internal IP address
kubectl describe pod nginx -n dev # get the pod's container port
curl <pod-ip>:<port>
2.5 Delete (delete the Deployment first; a Deployment keeps reconciling its pods toward the desired state)
After deleting a pod, the deletion is reported as successful, but querying again shows that a new pod has been created.
This happens when the pod was created by a pod controller: the controller watches pod status and immediately recreates the pod once it detects that one has died.
In that case, to delete the pod you must delete the pod controller.
kubectl get deployment -n dev
kubectl delete deployment deployment_name -n dev
kubectl delete pod nginx -n dev # actually unnecessary: once the pod controller is deleted, its pods are destroyed as well
2.6 Configuration approach
apiVersion: v1
kind: Pod
metadata:
  name: nginx # the pod's name; in YAML it is set explicitly here
  namespace: dev
spec:
  containers:
  - image: nginx:1.19.3
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
Create: kubectl create -f nginx-pod.yaml
Delete: kubectl delete -f nginx-pod.yaml
Label commands
3.3 Command-line approach
# Add labels to a pod resource
kubectl label pod nginx-deployment-559d658b74-srp4b version=1.0
kubectl label pod nginx-deployment-559d658b74-srp4b env=dev
kubectl label pod nginx-deployment-559d658b74-srp4b business=middle
# Update a label on a pod resource
kubectl label pod nginx-deployment-559d658b74-srp4b version=2.0 --overwrite
# Show labels
kubectl get pods --show-labels
# Filter by label
kubectl get pod -l version=2.0,business=middle,env=dev --show-labels
kubectl get pod -l version=2.0,business=middle,env!=prod --show-labels
kubectl get pod -l version=1.0 --show-labels
# Remove labels
kubectl label pod nginx-deployment-559d658b74-srp4b version-
kubectl label pod nginx-deployment-559d658b74-srp4b env-
3.4 Configuration approach
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    version: "3.0"
    env: "test"
spec:
  containers:
  - image: nginx:1.19.3
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
Deployment commands
4.2 Command-line operations
# Command format: kubectl create deployment <deployment-name> [flags]
# --image: the pod image
# --port: the exposed port
# --replicas: the number of pods to create
# --namespace: the namespace to create it in
# Create a deployment
kubectl create deployment deploy-name --image=nginx:latest --port=80 -n dev
# View basic deployment information
kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 3m4s
# View detailed deployment information
kubectl describe deployment nginx-deployment
# Delete the deployment
kubectl delete deployment nginx-deployment
4.3 Configuration approach
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.16.1
        ports:
        - containerPort: 80
Working with the deployment YAML
Create: kubectl create -f deployment.yaml
Delete: kubectl delete -f deployment.yaml
Service commands
5.2 Create a Service accessible inside the cluster
# Expose a Service
# expose: expose a resource as a Service
# deployment: the deployment to expose
# type: how the Service is exposed (ClusterIP, NodePort)
# port: the Service port
# target-port: the pod port
kubectl expose deployment nginx-deployment --name=nginx-svc --type=ClusterIP --port=80 --target-port=80 -n dev
# View the Service
[root@host1 ~]# kubectl get services -n dev -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
nginx-svc ClusterIP 10.99.196.21 <none> 80/TCP 77s app=nginx
# A CLUSTER-IP has been allocated; this is the Service's IP, and it does not change during the Service's lifetime
# The pods behind this Service can be reached through this IP
Note: the cluster IP is only reachable from inside the cluster
curl cluster-ip:80
curl 10.99.196.21:80
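If you are testing from a machine that is not a cluster node, the ClusterIP will not respond; one way around this is to curl it from a temporary pod inside the cluster (a sketch; the busybox image and the pod name curl-test are arbitrary choices):
$ kubectl run curl-test --rm -it --restart=Never --image=busybox:1.28 -n dev -- wget -qO- 10.99.196.21:80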
5.3 Create a Service accessible from outside the cluster
Note: once a NodePort-type Service is created, every node in the cluster (masters and workers alike) listens on a port in the 3xxxx range, and the containers behind the Service can be reached via any node's IP:3xxxx.
kubectl expose deployment nginx-deployment --name=nginx-svc-nodeport --type=NodePort --port=80 --target-port=80 -n dev
service/nginx-svc-nodeport exposed
# View the Services
[root@host1 ~]# kubectl get svc -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-svc ClusterIP 10.99.196.21 <none> 80/TCP 10m
nginx-svc-nodeport NodePort 10.99.234.207 <none> 80:32399/TCP 2m33s
# The service can now be reached from a host outside the cluster via <node-IP>:32399
# For example, from a browser (or curl) on your workstation:
curl 192.168.137.11:32399
5.4 Delete a Service
kubectl delete service nginx-svc -n dev
kubectl delete service nginx-svc-nodeport -n dev
5.5 Configuration approach
5.5.1 Create nginx-svc.yaml (ClusterIP)
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: dev
spec:
  clusterIP: 10.99.10.100 # optional; if omitted, an IP is assigned automatically
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: ClusterIP
5.5.2 Create nginx-svc-nodeport.yaml (NodePort)
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc-nodeport
  namespace: dev
spec:
  clusterIP: 10.99.10.101 # optional; if omitted, an IP is assigned automatically
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: NodePort
Operations
Create: kubectl create -f nginx-svc.yaml
        kubectl create -f nginx-svc-nodeport.yaml
Delete: kubectl delete -f nginx-svc.yaml
        kubectl delete -f nginx-svc-nodeport.yaml
Port reference:
Port explanations:
- port: the port used to access the Service from inside the cluster, i.e. a Service can be reached at ClusterIP:port
- nodePort: the port used to access the Service from outside the cluster, i.e. a Service can be reached at nodeIP:nodePort
- targetPort: the pod's port; traffic arriving on port and nodePort is forwarded by kube-proxy to the backend pod's targetPort and finally into the container
- containerPort: the port of the container inside the pod; targetPort maps onto containerPort
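To tie the four port fields together, here is a hypothetical NodePort Service annotated with each of them (the nodePort value 30080 is just an illustrative choice within the default 30000-32767 range):
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc-ports-demo
  namespace: dev
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80          # port: reached inside the cluster at ClusterIP:80
    targetPort: 80    # targetPort: the pod port traffic is forwarded to (matches the pod's containerPort)
    nodePort: 30080   # nodePort: reached from outside at nodeIP:30080
    protocol: TCP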