A self-compiled write-up of the 2022 CKA exam questions and answers
Question 1:
# Pre-configure a namespace named app-team1
# Write the Namespace manifest
user1@k8s-master:~/cka-lab-init/1$ cat > ns-app-team1.yaml << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: app-team1
EOF
# Deploy the Namespace resource
user1@k8s-master:~/cka-lab-init/1$ kubectl apply -f ns-app-team1.yaml
namespace/app-team1 created
# Check the deployed resource
user1@k8s-master:~/cka-lab-init/1$ kubectl get namespaces app-team1
NAME        STATUS   AGE
app-team1   Active   17s
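# (Sketch, not from the original notes: the commonly circulated Question 1 task is the RBAC exercise,
# i.e. a ClusterRole deployment-clusterrole that may only create Deployments, StatefulSets and
# DaemonSets, a ServiceAccount cicd-token in app-team1, and a namespace-scoped binding.
# All names below come from that circulated wording, not from these notes.)
kubectl create clusterrole deployment-clusterrole \
  --verb=create --resource=deployments,statefulsets,daemonsets
kubectl create serviceaccount cicd-token -n app-team1
kubectl create rolebinding cicd-token-binding -n app-team1 \
  --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token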
Question 2:
# Configure a 2-replica Deployment, with one pod scheduled to k8s-node-1 and one to k8s-node-2
# Write the Deployment manifest
user1@k8s-master:~/cka-lab-init/2$ cat > deployment-nginx-pod-nodes.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-pod-nodes
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pod-nodes
  template:
    metadata:
      labels:
        app: nginx-pod-nodes
    spec:
      containers:
      - name: nginx
        image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
EOF
# Deploy the Deployment resource
user1@k8s-master:~/cka-lab-init/2$ kubectl apply -f deployment-nginx-pod-nodes.yaml
deployment.apps/nginx-pod-nodes created
# Check the deployed resources, one pod on k8s-node-1 and one on k8s-node-2
user1@k8s-master:~/cka-lab-init/2$ kubectl get pod -o wide
NAME                               READY   STATUS    RESTARTS   AGE    IP           NODE         NOMINATED NODE   READINESS GATES
nginx-pod-nodes-859fcdcc9d-65bwd   1/1     Running   0          115s   10.244.2.2   k8s-node-2   <none>           <none>
nginx-pod-nodes-859fcdcc9d-8jvdx   1/1     Running   0          115s   10.244.1.2   k8s-node-1   <none>           <none>
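# (Sketch, not from the original notes: here the default scheduler happens to spread the two
# replicas across the nodes. Adding pod anti-affinity under template.spec makes the
# one-pod-per-node placement explicit rather than accidental.)
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: nginx-pod-nodes
            topologyKey: kubernetes.io/hostname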
Question 3:
No pre-configuration needed.
Question 4:
# Pre-configuration for the certificate problem: copy the certificates under /etc/kubernetes/pki/etcd to /opt/KUIN00601/
# Create the certificate directory
user1@k8s-master:~$ sudo mkdir /opt/KUIN00601/
# Copy the etcd certificates into the directory created above, renaming them
user1@k8s-master:~$ cd /etc/kubernetes/pki/etcd
user1@k8s-master:/etc/kubernetes/pki/etcd$ sudo cp ca.crt /opt/KUIN00601/ca.crt
user1@k8s-master:/etc/kubernetes/pki/etcd$ sudo cp peer.crt /opt/KUIN00601/etcd-client.crt
user1@k8s-master:/etc/kubernetes/pki/etcd$ sudo cp peer.key /opt/KUIN00601/etcd-client.key
# Check the etcd certificates
user1@k8s-master:~$ ls -l /opt/KUIN00601/
total 12
-rw-r--r-- 1 root root 1017 Jan  9 20:38 ca.crt
-rw-r--r-- 1 root root 1135 Jan  9 20:38 etcd-client.crt
-rw------- 1 root root 1679 Jan  9 20:38 etcd-client.key
# Note: if the etcdctl command is missing, it can be copied out of the etcd container; find the etcd container name first
user1@k8s-master:~$ sudo docker ps | grep etcd
9c1e60ade311 303ce5db0e90 "etcd --advertise-cl…" 3 hours ago Up 3 hours k8s_etcd_etcd-k8s-master_kube-system_4ffc056bc44b10088ef49648e403a455_1
# Copy the etcdctl binary out of the etcd container
user1@k8s-master:~$ sudo docker cp 9c1e60ade311:/usr/local/bin/etcdctl /usr/local/bin/etcdctl
# Confirm the etcdctl command works
user1@k8s-master:~$ etcdctl version
etcdctl version: 3.4.3
API version: 3.4
# Create the backup directories
user1@k8s-master:~$ sudo mkdir /var/lib/backup
user1@k8s-master:~$ sudo mkdir /srv/data
# Copy the backup as etcd-snapshot-previous.db for the restore exercise (this requires the snapshot taken once the first three questions are done)
user1@k8s-master:~$ sudo cp /srv/data/etcd-snapshot.db /var/lib/backup/etcd-snapshot-previous.db
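# (Sketch, not from the original notes: the usual Question 4 task backs up etcd with the
# certificates staged above and later restores from etcd-snapshot-previous.db; the endpoint
# and target path follow the commonly circulated wording and are assumptions.)
sudo ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
  --cacert=/opt/KUIN00601/ca.crt \
  --cert=/opt/KUIN00601/etcd-client.crt \
  --key=/opt/KUIN00601/etcd-client.key \
  snapshot save /var/lib/backup/etcd-snapshot.db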
Question 5:
# Pre-configure an internal namespace
# Write the Namespace manifest
user1@k8s-master:~/cka-lab-init/5$ cat > ns-internal.yaml << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: internal
EOF
# Deploy the Namespace resource
user1@k8s-master:~/cka-lab-init/5$ kubectl apply -f ns-internal.yaml
namespace/internal created
# Check the deployed resource
user1@k8s-master:~/cka-lab-init/5$ kubectl get ns internal
NAME       STATUS   AGE
internal   Active   18s
# Pre-configure two pods in the internal namespace for testing
user1@k8s-master:~/cka-lab-init/5$ cat > nginx-app-pod-80.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-pod
  namespace: internal
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Pod
metadata:
  name: client-pod-1
  namespace: internal
  labels:
    app: client-pod-1
spec:
  containers:
  - name: client-pod
    image: centos:7
    command: ["/bin/bash","-c","sleep 3600"]
EOF
# Create the two pods for testing
user1@k8s-master:~/cka-lab-init/5$ kubectl apply -f nginx-app-pod-80.yaml
deployment.apps/nginx-pod created
pod/client-pod-1 created
# Check the pod resources
user1@k8s-master:~/cka-lab-init/5$ kubectl get pod -o wide -n internal
NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
client-pod-1                 1/1     Running   0          57s   10.244.2.10   k8s-node-2   <none>           <none>
nginx-pod-7cf7d6dbc8-2tgz2   1/1     Running   0          57s   10.244.2.9    k8s-node-2   <none>           <none>
# Test ping access
user1@k8s-master:~/cka-lab-init/5$ kubectl exec -n internal client-pod-1 -- ping 10.244.2.9
PING 10.244.2.9 (10.244.2.9) 56(84) bytes of data.
64 bytes from 10.244.2.9: icmp_seq=1 ttl=63 time=0.071 ms
# Test HTTP access
user1@k8s-master:~/cka-lab-init/5$ kubectl exec -n internal client-pod-1 -- curl 10.244.2.9
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
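# (Sketch, not from the original notes: Question 5 is usually the NetworkPolicy task,
# i.e. create allow-port-from-namespace in namespace internal so that only pods in that
# namespace may reach port 80 of other pods there. The name and port follow the commonly
# circulated wording and are assumptions; the test pods above exercise exactly this path.)
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: internal
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}
    ports:
    - protocol: TCP
      port: 80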
Question 6:
# Pre-configure a Deployment named front-end.
user1@k8s-master:~/cka-lab-init/6$ cat > front-end.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: front-end
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-front-end
  template:
    metadata:
      labels:
        app: nginx-front-end
    spec:
      containers:
      - name: nginx
        image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
EOF
# Deploy front-end.
user1@k8s-master:~/cka-lab-init/6$ kubectl apply -f front-end.yaml
deployment.apps/front-end created
### Check front-end.
user1@k8s-master:~/cka-lab-init/6$ kubectl get deployments front-end
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
front-end   1/1     1            1           63s
### Check the pod.
user1@k8s-master:~/cka-lab-init/6$ kubectl get pod front-end-7bd47f9bb4-vhlx2 -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
front-end-7bd47f9bb4-vhlx2   1/1     Running   0          39s   10.244.2.11   k8s-node-2   <none>           <none>
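# (Sketch, not from the original notes: Question 6 usually asks to add an http port, 80/tcp,
# to this deployment's pod template and expose it via a NodePort service named front-end-svc;
# the names follow the commonly circulated wording and are assumptions.)
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
kubectl expose deployment front-end --name=front-end-svc --port=80 --target-port=http --type=NodePort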
Question 7:
# Pre-configure an ing-internal namespace
# Write the Namespace manifest
user1@k8s-master:~/cka-lab-init/7$ sudo cat > ns-ing-internal.yaml << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: ing-internal
EOF
# Deploy the Namespace resource
user1@k8s-master:~/cka-lab-init/7$ kubectl apply -f ns-ing-internal.yaml
namespace/ing-internal created
# Write the manifest for the ingress controller
user1@k8s-master:~/cka-lab-init/7$ sudo cat > deploy.yaml << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - services
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - extensions
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - services
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io # k8s 1.14+
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - configmaps
    resourceNames:
      - ingress-controller-leader-nginx
    verbs:
      - get
      - update
  - apiGroups:
      - ''
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ''
    resources:
      - endpoints
    verbs:
      - create
      - get
      - update
  - apiGroups:
      - ''
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
    - name: https-webhook
      port: 443
      targetPort: webhook
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-controller-leader
            - --ingress-class=nginx
            - --configmap=ingress-nginx/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          livenessProbe:
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 1
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 1
            successThreshold: 1
            failureThreshold: 3
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  name: ingress-nginx-admission
  namespace: ingress-nginx
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    rules:
      - apiGroups:
          - extensions
          - networking.k8s.io
        apiVersions:
          - v1beta1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    failurePolicy: Fail
    clientConfig:
      service:
        namespace: ingress-nginx
        name: ingress-nginx-controller-admission
        path: /extensions/v1beta1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
rules:
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - validatingwebhookconfigurations
    verbs:
      - get
      - update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-2.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.33.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: jettech/kube-webhook-certgen:v1.2.0
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.ingress-nginx.svc
            - --namespace=ingress-nginx
            - --secret-name=ingress-nginx-admission
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-2.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.33.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: jettech/kube-webhook-certgen:v1.2.0
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=ingress-nginx
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - secrets
    verbs:
      - get
      - create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-2.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.33.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
EOF
# Create the ingress controller.
user1@k8s-master:~/cka-lab-init/7$ kubectl apply -f deploy.yaml
# Check the created Services.
user1@k8s-master:~/cka-lab-init/7$ kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.103.24.70    <none>        80:32401/TCP,443:30961/TCP   43s
ingress-nginx-controller-admission   ClusterIP   10.101.98.116   <none>        443/TCP                      43s
### Check the created pods.
user1@k8s-master:~/cka-lab-init/7$ kubectl get pod -o wide -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
ingress-nginx-admission-create-tz7jq        0/1     Completed   0          6m15s   10.244.2.12   k8s-node-2   <none>           <none>
ingress-nginx-admission-patch-7lklk         0/1     Completed   0          6m15s   10.244.2.13   k8s-node-2   <none>           <none>
ingress-nginx-controller-7fcdd96bcf-hrfg4   1/1     Running     0          6m25s   10.244.2.16   k8s-node-2   <none>           <none>
### Check the ingress-controller endpoints.
user1@k8s-master:~/cka-lab-init/7$ kubectl get endpoints -n ingress-nginx
NAME                                 ENDPOINTS                        AGE
ingress-nginx-controller             10.244.2.16:443,10.244.2.16:80   6m54s
ingress-nginx-controller-admission   10.244.2.16:8443                 6m54s
### Check the ingress-controller details.
user1@k8s-master:~/cka-lab-init/7$ kubectl describe endpoints -n ingress-nginx ingress-nginx-controller
Name: ingress-nginx-controller
Namespace: ingress-nginx
Labels:       app.kubernetes.io/component=controller
              app.kubernetes.io/instance=ingress-nginx
              app.kubernetes.io/managed-by=Helm
              app.kubernetes.io/name=ingress-nginx
              app.kubernetes.io/version=0.33.0
              helm.sh/chart=ingress-nginx-2.4.0
Annotations:  endpoints.kubernetes.io/last-change-trigger-time: 2021-01-09T14:02:16Z
Subsets:
Addresses: 10.244.2.16
NotReadyAddresses: <none>
Ports:
Name Port Protocol
---- ---- --------
https 443 TCP
http 80 TCP
### Create the test file on k8s-node-2
user1@k8s-node-2:/tmp/7/html$ pwd
/tmp/7/html
user1@k8s-node-2:/tmp/7/html$ cat index.html
hi
# Write the manifest for the Service named hi. Pin the pod to k8s-node-2, because the hostPath content lives on that node.
user1@k8s-master:~/cka-lab-init/7$ sudo cat nginx-app-service.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app
  namespace: ing-internal
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-app
  template:
    metadata:
      labels:
        app: nginx-app
    spec:
      nodeName: k8s-node-2
      containers:
      - name: nginx
        image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: html-hi
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: html-hi
        hostPath:
          path: /tmp/7/html
---
apiVersion: v1
kind: Service
metadata:
  name: hi
  namespace: ing-internal
spec:
  selector:
    app: nginx-app
  ports:
  - name: web
    port: 5678
    targetPort: 80
# Deploy the Service and pod
user1@k8s-master:~/cka-lab-init/7$ kubectl apply -f nginx-app-service.yaml
deployment.apps/nginx-app created
service/hi created
# Check the deployed Service.
user1@k8s-master:~/cka-lab-init/7$ kubectl get svc -n ing-internal
NAME   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
hi     ClusterIP   10.96.192.19   <none>        5678/TCP   40s
### Check the deployed pod.
user1@k8s-master:~/cka-lab-init/7$ kubectl get pod -n ing-internal -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
nginx-app-778d9f8cbc-kfvxm   1/1     Running   0          62s   10.244.2.17   k8s-node-2   <none>           <none>
# Access the Service and the pod
user1@k8s-master:~/cka-lab-init/7$ sudo curl 10.96.192.19:5678
hi
user1@k8s-master:~/cka-lab-init/7$ sudo curl 10.244.2.17
hi
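# (Sketch, not from the original notes: Question 7 usually asks for an Ingress named pong,
# in some papers ping, in namespace ing-internal, routing path /hi to service hi on port
# 5678. The name and path follow the commonly circulated wording and are assumptions;
# networking.k8s.io/v1beta1 matches the cluster version used in these notes.)
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: pong
  namespace: ing-internal
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - http:
      paths:
      - path: /hi
        backend:
          serviceName: hi
          servicePort: 5678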
Question 8:
# Pre-configure a Deployment named webserver
user1@k8s-master:~/cka-lab-init/8$ sudo cat > deploy-webserver.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webserver
  template:
    metadata:
      labels:
        app: webserver
    spec:
      containers:
      - name: nginx
        image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
EOF
# Deploy the webserver application.
user1@k8s-master:~/cka-lab-init/8$ kubectl apply -f deploy-webserver.yaml
deployment.apps/webserver created
# Check the webserver pod count.
user1@k8s-master:~/cka-lab-init/8$ kubectl get pod webserver-7b7cfc759-7gk5r
NAME                        READY   STATUS    RESTARTS   AGE
webserver-7b7cfc759-7gk5r   1/1     Running   0          22s
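# (Sketch, not from the original notes: Question 8 is usually just scaling this deployment;
# the target replica count varies by exam paper and is an assumption here.)
kubectl scale deployment webserver --replicas=6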
Question 9:
# Pre-configure a node label
user1@k8s-master:~$ kubectl label nodes k8s-node-2 disk=spinning
node/k8s-node-2 labeled
# Check the label
user1@k8s-master:~$ kubectl get nodes --show-labels | grep spinning
k8s-node-2   Ready   <none>   4h27m   v1.18.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disk=spinning,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node-2,kubernetes.io/os=linux
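# (Sketch, not from the original notes: the usual Question 9 task is a pod named
# nginx-kusc00401 with image nginx scheduled via nodeSelector disk=spinning; the pod name
# follows the commonly circulated wording and is an assumption.)
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disk: spinning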
Question 10:
# Pre-configure the directory and file.
user1@k8s-master:~$ sudo mkdir /opt/KUSC00402/
user1@k8s-master:~$ sudo touch /opt/KUSC00402/kusc00402.txt
user1@k8s-master:~$ ls /opt/KUSC00402/kusc00402.txt
/opt/KUSC00402/kusc00402.txt
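# (Sketch, not from the original notes: per the commonly circulated Question 10 wording, this
# file receives the number of Ready nodes excluding those tainted NoSchedule. The placeholder
# below is hypothetical; write the number you actually counted.)
kubectl get nodes --no-headers | grep -cw Ready
kubectl describe nodes | grep -i taints
echo "<ready-count>" | sudo tee /opt/KUSC00402/kusc00402.txt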
Question 11:
None.
Question 12:
# Pre-configure the /srv/app-data directory
user1@k8s-master:~$ sudo mkdir -p /srv/app-data
user1@k8s-master:~$ ls -d /srv/app-data/
/srv/app-data/
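# (Sketch, not from the original notes: /srv/app-data backs the usual PersistentVolume task,
# a PV named app-data with a hostPath volume. The 2Gi capacity and ReadWriteMany access mode
# follow the commonly circulated wording and are assumptions.)
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-data
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: /srv/app-data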
Question 13:
# Pre-configure the StorageClass
# Set up an NFS server.
user1@k8s-master:~$ sudo apt-get install -y nfs-kernel-server
# Configure the NFS export.
user1@k8s-master:~$ sudo mkdir /nfs-server
echo "/nfs-server *(rw,sync,no_root_squash)" >> /etc/exports
user1@k8s-master:~$ cat /etc/exports
/nfs-server *(rw,sync,no_root_squash)
user1@k8s-master:~$ sudo chmod 700 /nfs-server/
user1@k8s-master:~$ sudo service nfs-kernel-server restart
user1@k8s-master:~$ sudo service nfs-kernel-server status
# Verify the NFS service.
user1@k8s-master:~$ sudo showmount -e 127.0.0.1
Export list for 127.0.0.1:
/nfs-server *
# Install the NFS client package on node 2 as well, then test the NFS export.
user1@k8s-node-2:~$ sudo apt-get install -y nfs-common
user1@k8s-node-2:~$ sudo showmount -e k8s-master
Export list for k8s-master:
/nfs-server *
# Create a ServiceAccount to solve the permission problem.
# Write the RBAC manifest file
user1@k8s-master:~/cka-lab-init/13$ sudo cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# Create the RBAC resources
user1@k8s-master:~/cka-lab-init/13$ kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
# Write the nfs-client provisioner manifest file
user1@k8s-master:~/cka-lab-init/13$ sudo cat nfs-client-provisioner.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: nfs-storage
        - name: NFS_SERVER
          value: k8s-master
        - name: NFS_PATH
          value: /nfs-server
      volumes:
      - name: nfs-client-root
        nfs:
          server: k8s-master
          path: /nfs-server
# Create the nfs-client resources
user1@k8s-master:~/cka-lab-init/13$ kubectl apply -f nfs-client-provisioner.yaml
deployment.apps/nfs-client-provisioner created
# Check the nfs-client pod
user1@k8s-master:~/cka-lab-init/13$ kubectl get pod nfs-client-provisioner-6546c4b76-xj42f
NAME                                     READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-6546c4b76-xj42f   1/1     Running   0          30s
# Write the StorageClass manifest
user1@k8s-master:~/cka-lab-init/13$ sudo cat managed-nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-hostpath-sc
provisioner: nfs-storage
allowVolumeExpansion: true
# Create the StorageClass resource
user1@k8s-master:~/cka-lab-init/13$ kubectl apply -f managed-nfs-storageclass.yaml
storageclass.storage.k8s.io/csi-hostpath-sc created
# Check the StorageClass resource
user1@k8s-master:~/cka-lab-init/13$ kubectl get storageclasses.storage.k8s.io
NAME              PROVISIONER   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-hostpath-sc   nfs-storage   Delete          Immediate           true                   35s
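# (Sketch, not from the original notes: this StorageClass backs the usual Question 13 task,
# a PVC named pv-volume using csi-hostpath-sc with 10Mi, mounted by a pod named web-server
# and later expanded to 70Mi via kubectl edit pvc pv-volume --record. Names and sizes follow
# the commonly circulated wording and are assumptions.)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: csi-hostpath-sc
  resources:
    requests:
      storage: 10Mi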
Question 14:
# Pre-configuration
# Write the manifest for the pod named foobar
user1@k8s-master:~/cka-lab-init/14$ sudo cat > foobar-pod.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: foobar
  labels:
    app: foobar
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
EOF
# Create the pod resource
user1@k8s-master:~/cka-lab-init/14$ kubectl apply -f foobar-pod.yaml
pod/foobar created
# Check the pod.
user1@k8s-master:~/cka-lab-init/14$ kubectl get pod foobar
NAME READY STATUS RESTARTS AGE
foobar 1/1 Running 0 23s
# Create /opt/KUTR00101
user1@k8s-master:~$ sudo mkdir /opt/KUTR00101
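# (Sketch, not from the original notes: Question 14 usually asks to extract log lines matching
# a pattern from this pod into /opt/KUTR00101/foobar; the pattern unable-to-access-website
# follows the commonly circulated wording and is an assumption.)
kubectl logs foobar | grep unable-to-access-website > /opt/KUTR00101/foobar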
Question 15:
# Write the pod manifest
user1@k8s-master:~/cka-lab-init/15$ sudo cat > big-corp-app.yaml << 'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: big-corp-app
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/big-corp-app.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
EOF
### Create the pod
user1@k8s-master:~/cka-lab-init/15$ kubectl apply -f big-corp-app.yaml
pod/big-corp-app created
### Check the pod
user1@k8s-master:~/cka-lab-init/15$ kubectl get pod
NAME           READY   STATUS    RESTARTS   AGE
big-corp-app   1/1     Running   0          23s
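# (Sketch, not from the original notes: Question 15 is usually the sidecar task, adding a
# busybox container that streams /var/log/big-corp-app.log with tail, sharing the existing
# varlog volume. The container name sidecar follows the commonly circulated wording and is
# an assumption; the fragment goes under spec.containers of the pod above.)
  - name: sidecar
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/big-corp-app.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log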
Question 16:
# Pre-configure metrics-server
# 1. Download the YAML file.
user1@k8s-master:~/cka-lab-init/16$ sudo wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml
# 2. Edit the YAML file and add the following flags.
user1@k8s-master:~/cka-lab-init/16$ sudo vim components.yaml
......
kind: Deployment
      command:
      - /metrics-server
      - --kubelet-preferred-address-types=InternalIP
      - --kubelet-insecure-tls
      image: registry.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6   # switch the image to a mirror registry (the exact mirror domain is an assumption)
# 3. Edit the kube-apiserver manifest.
user1@k8s-master:~/cka-lab-init/16$ sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
......
spec:
  containers:
  - command:
    - kube-apiserver
    ......
    - --enable-aggregator-routing=true
# 4. Create metrics-server from the YAML file.
user1@k8s-master:~/cka-lab-init/16$ kubectl apply -f components.yaml
# 5. Check the metrics-server Deployment.
user1@k8s-master:~/cka-lab-init/16$ kubectl get deployments.apps -n kube-system metrics-server
NAME             READY   UP-TO-DATE   AVAILABLE   AGE
metrics-server   1/1     1            1           28s
# 6. Check the metrics-server pod.
user1@k8s-master:~/cka-lab-init/16$ kubectl get pod -n kube-system metrics-server-5f5f46797c-l9vs8
NAME                              READY   STATUS    RESTARTS   AGE
metrics-server-5f5f46797c-l9vs8   1/1     Running   0          57s
# 7. Check that the metrics API has been registered.
user1@k8s-master:~/cka-lab-init/16$ kubectl api-versions | grep metrics
metrics.k8s.io/v1beta1
# 8. Check that the metrics endpoints respond.
user1@k8s-master:~/cka-lab-init/16$ kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
user1@k8s-master:~/cka-lab-init/16$ kubectl get --raw "/apis/metrics.k8s.io/v1beta1/pods"
# 9. Watch pod resource usage.
user1@k8s-master:~/cka-lab-init/16$ kubectl top pod
NAME           CPU(cores)   MEMORY(bytes)
big-corp-app   1m           1Mi
# Write a manifest for pods labeled name=cpu-user
user1@k8s-master:~/cka-lab-init/16$ sudo cat cpu-pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cpu-pod
spec:
  replicas: 3
  selector:
    matchLabels:
      name: cpu-user
  template:
    metadata:
      labels:
        name: cpu-user
    spec:
      containers:
      - name: nginx
        image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
# Create the pod resources
user1@k8s-master:~/cka-lab-init/16$ kubectl apply -f cpu-pod.yaml
deployment.apps/cpu-pod created
# Check the pods labeled name=cpu-user
user1@k8s-master:~/cka-lab-init/16$ kubectl get pod -l name=cpu-user -A
NAMESPACE   NAME                       READY   STATUS    RESTARTS   AGE
default     cpu-pod-7bfb667b98-8v2gx   1/1     Running   0          21s
default     cpu-pod-7bfb667b98-qjfmm   1/1     Running   0          21s
default     cpu-pod-7bfb667b98-w6mm6   1/1     Running   0          21s
# Create the directory /opt/KUTR00401
user1@k8s-master:~$ sudo mkdir /opt/KUTR00401
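# (Sketch, not from the original notes: Question 16 usually asks for the name of the pod with
# the highest CPU usage among pods labeled name=cpu-user, written to /opt/KUTR00401/KUTR00401.txt;
# the file name follows the commonly circulated wording and the placeholder is hypothetical.)
kubectl top pod -l name=cpu-user -A
echo "<highest-cpu-pod-name>" | sudo tee /opt/KUTR00401/KUTR00401.txt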
Question 17:
# Pre-configure: stop the kubelet on node-3
# Check the kubelet status
user1@k8s-node-3:~$ sudo systemctl status kubelet.service
● kubelet.service - kubelet: The Kubernetes Node Agent
Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubeadm.conf
Active: active (running) since Sat 2021-01-09 18:05:24 CST; 5h 9min ago
# Stop the kubelet
user1@k8s-node-3:~$ sudo systemctl stop kubelet.service
# Check the node status
user1@k8s-master:~$ kubectl get node
NAME         STATUS                     ROLES    AGE     VERSION
k8s-master   Ready                      master   5h39m   v1.19.0
k8s-node-1   Ready,SchedulingDisabled   <none>   5h24m   v1.18.8
k8s-node-2   Ready                      <none>   5h24m   v1.18.8
k8s-node-3   NotReady                   <none>   5h24m   v1.18.8
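# (Sketch, not from the original notes: Question 17 is the NotReady troubleshooting task set
# up above; on the failing node the fix is normally just starting and enabling the kubelet.)
sudo systemctl start kubelet.service
sudo systemctl enable kubelet.service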