Part 1: CCE Container Cluster
Section 1: Creating CCE
1. Create a CCE cluster on Huawei Cloud
At this point the k8s cluster has been created and has three worker nodes. If you want to manage the cluster from JumpServer, add the nodes as assets following the earlier asset-configuration steps and manage them with the operation user. Note that the master nodes of a CCE-built k8s cluster cannot be logged in to; the cluster is managed with kubectl, so pick a host such as the jumpserver machine to manage it from.
2. Access the k8s CCE cluster with kubectl
First download kubectl and the cluster's kubeconfig file, copy them to the jumpserver host, and complete the configuration; after that, kubectl can access the k8s cluster.
1) Download kubectl: https://dl.k8s.io/v1.28.0-alpha.3/kubernetes-client-linux-amd64.tar.gz — download a kubectl matching the cluster version or newer.
2) Download the kubectl configuration file: choose a validity period, e.g. 5 days, then click Download.
3) Install and configure kubectl: upload kubernetes-client-linux-amd64.tar.gz and the kubeconfig.yaml configuration file to the /root directory on the jumpserver host.
[root@jumpserver ~]# tar -xf kubernetes-client-linux-amd64.tar.gz
[root@jumpserver ~]# mv kubernetes/client/bin/kubectl /usr/local/bin/
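A quick sanity check that the client installed correctly:
[root@jumpserver ~]# kubectl version --client    # should report the downloaded version, v1.28.0-alpha.3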
4) Log in to the jumpserver host and configure kubectl; when logged in as root, run directly:
[root@jumpserver ~]# mkdir -p $HOME/.kube
[root@jumpserver ~]# mv kubeconfig.yaml $HOME/.kube/config
5) Configure kubectl tab completion to save typing:
[root@jumpserver ~]# kubectl completion bash >/etc/bash_completion.d/kubectl
[root@jumpserver ~]# exit    # log out and log back in
[root@jumpserver ~]# kubectl get nodes    # the nodes should now be listed
NAME          STATUS   ROLES    AGE   VERSION
192.168.1.2   Ready    <none>   10m   v1.23.8-r0-23.2.26
192.168.1.3   Ready    <none>   10m   v1.23.8-r0-23.2.26
192.168.1.4   Ready    <none>   10m   v1.23.8-r0-23.2.26
3. Configure the cluster to access the harbor image registry
Use the jumpserver host to connect to the three k8s worker nodes. On each node, edit the hosts file to add the hostname/IP resolution for harbor, and edit daemon.json so the nodes can later pull images. All three machines need this configuration (one node is shown as the example).
Method 1: connect through the JumpServer assets configured earlier:
[root@jumpserver ~]# ssh k8s@192.168.1.252 -p2222
Press p to list the assets, then enter the asset's ID to log in to the corresponding node.
[k8s@node-pkkea ~]$ sudo -s
[root@node-pkkea k8s]# vim /etc/hosts
192.168.1.30 harbor
[root@node-pkkea k8s]# vim /etc/docker/daemon.json
{
    "storage-driver": "overlay2",            # add the trailing comma
    "insecure-registries":["harbor:443"]     # add this line
}
To add both entries quickly: the hosts entry can simply be appended, but daemon.json is JSON, so the new key has to be spliced in before the closing brace (appending it to the end of the file would produce invalid JSON). Assuming daemon.json contains the storage-driver line exactly as shown above:
cat >>/etc/hosts<<EOF
192.168.1.30 harbor
EOF
sed -i 's/"storage-driver": "overlay2"/"storage-driver": "overlay2",\n    "insecure-registries": ["harbor:443"]/' /etc/docker/daemon.json
[root@node-pkkea k8s]# systemctl restart docker
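Before relying on the change, it is worth confirming that daemon.json is still well-formed JSON and that docker picked up the registry; a quick check, assuming python3 is available on the node:
[root@node-pkkea k8s]# python3 -m json.tool /etc/docker/daemon.json     # prints the file back if the JSON parses
[root@node-pkkea k8s]# docker info | grep -A1 'Insecure Registries'     # harbor:443 should be listed after the restart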
Method 2: connect to the nodes directly:
[root@jumpserver ~]# kubectl get nodes    # get the node IP addresses
NAME          STATUS   ROLES    AGE   VERSION
192.168.1.2   Ready    <none>   40m   v1.23.8-r0-23.2.26
192.168.1.3   Ready    <none>   40m   v1.23.8-r0-23.2.26
192.168.1.4   Ready    <none>   40m   v1.23.8-r0-23.2.26
[root@jumpserver ~]# ssh 192.168.1.2
[root@node-pkkea ~]# vim /etc/hosts
192.168.1.30 harbor
[root@node-pkkea ~]# vim /etc/docker/daemon.json
{
"storage-driver": "overlay2", #加上,
"insecure-registries":["harbor:443"] #添加
}
[root@node-pkkea ~]# systemctl restart docker
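Since all three nodes need identical changes, the edits can also be scripted from the jumpserver host. A sketch (node-fix.sh is a hypothetical helper; assumes root SSH to the node IPs works as above and that daemon.json contains the storage-driver line shown earlier):
[root@jumpserver ~]# cat > node-fix.sh <<'EOF'
#!/bin/bash
# add harbor name resolution, trust the registry, then restart docker
echo '192.168.1.30 harbor' >> /etc/hosts
sed -i 's/"storage-driver": "overlay2"/"storage-driver": "overlay2",\n    "insecure-registries": ["harbor:443"]/' /etc/docker/daemon.json
systemctl restart docker
EOF
[root@jumpserver ~]# for ip in 192.168.1.2 192.168.1.3 192.168.1.4; do
>   scp node-fix.sh $ip:/root/ && ssh $ip 'bash /root/node-fix.sh'
> done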
Part 2: Project Deployment
Section 2: The 学茶 (Tea Shop) Project
1. Deploy the backend programs
#Manage the k8s cluster from the jumpserver host and write the resource object files
[root@jumpserver ~]# mkdir tea-yaml
[root@jumpserver ~]# cd tea-yaml/
Set the timezone so that container time inside the pods matches the host:
[root@jumpserver tea-yaml]# vim tz.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: timezone
data:
  timezone: "Asia/Shanghai"
[root@jumpserver tea-yaml]# kubectl apply -f tz.yaml
[root@jumpserver tea-yaml]# kubectl get configmaps
NAME               DATA   AGE
kube-root-ca.crt   1      31m
timezone           1      9s
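A quick check that the value landed in the ConfigMap:
[root@jumpserver tea-yaml]# kubectl get configmap timezone -o jsonpath='{.data.timezone}'
Asia/Shanghai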
#Create the PV and PVC resource object file for the jar packages
[root@jumpserver tea-yaml]# vim pv-pvc-jar.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-jar
spec:
  volumeMode: Filesystem
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.101
    path: /project/jar
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-jar
spec:
  #storageClassName: ""
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
#Create the resource object file for the passport (verification-code) service, using the jar PVC. In the java commands below, the JVM memory flags are placed before -jar: anything after the jar path is passed to the application as program arguments rather than to the JVM.
[root@jumpserver tea-yaml]# vim passport-jar.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: passport-jar
spec:
  selector:
    matchLabels:
      app: passport-jar
  replicas: 1
  template:
    metadata:
      labels:
        app: passport-jar
    spec:
      volumes:
      - name: passport-jar
        persistentVolumeClaim:
          claimName: pvc-jar
      containers:
      - name: passport-jar
        image: harbor:443/myimg/jar:base
        env:
        - name: TZ
          valueFrom:
            configMapKeyRef:
              name: timezone
              key: timezone
        command: ["/bin/bash"]
        args:
        - -c
        - |
          java -Dfile.encoding=utf-8 -Xmx128M -Xms128M -Xmn64m -XX:MaxMetaspaceSize=128M -XX:MetaspaceSize=128M -jar /project/jar/passport-provider-1.0-SNAPSHOT.jar --server.port=30094 --spring.profiles.active=vm
        ports:
        - protocol: TCP
          containerPort: 30094
        volumeMounts:
        - name: passport-jar
          mountPath: /project/jar
      restartPolicy: Always
#Create the resource object file for the tea-shop front site, using the jar PVC
[root@jumpserver tea-yaml]# vim teaserver-jar.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: teaserver-jar
spec:
  selector:
    matchLabels:
      app: teaserver-jar
  replicas: 1
  template:
    metadata:
      labels:
        app: teaserver-jar
    spec:
      volumes:
      - name: teaserver-jar
        persistentVolumeClaim:
          claimName: pvc-jar
      containers:
      - name: teaserver-jar
        image: harbor:443/myimg/jar:base
        env:
        - name: TZ
          valueFrom:
            configMapKeyRef:
              name: timezone
              key: timezone
        command: ["/bin/bash"]
        args:
        - -c
        - |
          java -Dfile.encoding=utf-8 -Xmx128M -Xms128M -Xmn64m -XX:MaxMetaspaceSize=128M -XX:MetaspaceSize=128M -jar /project/jar/tea-server-admin-1.0.0-SNAPSHOT.jar --server.port=30091 --spring.profiles.active=vm
        ports:
        - protocol: TCP
          containerPort: 30091
        volumeMounts:
        - name: teaserver-jar
          mountPath: /project/jar
      restartPolicy: Always
#Create the PV and PVC resource object file for the images used by the tea-shop back-office program
[root@jumpserver tea-yaml]# vim pv-pvc-image.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-image
spec:
  volumeMode: Filesystem
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.101
    path: /home/images/vm/
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: image
spec:
  #storageClassName: ""
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
#Create the resource object file for the tea-shop back-office program, using the jar and image PVCs
[root@jumpserver tea-yaml]# vim teaadmin-jar.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: teaadmin-jar
spec:
  selector:
    matchLabels:
      app: teaadmin-jar
  replicas: 1
  template:
    metadata:
      labels:
        app: teaadmin-jar
    spec:
      volumes:
      - name: teaadmin-jar
        persistentVolumeClaim:
          claimName: pvc-jar
      - name: image
        persistentVolumeClaim:
          claimName: image
      containers:
      - name: teaadmin-jar
        image: harbor:443/myimg/jar:base
        env:
        - name: TZ
          valueFrom:
            configMapKeyRef:
              name: timezone
              key: timezone
        command: ["/bin/bash"]
        args:
        - -c
        - |
          java -Dfile.encoding=utf-8 -Xmx128M -Xms128M -Xmn64m -XX:MaxMetaspaceSize=128M -XX:MetaspaceSize=128M -jar /project/jar/tea-admin-main-1.0.0-SNAPSHOT.jar --server.port=30092 --spring.profiles.active=vm
        ports:
        - protocol: TCP
          containerPort: 30092
        volumeMounts:
        - name: teaadmin-jar
          mountPath: /project/jar
        - name: image
          mountPath: /home/images/vm/
      restartPolicy: Always
#Create the resource object file for the tea-shop attachment center, using the jar PVC
[root@jumpserver tea-yaml]# vim attach-jar.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: attach-jar
spec:
  selector:
    matchLabels:
      app: attach-jar
  replicas: 1
  template:
    metadata:
      labels:
        app: attach-jar
    spec:
      volumes:
      - name: attach-jar
        persistentVolumeClaim:
          claimName: pvc-jar
      containers:
      - name: attach-jar
        image: harbor:443/myimg/jar:base
        env:
        - name: TZ
          valueFrom:
            configMapKeyRef:
              name: timezone
              key: timezone
        command: ["/bin/bash"]
        args:
        - -c
        - |
          java -Dfile.encoding=utf-8 -Xmx128M -Xms128M -Xmn64m -XX:MaxMetaspaceSize=128M -XX:MetaspaceSize=128M -jar /project/jar/attach-server-main-1.0.0-SNAPSHOT.jar --server.port=30093 --spring.profiles.active=vm
        ports:
        - protocol: TCP
          containerPort: 30093
        volumeMounts:
        - name: attach-jar
          mountPath: /project/jar
      restartPolicy: Always
[root@jumpserver tea-yaml]# kubectl apply -f pv-pvc-jar.yaml
[root@jumpserver tea-yaml]# kubectl apply -f passport-jar.yaml
[root@jumpserver tea-yaml]# kubectl apply -f teaserver-jar.yaml
[root@jumpserver tea-yaml]# kubectl apply -f pv-pvc-image.yaml
[root@jumpserver tea-yaml]# kubectl apply -f teaadmin-jar.yaml
[root@jumpserver tea-yaml]# kubectl apply -f attach-jar.yaml
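After applying, check that the PVs and PVCs bound and that the pods started:
[root@jumpserver tea-yaml]# kubectl get pv,pvc                        # both pairs should show STATUS Bound
[root@jumpserver tea-yaml]# kubectl get pods                          # the four deployments' pods should reach Running
[root@jumpserver tea-yaml]# kubectl logs deploy/passport-jar | tail   # spot-check a startup log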
Create the corresponding Service objects for the backend jars:
[root@jumpserver tea-yaml]# vim passport-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: passport-service
spec:
  ports:
  - protocol: TCP
    port: 30094
    targetPort: 30094
  selector:
    app: passport-jar
  type: ClusterIP
[root@jumpserver tea-yaml]# vim teaserver-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: teaserver-service
spec:
  ports:
  - protocol: TCP
    port: 30091
    targetPort: 30091
  selector:
    app: teaserver-jar
  type: ClusterIP
[root@jumpserver tea-yaml]# vim teaadmin-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: teaadmin-service
spec:
  ports:
  - protocol: TCP
    port: 30092
    targetPort: 30092
  selector:
    app: teaadmin-jar
  type: ClusterIP
Apply the Service objects for the jars:
[root@jumpserver tea-yaml]# kubectl apply -f passport-service.yaml
[root@jumpserver tea-yaml]# kubectl apply -f teaserver-service.yaml
[root@jumpserver tea-yaml]# kubectl apply -f teaadmin-service.yaml
[root@jumpserver tea-yaml]# kubectl get service
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)     AGE
kubernetes          ClusterIP   10.247.0.1       <none>        443/TCP     79m
passport-service    ClusterIP   10.247.184.212   <none>        30094/TCP   4m9s
teaadmin-service    ClusterIP   10.247.181.110   <none>        30092/TCP   55s
teaserver-service   ClusterIP   10.247.248.165   <none>        30091/TCP   2m19s
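Optionally, the ClusterIP services can be probed from inside the cluster with a throwaway pod; a sketch assuming the base image ships curl (any image that has curl will do):
[root@jumpserver tea-yaml]# kubectl run curltest --rm -it --restart=Never \
    --image=harbor:443/myimg/jar:base -- curl -s http://passport-service:30094/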
2. Deploy the front and back-office nginx pages
#Create the PV/PVC for the tea-shop back-office management pages and the front-site pages
[root@jumpserver tea-yaml]# vim pv-pvc-backendpage.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pageadmin
spec:
  volumeMode: Filesystem
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.101
    path: /project/page/backend-page/dist/
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pageadmin
spec:
  #storageClassName: ""
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
[root@jumpserver tea-yaml]# vim pv-pvc-frontpage.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pagefront
spec:
  volumeMode: Filesystem
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.101
    path: /project/page/front-page/dist/
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pagefront
spec:
  #storageClassName: ""
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
[root@jumpserver tea-yaml]# kubectl apply -f pv-pvc-backendpage.yaml
[root@jumpserver tea-yaml]# kubectl apply -f pv-pvc-frontpage.yaml
#Fetch the nginx configuration file to set up nginx serving of the front and back-office pages and the image site
[root@jumpserver tea-yaml]# vim /etc/hosts
...
192.168.1.30 harbor
[root@jumpserver tea-yaml]# vim /etc/docker/daemon.json    # only the following content is needed
{
    "insecure-registries":["harbor:443"]
}
[root@jumpserver tea-yaml]# systemctl restart docker
[root@jumpserver tea-yaml]# docker run -itd --name nginx harbor:443/myimg/tea:nginx
[root@jumpserver tea-yaml]# docker cp nginx:/usr/local/nginx/conf/nginx.conf ./
[root@jumpserver tea-yaml]# vim nginx.conf
...
# limit upload size
client_body_buffer_size 30m;
client_max_body_size 30m;
...
# add the following server blocks just before the last closing brace of the file (inside the http block)
    server {    # image site
        listen 30080;
        server_name _;
        location / {
            root /home/images/vm/;
            index index.html index.htm;
        }
    }
    server {    # front site pages
        listen 30091;
        server_name _;
        location / {
            root "/project/page/front-page/dist/";
            index index.html;
        }
        location /api/ {
            proxy_pass http://teaserver-service:30091/;
        }
        location /passport-api/ {
            proxy_pass http://passport-service:30094/;
        }
    }
    server {    # back-office management pages
        listen 30092;
        server_name _;
        location / {
            root "/project/page/backend-page/dist/";
            index index.html;
        }
        location /api/ {
            proxy_pass http://teaadmin-service:30092/;
        }
        location /to_passport/ {
            proxy_pass http://passport-service:30094/;
        }
    }
}    # this last brace already exists in the file; do not copy it
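The edited file can be syntax-checked with the nginx binary inside the image before it is turned into a ConfigMap. The service names in proxy_pass only resolve inside the cluster, so they are mapped to a dummy address for the test; this assumes the image's nginx binary sits at /usr/local/nginx/sbin/nginx (consistent with the config path used above):
[root@jumpserver tea-yaml]# docker run --rm \
    --add-host teaserver-service:127.0.0.1 \
    --add-host passport-service:127.0.0.1 \
    --add-host teaadmin-service:127.0.0.1 \
    -v $PWD/nginx.conf:/usr/local/nginx/conf/nginx.conf \
    harbor:443/myimg/tea:nginx /usr/local/nginx/sbin/nginx -t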
#Create the nginx ConfigMap
[root@jumpserver tea-yaml]# kubectl create configmap nginx --from-file=nginx.conf
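A quick way to confirm the file landed in the ConfigMap intact:
[root@jumpserver tea-yaml]# kubectl get configmap nginx -o jsonpath='{.data.nginx\.conf}' | head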
#Write the nginx resource object file, referencing the nginx ConfigMap
[root@jumpserver tea-yaml]# vim nginx.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      volumes:
      - name: nginx
        configMap:
          name: nginx
      - name: pageadmin
        persistentVolumeClaim:
          claimName: pageadmin
      - name: pagefront
        persistentVolumeClaim:
          claimName: pagefront
      - name: image
        persistentVolumeClaim:
          claimName: image
      containers:
      - name: nginx
        image: harbor:443/myimg/tea:nginx
        env:
        - name: TZ
          valueFrom:
            configMapKeyRef:
              name: timezone
              key: timezone
        ports:
        - name: image-30080
          protocol: TCP
          containerPort: 30080
        - name: teaserver-30091
          protocol: TCP
          containerPort: 30091
        - name: teaadmin-30092
          protocol: TCP
          containerPort: 30092
        volumeMounts:
        - name: nginx
          mountPath: /usr/local/nginx/conf/nginx.conf
          subPath: nginx.conf
        - name: image
          mountPath: /home/images/vm/
        - name: pagefront
          mountPath: /project/page/front-page/dist/
        - name: pageadmin
          mountPath: /project/page/backend-page/dist/
      restartPolicy: Always
#Create the nginx Service file
[root@jumpserver tea-yaml]# vim nginx-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  ports:
  - name: image-30080
    protocol: TCP
    port: 30080
    targetPort: 30080
    nodePort: 30080
  - name: teaserver-30091
    protocol: TCP
    port: 30091
    targetPort: 30091
    nodePort: 30091
  - name: teaadmin-30092
    protocol: TCP
    port: 30092
    targetPort: 30092
    nodePort: 30092
  selector:
    app: nginx
  type: NodePort
[root@jumpserver tea-yaml]# kubectl apply -f nginx.yaml
[root@jumpserver tea-yaml]# kubectl apply -f nginx-service.yaml
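Once the pod is running, the NodePorts can be tested against any node IP from the jumpserver host:
[root@jumpserver tea-yaml]# kubectl get pods -l app=nginx
[root@jumpserver tea-yaml]# curl -I http://192.168.1.2:30091/    # front site; ports 30080 and 30092 can be checked the same way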
3. Publish the services
Publish the services via NodePort.
Use an ELB load balancer to publish ports 30080, 30091, and 30092; for each listener the frontend port and the backend port are the same, and the backend servers are the three CCE worker nodes.
Image site: http://ELB-IP:30080/tea_attach/dog.jpg
Tea-shop back-office management page: http://ELB-IP:30092
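A quick smoke test once the ELB listeners are configured (ELB-IP stands for the load balancer's address; the image URL returns 200 only if dog.jpg actually exists on the NFS share):
[root@jumpserver ~]# curl -I http://ELB-IP:30080/tea_attach/dog.jpg    # image site
[root@jumpserver ~]# curl -I http://ELB-IP:30092/                      # back-office management page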
Publish new products from the back-office management page.