Create the CephFilesystem

By default the Ceph cluster is deployed without CephFS support; the official default yaml can be used to deploy the filesystem that backs file storage.
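The filesystem.yaml referenced below is the example manifest shipped in the Rook repository. A minimal sketch of locating it, assuming the repository was cloned into a directory named rook and that the examples live under cluster/examples/kubernetes/ceph (the exact path depends on the Rook release):

[root@K8S-PROD-M1 ~]# cd rook/cluster/examples/kubernetes/ceph
[root@K8S-PROD-M1 ceph]# ls filesystem.yaml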

Configure affinity scheduling

[root@K8S-PROD-M1 ceph]# kubectl label nodes k8s-prod-w1 ceph-mds=enabled
node/k8s-prod-w1 labeled
[root@K8S-PROD-M1 ceph]# kubectl label nodes k8s-prod-w2 ceph-mds=enabled

Configure the deployment file

[root@K8S-PROD-M1 ceph]# cp filesystem.yaml filesystem-deploy.yaml
[root@K8S-PROD-M1 ceph]# vi filesystem-deploy.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
    parameters:
      compression_mode: none
  dataPools:
    - failureDomain: host
      replicated:
        size: 3
      parameters:
        compression_mode: none
  preservePoolsOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true
    placement:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: ceph-mds
                  operator: In
                  values:
                    - enabled
            topologyKey: kubernetes.io/hostname
    annotations:
    resources:

Deploy CephFS

[root@K8S-PROD-M1 ceph]# kubectl apply -f filesystem-deploy.yaml

View the CephFS

[root@K8S-PROD-M1 ceph]# kubectl get cephfilesystems.ceph.rook.io -n rook-ceph
NAME   ACTIVEMDS   AGE
myfs   1           5m10s
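To confirm that the podAntiAffinity rule spread the active and standby MDS daemons across the two labeled nodes, the MDS Pods can be listed with wide output; a sketch, assuming Rook's usual app=rook-ceph-mds Pod label:

[root@K8S-PROD-M1 ceph]# kubectl -n rook-ceph get pods -l app=rook-ceph-mds -o wide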

[root@K8S-PROD-M1 ~]# kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
[root@rook-ceph-tools-6f77f8564f-mbbx4 /]# ceph status
  cluster:
    id:     792e13e8-0617-4bce-92bc-67fcccd82bef
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum a,b,d (age 99m)
    mgr: a(active, since 32m)
    mds: myfs:1 {0=myfs-a=up:active} 1 up:standby-replay
    osd: 3 osds: 3 up (since 3m), 3 in (since 23h)
    rgw: 1 daemon active (my.store.a)

  task status:
    scrub status:
      mds.myfs-a: idle
      mds.myfs-b: idle

  data:
    pools:   11 pools, 177 pgs
    objects: 304 objects, 3.8 MiB
    usage:   3.6 GiB used, 26 GiB / 30 GiB avail
    pgs:     177 active+clean

  io:
    client:   12 KiB/s rd, 0 B/s wr, 12 op/s rd, 7 op/s wr

Create the StorageClass

[root@K8S-PROD-M1 rook]# cd cluster/examples/kubernetes/ceph/csi/cephfs
[root@K8S-PROD-M1 cephfs]# kubectl apply -f storageclass.yaml
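The storageclass.yaml applied above creates the rook-cephfs StorageClass backed by the CephFS CSI driver. Roughly what the Rook example contains (a sketch only; parameter values such as the data pool name vary between Rook versions, so refer to the file shipped with your release):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  clusterID: rook-ceph
  fsName: myfs
  pool: myfs-data0
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete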

Create the PVC

[root@K8S-PROD-M1 cephfs]# cp pvc.yaml pvc-deploy.yaml
[root@K8S-PROD-M1 cephfs]# vi pvc-deploy.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs

[root@K8S-PROD-M1 cephfs]# kubectl apply -f pvc-deploy.yaml

[root@K8S-PROD-M1 cephfs]# kubectl get pvc
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
cephfs-pvc   Bound    pvc-3bd8829d-c101-4142-86c7-beaacf0a23ed   1Gi        RWX            rook-cephfs       108s
rbd-pvc      Bound    pvc-0d6574d8-00b7-4964-95e0-60fa256c1659   1Gi        RWO            rook-ceph-block   2d
[root@K8S-PROD-M1 cephfs]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS      REASON   AGE
pvc-0d6574d8-00b7-4964-95e0-60fa256c1659   1Gi        RWO            Delete           Bound    default/rbd-pvc      rook-ceph-block            2d
pvc-3bd8829d-c101-4142-86c7-beaacf0a23ed   1Gi        RWX            Delete           Bound    default/cephfs-pvc   rook-cephfs                51s

Use the PVC

Create two Pods that write to the same volume at the same time:

[root@K8S-PROD-M1 cephfs]# vi cephfs-demo-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-demo-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: cephfs-demo
  template:
    metadata:
      labels:
        app: cephfs-demo
    spec:
      restartPolicy: Always
      containers:
        - name: cephfs-demo
          image: busybox
          volumeMounts:
            - name: cephfs-pvc
              mountPath: /tmp
          command: ['sh', '-c', 'echo $(date +"%Y-%m-%d %H:%M:%S") begin >> /tmp/data; exit 0']
      volumes:
        - name: cephfs-pvc
          persistentVolumeClaim:
            claimName: cephfs-pvc
            readOnly: false

[root@K8S-PROD-M1 cephfs]# kubectl apply -f cephfs-demo-deploy.yaml
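The containers only append one timestamp and then exit, so the replicas finish quickly. A quick check that both Pods of the Deployment were created, using the app=cephfs-demo label defined above:

[root@K8S-PROD-M1 cephfs]# kubectl get pods -l app=cephfs-demo -o wide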

Test persistence

[root@K8S-PROD-M1 cephfs]# vi cephfs-demo-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-demo-pod
spec:
  restartPolicy: OnFailure
  containers:
    - name: cephfs-demo
      image: busybox
      volumeMounts:
        - name: cephfs-pvc
          mountPath: /tmp
      command: ['sh', '-c', 'cat /tmp/data; exit 0']
  volumes:
    - name: cephfs-pvc
      persistentVolumeClaim:
        claimName: cephfs-pvc

[root@K8S-PROD-M1 cephfs]# kubectl apply -f cephfs-demo-pod.yaml
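Optionally confirm that the Pod has run before reading its logs:

[root@K8S-PROD-M1 cephfs]# kubectl get pod cephfs-demo-pod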

Check the result: the data written by the two Pods reappears.

[root@K8S-PROD-M1 cephfs]# kubectl logs -f cephfs-demo-pod cephfs-demo
2020-11-05 08:27:20 begin
2020-11-05 08:27:22 begin
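To further verify that the data lives in CephFS and not in any particular Pod, the reader Pod can be deleted, recreated, and the file read again; a sketch:

[root@K8S-PROD-M1 cephfs]# kubectl delete pod cephfs-demo-pod
[root@K8S-PROD-M1 cephfs]# kubectl apply -f cephfs-demo-pod.yaml
[root@K8S-PROD-M1 cephfs]# kubectl logs cephfs-demo-pod cephfs-demo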