1.emptyDir

  When the Pod is deleted the volume is deleted with it; the volume's lifecycle is the same as the Pod's (see the quick check after the manifest below).

apiVersion: v1
kind: Pod
metadata:
  name: redis-pod
spec:
  containers:
  - image: redis
    name: redis
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
    - name: cache-volume
      emptyDir: {}
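
A quick way to confirm that lifecycle (a sketch; the manifest file name redis-pod.yaml and the test file /cache/hello are just illustrations):

kubectl create -f redis-pod.yaml
kubectl exec redis-pod -- touch /cache/hello    # write into the emptyDir volume
kubectl delete pod redis-pod                    # the emptyDir and its contents are removed with the Pod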

2.hostPath

  A directory on the host machine. If that host goes down the data is unavailable, and the storage is not distributed, so the data does not follow the Pod to another node (a usage sketch follows the manifest below).

apiVersion: v1
kind: Pod
metadata:
  name: test-host-pd
spec:
  containers:
  - image: nginx
    name: test-container
    volumeMounts:
    - mountPath: /tmp-test
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /opt/data/
      type: Directory
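
Because the volume uses type: Directory, /opt/data must already exist on the node where the Pod is scheduled. A minimal usage sketch (the manifest file name test-host-pd.yaml is an assumption):

mkdir -p /opt/data                           # run on the node that will host the Pod
kubectl create -f test-host-pd.yaml
kubectl exec test-host-pd -- ls /tmp-test    # lists whatever is in /opt/data on that node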

3.nfs

  NFS gives every node access to the same shared directory over the network, but the NFS server itself is a single point of failure.

192.168.56.12 runs the NFS server; nfs-utils must be installed on all node machines (a server-side setup sketch follows the transcript below).
[root@k8s-node2 nginx]# cat /etc/exports
/opt/nginx  192.168.56.0/24(rw,no_root_squash)
[root@k8s-node2 nginx]# 
[root@k8s-node2 nginx]# cat /opt/nginx/index.html 
nginx-index
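
A minimal sketch of the server-side setup behind the export above (package and service names assume CentOS 7):

yum install nfs-utils -y                  # also needed on every k8s node so kubelet can mount NFS
mkdir -p /opt/nginx
systemctl start rpcbind nfs-server
systemctl enable rpcbind nfs-server
exportfs -r                               # reload /etc/exports
showmount -e 192.168.56.12                # confirm /opt/nginx is exported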
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx-nfs
    spec:
      containers:
      - name: nginx-nfs
        image: nginx
        volumeMounts:
        - name: nginxdata
          mountPath: /usr/share/nginx/html
        ports:
        - containerPort: 80
      volumes:
      - name: nginxdata
        nfs:
          server: 192.168.56.12
          path: /opt/nginx


kubectl create -f nginx.yaml
kubectl expose deployment nginx-deployment --port=90 --type=NodePort --target-port=80 --name=nginx-service-nfs
kubectl get svc
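
To verify, curl any node on the NodePort that kubectl get svc reports for nginx-service-nfs (the node IP and port below are placeholders):

curl http://<node-ip>:<node-port>         # expected output: nginx-index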

4.glusterfs

Install GlusterFS on 192.168.56.11 and 192.168.56.12 and create a replicated volume (a health check follows the commands):

yum install centos-release-gluster -y
yum install glusterfs-server -y
systemctl start glusterd.service
systemctl start glusterfsd
systemctl enable glusterd.service
systemctl enable glusterfsd.service
gluster peer probe 192.168.56.12          # run on 192.168.56.11
gluster volume create nginx-volume replica 2 192.168.56.11:/opt/data 192.168.56.12:/opt/data force
gluster volume start nginx-volume
gluster volume info nginx-volume
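
To confirm both peers and bricks are healthy before using the volume from Kubernetes (a quick check; run on either node):

gluster peer status                       # the other node should show State: Peer in Cluster (Connected)
gluster volume status nginx-volume        # both bricks should be listed as online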


[root@k8s-node2 data]# cat /opt/data/index.html 
glusterfs-nginx-index
[root@k8s-node2 data]# 
Install the GlusterFS client on 192.168.56.10:
 yum install glusterfs-client -y
mount -t glusterfs 192.168.56.12:/nginx-volume /mnt/glusterfs/    # note: mount the replicated volume by its volume name, not a brick path
[root@k8s-master glusterfs]# df -h
Filesystem                   Size  Used Avail Use% Mounted on
/dev/mapper/centos-root       39G  3.4G   35G   9% /
devtmpfs                     478M     0  478M   0% /dev
tmpfs                        489M     0  489M   0% /dev/shm
tmpfs                        489M   50M  439M  11% /run
tmpfs                        489M     0  489M   0% /sys/fs/cgroup
/dev/sda1                   1014M  143M  872M  15% /boot
/dev/mapper/centos-home       19G   33M   19G   1% /home
tmpfs                         98M     0   98M   0% /run/user/0
192.168.56.12:/nginx-volume   39G  3.9G   35G  11% /mnt/glusterfs
[root@k8s-master glusterfs]#


Create the Endpoints object that points at the GlusterFS nodes:  kubectl create -f ed.yaml

{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "192.168.56.11"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    },
    {
      "addresses": [
        {
          "ip": "192.168.56.12"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    }
  ]
}
Create the matching Service:  kubectl create -f service.yaml
  
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "spec": {
    "ports": [
      {"port": 1}
    ]
  }
}
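
Before wiring the Deployment to it, it is worth checking that the Service picked up the manually created Endpoints (a quick check):

kubectl get endpoints glusterfs-cluster   # should list 192.168.56.11:1,192.168.56.12:1
kubectl get svc glusterfs-cluster         # ClusterIP Service exposing port 1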
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment-glusterfs
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx-gf
    spec:
      containers:
      - name: nginx-gf
        image: nginx
        volumeMounts:
        - name: glusterfsvol
          mountPath: /usr/share/nginx/html
        ports:
        - containerPort: 80
      volumes:
      - name: glusterfsvol
        glusterfs:
          endpoints: glusterfs-cluster
          path: nginx-volume
          readOnly: false

---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service-gf
spec:
  selector:
    app: nginx-gf
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  type: NodePort
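
Apply the Deployment and Service above (the manifest file name nginx-gf.yaml is an assumption):

kubectl create -f nginx-gf.yaml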
[root@k8s-master volume]# kubectl  get  svc -o wide 
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE       SELECTOR
glusterfs-cluster   ClusterIP   10.10.10.42    <none>        1/TCP            16m       <none>
httpd               ClusterIP   10.10.10.113   <none>        80/TCP           6d        run=httpd
kubernetes          ClusterIP   10.10.10.1     <none>        443/TCP          14d       <none>
my-service          ClusterIP   10.10.10.120   <none>        80/TCP,443/TCP   7d        app=nginx
nginx               ClusterIP   10.10.10.164   <none>        80/TCP           6d        run=nginx
nginx-service       NodePort    10.10.10.84    <none>        88:36741/TCP     14d       run=nginx
nginx-service-gf    NodePort    10.10.10.23    <none>        80:34973/TCP     13m       app=nginx-gf
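
The output above shows nginx-service-gf mapped to NodePort 34973, so hitting any node on that port should return the page stored on the GlusterFS volume (the node IP is an example):

curl http://192.168.56.11:34973           # expected output: glusterfs-nginx-index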

https://github.com/kubernetes/kubernetes/tree/8fd414537b5143ab039cb910590237cabf4af783/examples/volumes/glusterfs


6.ceph