#功能说明:通过配置文件定义 pool/image 与挂载点的对应关系;服务启动(含开机)时读取配置文件自动映射并挂载 RBD 存储。
#磁盘未格式化时自动格式化为 ext4,已格式化则跳过;支持 namespace 方式挂载。

1. 安装支持ceph的内核模块
可选:
centos/ubuntu:
yum install -y ceph-common
或
apt install -y ceph-common

2. 拷贝认证密钥
cephadmin@ceph-deploy:~/ceph-cluster$ sudo scp ceph.conf ceph.client.admin.keyring root@<客户端服务器IP>:/etc/ceph

测试获取集群信息:
客户端测试是否能够获取集群信息:
[root@ceph-client-centos ~]# ceph -s
  cluster:
    id:     fbcd7dfd-c0b1-420e-a1c3-5eb5002c0cd3
    health: HEALTH_WARN
            clock skew detected on mon.ceph-mon02
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 43m)
    mgr: ceph-mgr01(active, since 25h), standbys: ceph-mgr02
    osd: 8 osds: 8 up (since 44m), 8 in (since 31h)
 
  data:
    pools:   3 pools, 97 pgs
    objects: 39 objects, 30 MiB
    usage:   284 MiB used, 2.3 TiB / 2.3 TiB avail
    pgs:     97 active+clean
能够获取说明没问题了

3. 创建存储池
ceph osd pool create myrbd1 128
ceph osd pool create myrbd2 128
# 新建的 pool 用于 RBD 前需要初始化(较新版本 Ceph 必须执行):
rbd pool init myrbd1
rbd pool init myrbd2


4. 创建存储资源。特性配置
使用myrbd1存储池创建一个myimg1和一个myimg2的块设备,资源从 myrbd1取
cephadmin@ceph-deploy:~$ rbd create myimg1 --size 5G --pool myrbd1
cephadmin@ceph-deploy:~$ rbd create myimg2 --size 5G --pool myrbd1

使用myrbd2存储池创建一个myimg1和一个myimg2的块设备,资源从 myrbd2取
cephadmin@ceph-deploy:~$ rbd create myimg1 --size 5G --pool myrbd2
cephadmin@ceph-deploy:~$ rbd create myimg2 --size 5G --pool myrbd2


# 例子: 使用myrbd1存储池创建一个myimg3的块设备(这是创建了安全策略的,可选配置可不配)
# cephadmin@ceph-deploy:~$ rbd create myimg3 --size 3G --pool myrbd1 --image-format 2 --image-feature layering


检查:
cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool myrbd1
myimg1
myimg2
cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool myrbd2
myimg1
myimg2


关系图:
Ceph 集群
  ├── 存储池(Pool)
     ├── myrbd1
     │   ├── myimg1(RBD 映像)
     │   └── myimg2(RBD 映像)
     ├── myrbd2
         ├── myimg1(RBD 映像)
         └── myimg2(RBD 映像)


#查看指定rbd信息
cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image myimg1 --pool myrbd1 info
rbd image 'myimg1':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 637ae655d83e
	block_name_prefix: rbd_data.637ae655d83e
	format: 2
	features: layering, exclusive-lock
	op_features: 
	flags: 
	create_timestamp: Wed May 29 22:29:41 2024
	access_timestamp: Wed May 29 22:29:41 2024
	modify_timestamp: Wed May 29 22:29:41 2024
cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image myimg1 --pool myrbd2 info
rbd image 'myimg1':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 214e1fd2fc2b
	block_name_prefix: rbd_data.214e1fd2fc2b
	format: 2
	features: layering, exclusive-lock
	op_features: 
	flags: 
	create_timestamp: Thu May 30 20:52:49 2024
	access_timestamp: Thu May 30 20:52:49 2024
	modify_timestamp: Thu May 30 20:52:49 2024


特性的关闭[可选]
必须在ceph服务器端操作:
cephadmin@ceph-deploy:~/ceph-cluster$ rbd feature disable myrbd1/myimg1 object-map fast-diff deep-flatten



客户端配置:
配置为系统服务,开机启动挂载
1. 创建配置文件,写入挂载点镜像等
[root@ceph-client-centos ~]# cat /etc/rbd_mount.conf
myrbd1
  myimg1 /mysql-1
  myimg2 /mysql-2
myrbd2
  myimg1 /leilei-3
  myimg2 /leilei-4

#支持namespace配置
#[myrbd3]
#namespace=ns1
#myimg1 /mnt/test-1
#myimg2 /mnt/test-2


2. 创建挂载脚本
[root@ceph-client-centos ~]# cat /etc/init.d/ceph-mount-rbd.sh
#-------------------------------------------------------------------#
#!/bin/bash
# chkconfig: 345 20 80
# description: 自动挂载 Ceph RBD 设备
#
# Reads $config_file and maps/mounts every RBD image listed in it.
# Config format (image lines are indented under their pool):
#   myrbd1             -- bare pool name (or: [myrbd1])
#   namespace=ns1      -- optional, applies to the current pool
#     myimg1 /mysql-1  -- "image mount_point"
# start: map each image, mkfs.ext4 it when blank (blkid reports no TYPE),
#        mount it, and record "device mount_point" pairs in $mapping_file.
# stop:  umount and "rbd unmap" everything recorded in $mapping_file.

log_file="/var/log/ceph-mount.log"
mapping_file="/etc/rbd_mapping"
config_file="/etc/rbd_mount.conf"

# Append one timestamped line to the log file.
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$log_file"
}

log "脚本开始运行"

case "$1" in
  start)
        log "读取配置文件并挂载 RBD 设备"

        if [ ! -f "$mapping_file" ]; then
            touch "$mapping_file"
        fi

        if [ ! -f "$config_file" ]; then
            log "配置文件不存在:$config_file"
            exit 1
        fi

        # Start a fresh mapping record; entries from a previous run are stale.
        > "$mapping_file"

        pool=""
        ns=""

        # Parse the config file line by line.
        while read -r line; do
            log "读取行:$line"
            # Skip comments and blank lines.
            if [[ $line =~ ^# ]] || [[ -z $line ]]; then
                log "跳过行:$line"
                continue
            fi

            # Order matters: "[pool]" and "namespace=..." would also match
            # the generic single-token pool pattern, so test them first.
            if [[ $line =~ ^\[([^]]+)\]$ ]]; then
                pool=${BASH_REMATCH[1]}
                ns=""
                log "设置 pool 为:$pool"
            elif [[ $line =~ ^namespace=(.+)$ ]]; then
                ns=${BASH_REMATCH[1]}
                log "设置 namespace 为:$ns"
            elif [[ $line =~ ^\ *([^\ ]+)\ *$ ]]; then
                pool=${BASH_REMATCH[1]}
                ns=""
                log "设置 pool 为:$pool"
            elif [[ $line =~ ^\ *([^\ ]+)\ *([^\ ]+)\ *$ ]]; then
                image=${BASH_REMATCH[1]}
                mount_point=${BASH_REMATCH[2]}

                # An image line is only valid after a pool has been declared.
                if [ -z "$pool" ]; then
                    log "无效的配置行(尚未指定 pool):$line"
                    continue
                fi

                log "挂载 Ceph RBD 设备 $image 到 $mount_point"

                # Image spec: pool/[namespace/]image
                spec="${pool}/${ns:+${ns}/}${image}"
                mapped_device=$(rbd map "$spec" --id admin --keyring /etc/ceph/ceph.client.admin.keyring)
                rc=$?
                # Capture the status immediately — "$?" inside the log call
                # would already have been overwritten by the test itself.
                if [ $rc -ne 0 ]; then
                    log "RBD 映射失败,错误码:$rc"
                    exit 1
                fi

                log "映射到的设备:$mapped_device"
                if [ -n "$mapped_device" ]; then
                    # Wait for the device node to appear under /dev.
                    udevadm settle

                    if [ ! -e "$mapped_device" ]; then
                        log "设备 $mapped_device 不存在,等待 udev 处理"
                        sleep 5
                    fi

                    if [ ! -e "$mapped_device" ]; then
                        log "设备 $mapped_device 仍然不存在,映射失败"
                        exit 1
                    fi

                    # Record the mapping so "stop" can undo it later.
                    echo "$mapped_device $mount_point" >> "$mapping_file"

                    # blkid prints nothing for an unformatted device:
                    # format it then; otherwise leave existing data alone.
                    fs_type=$(blkid -o value -s TYPE "$mapped_device")
                    if [ -z "$fs_type" ]; then
                        log "设备未格式化,正在格式化设备:$mapped_device"
                        mkfs.ext4 "$mapped_device"
                        rc=$?
                        if [ $rc -ne 0 ]; then
                            log "格式化失败:$mapped_device,错误码:$rc"
                            exit 1
                        fi
                    else
                        log "设备已格式化,文件系统类型:$fs_type"
                    fi

                    # Mount the device.
                    mkdir -p "$mount_point"
                    mount "$mapped_device" "$mount_point"
                    rc=$?
                    if [ $rc -eq 0 ]; then
                        log "挂载成功:$mapped_device 到 $mount_point"
                    else
                        log "挂载失败:$mapped_device 到 $mount_point,错误码:$rc"
                        exit 1
                    fi
                else
                    log "无法找到映射的设备"
                    exit 1
                fi
            else
                log "无效的配置行:$line"
            fi
        done < "$config_file"
        ;;
  stop)
        log "停止所有挂载并解除映射"
        if [ -s "$mapping_file" ]; then
            # Each record is "device mount_point"; let read split it.
            while read -r device mount_point; do
                umount "$mount_point"
                rc=$?
                if [ $rc -eq 0 ]; then
                    log "卸载成功:$device 从 $mount_point"
                else
                    log "卸载失败:$device 从 $mount_point,错误码:$rc"
                fi

                rbd unmap "$device"
                rc=$?
                if [ $rc -eq 0 ]; then
                    log "解除映射成功:$device"
                else
                    log "解除映射失败:$device,错误码:$rc"
                fi
            done < "$mapping_file"
            # Clear the record so a repeated "stop" doesn't retry stale entries.
            > "$mapping_file"
        fi
        ;;
  *)
        echo "Usage: $0 {start|stop}"
        exit 1
esac

exit 0
#-------------------------------------------------------------------#

chmod a+x /etc/init.d/ceph-mount-rbd.sh

3.配置服务启动配置文件
vim /etc/systemd/system/ceph-rbd-mount.service
#----------------------------
[Unit]
Description=Ceph RBD 自动挂载服务
# RBD mapping needs a reachable monitor; network.target does NOT guarantee
# connectivity, so wait for network-online.target instead.
Wants=network-online.target
After=network-online.target

[Service]
Type=oneshot
ExecStart=/etc/init.d/ceph-mount-rbd.sh start
ExecStop=/etc/init.d/ceph-mount-rbd.sh stop
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
#----------------------------

启动ceph-rbd-mount.service服务(设置开机自启并立即启动):
[root@ceph-client-centos ~]# systemctl daemon-reload
[root@ceph-client-centos ~]# systemctl enable --now ceph-rbd-mount.service
查看服务状态:
[root@ceph-client-centos ~]# systemctl status ceph-rbd-mount.service
● ceph-rbd-mount.service - Ceph RBD 自动挂载服务
   Loaded: loaded (/etc/systemd/system/ceph-rbd-mount.service; enabled; vendor preset: disabled)
   Active: active (exited) since Fri 2024-05-31 00:59:12 CST; 8s ago
  Process: 1215 ExecStart=/etc/init.d/ceph-mount-rbd.sh start (code=exited, status=0/SUCCESS)
 Main PID: 1215 (code=exited, status=0/SUCCESS)
    Tasks: 0
   Memory: 0B
   CGroup: /system.slice/ceph-rbd-mount.service

May 31 00:59:12 ceph-client-centos systemd[1]: Starting Ceph RBD 自动挂载服务...
May 31 00:59:12 ceph-client-centos systemd[1]: Started Ceph RBD 自动挂载服务.


检查挂载:
[root@ceph-client-centos ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        980M     0  980M   0% /dev
tmpfs           991M     0  991M   0% /dev/shm
tmpfs           991M  9.6M  981M   1% /run
tmpfs           991M     0  991M   0% /sys/fs/cgroup
/dev/sda2       100G  3.1G   97G   4% /
/dev/sda1       187M  109M   78M  59% /boot
tmpfs           199M     0  199M   0% /run/user/0
/dev/rbd0       4.8G   20M  4.6G   1% /mysql-1
/dev/rbd1       2.9G  9.0M  2.8G   1% /mysql-2
/dev/rbd2       4.8G   20M  4.6G   1% /leilei-3
/dev/rbd3       2.9G  9.0M  2.8G   1% /leilei-4

[root@ceph-client-centos ~]# lsblk 
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0  100G  0 disk 
├─sda1   8:1    0  190M  0 part /boot
└─sda2   8:2    0 99.8G  0 part /
sr0     11:0    1 1024M  0 rom  
rbd0   253:0    0    5G  0 disk /mysql-1
rbd1   253:16   0    3G  0 disk /mysql-2
rbd2   253:32   0    5G  0 disk /leilei-3
rbd3   253:48   0    3G  0 disk /leilei-4

[root@ceph-client-centos ~]# rbd showmapped
id  pool    namespace  image   snap  device   
0   myrbd1             myimg1  -     /dev/rbd0
1   myrbd1             myimg2  -     /dev/rbd1
2   myrbd2             myimg1  -     /dev/rbd2
3   myrbd2             myimg2  -     /dev/rbd3