Ceph OSD vertical scaling (adding disks to existing OSD nodes)
# Add a 30 GB disk (/dev/sdc) to each server as the vertical-scaling device
[root@node1 ~]# lsblk /dev/sdc
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sdc 8:32 0 30G 0 disk
[root@node1 ~]# fdisk -l
·····
Disk /dev/sdc: 32.2 GB, 32212254720 bytes, 62914560 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
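Before touching the disk, it is worth confirming that /dev/sdc is present and empty on every node. A minimal sketch, assuming the deploy node already has passwordless SSH to node1-3 (which ceph-deploy requires anyway):
# Hypothetical check loop, run from the deploy node
for host in node1 node2 node3; do
    echo "== $host =="
    ssh "$host" lsblk /dev/sdc
done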
# View the disk list on node1
[root@node1 my-cluster]# ceph-deploy disk list node1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk list node1
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : list
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0bd8c3f170>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : ['node1']
[ceph_deploy.cli][INFO ] func : <function disk at 0x7f0bd8e93938>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[node1][INFO ] Running command: fdisk -l
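As the log shows, disk list simply runs fdisk -l on the remote host. The remaining nodes can be checked the same way; ceph-deploy accepts more than one hostname here:
ceph-deploy disk list node2 node3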
# Zap (wipe) the disk's existing partitions and data
[root@node1 my-cluster]# ceph-deploy disk zap node1 /dev/sdc
······
[node1][WARNIN] Running command: /usr/bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10 conv=fsync
[node1][WARNIN] stderr: 10+0 records in
[node1][WARNIN] 10+0 records out
[node1][WARNIN] 10485760 bytes (10 MB) copied
[node1][WARNIN] stderr: , 0.0588607 s, 178 MB/s
[node1][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdc>
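The excerpt above only zaps node1's disk; the new disks on node2 and node3 should get the same treatment before their OSDs are created (assuming they are also /dev/sdc on those hosts):
ceph-deploy disk zap node2 /dev/sdc
ceph-deploy disk zap node3 /dev/sdc
# With the disks clean, create a BlueStore OSD on each node's /dev/sdc: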
[root@node1 my-cluster]# ceph-deploy osd create --data /dev/sdc node1
[root@node1 my-cluster]# ceph-deploy osd create --data /dev/sdc node2
[root@node1 my-cluster]# ceph-deploy osd create --data /dev/sdc node3
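The three create commands could equally be written as a loop; pausing between hosts is optional but gives each new OSD a moment to come up and peer before the next one joins (a sketch, not part of the original walkthrough):
for host in node1 node2 node3; do
    ceph-deploy osd create --data /dev/sdc "$host"
    sleep 30    # optional settle time between hosts
done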
[root@node1 my-cluster]# ceph -s
  cluster:
    id:     3f5560c6-3af3-4983-89ec-924e8eaa9e06
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 6h)
    mgr: node1(active, since 3d), standbys: node2, node3
    mds: cephfs-demo:1 {0=node1=up:active} 2 up:standby
    osd: 6 osds: 6 up (since 65s), 6 in (since 65s)
    rgw: 1 daemon active (node1)

  task status:
    scrub status:
        mds.node1: idle

  data:
    pools:   7 pools, 224 pgs
    objects: 288 objects, 221 MiB
    usage:   6.7 GiB used, 143 GiB / 150 GiB avail
    pgs:     224 active+clean
[root@node1 my-cluster]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME      STATUS REWEIGHT PRI-AFF
-1       0.14635 root default
-3       0.04878     host node1
 0   hdd 0.01949         osd.0      up  1.00000 1.00000
 3   hdd 0.02930         osd.3      up  1.00000 1.00000
-5       0.04878     host node2
 1   hdd 0.01949         osd.1      up  1.00000 1.00000
 4   hdd 0.02930         osd.4      up  1.00000 1.00000
-7       0.04878     host node3
 2   hdd 0.01949         osd.2      up  1.00000 1.00000
 5   hdd 0.02930         osd.5      up  1.00000 1.00000
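The WEIGHT column is the CRUSH weight, which by default equals an OSD's capacity in TiB: the new 30 GiB disks therefore show up as about 30/1024 ≈ 0.02930 (osd.3/4/5), next to roughly 0.019 for the original 20 GiB OSDs. A quick sanity check:
echo "scale=5; 30/1024" | bc    # .02929, matching the WEIGHT of the new OSDs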
[root@node1 my-cluster]# ceph df
RAW STORAGE:
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 150 GiB 143 GiB 727 MiB 6.7 GiB 4.47
TOTAL 150 GiB 143 GiB 727 MiB 6.7 GiB 4.47
POOLS:
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
ceph-demo 1 64 216 MiB 79 652 MiB 0.47 45 GiB
.rgw.root 2 32 1.2 KiB 4 768 KiB 0 45 GiB
default.rgw.control 3 32 0 B 8 0 B 0 45 GiB
default.rgw.meta 4 32 0 B 0 0 B 0 45 GiB
default.rgw.log 5 32 0 B 175 0 B 0 45 GiB
cephfs_data 6 16 0 B 0 0 B 0 45 GiB
cephfs_metadata 7 16 2.9 KiB 22 1.5 MiB 0 45 GiB
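Note that MAX AVAIL is reported per pool and already accounts for replication: assuming the default 3x replicated pools, the ~143 GiB of free raw space works out to roughly 143/3 per pool, and the ~45 GiB shown additionally reserves headroom for the full ratio and OSD imbalance. A rough check:
echo "scale=1; 143/3" | bc    # ≈ 47.6 GiB; the gap down to 45 GiB is the reserved headroom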
# View the OSDs on a node
[root@node1 my-cluster]# ceph-deploy osd list {node-name}
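For example, to inspect the OSDs that now live on node1:
ceph-deploy osd list node1           # run from the my-cluster deploy directory
ssh node1 ceph-volume lvm list       # shows which device/LV backs each OSD on that node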
REBALANCING (data redistribution)
# Adding new OSDs automatically triggers rebalancing; watch the progress with
watch -n1 'ceph -s'
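An alternative to polling with watch is to follow the cluster's own event stream:
ceph -w          # streams cluster log events, including recovery/rebalance progress
ceph pg stat     # one-line summary of PG states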
# Temporarily disable the rebalancing feature
[root@node1 my-cluster]# ceph osd set norebalance
norebalance is set
[root@node1 my-cluster]# ceph osd set nobackfill
nobackfill is set
[root@node1 my-cluster]# ceph -s
  cluster:
    id:     3f5560c6-3af3-4983-89ec-924e8eaa9e06
    health: HEALTH_WARN
            nobackfill,norebalance flag(s) set

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 6h)
    mgr: node1(active, since 3d), standbys: node2, node3
    mds: cephfs-demo:1 {0=node1=up:active} 2 up:standby
    osd: 6 osds: 6 up (since 21m), 6 in (since 21m)
         flags nobackfill,norebalance
    rgw: 1 daemon active (node1)

  task status:
    scrub status:
        mds.node1: idle

  data:
    pools:   7 pools, 224 pgs
    objects: 288 objects, 221 MiB
    usage:   6.7 GiB used, 143 GiB / 150 GiB avail
    pgs:     224 active+clean
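If you also want to pause recovery of degraded PGs (not only rebalancing and backfill), Ceph has a third related flag; it is optional and not used in this walkthrough:
ceph osd set norecover      # pause recovery as well
ceph osd unset norecover    # clear it again afterwards, together with the flags below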
# Re-enable the rebalancing feature
[root@node1 my-cluster]# ceph osd unset nobackfill
nobackfill is unset
[root@node1 my-cluster]# ceph osd unset norebalance
norebalance is unset
[root@node1 my-cluster]# ceph -s
  cluster:
    id:     3f5560c6-3af3-4983-89ec-924e8eaa9e06
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 6h)
    mgr: node1(active, since 3d), standbys: node2, node3
    mds: cephfs-demo:1 {0=node1=up:active} 2 up:standby
    osd: 6 osds: 6 up (since 23m), 6 in (since 23m)
    rgw: 1 daemon active (node1)

  task status:
    scrub status:
        mds.node1: idle

  data:
    pools:   7 pools, 224 pgs
    objects: 288 objects, 221 MiB
    usage:   6.7 GiB used, 143 GiB / 150 GiB avail
    pgs:     224 active+clean
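Once the flags are cleared and rebalancing has finished, it is worth confirming that data is actually landing on the new OSDs:
ceph osd df tree    # per-OSD utilisation laid out along the CRUSH tree; osd.3/4/5 should show non-zero usage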