View pools
Create a pool with 64 PGs and 64 PGPs:
[root@node-1 ~]# ceph osd pool create ceph-demo 64 64
pool 'ceph-demo' created
[root@node-1 ~]# ceph osd lspools
1 ceph-demo
Check pg_num and pgp_num:
[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
# Replica count; the default is 3
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
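The replica count can be adjusted the same way if needed. A minimal sketch with an example value, not applied in this walkthrough:
# Example only: drop to 2 replicas (fewer copies, less safety)
ceph osd pool set ceph-demo size 2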
Adjust pg_num and pgp_num (raise pgp_num to match pg_num, otherwise data is not actually rebalanced onto the new PGs):
[root@node-1 ~]# ceph osd pool set ceph-demo pg_num 128
set pool 1 pg_num to 128
[root@node-1 ~]# ceph osd pool set ceph-demo pgp_num 128
set pool 1 pgp_num to 128
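As an aside, on Nautilus and later the pg_autoscaler module can manage pg_num automatically instead of manual tuning. A sketch, assuming the module is available on this release:
# Enable the autoscaler module, then turn it on for this pool
ceph mgr module enable pg_autoscaler
ceph osd pool set ceph-demo pg_autoscale_mode on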
Verify:
[root@node-1 ~]# ceph -s
  cluster:
    id:     c16b9145-7216-4058-8bfb-c9b7b2b702de
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node-1,node-2,node-3 (age 2w)
    mgr: node-1(active, since 2w), standbys: node-2, node-3
    osd: 3 osds: 3 up (since 2w), 3 in (since 2w)

  data:
    pools:   1 pools, 128 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:     128 active+clean
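One note: a pool without an application tag can trigger a POOL_APP_NOT_ENABLED health warning. Tagging the pool for RBD (or running rbd pool init) avoids that:
# Mark the pool as an RBD pool
ceph osd pool application enable ceph-demo rbd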
Create an RBD image
rbd create -p ceph-demo --image rbd-demo.img --size 10G
Verify:
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
    size 10 GiB in 2560 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1143ee2e8a3a
    block_name_prefix: rbd_data.1143ee2e8a3a
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features:
    flags:
    create_timestamp: Tue Nov 17 14:57:35 2020
    access_timestamp: Tue Nov 17 14:57:35 2020
    modify_timestamp: Tue Nov 17 14:57:35 2020
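The numbers are consistent: order 22 means each backing object is 2^22 bytes = 4 MiB, so a 10 GiB image maps to 10 GiB / 4 MiB = 2560 objects, matching the size line.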
Delete an RBD image (shown for reference; the image must still exist for the steps that follow)
rbd rm -p ceph-demo --image rbd-demo.img
Map and mount the block device
Disable the newer image features first; the 3.10 kernel cannot map an image with them enabled:
rbd feature disable ceph-demo/rbd-demo.img deep-flatten
rbd feature disable ceph-demo/rbd-demo.img fast-diff
rbd feature disable ceph-demo/rbd-demo.img object-map
rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
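Alternatively, an image can be created with only the layering feature from the start, avoiding the per-feature disabling above. A sketch with a hypothetical image name:
# Hypothetical example: create a kernel-client-friendly image up front
rbd create ceph-demo/rbd-demo2.img --size 10G --image-feature layering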
Verify:
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
    size 10 GiB in 2560 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1143ee2e8a3a
    block_name_prefix: rbd_data.1143ee2e8a3a
    format: 2
    features: layering
    op_features:
    flags:
    create_timestamp: Tue Nov 17 14:57:35 2020
    access_timestamp: Tue Nov 17 14:57:35 2020
    modify_timestamp: Tue Nov 17 14:57:35 2020
Map the image:
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
Verify:
[root@node-1 ~]# rbd device list
id pool namespace image snap device
0 ceph-demo rbd-demo.img - /dev/rbd0
[root@node-1 ~]# fdisk -l
Disk /dev/rbd0: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
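Note that the reported optimal I/O size, 4194304 bytes, is exactly the image's 4 MiB object size showing through the block layer.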
The device can now be partitioned and formatted:
# Format the device with ext4 and confirm it shows up
mkfs.ext4 /dev/rbd0
lsblk
# Mount it and check the usable size
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
df -h
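A mapping made with rbd map does not survive a reboot. Ceph ships an rbdmap service that re-maps listed images at boot; a sketch, assuming the client.admin keyring sits in the default location:
# Append to /etc/ceph/rbdmap (one image per line):
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
# Then enable the boot-time mapping service:
systemctl enable rbdmap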
Expand the image
The image is currently 10 GiB:
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
    size 10 GiB in 2560 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1143ee2e8a3a
    block_name_prefix: rbd_data.1143ee2e8a3a
    format: 2
    features: layering
    op_features:
    flags:
    create_timestamp: Tue Nov 17 14:57:35 2020
    access_timestamp: Tue Nov 17 14:
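To complete the expansion, the usual steps are to grow the image and then grow the filesystem on top of it (resize2fs can do this online for ext4). A sketch, assuming a target size of 20G:
# Grow the RBD image from 10G to 20G
rbd resize ceph-demo/rbd-demo.img --size 20G
# Grow the ext4 filesystem to fill the enlarged device
resize2fs /dev/rbd0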

This article covers Ceph administration in detail: creating and managing pools and PGs, adjusting OSD settings, creating and deleting RBD images, monitoring cluster health, handling alerts, and expanding OSDs and replacing disks, along with snapshots, clones, the trash (recycle) mechanism, and advanced RBD features.