View pools
Create a pool with 64 PGs and 64 PGPs
[root@node-1 ~]# ceph osd pool create ceph-demo 64 64
pool 'ceph-demo' created
[root@node-1 ~]# ceph osd lspools
1 ceph-demo
View pg_num and pgp_num
[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
# Replica count; the default is 3
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
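The replica settings can be changed per pool in the same way; a minimal sketch (the values here are only examples, not a recommendation):
ceph osd pool set ceph-demo size 3        # number of replicas per object
ceph osd pool set ceph-demo min_size 2    # minimum replicas required to accept I/O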
Adjust pg_num and pgp_num
[root@node-1 ~]# ceph osd pool set ceph-demo pg_num 128
set pool 1 pg_num to 128
[root@node-1 ~]# ceph osd pool set ceph-demo pgp_num 128
set pool 1 pgp_num to 128
Check the result
[root@node-1 ~]# ceph -s
cluster:
id: c16b9145-7216-4058-8bfb-c9b7b2b702de
health: HEALTH_OK
services:
mon: 3 daemons, quorum node-1,node-2,node-3 (age 2w)
mgr: node-1(active, since 2w), standbys: node-2, node-3
osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
data:
pools: 1 pools, 128 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 147 GiB / 150 GiB avail
pgs: 128 active+clean
Create an RBD image
rbd create -p ceph-demo --image rbd-demo.img --size 10G
View it
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1143ee2e8a3a
block_name_prefix: rbd_data.1143ee2e8a3a
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Nov 17 14:57:35 2020
access_timestamp: Tue Nov 17 14:57:35 2020
modify_timestamp: Tue Nov 17 14:57:35 2020
Delete an RBD image
rbd rm -p ceph-demo --image rbd-demo.img
Mount the block device
Disable the newer image features, which the 3.10 kernel does not support
rbd feature disable ceph-demo/rbd-demo.img deep-flatten
rbd feature disable ceph-demo/rbd-demo.img fast-diff
rbd feature disable ceph-demo/rbd-demo.img object-map
rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
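The same result can be reached in fewer steps; a sketch (rbd-demo2.img is just a placeholder name for a fresh image):
# disable several features in one invocation
rbd feature disable ceph-demo/rbd-demo.img deep-flatten fast-diff object-map exclusive-lock
# or create the image with only the layering feature from the start
rbd create -p ceph-demo --image rbd-demo2.img --size 10G --image-feature layering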
Verify
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1143ee2e8a3a
block_name_prefix: rbd_data.1143ee2e8a3a
format: 2
features: layering
op_features:
flags:
create_timestamp: Tue Nov 17 14:57:35 2020
access_timestamp: Tue Nov 17 14:57:35 2020
modify_timestamp: Tue Nov 17 14:57:35 2020
Map the image to a block device
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
Check the mapped device
[root@node-1 ~]# rbd device list
id pool namespace image snap device
0 ceph-demo rbd-demo.img - /dev/rbd0
[root@node-1 ~]# fdisk -l
Disk /dev/rbd0: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
Now the device can be formatted and partitioned
mkfs.ext4 /dev/rbd0
lsblk
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
df -h
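To survive a reboot, the mapping and mount can be made persistent with the rbdmap service; a sketch, assuming the default admin keyring deployed by ceph-deploy:
echo 'ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring' >> /etc/ceph/rbdmap
systemctl enable rbdmap
# then add an fstab entry that waits for the network, e.g.
# /dev/rbd/ceph-demo/rbd-demo.img  /mnt/rbd-demo  ext4  defaults,noatime,_netdev  0 0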
Expand the image
Currently 10 GiB
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1143ee2e8a3a
block_name_prefix: rbd_data.1143ee2e8a3a
format: 2
features: layering
op_features:
flags:
create_timestamp: Tue Nov 17 14:57:35 2020
access_timestamp: Tue Nov 17 14:57:35 2020
modify_timestamp: Tue Nov 17 14:57:35 2020
[root@node-1 ~]# rbd resize ceph-demo/rbd-demo.img --size 20G
Resizing image: 100% complete...done.
After resizing it is 20 GiB
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
size 20 GiB in 5120 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1143ee2e8a3a
block_name_prefix: rbd_data.1143ee2e8a3a
format: 2
features: layering
op_features:
flags:
create_timestamp: Tue Nov 17 14:57:35 2020
access_timestamp: Tue Nov 17 14:57:35 2020
modify_timestamp: Tue Nov 17 14:57:35 2020
This only expands the underlying image; the filesystem on the device has not been grown yet
resize2fs /dev/rbd0
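For reference, the complete expand workflow on this ext4 device is sketched below; if the image held XFS instead, xfs_growfs would replace resize2fs:
rbd resize ceph-demo/rbd-demo.img --size 20G    # grow the RBD image (as above)
resize2fs /dev/rbd0                             # ext4: grow the filesystem online
# xfs_growfs /mnt/rbd-demo                      # XFS alternative: grow via the mount point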
Shrinking is not recommended; it can easily cause data loss
Troubleshooting warnings
Health status details
[root@node-1 ~]# ceph health detail
HEALTH_WARN application not enabled on 1 pool(s)
POOL_APP_NOT_ENABLED application not enabled on 1 pool(s)
application not enabled on pool 'ceph-demo'
use 'ceph osd pool application enable <pool-name>
<app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or
freeform for custom applications.
One pool has no application enabled; the fix is to enable it and specify the application type.
Fix
[root@node-1 ~]# ceph osd pool application enable ceph-demo rbd
enabled application 'rbd' on pool 'ceph-demo'
[root@node-1 ~]# ceph osd pool application get ceph-demo
{
"rbd": {}
}
Check the status
[root@node-1 ~]# ceph -s
cluster:
id: c16b9145-7216-4058-8bfb-c9b7b2b702de
health: HEALTH_OK
services:
mon: 3 daemons, quorum node-1,node-2,node-3 (age 2w)
mgr: node-1(active, since 2w), standbys: node-2, node-3
osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
data:
pools: 1 pools, 128 pgs
objects: 4 objects, 35 B
usage: 3.0 GiB used, 147 GiB / 150 GiB avail
pgs: 128 active+clean
Ceph Cluster Maintenance
1. Powering down a physical machine for maintenance
- Migrate the affected VMs/services to other nodes first (for OpenStack and similar platforms, migrate the VMs off the host);
- Run the following on a MON node or a node with admin privileges:
# Set OSD flags so the cluster does not rebalance during maintenance
for i in noout nobackfill norecovery; do ceph osd set $i; done
# After maintenance is finished and the machine is powered back on, unset the flags so data resyncs
for i in noout nobackfill norecovery; do ceph osd unset $i; done
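A quick way to confirm the flags are in effect before powering down (a sketch; the exact output wording varies by release):
ceph osd dump | grep flags    # should list noout,nobackfill,norecovery
ceph -s                       # health will warn that these flags are set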
2. Controlling Ceph with systemctl
# start/stop all ceph-mds@.service instances at once
systemctl start/stop/restart ceph-mds.target
# start/stop all ceph-mgr@.service instances at once
systemctl start/stop/restart ceph-mgr.target
# start/stop all ceph-mon@.service instances at once
systemctl start/stop/restart ceph-mon.target
# start/stop all ceph-osd@.service instances at once
systemctl start/stop/restart ceph-osd.target
# start/stop all ceph-radosgw@.service instances at once
systemctl start/stop/restart ceph-radosgw.target
# start/stop all ceph*@.service instances at once
systemctl start/stop/restart ceph.target
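Individual daemon instances can be targeted as well; a sketch using the ceph-<type>@<id> instance naming (the ids below are examples from this cluster):
systemctl status ceph-mon@node-1
systemctl restart ceph-osd@0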
3. Common Ceph monitoring commands
# Check cluster status
ceph health
ceph status
ceph quorum_status
ceph mon_status
# Watch events happening in the cluster
ceph -w/-s
# Check cluster usage
ceph df
# Check OSD status
ceph osd stat
ceph osd dump
# Print the CRUSH tree
ceph osd tree
# Check MON status
ceph mon stat
ceph mon dump
# Check MON quorum status
ceph quorum_status -f json-pretty
# Check MDS status
ceph mds stat
ceph mds dump
-----------------------------------
# Monitor PGs
ceph pg dump
# View the OSDs in a given PG's Acting Set or Up Set
ceph pg map {pg-num}
View the cluster health status
ceph health
View Ceph's real-time running status
ceph -w
Check status information
ceph -s
View Ceph storage usage
ceph df
Remove all Ceph packages and data from a node
ceph-deploy purge node1
ceph-deploy purgedata node1
Create an admin user
Create an admin user for Ceph and generate a key for it, saving the key under /etc/ceph:
ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/ceph.client.admin.keyring
or
ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.admin.keyring
Create a user and key for osd.0
ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/osd/ceph-0/keyring
Create a user and key for mds.node1
ceph auth get-or-create mds.node1 mon 'allow rwx' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mds/ceph-node1/keyring
List the authenticated users and their keys in the cluster
ceph auth list
Delete an authenticated user from the cluster
ceph auth del osd.0
View the detailed configuration of the cluster (via a daemon's admin socket)
ceph daemon mon.node1 config show | more
View cluster health details
ceph health detail
View the path of the Ceph log file
ceph-conf --name mon.node1 --show-config-value log_file
MON commands
ceph mon stat                  # View MON status
ceph mon dump                  # View the MON map
ceph mon remove node1          # Remove a MON node; with ceph-deploy: ceph-deploy mon destroy {host-name [host-name]...}
ceph mon add node1 node1_ip    # Add a MON node; with ceph-deploy: ceph-deploy mon create {host-name [host-name]...}
The store.db under /var/lib/ceph/mon/ceph-node2/ is kept consistent across MON nodes; when adding a MON, update the configuration file in the config directory first, then push it to all nodes:
ceph-deploy --overwrite-conf config push node1 node2 node3
MDS commands
ceph mds stat              # View MDS status
ceph mds dump              # View the MDS map
ceph mds rm 0 mds.node1    # Remove an MDS node
ceph-deploy mds create {host-name}[:{daemon-name}] [{host-name}[:{daemon-name}] ...]
OSD commands
ceph osd stat                  # View OSD status
ceph osd dump                  # View the OSD map
ceph osd tree                  # View the OSD tree
ceph osd down 0                # Mark osd.0 down
ceph osd rm 0                  # Remove an OSD from the cluster
ceph osd crush remove osd.4    # Remove osd.4 from the CRUSH map
ceph osd getmaxosd             # View the maximum number of OSDs
ceph osd setmaxosd 10          # Set the maximum number of OSDs
ceph osd out osd.3             # Mark osd.3 out of the cluster
ceph osd in osd.3              # Bring the out OSD back into the cluster
ceph osd pause                 # Pause OSDs (the whole cluster stops accepting I/O)
ceph osd unpause               # Unpause OSDs (the cluster accepts I/O again)
PG commands
ceph pg stat                   # View PG status
ceph pg dump                   # Dump the PG map
ceph pg map 0.3f               # View the map of a single PG
ceph pg 0.26 query             # View detailed information about a PG
ceph pg dump --format plain    # Show statistics for all PGs in the cluster
Pool commands
ceph osd lspools                       # List the pools in the cluster
ceph osd pool create jiayuan 100       # Create a pool; 100 is the number of PGs
ceph osd pool delete jiayuan jiayuan --yes-i-really-really-mean-it    # The pool name must be given twice
rados df                               # Show detailed usage for each pool
ceph osd pool get data pg_num          # View the pg_num of the data pool
ceph osd pool set data target_max_bytes 100000000000000    # Set the data pool's maximum storage to 100 TB (default 1 TB)
ceph osd pool set data size 3          # Set the data pool's replica count to 3
ceph osd pool set data min_size 2      # Set the minimum number of replicas required for writes to 2
ceph osd pool set data pg_num 100      # Set a pool's pg_num
ceph osd pool set data pgp_num 100     # Set a pool's pgp_num
rados and rbd commands
rados lspools                       # List the pools in the cluster (names only)
rados df                            # List the pools along with each pool's capacity and usage
rados mkpool test                   # Create a pool
rados create test-object -p test    # Create an object
rados rm test-object-1 -p test      # Delete an object
rados -p test ls
rbd ls pool_name                    # List all images in a pool
rbd info -p pool_name --image 74cb427c-cee9-47d0-b467-af217a67e60a    # View information about one image in a pool
rbd create -p test --size 10000 zhanguo    # Create a 10000 MB image named zhanguo in the test pool
rbd rm -p test lizhanguo                   # Delete an image
rbd resize -p test --size 20000 zhanguo    # Resize an image
Create an OSD
ceph-deploy disk zap {osd-server-name}:{disk-name}    # Zap (wipe) the disk
ceph-deploy osd prepare {node-name}:{disk}[:{path/to/journal}]
ceph-deploy osd prepare osdserver1:sdb:/dev/ssd1
ceph-deploy osd activate {node-name}:{path/to/disk}[:{path/to/journal}]
ceph-deploy osd activate osdserver1:/dev/sdb1:/dev/ssd1
Distribute the modified configuration file to all hosts in the cluster
ceph-deploy config push {host-name [host-name]...}
CRUSH map
ceph osd getcrushmap -o MAP    # Get the CRUSH map
crushtool -d MAP -o MAP.TXT    # Decompile the CRUSH map
crushtool -c MAP.TXT -o MAP    # Compile a CRUSH map
ceph osd setcrushmap -i MAP    # Set the CRUSH map
Block device commands
Sizes are in MB; the default pool is rbd
Create a block device:    rbd create {image-name} --size {megabytes} --pool {pool-name}
List block devices:       rbd ls {poolname} -l
Retrieve image info:      rbd --image {image-name} info
Resize an image:          rbd resize --image {image-name} --size {megabytes}
Delete an image:          rbd rm {image-name}
Map an image:             rbd map {image-name} --pool {pool-name} --id {user-name}
List mapped devices:      rbd showmapped
Unmap a device:           rbd unmap /dev/rbd/{poolname}/{imagename}
Snapshot and clone commands
Create a snapshot:
rbd --pool {pool-name} snap create --snap {snap-name} {image-name}
rbd snap create {pool-name}/{image-name}@{snap-name}
Roll back to a snapshot:
rbd --pool {pool-name} snap rollback --snap {snap-name} {image-name}
rbd snap rollback {pool-name}/{image-name}@{snap-name}
Purge all snapshots:
rbd --pool {pool-name} snap purge {image-name}
rbd snap purge {pool-name}/{image-name}
Delete a snapshot:
rbd --pool {pool-name} snap rm --snap {snap-name} {image-name}
rbd snap rm {pool-name}/{image-name}@{snap-name}
List snapshots:
rbd --pool {pool-name} snap ls {image-name}
rbd snap ls {pool-name}/{image-name}
Protect a snapshot:
rbd --pool {pool-name} snap protect --image {image-name} --snap {snapshot-name}
rbd snap protect {pool-name}/{image-name}@{snapshot-name}
Unprotect a snapshot:
rbd --pool {pool-name} snap unprotect --image {image-name} --snap {snapshot-name}
rbd snap unprotect {pool-name}/{image-name}@{snapshot-name}
Clone a snapshot:
rbd clone {pool-name}/{parent-image}@{snap-name} {pool-name}/{child-image-name}
List a snapshot's clones:
rbd --pool {pool-name} children --image {image-name} --snap {snap-name}
rbd children {pool-name}/{image-name}@{snapshot-name}
RGW Object Storage
Create
ceph-deploy rgw create node-1
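A quick check that the gateway is answering (a sketch; 7480 is the default radosgw port, adjust if it was changed):
curl http://node-1:7480    # an anonymous request should return a short S3-style XML listing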
CephFS File Storage
Create the MDS services
ceph-deploy mds create node-1 node-2 node-3
[root@node-1 ceph-deploy]# ceph osd pool create cephfs_metadata 16 16
pool 'cephfs_metadata' created
[root@node-1 ceph-deploy]# ceph osd pool create cephfs_data 16 16
pool 'cephfs_data' created
[root@node-1 ceph-deploy]# ceph fs new cephfs-demo cephfs_metadata cephfs_data
new fs with metadata pool 6 and data pool 7
[root@node-1 ceph-deploy]#
[root@node-1 ceph-deploy]# ceph fs ls
name: cephfs-demo, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@node-1 ceph-deploy]# ceph -s
cluster:
id: c16b9145-7216-4058-8bfb-c9b7b2b702de
health: HEALTH_OK
services:
mon: 3 daemons, quorum node-1,node-2,node-3 (age 46m)
mgr: node-1(active, since 46m), standbys: node-2, node-3
mds: cephfs-demo:1 {0=node-2=up:active} 2 up:standby
osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
rgw: 1 daemon active (node-1)
task status:
scrub status:
mds.node-2: idle
data:
pools: 7 pools, 288 pgs
objects: 213 objects, 3.5 KiB
usage: 3.0 GiB used, 147 GiB / 150 GiB avail
pgs: 288 active+clean
Kernel-mode mount (slightly better performance)
mkdir /mnt/cephfs
[root@node-1 ceph-deploy]# mount -t ceph 172.16.10.224:6789:/ /mnt/cephfs/ -o name=admin
[root@node-1 ceph-deploy]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 8.9M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/sda3 211G 1.8G 210G 1% /
/dev/sda1 1014M 141M 874M 14% /boot
tmpfs 783M 0 783M 0% /run/user/0
tmpfs 3.9G 52K 3.9G 1% /var/lib/ceph/osd/ceph-0
172.16.10.224:6789:/ 47G 0 47G 0% /mnt/cephfs
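To make the kernel mount permanent an fstab entry can be used; a sketch, assuming the admin key is first written to a secret file:
ceph auth get-key client.admin > /etc/ceph/admin.secret
# /etc/fstab entry (one line):
# 172.16.10.224:6789:/  /mnt/cephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime  0 0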
User-space (FUSE) mount
Install the client
yum -y install ceph-fuse
mkdir /mnt/ceph-fuse
[root@node-1 ceph-deploy]# ceph-fuse -n client.admin -m 172.16.10.224:6789,172.16.10.225:6789,172.16.10.226:6789 /mnt/ceph-fuse
2020-11-17 17:54:20.511 7eff93888f80 -1 init, newargv = 0x5571a711e340 newargc=9ceph-fuse[29325]: starting ceph client
ceph-fuse[29325]: starting fuse
[root@node-1 ceph-deploy]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 9.0M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/sda3 211G 1.9G 210G 1% /
/dev/sda1 1014M 141M 874M 14% /boot
tmpfs 783M 0 783M 0% /run/user/0
tmpfs 3.9G 52K 3.9G 1% /var/lib/ceph/osd/ceph-0
172.16.10.224:6789:/ 47G 0 47G 0% /mnt/cephfs
ceph-fuse 47G 0 47G 0% /mnt/ceph-fuse
OSD Expansion and Disk Replacement
Scale out: add more nodes
Scale up: add capacity to existing nodes
[root@node-1 ceph-deploy]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.14639 root default
-3 0.04880 host node-1
0 hdd 0.04880 osd.0 up 1.00000 1.00000
-5 0.04880 host node-2
1 hdd 0.04880 osd.1 up 1.00000 1.00000
-7 0.04880 host node-3
2 hdd 0.04880 osd.2 up 1.00000 1.00000
-----------------------------------
Wipe a disk that already has partitions
ceph-deploy disk zap node-1 /dev/sdc    # under the hood this uses dd to wipe the partition data
Add the disk as an OSD
ceph-deploy osd create node-1 --data /dev/sdc
Adding an OSD causes PGs to be remapped and moved, which costs performance, so expand OSDs one at a time to reduce the impact on the workload (see the throttling sketch below).
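Backfill can also be throttled while the new OSD fills up; a sketch using runtime injection (the values reset when the daemons restart):
ceph tell osd.* injectargs '--osd-max-backfills 1'          # at most 1 backfill per OSD
ceph tell osd.* injectargs '--osd-recovery-max-active 1'    # at most 1 active recovery op per OSD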
Check OSD latency
ceph osd perf
Remove an OSD
ceph osd out osd.5
Wait for the data to finish rebalancing before removing it (see the safety check sketched after these commands)
ceph osd crush rm osd.5
ceph osd rm osd.5
ceph auth rm osd.5
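Before the crush/auth removal it can be worth confirming that nothing still depends on the OSD; a sketch, assuming a Luminous or newer cluster:
while ! ceph osd safe-to-destroy osd.5; do sleep 60; done    # succeeds once no PG relies on osd.5
ceph -s                                                      # all PGs should be active+clean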
Advanced RBD Features
Create an image
[root@node-1 ~]# rbd create ceph-demo/ceph-trash.img --size 10G
[root@node-1 ~]# rbd info ceph-demo/ceph-trash.img
rbd image 'ceph-trash.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 861f92bbad7f
block_name_prefix: rbd_data.861f92bbad7f
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Nov 18 10:57:52 2020
access_timestamp: Wed Nov 18 10:57:52 2020
modify_timestamp: Wed Nov 18 10:57:52 2020
Delete it
[root@node-1 ~]# rbd rm ceph-demo/ceph-trash.img
Removing image: 100% complete...done.
Use the trash (recycle) mechanism
[root@node-1 ~]# rbd create ceph-demo/ceph-trash.img --size 10G
[root@node-1 ~]# rbd trash move ceph-demo/ceph-trash.img --expires-at 20201119
[root@node-1 ~]# rbd trash -p ceph-demo ls
8640e50e08fb ceph-trash.img
Restore it from the trash, guarding against accidental deletion
[root@node-1 ~]# rbd trash restore -p ceph-demo 8640e50e08fb
[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
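Images left in the trash can also be removed for good; a sketch (the id is the one shown by rbd trash ls above, and removal before the expiry date may require --force):
rbd trash rm -p ceph-demo 8640e50e08fb    # permanently delete this image from the trash
rbd trash purge -p ceph-demo              # or purge everything whose deferment has expired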
Map to the local filesystem
rbd create ceph-demo/rbd-test.img --size 10G
[root@node-1 ~]# rbd device map ceph-demo/rbd-test.img
/dev/rbd1
Format and mount
mkfs.ext4 /dev/rbd1
mount /dev/rbd1 /media
touch /media/test.txt
Create a snapshot
[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@snap_20201118
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap_20201118 10 GiB Wed Nov 18 11:15:23 2020
Data recovery
Delete test.txt, then roll back to the snapshot
rbd snap rollback ceph-demo/rbd-test.img@snap_20201118
rbd snap ls ceph-demo/rbd-test.img
After rolling back, unmount and remount to see the restored data
umount /media
mount /dev/rbd1 /media
Create a snapshot to use as a template
[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@template
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap_20201118 10 GiB Wed Nov 18 11:15:23 2020
5 template 10 GiB Wed Nov 18 11:29:21 2020
Protect the snapshot
[root@node-1 ~]# rbd snap protect ceph-demo/rbd-test.img@template
Now it can no longer be deleted
[root@node-1 ~]# rbd snap rm ceph-demo/rbd-test.img@template
Removing snap: 0% complete...failed.2020-11-18 11:32:20.904 7f2cef31ec80 -1 librbd::Operations: snapshot is protected
rbd: snapshot 'template' is protected from removal.
Clone the snapshot
[root@node-1 ~]# rbd clone ceph-demo/rbd-test.img@template ceph-demo/vm1-clone.img
[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
rbd-test.img
vm1-clone.img
Use the cloned image
rbd device map ceph-demo/vm1-clone.img
mount /dev/rbd2 /mnt
List the clones (children) created from the snapshot
[root@node-1 ~]# rbd children ceph-demo/rbd-test.img@template
ceph-demo/vm1-clone.img
Detach the clone from its parent (flatten)
rbd flatten ceph-demo/vm1-clone.img
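After flattening, the template snapshot is no longer needed by the clone and can be released; a sketch:
rbd children ceph-demo/rbd-test.img@template    # should now list no children
rbd snap unprotect ceph-demo/rbd-test.img@template
rbd snap rm ceph-demo/rbd-test.img@template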
RBD Export and Import
Create a snapshot
[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@snap-demo
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap_20201118 10 GiB Wed Nov 18 11:15:23 2020
5 template 10 GiB yes Wed Nov 18 11:29:21 2020
6 snap-demo 10 GiB Wed Nov 18 15:17:24 2020
[root@node-1 ~]#
Export a backup
[root@node-1 ~]# rbd export ceph-demo/rbd-test.img@snap-demo /root/rbd-test.img
Exporting image: 100% complete...done.
Import
[root@node-1 ~]# rbd import rbd-test.img ceph-demo/rbd-test-new.img
Importing image: 100% complete...done.
[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
rbd-test-new.img
rbd-test.img
vm1-clone.img
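For ongoing backups, incremental diffs between snapshots avoid re-copying the whole image; a sketch, where snap-demo2 is a hypothetical later snapshot and the imported copy is first given the same baseline snapshot name:
rbd snap create ceph-demo/rbd-test-new.img@snap-demo    # mark the baseline on the imported copy
rbd snap create ceph-demo/rbd-test.img@snap-demo2       # newer snapshot on the source (hypothetical)
rbd export-diff --from-snap snap-demo ceph-demo/rbd-test.img@snap-demo2 /root/rbd-test.diff
rbd import-diff /root/rbd-test.diff ceph-demo/rbd-test-new.img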