手动部署ceph集群
节点规划:
主机名 public-ip Cluster-ip可选(生产环境必须) 角色
arm-openstack-01 192.168.16.42 192.168.200.42 Mon,mgr,mds,osd
arm-openstack-02 192.168.16.43 192.168.200.43 Mon,mgr,mds,osd
arm-openstack-03 192.168.16.45 192.168.200.44 Mon,mgr,mds,osd
Ceph版本:ceph-12.2.8-7.ky10
环境初始化
没有特殊说明,以下操作在所有节点执行。
所有节点必须配置主机名
#arm-openstack-01
hostnamectl set-hostname arm-openstack-01
#arm-openstack-02
hostnamectl set-hostname arm-openstack-02
#arm-openstack-03
hostnamectl set-hostname arm-openstack-03
配置hosts解析
cat >> /etc/hosts <<EOF
192.168.16.42 arm-openstack-01
192.168.16.43 arm-openstack-02
192.168.16.45 arm-openstack-03
EOF
配置防火墙和时间同步
#关闭防火墙和selinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
#配置时间同步
yum install -y chrony
systemctl enable --now chronyd
配置yum源
#注意:这里必须使用带引号的'EOF',否则$basearch会在写入前被shell展开为空,导致baseurl错误
[root@arm-openstack-01 ~]# cat > /etc/yum.repos.d/kylin_aarch64.repo <<'EOF'
###Kylin Linux Advanced Server 10 - os repo###
[ks10-adv-os]
name = Kylin Linux Advanced Server 10 - Os
baseurl = http://update.cs2c.com.cn:8080/NS/V10/V10SP1.1/os/adv/lic/base/$basearch/
gpgcheck = 1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-kylin
enabled = 1
[ks10-adv-updates]
name = Kylin Linux Advanced Server 10 - Updates
baseurl = http://update.cs2c.com.cn:8080/NS/V10/V10SP1.1/os/adv/lic/updates/$basearch/
gpgcheck = 1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-kylin
enabled = 1
EOF
arm-openstack-01节点添加节点SSH互信
ssh-keygen -t rsa
ssh-copy-id root@arm-openstack-01
ssh-copy-id root@arm-openstack-02
ssh-copy-id root@arm-openstack-03
安装ceph依赖包
yum install -y snappy leveldb gdisk python-argparse gperftools-libs
yum install -y ceph
初始化monitor节点
[root@arm-openstack-01 ~]# uuidgen
8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
#arm-openstack-01
export cephuid=8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
#arm-openstack-02
export cephuid=8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
#arm-openstack-03
export cephuid=8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
所有节点创建Ceph配置文件:
cat > /etc/ceph/ceph.conf <<EOF
[global]
fsid = 8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
mon initial members = arm-openstack-01, arm-openstack-02, arm-openstack-03
mon host = 192.168.16.42, 192.168.16.43, 192.168.16.45
mds_cache_memory_limit = 1099511627776
mds_session_blacklist_on_timeout = False
public network = 192.168.16.0/24
#cluster network = 192.168.200.0/24 #生产必须,对应节点规划中的Cluster-ip网段
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
#注意:ceph配置中空格与下划线等价(osd pool default size 与 osd_pool_default_size 是同一项),
#同名配置重复出现时后者会覆盖前者,因此以下每项只保留一份
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
mon_max_pg_per_osd = 512
mon_osd_backfillfull_ratio = 0.75
mon_osd_full_ratio = 0.85
mon_osd_max_split_count = 512
mon_osd_nearfull_ratio = 0.7
mon_pg_warn_min_per_osd = 300
osd_deep_scrub_randomize_ratio = 0.01
osd_failsafe_full_ratio = 0.9
[osd]
#osd memory target 只保留一份(约4GB),按节点物理内存和OSD数量调整
osd_crush_update_on_start = True
osd_max_backfills = 1
osd_max_write_size = 1024
osd_memory_target = 4294967296
osd_recovery_max_active = 1
osd_recovery_max_chunk = 1048576
osd_recovery_max_single_start = 1
osd_recovery_op_priority = 1
osd_recovery_sleep = 0
osd_recovery_threads = 1
osd_scrub_begin_hour = 22
osd_scrub_end_hour = 7
EOF
以下操作在arm-openstack-01节点执行
为集群创建一个keyring,并生成一个monitor密钥。
#ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
生成administrator keyring,生成client.admin用户并将用户添加到keyring。
#ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
生成bootstrap-osd keyring,生成client.bootstrap-osd用户并将用户添加到keyring。
#ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'
将生成的密钥添加到中ceph.mon.keyring。
#ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
#ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
将所有者更改为ceph.mon.keyring。
#chown ceph:ceph /tmp/ceph.mon.keyring
使用主机名,主机IP地址和FSID生成monitor map。另存为/tmp/monmap:
#monmaptool --create --add arm-openstack-01 192.168.16.42 --add arm-openstack-02 192.168.16.43 --add arm-openstack-03 192.168.16.45 --fsid $cephuid /tmp/monmap
复制monitor map到另外2个节点
#scp /tmp/monmap root@arm-openstack-02:/tmp
#scp /tmp/monmap root@arm-openstack-03:/tmp
复制ceph.client.admin.keyring到另外2个节点
#scp /etc/ceph/ceph.client.admin.keyring root@arm-openstack-02:/etc/ceph/
#scp /etc/ceph/ceph.client.admin.keyring root@arm-openstack-03:/etc/ceph/
复制ceph.mon.keyring到另外2个节点
#scp /tmp/ceph.mon.keyring root@arm-openstack-02:/tmp/
#scp /tmp/ceph.mon.keyring root@arm-openstack-03:/tmp/
#注意修改文件权限
#arm-openstack-02
# chown ceph:ceph /tmp/ceph.mon.keyring
#arm-openstack-03
# chown ceph:ceph /tmp/ceph.mon.keyring
创建monitor数据目录
#arm-openstack-01
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-arm-openstack-01
#arm-openstack-02
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-arm-openstack-02
#arm-openstack-03
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-arm-openstack-03
用monitor map和keyring填充monitor守护程序。
#arm-openstack-01
sudo -u ceph ceph-mon --mkfs -i arm-openstack-01 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#arm-openstack-02
sudo -u ceph ceph-mon --mkfs -i arm-openstack-02 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#arm-openstack-03
sudo -u ceph ceph-mon --mkfs -i arm-openstack-03 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
查看生成的文件
[root@arm-openstack-01 ~]# ls /var/lib/ceph/mon/ceph-arm-openstack-01/
keyring kv_backend store.db
启动monitor服务
#arm-openstack-01
systemctl restart ceph-mon@arm-openstack-01
systemctl enable ceph-mon@arm-openstack-01
#arm-openstack-02
systemctl restart ceph-mon@arm-openstack-02
systemctl enable ceph-mon@arm-openstack-02
#arm-openstack-03
systemctl restart ceph-mon@arm-openstack-03
systemctl enable ceph-mon@arm-openstack-03
初始化manager
为守护程序创建身份验证密钥,将该密钥放入/var/lib/ceph/mgr/ceph-node/目录下:
#arm-openstack-01
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-arm-openstack-01
ceph auth get-or-create mgr.arm-openstack-01 mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-arm-openstack-01/keyring
chown ceph:ceph /var/lib/ceph/mgr/ceph-arm-openstack-01/keyring
sudo -u ceph cat /var/lib/ceph/mgr/ceph-arm-openstack-01/keyring
[mgr.arm-openstack-01]
key = AQDMt+9ejx8HLhAA6IqshHKAg72p8MW/s6cdLg==
#arm-openstack-02
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-arm-openstack-02
ceph auth get-or-create mgr.arm-openstack-02 mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-arm-openstack-02/keyring
chown ceph:ceph /var/lib/ceph/mgr/ceph-arm-openstack-02/keyring
sudo -u ceph cat /var/lib/ceph/mgr/ceph-arm-openstack-02/keyring
[mgr.arm-openstack-02]
key = AQDSt+9e+T6kKRAAW8A6zelgtQiHbdmaNCSGag==
#arm-openstack-03
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-arm-openstack-03
ceph auth get-or-create mgr.arm-openstack-03 mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-arm-openstack-03/keyring
chown ceph:ceph /var/lib/ceph/mgr/ceph-arm-openstack-03/keyring
sudo -u ceph cat /var/lib/ceph/mgr/ceph-arm-openstack-03/keyring
[mgr.arm-openstack-03]
key = AQDYt+9e9iE2EBAAP+cyRwxGP80lDDzwc/eFGA==
启动ceph-mgr守护程序:
#arm-openstack-01
systemctl restart ceph-mgr@arm-openstack-01
systemctl enable ceph-mgr@arm-openstack-01
#arm-openstack-02
systemctl restart ceph-mgr@arm-openstack-02
systemctl enable ceph-mgr@arm-openstack-02
#arm-openstack-03
systemctl restart ceph-mgr@arm-openstack-03
systemctl enable ceph-mgr@arm-openstack-03
通过ceph status查看输出来检查mgr是否出现
ceph status
添加OSD
arm-openstack-01上面操作:
复制keyring到其他2个节点
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@arm-openstack-02:/var/lib/ceph/bootstrap-osd/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@arm-openstack-03:/var/lib/ceph/bootstrap-osd/
确认磁盘
[root@arm-openstack-01 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 446.6G 0 disk
├─sda1 8:1 0 200M 0 part /boot/efi
├─sda2 8:2 0 1G 0 part /boot
└─sda3 8:3 0 445.4G 0 part
├─klas-root 252:0 0 391.4G 0 lvm /
├─klas-swap 252:1 0 4G 0 lvm [SWAP]
└─klas-backup 252:2 0 50G 0 lvm
sdb 8:16 0 837.9G 0 disk
sdc 8:32 0 837.9G 0 disk
sdd 8:48 0 837.9G 0 disk
可见规划该主机的3个磁盘可以加入到集群
其他两台节点上同理,确认磁盘。
3个节点上执行(按需求添加):
ceph-volume lvm create --data /dev/sdb
举例:
ceph-volume lvm create --filestore --data {vg name/lv name} --journal {vg name/lv name}
参数如下:
optional arguments:
-h, --help show this help message and exit
--osd-id OSD_ID Reuse an existing OSD id
--osd-fsid OSD_FSID Reuse an existing OSD fsid
--crush-device-class CRUSH_DEVICE_CLASS
Crush device class to assign this OSD to
--dmcrypt Enable device encryption via dm-crypt
--no-systemd Skip creating and enabling systemd units and starting
OSD services when activating
required arguments:
--data DATA OSD data path. A physical device or logical volume
filestore:
--filestore Use the filestore objectstore
--journal JOURNAL (REQUIRED) A logical volume (vg_name/lv_name), or path
to a device
bluestore:
--bluestore Use the bluestore objectstore
--block.db BLOCK_DB Path to bluestore block.db logical volume or device
--block.wal BLOCK_WAL
Path to bluestore block.wal logical volume or device
确认添加的磁盘tree
ceph osd tree
目标节点上启动对应的osd
#arm-openstack-01
systemctl restart ceph-osd@0
systemctl enable ceph-osd@0
#arm-openstack-02
systemctl restart ceph-osd@1
systemctl enable ceph-osd@1
#arm-openstack-03
systemctl restart ceph-osd@2
systemctl enable ceph-osd@2
查看集群状态
[root@arm-openstack-01 ~]# ceph -s
cluster:
id: 8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
health: HEALTH_OK
services:
mon: 3 daemons, quorum arm-openstack-01,arm-openstack-02,arm-openstack-03
mgr: arm-openstack-03(active), standbys: arm-openstack-02, arm-openstack-01
osd: 6 osds: 6 up, 6 in #我这里已添加6块osd
data:
pools: 0 pools, 0 pgs
objects: 2.57k objects, 19.7GiB
usage: 64.3GiB used, 4.85TiB / 4.91TiB avail
添加mds
构建第一个mds
1)为mds元数据服务器创建一个目录
[root@arm-openstack-01 ~]#mkdir -p /var/lib/ceph/mds/ceph-arm-openstack-01
#ceph-mds以ceph用户运行,目录及其中keyring需归属ceph用户,否则服务无法读取密钥
[root@arm-openstack-01 ~]#chown -R ceph:ceph /var/lib/ceph/mds/ceph-arm-openstack-01
2)为bootstrap-mds客户端创建一个密钥 注:(如果下面的密钥在目录里已生成可以省略此步骤
[root@arm-openstack-01~]#ceph-authtool --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring --gen-key -n client.bootstrap-mds
creating /var/lib/ceph/bootstrap-mds/ceph.keyring
[root@arm-openstack-01 ~]# cat /var/lib/ceph/bootstrap-mds/ceph.keyring
[client.bootstrap-mds]
key = AQD489thY6q9NRAAdoviwR8xz2NsjMIc2xF72w==
3)在ceph auth库中创建bootstrap-mds客户端,赋予权限添加之前创建的密钥 注(查看ceph auth list 用户权限认证列表 如果已有client.bootstrap-mds此用户,此步骤可以省略)
[root@arm-openstack-01 ~]#ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
added key for client.bootstrap-mds
[root@arm-openstack-01 ~]#ceph auth list |grep -A 2 client.bootstrap-mds
installed auth entries:
client.bootstrap-mds
key: AQCOe9pcEuCiIRAAHvGgC98ZGYKE5klCw4kAfA==
caps: [mon] allow profile bootstrap-mds
4)在root家目录里创建ceph.bootstrap-mds.keyring文件
[root@arm-openstack-01 ~]#touch /root/ceph.bootstrap-mds.keyring
5)把keyring /var/lib/ceph/bootstrap-mds/ceph.keyring里的密钥导入家目录下的ceph.bootstrap-mds.keyring文件里
[root@arm-openstack-01~]#ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
importing contents of /var/lib/ceph/bootstrap-mds/ceph.keyring into ceph.bootstrap-mds.keyring
cat /root/ceph.bootstrap-mds.keyring
[client.bootstrap-mds]
key = AQCOe9pcEuCiIRAAHvGgC98ZGYKE5klCw4kAfA==
6)在ceph auth库中创建mds.arm-openstack-01用户,并赋予权限和创建密钥,密钥保存在/var/lib/ceph/mds/ceph-arm-openstack-01/keyring文件里
[root@arm-openstack-01~]#ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.arm-openstack-01 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-arm-openstack-01/keyring
[root@arm-openstack-01~]#cat /var/lib/ceph/mds/ceph-arm-openstack-01/keyring
[mds.arm-openstack-01]
key = AQDn9Ntha5ljAhAAk3XkdnIlYPQy6BbreLmkBQ==
[root@arm-openstack-01~]# ceph auth list | grep -A 4 mds.arm-openstack-01
installed auth entries:
mds.arm-openstack-01
key: AQDn9Ntha5ljAhAAk3XkdnIlYPQy6BbreLmkBQ==
caps: [mds] allow
caps: [mon] allow profile mds
caps: [osd] allow rwx
7)启动mds,并设置开机自启
[root@arm-openstack-01~]#systemctl start ceph-mds@arm-openstack-01
[root@arm-openstack-01~]#systemctl enable ceph-mds@arm-openstack-01
[root@arm-openstack-01~]#systemctl status ceph-mds@arm-openstack-01
构建第二个节点,第三个节点
8)拷贝密钥文件到arm-openstack-02,arm-openstack-03
[root@arm-openstack-01~]#scp ceph.bootstrap-mds.keyring arm-openstack-02:/root/ceph.bootstrap-mds.keyring
[root@arm-openstack-01~]#scp /var/lib/ceph/bootstrap-mds/ceph.keyring arm-openstack-02:/var/lib/ceph/bootstrap-mds/ceph.keyring
[root@arm-openstack-01~]#scp ceph.bootstrap-mds.keyring arm-openstack-03:/root/ceph.bootstrap-mds.keyring
[root@arm-openstack-01~]#scp /var/lib/ceph/bootstrap-mds/ceph.keyring arm-openstack-03:/var/lib/ceph/bootstrap-mds/ceph.keyring
9)在arm-openstack-02,arm-openstack-03上创建mds元数据目录
[root@arm-openstack-02~]#mkdir -p /var/lib/ceph/mds/ceph-arm-openstack-02
[root@arm-openstack-02~]#chown -R ceph:ceph /var/lib/ceph/mds/ceph-arm-openstack-02
[root@arm-openstack-03~]#mkdir -p /var/lib/ceph/mds/ceph-arm-openstack-03
[root@arm-openstack-03~]#chown -R ceph:ceph /var/lib/ceph/mds/ceph-arm-openstack-03
10)在ceph auth库中创建mds.arm-openstack-02用户,并赋予权限和创建密钥,密钥保存在/var/lib/ceph/mds/ceph-arm-openstack-02/keyring文件里
[root@arm-openstack-02~]#ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.arm-openstack-02 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-arm-openstack-02/keyring
[root@arm-openstack-03~]#ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.arm-openstack-03 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-arm-openstack-03/keyring
11)启动mds,并设置开机自启
[root@arm-openstack-02 ~]#systemctl start ceph-mds@arm-openstack-02
[root@arm-openstack-02 ~]#systemctl enable ceph-mds@arm-openstack-02
[root@arm-openstack-03 ~]#systemctl start ceph-mds@arm-openstack-03
[root@arm-openstack-03 ~]#systemctl enable ceph-mds@arm-openstack-03
12)创建cephfs所需的pool,并创建fs,
[root@arm-openstack-01 ~]#ceph osd pool create cephfs_data 128
[root@arm-openstack-01 ~]#ceph osd pool create cephfs_metadata 128
[root@arm-openstack-01~]#ceph fs new cephfs cephfs_metadata cephfs_data
[root@arm-openstack-01 ~]# ceph fs ls
[root@arm-openstack-01 ~]# ceph mds stat
[root@arm-openstack-01 ~]# ceph -s
13)客户端mount cephFS
yum -y install ceph-fuse
ceph-authtool -p /etc/ceph/ceph.client.admin.keyring > admin.key
chmod 600 admin.key
mount.ceph arm-openstack-01:6789:/ /mnt -o name=admin,secretfile=admin.key
参考网站如下:
https://blog.youkuaiyun.com/networken/article/details/106892818
https://www.cnblogs.com/luoliyu/articles/10863096.html