setenforce 0
systemctl stop firewalld.service // stop the firewall on every node
hostnamectl set-hostname <hostname> // set each node's hostname
Add host entries
vi /etc/hosts
172.16.0.45 ceph01
172.16.0.46 ceph02
172.16.0.47 ceph03
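To confirm the names resolve before going further, a quick check from any node (hostnames as defined above):
ping -c 1 ceph02
ping -c 1 ceph03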
Install ceph-deploy
sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
vi /etc/yum.repos.d/ceph.repo and add:
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
sudo yum update
sudo yum install -y ceph-deploy
Install and configure NTP
sudo yum install -y ntp ntpdate ntp-doc
chkconfig ntpd on
tzselect
sudo timedatectl set-timezone 'Asia/Shanghai'
ntpdate pool.ntp.org
## NTP configuration (example server entries below)
# vi /etc/ntp.conf
## Start the NTP server
# /etc/init.d/ntpd start
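A minimal sketch of the server entries that could go into /etc/ntp.conf (the pool.ntp.org hosts are illustrative; any reachable time source works), plus a sanity check once ntpd is running:
server 0.pool.ntp.org iburst
server 1.pool.ntp.org iburst
server 2.pool.ntp.org iburst
ntpq -p // verify that peers are being polled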
Install SSH
sudo yum install openssh-server
Create the ceph-deploy user {lam} (on every node)
sudo useradd -d /home/lam -m lam
sudo passwd lam
echo "lam ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/lam
sudo chmod 0440 /etc/sudoers.d/lam
vi /etc/sudoers // confirm the wheel entry
## Allows people in group wheel to run all commands
%wheel ALL=(ALL) ALL
usermod -g root lam
su - lam
Set up SSH on node1
ssh-keygen // press Enter at every prompt
Generating public/private rsa key pair.
Enter file in which to save the key (/home/lam/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/lam/.ssh/id_rsa.
Your public key has been saved in /home/lam/.ssh/id_rsa.pub.
Copy the SSH key to each node
ssh-copy-id lam@ceph01
ssh-copy-id lam@ceph02
ssh-copy-id lam@ceph03
vi ~/.ssh/config
Host node1
Hostname ceph01
User lam
Host node2
Hostname ceph02
User lam
Host node3
Hostname ceph03
User lam
sudo chmod 600 ~/.ssh/config // restrict permissions on the config file
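To confirm passwordless SSH works before running ceph-deploy, a quick check using the aliases defined in ~/.ssh/config:
ssh node2 hostname
ssh node3 hostname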
sudo setenforce 0
sudo yum install yum-plugin-priorities
Create partitions
# parted /dev/sdb
GNU Parted 3.1
Using /dev/sdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary xfs 0% 100%
(parted) quit
# mkfs.xfs /dev/sdb1
## Optionally script this for all disks
# vi parted.sh
#!/bin/bash
set -e
if [ ! -x "/sbin/parted" ]; then
echo "This script requires /sbin/parted to run!" >&2
exit 1
fi
DISKS="b c"   # data disks: /dev/sdb and /dev/sdc
for i in ${DISKS}; do
echo "Creating partitions on /dev/sd${i} ..."
parted -a optimal --script /dev/sd${i} -- mktable gpt
parted -a optimal --script /dev/sd${i} -- mkpart primary xfs 0% 100%
sleep 1
#echo "Formatting /dev/sd${i}1 ..."
mkfs.xfs -f /dev/sd${i}1 &
done
SSDS="d"   # SSD for journal partitions: /dev/sdd
for i in ${SSDS}; do
parted -s /dev/sd${i} mklabel gpt
parted -s /dev/sd${i} mkpart primary 0% 50%
parted -s /dev/sd${i} mkpart primary 51% 100%
sleep 1
#echo "Formatting /dev/sd${i}1 ..."
mkfs.xfs -f /dev/sd${i}1 &
sleep 1
#echo "Formatting /dev/sd${i}2 ..."
mkfs.xfs -f /dev/sd${i}2 &
done
# wait for the backgrounded mkfs jobs to finish before using the partitions
wait
# sh parted.sh
ls /dev | grep sd
sudo fdisk -l /dev/sdb
Create the Ceph cluster
su - lam
# mkdir ~/ceph-cluster
# cd ~/ceph-cluster
If anything fails, purge and start over (Ceph must be reinstalled afterwards):
ceph-deploy purge {ceph-node} [{ceph-node}]
ceph-deploy purgedata {ceph-node} [{ceph-node}]
ceph-deploy forgetkeys
rm ceph.*
ceph-deploy new ceph01
[root@ceph01 ceph-cluster]# ls -l
total 12
-rw-r--r-- 1 root root 195 Dec 28 14:45 ceph.conf
-rw-r--r-- 1 root root 2915 Dec 28 14:45 ceph-deploy-ceph.log
-rw------- 1 root root 73 Dec 28 14:45 ceph.mon.keyring
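ceph-deploy new only writes the bare monitor settings; it is common to add the public network to ceph.conf before deploying. A sketch assuming the 172.16.0.0/24 subnet used in /etc/hosts above (adjust to the real netmask):
# append under [global] in ~/ceph-cluster/ceph.conf
public_network = 172.16.0.0/24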
On the admin node (ceph01):
ceph-deploy install ceph01 ceph02 ceph03
ceph-deploy mon create-initial
Check the disks on each Ceph storage node:
# ceph-deploy disk list ceph01
# ceph-deploy disk list ceph02
# ceph-deploy disk list ceph03
////// Initialize the Ceph disks, then create the OSD storage nodes; each data disk is paired one-to-one with a journal partition:
///////// Create the ceph-osd1 storage node
/////////# ceph-deploy disk zap ceph01:sdb ceph01:sdc ceph02:sdb ceph02:sdc ceph03:sdb ceph03:sdc
/////////# ceph-deploy osd create ceph01:sdb:/dev/sdd1 ceph01:sdc:/dev/sdd2
/////////# ceph-deploy osd create ceph02:sdb:/dev/sdd1 ceph02:sdc:/dev/sdd2
/////////# ceph-deploy osd create ceph03:sdb:/dev/sdd1 ceph03:sdc:/dev/sdd2
ceph-deploy osd prepare ceph01:/dev/sdb1 ceph01:/dev/sdc1 ceph02:/dev/sdb1 ceph02:/dev/sdc1 ceph03:/dev/sdb1 ceph03:/dev/sdc1
ceph-deploy osd prepare ceph01:/dev/sdd1 ceph01:/dev/sdd2 ceph02:/dev/sdd1 ceph02:/dev/sdd2 ceph03:/dev/sdd1 ceph03:/dev/sdd2
ceph-deploy osd activate ceph01:/dev/sdb1 ceph01:/dev/sdc1 ceph02:/dev/sdb1 ceph02:/dev/sdc1 ceph03:/dev/sdb1 ceph03:/dev/sdc1
ceph-deploy osd activate ceph01:/dev/sdd1 ceph01:/dev/sdd2 ceph02:/dev/sdd1 ceph02:/dev/sdd2 ceph03:/dev/sdd1 ceph03:/dev/sdd2
ceph-deploy admin ceph01 ceph02 ceph03
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
ceph -s
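If ceph -s is not yet HEALTH_OK, ceph osd tree shows at a glance which of the six OSDs are up and in:
ceph osd tree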
Packet loss (restart the affected OSD if heartbeats are being dropped):
[root@ceph01 ~]# sudo systemctl stop ceph-osd@0
[root@ceph01 ~]# sudo systemctl start ceph-osd@0
No active mgr (HEALTH_WARN on Luminous):
ceph-deploy mgr create host-name
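For this layout the mgr daemon would typically live on the monitor host, e.g.:
ceph-deploy mgr create ceph01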
ceph tell osd.0 config set osd_heartbeat_grace 20
ceph tell osd.0 config set osd_heartbeat_interval 5
ceph tell osd.1 config set osd_heartbeat_grace 20
ceph tell osd.1 config set osd_heartbeat_interval 5
ceph tell osd.2 config set osd_heartbeat_grace 20
ceph tell osd.2 config set osd_heartbeat_interval 5
ceph tell osd.3 config set osd_heartbeat_grace 20
ceph tell osd.3 config set osd_heartbeat_interval 5
ceph tell osd.4 config set osd_heartbeat_grace 20
ceph tell osd.4 config set osd_heartbeat_interval 5
ceph tell osd.5 config set osd_heartbeat_grace 20
ceph tell osd.5 config set osd_heartbeat_interval 5
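These tell commands only change the running daemons. To make the heartbeat settings survive OSD restarts, one sketch (assuming the ~/ceph-cluster admin directory above) is to add them to ceph.conf and push it to every node:
# append to ~/ceph-cluster/ceph.conf
[osd]
osd heartbeat grace = 20
osd heartbeat interval = 5
# push the updated conf out; restart the OSDs for it to take effect
ceph-deploy --overwrite-conf config push ceph01 ceph02 ceph03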