raid0
raid0 : 至少两块盘 读写速度快但安全性不高
添加几个硬盘(5G 3个)
[root@localhost ~]# mount /dev/cdrom /opt/centos (根据自己的yum源进行挂载)
[root@localhost ~]# yum install mdadm -y
[root@localhost ~]# mdadm --help (可以不输此命令)
[root@localhost ~]# mdadm --help-options
Some common options are:
--assemble -A : Assemble an array
--build -B : Build an array without metadata
--create -C : Create a new array
--detail -D : Display details of an array
--examine -E : Examine superblock on an array component
[root@localhost ~]# mdadm -C --help
[root@localhost ~]# fdisk /dev/sdb 分两个区,每个为1G
[root@localhost ~]# mdadm -C /dev/md0 -l 0 -n 2 /dev/sdb[1-2]
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
-C /dev/mdX 指定要创建的raid设备名称
-l raid等级
-n 磁盘数量
[root@localhost ~]# cat /proc/mdstat 查看所有运行的RAID阵列的状态
Personalities : [raid0]
md0 : active raid0 sdb2[1] sdb1[0]
2095104 blocks super 1.2 512k chunks
unused devices:
[root@localhost ~]# mdadm -D /dev/md0 查询此raid盘的详情信息
/dev/md0:
Version : 1.2
Creation Time : Tue Dec 29 00:39:49 2020
Raid Level : raid0
Array Size : 2095104 (2046.34 MiB 2145.39 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Tue Dec 29 00:39:49 2020
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Chunk Size : 512K
Name : localhost.localdomain:0 (local to host localhost.localdomain)
UUID : 5d7adbe8:29d1a72c:36d6aa16:2f5ec1d4
Events : 0
Number Major Minor RaidDevice State
0 8 17 0 active sync /dev/sdb1
1 8 18 1 active sync /dev/sdb2
[root@localhost ~]# mdadm -Ds 查询uuid
ARRAY /dev/md0 metadata=1.2 name=localhost.localdomain:0 UUID=5d7adbe8:29d1a72c:36d6aa16:2f5ec1d4
给raid0分区:
[root@localhost ~]# fdisk /dev/md0 (分区,为1G)
[root@localhost ~]# lsblk 查看分区详情
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 60G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 59.5G 0 part
├─centos-root 253:0 0 38.6G 0 lvm /
├─centos-swap 253:1 0 2G 0 lvm [SWAP]
└─centos-home 253:2 0 18.9G 0 lvm /home
sdb 8:16 0 5G 0 disk
├─sdb1 8:17 0 1G 0 part
│ └─md0 9:0 0 2G 0 raid0
│ └─md0p1 259:1 0 1G 0 md
└─sdb2 8:18 0 1G 0 part
└─md0 9:0 0 2G 0 raid0
└─md0p1 259:1 0 1G 0 md
sdc 8:32 0 5G 0 disk
sdd 8:48 0 5G 0 disk
sr0 11:0 1 4G 0 rom /opt/centos
格式化
[root@localhost ~]# mkfs.ext4 /dev/md0p1 格式化
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=128 blocks, Stripe width=256 blocks
65536 inodes, 262144 blocks
13107 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=268435456
8 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done
[root@localhost ~]# mkdir /opt/md0
[root@localhost ~]# mount /dev/md0p1 /opt/md0
[root@localhost ~]# df -h 查看挂载情况
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 39G 874M 38G 3% /
devtmpfs 903M 0 903M 0% /dev
tmpfs 913M 0 913M 0% /dev/shm
tmpfs 913M 8.5M 904M 1% /run
tmpfs 913M 0 913M 0% /sys/fs/cgroup
/dev/mapper/centos-home 19G 33M 19G 1% /home
/dev/sda1 497M 125M 373M 25% /boot
tmpfs 183M 0 183M 0% /run/user/0
/dev/sr0 4.1G 4.1G 0 100% /opt/centos
/dev/md0p1 976M 2.6M 907M 1% /opt/md0
raid1
[root@localhost ~]# fdisk /dev/sdc (创建3个分区,都为1G)
[root@localhost ~]# mdadm -C /dev/md1 -l 1 -n 2 -x 1 /dev/sdc[1-3]
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array?
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
-x:为热备盘数
-l raid等级
-n 磁盘数量
查看所有运行的RAID阵列的状态
[root@jing ~]# cat /proc/mdstat 查看raid详情
Personalities : [raid0] [raid1]
md1 : active raid1 sdc3[2](S) sdc2[1] sdc1[0]
1047552 blocks super 1.2 [2/2] [UU]
md0 : active raid0 sdb2[1] sdb1[0]
2095104 blocks super 1.2 512k chunks
unused devices: <none>
(S) 表示该盘为热备(空闲)盘
查询uuid
[root@jing ~]# mdadm -Ds
ARRAY /dev/md0 metadata=1.2 name=jing:0 UUID=a53a57ac:5eb1561f:1409a534:d5e28d8b
ARRAY /dev/md1 metadata=1.2 spares=1 name=jing:1 UUID=61b4eda5:dd2d14c9:4bec3e3b:a6e3457a
查询此raid盘的详情信息
[root@localhost ~]# mdadm -D /dev/md1
Number Major Minor RaidDevice State
0 8 33 0 active sync /dev/sdc1
1 8 34 1 active sync /dev/sdc2
[root@jing ~]# fdisk /dev/md1 (创建分区,为500M)
[root@jing ~]# mkfs.ext4 /dev/md1p1 格式化
[root@jing ~]# mkdir /opt/md1
[root@jing ~]# mount /dev/md1p1 /opt/md1 挂载
[root@jing ~]# df -h 查看挂载情况
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 17G 948M 17G 6% /
devtmpfs 901M 0 901M 0% /dev
tmpfs 912M 0 912M 0% /dev/shm
tmpfs 912M 8.7M 903M 1% /run
tmpfs 912M 0 912M 0% /sys/fs/cgroup
/dev/sda1 1014M 143M 872M 15% /boot
tmpfs 183M 0 183M 0% /run/user/0
/dev/sr0 4.3G 4.3G 0 100% /opt/cdrom
/dev/md0p1 976M 2.6M 907M 1% /opt/md0
/dev/md1p1 477M 2.3M 445M 1% /opt/md1
raid1中/dev/sdc1出现故障,观察/dev/sdc3 备用盘能否自动顶替故障盘
先设置动态查看设备状态 (watch: -n 可以实时查看改变的内容)
[root@jing ~]# watch -n 1 cat /proc/mdstat
Every 1.0s: cat /proc/mdstat Tue Dec 29 10:23:13 2020
Personalities : [raid0] [raid1]
md1 : active raid1 sdc3[2](S) sdc2[1] sdc1[0]
1047552 blocks super 1.2 [2/2] [UU]
md0 : active raid0 sdb2[1] sdb1[0]
2095104 blocks super 1.2 512k chunks
unused devices: <none>
将/dev/sdc1指定为故障状态
[root@jing ~]# mdadm -f /dev/md1 /dev/sdc1
mdadm: set /dev/sdc1 faulty in /dev/md1
-f 故障
-r 删除
再查看状态
Every 1.0s: cat /proc/mdstat Tue Dec 29 10:25:48 2020
Personalities : [raid0] [raid1]
md1 : active raid1 sdc3[2] sdc2[1] sdc1[0](F)
1047552 blocks super 1.2 [2/2] [UU]
md0 : active raid0 sdb2[1] sdb1[0]
2095104 blocks super 1.2 512k chunks
unused devices: <none>
最后热插拔:(以下命令相当于你拔掉U盘时,右击安全退出)
[root@localhost ~]# mdadm -r /dev/md1 /dev/sdc1
mdadm: hot removed /dev/sdc1 from /dev/md1
raid5
[root@jing ~]# fdisk /dev/sdd (3个主分区,1个扩展分区,1个逻辑分区)
[root@jing ~]# mdadm -C /dev/md5 -l 5 -n 3 -x 1 /dev/sdd[1-4]
[root@jing ~]# partprobe 刷新分区
测试:
1、先分区
[root@localhost ~]# fdisk /dev/sdb
刷新分区
[root@localhost ~]# partprobe
2、故障移除
[root@localhost ~]# mdadm -f /dev/md5 /dev/sdd1
[root@localhost ~]# mdadm -r /dev/md5 /dev/sdd1
3、在raid5上再继续扩展一个盘
[root@localhost ~]# mdadm -a /dev/md5 /dev/sdb3
mdadm: added /dev/sdb3
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4]
md5 : active raid5 sdb3[5](S) sdd3[4] sdd2[1] sdd1[0]
2095104 blocks super 1.2 level 5, 32k chunk, algorithm 2 [3/3] [UUU]
(可以看出加进去的盘是热备盘而不是运作的盘)
[root@localhost ~]# fdisk /dev/md5
[root@jing ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 19G 0 part
├─centos-root 253:0 0 17G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 5G 0 disk
├─sdb1 8:17 0 1G 0 part
│ └─md0 9:0 0 2G 0 raid0
│ └─md0p1 259:1 0 1G 0 md /opt/md0
├─sdb2 8:18 0 1G 0 part
│ └─md0 9:0 0 2G 0 raid0
│ └─md0p1 259:1 0 1G 0 md /opt/md0
└─sdb3 8:19 0 1G 0 part
└─md5 9:5 0 2G 0 raid5
└─md5p1 259:3 0 1G 0 md
sdc 8:32 0 5G 0 disk
├─sdc1 8:33 0 1G 0 part
├─sdc2 8:34 0 1G 0 part
│ └─md1 9:1 0 1023M 0 raid1
│ └─md1p1 259:2 0 500M 0 md /opt/md1
└─sdc3 8:35 0 1G 0 part
└─md1 9:1 0 1023M 0 raid1
└─md1p1 259:2 0 500M 0 md /opt/md1
sdd 8:48 0 5G 0 disk
├─sdd1 8:49 0 1G 0 part
├─sdd2 8:50 0 1G 0 part
│ └─md5 9:5 0 2G 0 raid5
│ └─md5p1 259:3 0 1G 0 md
├─sdd3 8:51 0 1G 0 part
│ └─md5 9:5 0 2G 0 raid5
│ └─md5p1 259:3 0 1G 0 md
└─sdd4 8:52 0 1G 0 part
└─md5 9:5 0 2G 0 raid5
└─md5p1 259:3 0 1G 0 md
sr0 11:0 1 4.2G 0 rom /opt/cdrom
[root@localhost ~]# mkfs.ext4 /dev/md5p1 格式化
[root@localhost ~]# mkdir /opt/md5
[root@localhost ~]# mount /dev/md5p1 /opt/md5/ 挂载
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 17G 948M 17G 6% /
devtmpfs 901M 0 901M 0% /dev
tmpfs 912M 0 912M 0% /dev/shm
tmpfs 912M 8.7M 903M 1% /run
tmpfs 912M 0 912M 0% /sys/fs/cgroup
/dev/sda1 1014M 143M 872M 15% /boot
tmpfs 183M 0 183M 0% /run/user/0
/dev/sr0 4.3G 4.3G 0 100% /opt/cdrom
/dev/md0p1 976M 2.6M 907M 1% /opt/md0
/dev/md1p1 477M 2.3M 445M 1% /opt/md1
/dev/md5p1 976M 2.6M 907M 1% /opt/md5
raid10
[root@jing ~]# mdadm -S /dev/md0 停止指定raid
mdadm: stopped /dev/md0
故障移除
[root@jing ~]# mdadm -f /dev/md5 /dev/sdb3
mdadm: set /dev/sdb3 faulty in /dev/md5
[root@jing ~]# mdadm -r /dev/md5 /dev/sdb3
mdadm: hot removed /dev/sdb3 from /dev/md5
[root@jing ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 19G 0 part
├─centos-root 253:0 0 17G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 5G 0 disk
├─sdb1 8:17 0 1G 0 part
├─sdb2 8:18 0 1G 0 part
├─sdb3 8:19 0 1G 0 part
└─sdb4 8:20 0 1G 0 part
sdc 8:32 0 5G 0 disk
├─sdc1 8:33 0 1G 0 part
├─sdc2 8:34 0 1G 0 part
│ └─md1 9:1 0 1023M 0 raid1
│ └─md1p1 259:2 0 500M 0 md /opt/md1
└─sdc3 8:35 0 1G 0 part
└─md1 9:1 0 1023M 0 raid1
└─md1p1 259:2 0 500M 0 md /opt/md1
sdd 8:48 0 5G 0 disk
├─sdd1 8:49 0 1G 0 part
├─sdd2 8:50 0 1G 0 part
│ └─md5 9:5 0 2G 0 raid5
│ └─md5p1 259:3 0 1G 0 md /opt/md5
├─sdd3 8:51 0 1G 0 part
│ └─md5 9:5 0 2G 0 raid5
│ └─md5p1 259:3 0 1G 0 md /opt/md5
└─sdd4 8:52 0 1G 0 part
└─md5 9:5 0 2G 0 raid5
└─md5p1 259:3 0 1G 0 md /opt/md5
sr0 11:0 1 4.2G 0 rom /opt/cdrom
[root@jing ~]# mdadm -C /dev/md10 -l 10 -n 4 /dev/sdb[1-4]
mdadm: /dev/sdb1 appears to be part of a raid array:
level=raid0 devices=2 ctime=Tue Dec 29 09:47:37 2020
mdadm: /dev/sdb2 appears to be part of a raid array:
level=raid0 devices=2 ctime=Tue Dec 29 09:47:37 2020
mdadm: /dev/sdb3 appears to be part of a raid array:
level=raid5 devices=3 ctime=Tue Dec 29 11:09:46 2020
Continue creating array?
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md10 started.
[root@jing ~]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md10 : active raid10 sdb4[3] sdb3[2] sdb2[1] sdb1[0]
2095104 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
md5 : active raid5 sdd3[4] sdd4[3] sdd2[1]
2095104 blocks super 1.2 level 5, 512k chunk, algorithm 2 [3/3] [UUU]
md1 : active raid1 sdc3[2] sdc2[1]
1047552 blocks super 1.2 [2/2] [UU]
unused devices: <none>
[root@jing ~]# mdadm -D /dev/md10 查看是哪些盘或分区
/dev/md10:
Version : 1.2
Creation Time : Tue Dec 29 11:53:58 2020
Raid Level : raid10
Array Size : 2095104 (2046.00 MiB 2145.39 MB)
Used Dev Size : 1047552 (1023.00 MiB 1072.69 MB)
Raid Devices : 4
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Tue Dec 29 11:54:09 2020
State : clean
Active Devices : 4
Working Devices : 4
Failed Devices : 0
Spare Devices : 0
Layout : near=2
Chunk Size : 512K
Consistency Policy : resync
Name : jing:10 (local to host jing)
UUID : 7986a795:8d90b66f:3c4f6e0a:5e6e9055
Events : 17
Number Major Minor RaidDevice State
0 8 17 0 active sync set-A /dev/sdb1
1 8 18 1 active sync set-B /dev/sdb2
2 8 19 2 active sync set-A /dev/sdb3
3 8 20 3 active sync set-B /dev/sdb4
删除raid
[root@localhost ~]# df -Th(查看挂载情况)
[root@localhost ~]# umount /dev/md0p1 (停的时候确保所有都是umount状态)
[root@localhost ~]# mdadm -D /dev/md0 (查看是哪些盘或分区)
[root@localhost ~]# mdadm -S /dev/md0 (关闭raid)
mdadm: stopped /dev/md0
[root@localhost ~]# mdadm --misc --zero-superblock /dev/sdb[1-2] (清除raid标识,以此类推)
其他参数:
[root@localhost ~]# mdadm -S /dev/md0 (停止指定raid)
[root@localhost ~]# mdadm -Ss (停止所有raid)
[root@localhost ~]# mdadm -As /dev/md0(开启指定raid)
[root@localhost ~]# mdadm -As (开启所有raid)
命令汇总
创建
[root@localhost ~]# mdadm --create(-C) /dev/md1 --level=1(-l) --raid-devices=2(-n) /dev/sdc1 /dev/sdd1
标记故障
[root@localhost aaa]# mdadm /dev/md1 --fail(-f) /dev/sdc1
移除
[root@localhost aaa]# mdadm /dev/md1 --remove(-r) /dev/sdc1
添加
[root@localhost aaa]# mdadm /dev/md1 --add(-a) /dev/sde1
显示详情信息
[root@localhost aaa]# mdadm --detail(-D) /dev/md1
停止
[root@localhost ~]# mdadm --stop(-S) /dev/md0
清除raid标识
[root@localhost ~]# mdadm --help
[root@localhost ~]# mdadm --misc --help
[root@localhost ~]# mdadm --misc --zero-superblock /dev/sdb[1-2]