In this tutorial I am using a Debian 9 system with two identically sized disks, /dev/vda and /dev/vdb.
/dev/vdb is currently unused, and /dev/vda has the following partitions:
See the details with:
fdisk -l
/dev/vda1: / partition, ext4;
/dev/vda5: swap (a logical partition inside the extended partition /dev/vda2)
After completing this guide I will have the following situation:
/dev/md0: / partition, ext4;
/dev/md1: swap
The current situation:
df -h
root@debian9:~# df -h
Output
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 19G 969M 17G 6% /
udev 494M 4.0K 494M 1% /dev
tmpfs 201M 272K 201M 1% /run
none 5.0M 0 5.0M 0% /run/lock
none 502M 0 502M 0% /run/shm
none 100M 0 100M 0% /run/user
root@debian9:~#
fdisk -l
Output
Disk /dev/vda: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00059a4b
Device Boot Start End Blocks Id System
/dev/vda1 * 2048 39845887 19921920 83 Linux
/dev/vda2 39847934 41940991 1046529 5 Extended
/dev/vda5 39847936 41940991 1046528 82 Linux swap / Solaris
Disk /dev/vdb: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/vdb doesn’t contain a valid partition table
root@debian9:~#
Installing mdadm
First of all, install the md tools:
aptitude install initramfs-tools mdadm
To avoid a reboot, let's load a few kernel modules:
modprobe linear
modprobe multipath
modprobe raid0
modprobe raid1
modprobe raid5
modprobe raid6
modprobe raid10
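To have these modules loaded automatically at boot as well, you could register them in /etc/modules (a minimal sketch; trim the list to the RAID levels you actually use):
# append each module name to /etc/modules unless it is already listed
for m in linear multipath raid0 raid1 raid5 raid6 raid10; do
  grep -qx "$m" /etc/modules || echo "$m" >> /etc/modules
done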
Now verify that the RAID personalities are loaded:
cat /proc/mdstat
root@debian9:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
unused devices: <none>
root@debian9:~#
Preparing the second disk
To create a software RAID1 on a running system, we have to prepare the second disk added to the system (in this case /dev/vdb) for RAID1, then copy the contents from the first disk (/dev/vda) to it, and finally add the first disk to the RAID1 array.
Let's copy the partition table from /dev/vda to /dev/vdb so that both disks have exactly the same layout.
For MBR:
sfdisk -d /dev/vda | sfdisk --force /dev/vdb
For GPT:
apt-get install gdisk
sgdisk -R=/dev/vdb /dev/vda
sgdisk -G /dev/vdb
Note the sgdisk argument order: -R= names the destination disk and the source disk is the positional argument, so the line above replicates /dev/vda onto /dev/vdb; -G then randomizes the GUIDs on the copy.
root@debian9:~# sfdisk -d /dev/vda | sfdisk --force /dev/vdb
Checking that no-one is using this disk right now ...
Warning: extended partition does not start at a cylinder boundary.
DOS and Linux will interpret the contents differently.
OK
Disk /dev/vdb: 2610 cylinders, 255 heads, 63 sectors/track
sfdisk: ERROR: sector 0 does not have an msdos signature
/dev/vdb: unrecognized partition table type
Old situation:
No partitions found
New situation:
Units = sectors of 512 bytes, counting from 0
Device Boot Start End #sectors Id System
/dev/vdb1 * 2048 39845887 39843840 83 Linux
/dev/vdb2 39847934 41940991 2093058 5 Extended
/dev/vdb3 0 - 0 0 Empty
/dev/vdb4 0 - 0 0 Empty
/dev/vdb5 39847936 41940991 2093056 82 Linux swap / Solaris
Warning: partition 1 does not end at a cylinder boundary
Warning: partition 2 does not start at a cylinder boundary
Warning: partition 2 does not end at a cylinder boundary
Warning: partition 5 does not end at a cylinder boundary
Successfully wrote the new partition table
Re-reading the partition table ...
If you created or changed a DOS partition, /dev/foo7, say, then use dd(1)
to zero the first 512 bytes: dd if=/dev/zero of=/dev/foo7 bs=512 count=1
(See fdisk(8).)
root@debian9:~#
And the output of the command:
fdisk -l
root@debian9:~# fdisk -l
Disk /dev/vda: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00059a4b
Device Boot Start End Blocks Id System
/dev/vda1 * 2048 39845887 19921920 83 Linux
/dev/vda2 39847934 41940991 1046529 5 Extended
/dev/vda5 39847936 41940991 1046528 82 Linux swap / Solaris
Disk /dev/vdb: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Device Boot Start End Blocks Id System
/dev/vdb1 * 2048 39845887 19921920 83 Linux
/dev/vdb2 39847934 41940991 1046529 5 Extended
/dev/vdb5 39847936 41940991 1046528 82 Linux swap / Solaris
root@debian9:~#
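As a quick sanity check that both layouts really match, you can diff the sfdisk dumps (a sketch using bash process substitution; the sed only normalizes the device names so the dumps are comparable, and the disk identifier lines may still differ):
# expect little or no output apart from differing disk identifiers
diff <(sfdisk -d /dev/vda | sed 's/vda/DISK/g') <(sfdisk -d /dev/vdb | sed 's/vdb/DISK/g')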
Change the partition types on /dev/vdb to Linux raid autodetect:
sfdisk --change-id /dev/vdb 1 fd
sfdisk --change-id /dev/vdb 5 fd
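On newer util-linux releases --change-id is deprecated; if your sfdisk supports it, the equivalent spelling is:
sfdisk --part-type /dev/vdb 1 fd
sfdisk --part-type /dev/vdb 5 fd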
To make sure that there are no remains from previous RAID installations on /dev/vdb, we run the following commands:
mdadm --zero-superblock /dev/vdb1
mdadm --zero-superblock /dev/vdb5
If you receive the following error messages, there are no remains from previous RAID installations and there is nothing to worry about:
root@debian9:~# mdadm --zero-superblock /dev/vdb1
mdadm: Unrecognised md component device - /dev/vdb1
root@debian9:~# mdadm --zero-superblock /dev/vdb5
mdadm: Unrecognised md component device - /dev/vdb5
root@debian9:~#
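Optionally, you can also clear stale filesystem signatures on the new partitions with wipefs from util-linux (destructive, so double-check the device names first):
wipefs -a /dev/vdb1
wipefs -a /dev/vdb5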
Creating RAID arrays
Now use mdadm to create the RAID arrays. We mark the first drive (vda) as "missing" so that it doesn't wipe out our existing data:
mdadm --create /dev/md0 --level=1 --raid-disks=2 missing /dev/vdb1
mdadm --create /dev/md1 --level=1 --raid-disks=2 missing /dev/vdb5
root@debian9:~# mdadm --create /dev/md0 --level=1 --raid-disks=2 missing /dev/vdb1
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
root@debian9:~# mdadm --create /dev/md1 --level=1 --raid-disks=2 missing /dev/vdb5
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
root@debian9:~#
Check the status:
cat /proc/mdstat
root@debian9:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md1 : active raid1 vdb5[1]
1045952 blocks super 1.2 [2/1] [_U]
md0 : active raid1 vdb1[1]
19905408 blocks super 1.2 [2/1] [_U]
unused devices: <none>
root@debian9:~#
The output above means that we have two degraded arrays ([_U] or [U_] means that an array is degraded, while [UU] means that the array is OK).
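For a more detailed view of an array (device roles, rebuild progress, UUID), you can also use:
mdadm --detail /dev/md0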
Create the filesystems on the RAID arrays (ext4 on /dev/md0 and swap on /dev/md1):
mkfs.ext4 /dev/md0
mkswap /dev/md1
Output
root@debian9:~# mkfs.ext4 /dev/md0
mke2fs 1.42.5 (29-Jul-2012)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1245184 inodes, 4976352 blocks
248817 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=0
152 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
root@debian9:~# mkswap /dev/md1
mkswap: /dev/md1: warning: don't erase bootbits sectors
on whole disk. Use -f to force.
Setting up swapspace version 1, size = 1045948 KiB
no label, UUID=728f7cfe-bd95-43e5-906d-c8a70023d081
root@debian9:~#
Adjust the mdadm configuration file, which doesn't contain any information about the RAID arrays yet:
cp /etc/mdadm/mdadm.conf /etc/mdadm/mdadm.conf_orig
mdadm --examine --scan >> /etc/mdadm/mdadm.conf
Display the content of /etc/mdadm/mdadm.conf:
cat /etc/mdadm/mdadm.conf
root@debian9:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default (built-in), scan all partitions (/proc/partitions) and all
# containers for MD superblocks. alternatively, specify devices to scan, using
# wildcards if desired.
#DEVICE partitions containers
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# instruct the monitoring daemon where to send mail alerts
MAILADDR root
# definitions of existing MD arrays
# This file was auto-generated on Tue, 23 Oct 2012 04:36:40 -0700
# by mkconf $Id$
root@debian9:~#
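The file should also end with the ARRAY definitions appended by mdadm --examine --scan. Here is a quick check (a sketch) that re-appends them if they are missing; without them, update-initramfs will later warn that mdadm.conf defines no arrays:
grep -q '^ARRAY' /etc/mdadm/mdadm.conf || mdadm --examine --scan >> /etc/mdadm/mdadm.conf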
Adjusting the system to RAID1
Let’s mount /dev/md0:
mkdir /mnt/md0
mount /dev/md0 /mnt/md0
Check the current mounts:
mount
root@debian9:~# mount
/dev/vda1 on / type ext4 (rw,errors=remount-ro)
proc on /proc type proc (rw,noexec,nosuid,nodev)
sysfs on /sys type sysfs (rw,noexec,nosuid,nodev)
none on /sys/fs/fuse/connections type fusectl (rw)
none on /sys/kernel/debug type debugfs (rw)
none on /sys/kernel/security type securityfs (rw)
udev on /dev type devtmpfs (rw,mode=0755)
devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=0620)
tmpfs on /run type tmpfs (rw,noexec,nosuid,size=10%,mode=0755)
none on /run/lock type tmpfs (rw,noexec,nosuid,nodev,size=5242880)
none on /run/shm type tmpfs (rw,nosuid,nodev)
none on /run/user type tmpfs (rw,noexec,nosuid,nodev,size=104857600,mode=0755)
/dev/md0 on /mnt/md0 type ext4 (rw)
root@debian9:~#
Replace the UUID values in /etc/fstab with the UUID values returned by blkid:
blkid /dev/md0 /dev/md1
root@debian9:~# blkid /dev/md0 /dev/md1
/dev/md0: UUID="4a49251b-e357-40a4-b13f-13b041c55a9d" TYPE="ext4"
/dev/md1: UUID="728f7cfe-bd95-43e5-906d-c8a70023d081" TYPE="swap"
root@debian9:~#
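If you want to grab the values programmatically instead of copying them by hand, blkid can print just the UUID (a sketch):
ROOT_UUID=$(blkid -s UUID -o value /dev/md0)
SWAP_UUID=$(blkid -s UUID -o value /dev/md1)
echo "root: $ROOT_UUID  swap: $SWAP_UUID"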
After changing the UUID values, /etc/fstab should look as follows:
cat /etc/fstab
root@debian9:~# cat /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# / was on /dev/vda1 during installation
UUID=4a49251b-e357-40a4-b13f-13b041c55a9d / ext4 errors=remount-ro 0 1
# swap was on /dev/vda5 during installation
UUID=728f7cfe-bd95-43e5-906d-c8a70023d081 none swap sw 0 0
/dev/fd0 /media/floppy0 auto rw,user,noauto,exec,utf8 0 0
root@debian9:~#
Next replace /dev/vda1 with /dev/md0 in /etc/mtab:
sed -e "s/dev\/vda1/dev\/md0/" -i /etc/mtab
cat /etc/mtab
root@debian9:~# cat /etc/mtab
/dev/md0 / ext4 rw,errors=remount-ro 0 0
proc /proc proc rw,noexec,nosuid,nodev 0 0
sysfs /sys sysfs rw,noexec,nosuid,nodev 0 0
none /sys/fs/fuse/connections fusectl rw 0 0
none /sys/kernel/debug debugfs rw 0 0
none /sys/kernel/security securityfs rw 0 0
udev /dev devtmpfs rw,mode=0755 0 0
devpts /dev/pts devpts rw,noexec,nosuid,gid=5,mode=0620 0 0
tmpfs /run tmpfs rw,noexec,nosuid,size=10%,mode=0755 0 0
none /run/lock tmpfs rw,noexec,nosuid,nodev,size=5242880 0 0
none /run/shm tmpfs rw,nosuid,nodev 0 0
none /run/user tmpfs rw,noexec,nosuid,nodev,size=104857600,mode=0755 0 0
/dev/md0 /mnt/md0 ext4 rw 0 0
root@debian9:~#
Set up the GRUB2 boot loader.
Create the file /etc/grub.d/09_swraid1_setup as follows:
cp /etc/grub.d/40_custom /etc/grub.d/09_swraid1_setup
nano /etc/grub.d/09_swraid1_setup
#!/bin/sh
exec tail -n +3 $0
# This file provides an easy way to add custom menu entries. Simply type the
# menu entries you want to add after this comment. Be careful not to change
# the 'exec tail' line above.
menuentry 'Debian GNU/Linux-RAID' --class debian --class gnu-linux --class gnu --class os {
    recordfail
    insmod mdraid1x
    insmod ext2
    set root='(md/0)'
    linux /boot/vmlinuz-4.9.0-4-amd64 root=/dev/md0 ro quiet
    initrd /boot/initrd.img-4.9.0-4-amd64
}
Make sure you use the correct kernel version in the menuentry (in the linux and initrd lines).
uname -r
Output
root@debian9:~# uname -r
4.9.0-4-amd64
root@debian9:~#
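If your running kernel differs from the version shown in the menuentry, you could substitute it in place (a sketch; it assumes the file still contains the 4.9.0-4-amd64 strings from the example above):
KVER=$(uname -r)
sed -i "s/4.9.0-4-amd64/$KVER/g" /etc/grub.d/09_swraid1_setup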
Update the GRUB configuration and adjust our ramdisk to the new situation. First edit /etc/default/grub:
nano /etc/default/grub
and add the line:
GRUB_RECORDFAIL_TIMEOUT=5
Then update GRUB and the initramfs:
update-grub
update-initramfs -u
Output
root@debian9:~# update-grub
Generating grub.cfg ...
Found linux image: /boot/vmlinuz-4.9.0-4-amd64
Found initrd image: /boot/initrd.img-4.9.0-4-amd64
Found memtest86+ image: /boot/memtest86+.bin
done
root@debian9:~# update-initramfs -u
update-initramfs: Generating /boot/initrd.img-4.9.0-4-amd64
W: mdadm: /etc/mdadm/mdadm.conf defines no arrays.
root@debian9:~#
Copy files to the new disk
Copy the files from the first disk (/dev/vda) to the second one (/dev/vdb).
CAREFUL: double-check the source and destination before running this command.
cp -dpRx --remove-destination / /mnt/md0
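As an alternative to cp, rsync can perform the same copy and is convenient to re-run (a sketch, assuming rsync is installed; -x keeps the copy on the root filesystem, -aAX preserves permissions, ACLs and extended attributes):
rsync -aAXx / /mnt/md0/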
Preparing GRUB2 (Part 1)
Install GRUB2 boot loader on both disks (/dev/vda and /dev/vdb):
grub-install /dev/vda
grub-install /dev/vdb
Output
root@debian9:~# grub-install /dev/vda
Installation finished. No error reported.
root@debian9:~# grub-install /dev/vdb
Installation finished. No error reported.
Now we reboot the system and hope that it boots ok from our RAID arrays:
reboot
Preparing /dev/vda
If everything went well, you should now find /dev/md0 in the output of:
df -h
Output
root@debian9:~# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/md0 19G 985M 17G 6% /
udev 494M 4.0K 494M 1% /dev
tmpfs 201M 304K 201M 1% /run
none 5.0M 0 5.0M 0% /run/lock
none 502M 0 502M 0% /run/shm
none 100M 0 100M 0% /run/user
root@debian9:~#
The output of:
cat /proc/mdstat
Output
root@debian9:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md1 : active raid1 vdb5[1]
1045952 blocks super 1.2 [2/1] [_U]
md0 : active raid1 vdb1[1]
19905408 blocks super 1.2 [2/1] [_U]
unused devices: <none>
root@debian9:~#
Change the partition types on /dev/vda to Linux raid autodetect:
sfdisk --change-id /dev/vda 1 fd
sfdisk --change-id /dev/vda 5 fd
Check the partition tables:
fdisk -l
Output
root@debian9:~# fdisk -l
Disk /dev/vda: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00059a4b
Device Boot Start End Blocks Id System
/dev/vda1 * 2048 39845887 19921920 fd Linux raid autodetect
/dev/vda2 39847934 41940991 1046529 5 Extended
/dev/vda5 39847936 41940991 1046528 fd Linux raid autodetect
Disk /dev/vdb: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Device Boot Start End Blocks Id System
/dev/vdb1 * 2048 39845887 19921920 fd Linux raid autodetect
/dev/vdb2 39847934 41940991 1046529 5 Extended
/dev/vdb5 39847936 41940991 1046528 fd Linux raid autodetect
Disk /dev/md0: 20.4 GB, 20383137792 bytes
2 heads, 4 sectors/track, 4976352 cylinders, total 39810816 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/md0 doesn't contain a valid partition table
Disk /dev/md1: 1071 MB, 1071054848 bytes
2 heads, 4 sectors/track, 261488 cylinders, total 2091904 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/md1 doesn't contain a valid partition table
root@debian9:~#
Now we can add /dev/vda1 and /dev/vda5 to the respective RAID arrays:
mdadm --add /dev/md0 /dev/vda1
mdadm --add /dev/md1 /dev/vda5
Output
root@debian9:~# mdadm --add /dev/md0 /dev/vda1
mdadm: added /dev/vda1
root@debian9:~# mdadm --add /dev/md1 /dev/vda5
mdadm: added /dev/vda5
root@debian9:~#
Take a look at:
watch -n 1 cat /proc/mdstat
root@debian9:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md1 : active raid1 vda5[2] vdb5[1]
1045952 blocks super 1.2 [2/1] [_U]
resync=DELAYED
md0 : active raid1 vda1[2] vdb1[1]
19905408 blocks super 1.2 [2/1] [_U]
[=======>.............] recovery = 36.4% (7247872/19905408) finish=1.0min speed=205882K/sec
unused devices: <none>
root@debian9:~#
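Instead of watching, you can block until the rebuild has finished using mdadm's --wait:
mdadm --wait /dev/md0 /dev/md1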
Then adjust /etc/mdadm/mdadm.conf to the new situation:
cp /etc/mdadm/mdadm.conf_orig /etc/mdadm/mdadm.conf
mdadm --examine --scan >> /etc/mdadm/mdadm.conf
Display the content of /etc/mdadm/mdadm.conf:
cat /etc/mdadm/mdadm.conf
root@debian9:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default (built-in), scan all partitions (/proc/partitions) and all
# containers for MD superblocks. alternatively, specify devices to scan, using
# wildcards if desired.
#DEVICE partitions containers
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# instruct the monitoring daemon where to send mail alerts
MAILADDR root
# definitions of existing MD arrays
# This file was auto-generated on Tue, 23 Oct 2012 04:36:40 -0700
# by mkconf $Id$
ARRAY /dev/md/0 metadata=1.2 UUID=89e5afc0:2d741a2c:7d0f40f0:a1457396 name=ubuntu:0
ARRAY /dev/md/1 metadata=1.2 UUID=ce9163fc:4e168956:5c9050ad:68f15735 name=ubuntu:1
root@debian9:~#
Preparing GRUB2 (Part 2)
Now it's safe to delete /etc/grub.d/09_swraid1_setup:
rm -f /etc/grub.d/09_swraid1_setup
Update our GRUB2 boot loader configuration and install it again on both disks (/dev/vda and /dev/vdb):
update-grub
update-initramfs -u
grub-install /dev/vda
grub-install /dev/vdb
Reboot the machine:
reboot
Repairing a software RAID1 on Debian 8/9 / Ubuntu 16.04
Check the RAID status:
cat /proc/mdstat
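If the disk being re-added is a blank replacement, first give it the same partition layout as the surviving disk, as described earlier (a sketch assuming /dev/vdb is the healthy disk, /dev/vda the replacement, and an MBR label):
sfdisk -d /dev/vdb | sfdisk --force /dev/vda
Then change the partition types to fd and zero the superblocks as shown above before adding the partitions to the arrays.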
Add the disk's partitions back to the RAID arrays:
mdadm -a /dev/md0 /dev/vda1
mdadm -a /dev/md1 /dev/vda2
Watch the sync status:
watch -n .1 cat /proc/mdstat
Output
Every 0.1s: cat /proc/mdstat debian-9: Wed Apr 25 14:07:04 2018
Personalities : [raid1]
md1 : active raid1 vda2[2] vdb2[1]
3928064 blocks super 1.2 [2/1] [_U]
resync=DELAYED
md0 : active raid1 vda1[2] vdb1[1]
58950656 blocks super 1.2 [2/1] [_U]
[======>..............] recovery = 32.8% (19363712/58950656) finish=4.5min speed=143888K/sec
unused devices: <none>
Wait for the sync to finish; the arrays are healthy when both show [UU]:
Personalities : [raid1]
md1 : active raid1 vda2[2] vdb2[1]
3928064 blocks super 1.2 [2/2] [UU]
md0 : active raid1 vda1[2] vdb1[1]
58950656 blocks super 1.2 [2/2] [UU]
unused devices: <none>
Done