
storage

add a new disk to a new volume group

what disk will be used, and what group will be created?

$ lsblk
NAME         MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda            8:0    0   30G  0 disk
├─sda1         8:1    0    9G  0 part /
└─sda2         8:2    0    1G  0 part [SWAP]
sdf            8:80   0   12T  0 disk
sr0           11:0    1 1024M  0 rom

in my case, i want to mount sdf as /backup, using a volume group called data and a logical volume called backup. run all commands as the root user

create the physical volume and volume group

$ pvcreate /dev/sdf
  Physical volume "/dev/sdf" successfully created.

$ vgcreate data /dev/sdf
  Volume group "data" successfully created

create the logical volume

$ lvcreate -n backup -l 100%FREE data
  Logical volume "backup" created.
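
to double-check the result before formatting, the lvm summary commands can be used (optional sanity check):

$ pvs
$ vgs
$ lvs data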

format, add disk to fstab, and mount

$ mkfs.xfs /dev/data/backup
meta-data=/dev/data/backup       isize=512    agcount=12, agsize=268435455 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=3221224448, imaxpct=5
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=521728, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

create the fstab entry and mount point, and mount it

echo "UUID=$(blkid /dev/data/backup | cut -d "\"" -f 2) /backup xfs defaults,noatime 0 0" >> /etc/fstab
systemctl daemon-reload
mkdir /backup
mount -a
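
verify that the volume is mounted where expected (findmnt and df are standard tools):

findmnt /backup
df -h /backup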

add new disk to an existing volume group

what disk will be used, and what group will be extended?

$ lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   16G  0 disk
|-sda1            8:1    0    1G  0 part /boot
`-sda2            8:2    0   15G  0 part
  |-centos-root 253:0    0 29.4G  0 lvm  /
  `-centos-swap 253:1    0  1.6G  0 lvm
sdb               8:16   0   16G  0 disk
sr0              11:0    1 1024M  0 rom

in my case, i want to add sdb to / (volume group centos, logical volume root). run all commands as the root user

pvcreate /dev/sdb
# confirm the new pv shows up
lvmdiskscan -l
vgextend centos /dev/sdb
lvextend -l +100%FREE /dev/mapper/centos-root
# grow the xfs filesystem to fill the resized lv
xfs_growfs /dev/mapper/centos-root
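
confirm that / picked up the new space:

df -h /
lvs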

pvcreate /dev/sdb error: device is partitioned

use wipefs first to erase all filesystem and partition-table signatures (destructive: the disk's metadata is gone afterwards)

sudo wipefs --all /dev/sdb
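
running wipefs without --all only lists the signatures found on the device, which is a safe way to preview what would be erased:

sudo wipefs /dev/sdb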

creating a partition larger than 2tb

in this example, /dev/sdb1 is created with ext4. a gpt label is required because the msdos (mbr) label cannot address partitions beyond 2TiB with 512-byte sectors

$ parted /dev/sdb
GNU Parted 2.3
Using /dev/sdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted)

(parted) mklabel gpt
Warning: The existing disk label on /dev/sdb will be destroyed and all data on this disk will be lost. Do you want to continue?
Yes/No? yes
(parted)
(parted) unit TB

# creating a 3tb primary partition? use this
(parted) mkpart primary 0 3

# creating a partition using all the available space? use this
(parted) mkpart primary 0% 100%

# want to set the filesystem type too? note this only records a type
# hint in the partition table; the filesystem still has to be created with mkfs
(parted) mkpart primary ext4 0% 100%
# the partition will be used in a raid? set this flag
(parted) set 1 raid on
(parted) print
Model: ATA ST33000651AS (scsi)
Disk /dev/sdb: 3.00TB
Sector size (logical/physical): 512B/512B
Partition Table: gpt

Number  Start   End     Size    File system  Name     Flags
 1      0.00TB  3.00TB  3.00TB  ext4         primary
(parted) quit
$ mkfs.ext4 /dev/sdb1
mke2fs 1.41.12 (17-May-2010)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
183148544 inodes, 732566272 blocks
36628313 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=4294967296
22357 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
    32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
    4096000, 7962624, 11239424, 20480000, 23887872, 71663616, 78675968,
    102400000, 214990848, 512000000, 550731776, 644972544

Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

This filesystem will be automatically checked every 31 mounts or
180 days, whichever comes first.  Use tune2fs -c or -i to override.

find the partition uuid, and add it to /etc/fstab

$ blkid /dev/sdb1
/dev/sdb1: UUID="7b7bdd66-218c-4a64-b760-82824f87724b" BLOCK_SIZE="4096" TYPE="ext4" PARTLABEL="primary" PARTUUID="dc00594a-b233-46c3-8f79-cdc3a5cf28ad"

$ vim /etc/fstab
UUID=7b7bdd66-218c-4a64-b760-82824f87724b /DISK_BACKUP            ext4     defaults,noatime 0 0

$ mkdir /DISK_BACKUP
$ mount -a

create software raid 10

create some partitions with the raid flag set (see the parted section above). after that, use mdadm to create and manage the raid array.

yum install mdadm -y
mdadm --create /dev/md0 --level raid10 --name <RAID_NAME> --raid-devices <NUMBER_OF_DISKS> <LIST_OF_PARTITIONS, like /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1>
echo "MAILADDR root@localhost" >> /etc/mdadm.conf
mdadm --detail --scan >> /etc/mdadm.conf
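
as a filled-in illustration of the --create line above, a 4-disk raid10 named data built from the hypothetical partitions /dev/sdb1 through /dev/sde1 would look like this:

mdadm --create /dev/md0 --level raid10 --name data --raid-devices 4 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1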

create new file system on the new raid device

mkfs.xfs /dev/disk/by-id/md-name-<RAID_NAME>
mkdir /data
mount /dev/disk/by-id/md-name-<RAID_NAME> /data
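
to make the mount survive a reboot, an fstab entry can be added the same way as in the first section (reusing the blkid trick; /data is the mount point chosen above):

echo "UUID=$(blkid -s UUID -o value /dev/disk/by-id/md-name-<RAID_NAME>) /data xfs defaults,noatime 0 0" >> /etc/fstab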

check the array status

mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
#or
cat /proc/mdstat

simulate disk sdb1 failure

mdadm --manage --set-faulty /dev/disk/by-id/md-name-<RAID_NAME> /dev/sdb1

#Check syslog for new failure messages
tail /var/log/messages

Oct  3 16:43:42 centos-62-1 kernel: md/raid10:md0: Disk failure on sdb1, disabling device.
Oct  3 16:43:42 centos-62-1 kernel: md/raid10:md0: Operation continuing on 3 devices.

# check array status
mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
cat /proc/mdstat

simulate disk sdd1 failure

mdadm --manage --set-faulty /dev/disk/by-id/md-name-<RAID_NAME> /dev/sdd1

# check syslog for new failure messages
tail /var/log/messages

Oct  3 16:45:01 centos-62-1 kernel: md/raid10:md0: Disk failure on sdd1, disabling device.
Oct  3 16:45:01 centos-62-1 kernel: md/raid10:md0: Operation continuing on 2 devices.

# check array status
mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
cat /proc/mdstat

remove sdb1 from the array and re-add it

mdadm /dev/disk/by-id/md-name-<RAID_NAME> -r /dev/sdb1
mdadm /dev/disk/by-id/md-name-<RAID_NAME> -a /dev/sdb1

# check array status
mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
cat /proc/mdstat

remove sdd1 from the array and re-add it

mdadm /dev/disk/by-id/md-name-<RAID_NAME> -r /dev/sdd1
mdadm /dev/disk/by-id/md-name-<RAID_NAME> -a /dev/sdd1

# check array status
mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
cat /proc/mdstat
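
the re-added devices resync in the background; the progress can be followed live with watch:

watch -n 5 cat /proc/mdstat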

replace failed raid10 disk

after replacing the failed disk and partitioning it, the new partition shows up in lsblk without an md0 child (sdd1 below)

$ sudo lsblk
NAME          MAJ:MIN RM  SIZE RO TYPE   MOUNTPOINTS
[...]
sdb1          259:0    0  2.9T  0 disk
└─md0           9:0    0  5.8T  0 raid10 /MOUNT_POINT
sdc1          259:1    0  2.9T  0 disk
└─md0           9:0    0  5.8T  0 raid10 /MOUNT_POINT
sdd1          259:2    0  2.9T  0 disk
sde1          259:3    0  2.9T  0 disk
└─md0           9:0    0  5.8T  0 raid10 /MOUNT_POINT

since the array is degraded, the status commands will show the missing device

$ sudo mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
[...]
      Raid Devices : 4
     Total Devices : 3
[...]
             State : active, degraded
[...]
    Number   Major   Minor   RaidDevice State
       0     259        3        0      active sync set-A   /dev/sdb1
       1     259        1        1      active sync set-B   /dev/sdc1
       2     259        0        2      active sync set-A   /dev/sde1
       -       0        0        3      removed
$ sudo cat /proc/mdstat
Personalities : [raid10]
md0 : active raid10 sde1[2] sdb1[0] sdc1[1]
      6250969088 blocks super 1.2 512K chunks 2 near-copies [4/3] [UUU_]
      bitmap: 42/47 pages [168KB], 65536KB chunk

so, add the new disk to the array

$ sudo mdadm --manage /dev/md0 --add /dev/sdd1
mdadm: added /dev/sdd1

and check the recovery status and progress

$ sudo mdadm --detail /dev/disk/by-id/md-name-<RAID_NAME>
[...]
      Raid Devices : 4
     Total Devices : 4
[...]
             State : active, degraded, recovering
[...]
    Rebuild Status : 0% complete
[...]
    Number   Major   Minor   RaidDevice State
       0     259        3        0      active sync set-A   /dev/sdb1
       1     259        1        1      active sync set-B   /dev/sdc1
       2     259        0        2      active sync set-A   /dev/sde1
       4     259        2        3      spare rebuilding   /dev/sdd1
$ sudo cat /proc/mdstat
Personalities : [raid10]
md0 : active raid10 sdd1[4] sde1[2] sdb1[0] sdc1[1]
      6250969088 blocks super 1.2 512K chunks 2 near-copies [4/3] [UUU_]
      [>....................]  recovery =  1.2% (40347776/3125484544) finish=255.9min speed=200905K/sec
      bitmap: 42/47 pages [168KB], 65536KB chunk

unused devices: <none>
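
rebuild speed is capped by the kernel raid speed limits (values in KiB/s, reset on reboot); if the rebuild is too slow, the ceiling can be raised temporarily:

$ sudo sysctl dev.raid.speed_limit_min dev.raid.speed_limit_max
$ sudo sysctl -w dev.raid.speed_limit_max=500000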

create iso file from a directory

$ sudo genisoimage -nobak -iso-level 4 -o ./my.iso /my/path
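
to verify the image, loop-mount it read-only and inspect the contents:

$ sudo mount -o loop,ro ./my.iso /mnt
$ ls /mnt
$ sudo umount /mnt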

/bin/rm: Argument list too long

$ find . -name '*.gz' -print0 | xargs -0 rm
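
the -print0/-0 pair keeps filenames with spaces or newlines intact. alternatively, find can delete the matches itself, with no xargs at all:

$ find . -name '*.gz' -delete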