# Check DM multipath
multipath -l
3600507680281816384000000000000dc dm-27 ABC,1234
size=200G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 3:0:0:4 sdf 8:80 active ready running
| `- 4:0:0:4 sdp 8:240 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 3:0:1:4 sdk 8:160 active ready running
`- 4:0:1:4 sdu 65:64 active ready running
3600507680281816384000000000000cc dm-26 ABC,1234
size=200G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 3:0:1:3 sdj 8:144 active ready running
| `- 4:0:1:3 sdt 65:48 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 3:0:0:3 sde 8:64 active ready running
`- 4:0:0:3 sdo 8:224 active ready running
3600507680281816384000000000000cb dm-25 ABC,1234
size=200G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 3:0:0:2 sdd 8:48 active ready running
| `- 4:0:0:2 sdn 8:208 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 3:0:1:2 sdi 8:128 active ready running
`- 4:0:1:2 sds 65:32 active ready running
3600507680281816384000000000000ca dm-23 ABC,1234
size=200G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 3:0:1:1 sdh 8:112 active ready running
| `- 4:0:1:1 sdr 65:16 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 3:0:0:1 sdc 8:32 active ready running
`- 4:0:0:1 sdm 8:192 active ready running
3600507680281816384000000000000c9 dm-24 ABC,1234
size=200G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 3:0:0:0 sdb 8:16 active ready running
| `- 4:0:0:0 sdl 8:176 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 3:0:1:0 sdg 8:96 active ready running
`- 4:0:1:0 sdq 65:0 active ready running
#Create MD arrays using mdadm.
mdadm --create /dev/md0 --raid-devices=1 --level=0
--metadata=1.2 --force /dev/mapper/3600507680281816384000000000000c9
mdadm --create /dev/md1 --raid-devices=1 --level=0
--metadata=1.2 --force /dev/mapper/3600507680281816384000000000000ca
mdadm --create /dev/md2 --raid-devices=1 --level=0
--metadata=1.2 --force /dev/mapper/3600507680281816384000000000000cb
mdadm --create /dev/md3 --raid-devices=1 --level=0
--metadata=1.2 --force /dev/mapper/3600507680281816384000000000000cc
mdadm --create /dev/md4 --raid-devices=1 --level=0
--metadata=1.2 --force /dev/mapper/3600507680281816384000000000000dc
# Contents of /clusterconf/QAS/mdadm.conf so the arrays re-assemble after reboot:
ARRAY /dev/md0 UUID=cb6443ea:7a85f171:3dfca667:723eb9c9
ARRAY /dev/md1 UUID=fd640129:153c4dd5:0a7b2596:44c1584f
ARRAY /dev/md2 UUID=e8c6b9d2:14f71cee:f27dc725:c452ea1e
ARRAY /dev/md3 UUID=8158487c:5ea824ee:fe01fca9:32dc5cbd
ARRAY /dev/md4 UUID=2e152c68:2f72bfb8:4373b348:1164ddc6
#manually start and stop md devices like this:
# to stop md scan
mdadm --stop -scan
# to verify stop md
more /proc/mdstat
mdadm --detail /dev/md0
for DEVICE in /dev/md0 /dev/md1 /dev/md2 /dev/md3 /dev/md4; do
mdadm --assemble "${DEVICE}" --config=/clusterconf/QAS/mdadm.conf; done;
## to scan md -> mdadm --assemble --scan
# (end of notes)