mkfs.ext4 /dev/sdb1
mkfs.xfs /dev/sdb2
mkfs.vfat /dev/sdb1
mkswap /dev/vdb
I. Concepts
1. Key terms
- LVM (Logical Volume Manager): logical volume management
- Physical storage media
  - The system's storage hardware, such as hard disks: /dev/hda, /dev/sda, /dev/vda, etc. This is the foundation of the whole storage stack
- Physical volume (PV)
  - A physical volume is the lowest layer of LVM. It carries the LVM management metadata and can be an entire physical disk or a partition on one, e.g. an MBR or GPT partition, a RAID device, or a loop file
- Volume group (VG)
  - A volume group is a collection of one or more physical volumes and shows up in the device filesystem as /dev/VG_NAME
- Logical volume (LV)
  - A logical volume is built on top of a volume group and is made up of physical extents (PEs). It is a virtual partition and appears as /dev/VG_NAME/LV_NAME
  - Unallocated space in the volume group can be used to create new logical volumes, and an existing logical volume can be grown or shrunk dynamically. A filesystem (for example /home or /usr) is then created on top of the logical volume
- Physical extent (PE)
  - Each physical volume is divided into basic units called physical extents. A PE is the smallest unit of storage a volume group can allocate; the default size is 4 MiB and it can be changed
  - All PVs in a volume group use the same PE size; when a new PV is added to a VG, its PE size is automatically set to the PE size defined for that VG (see the sketch after this list)
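The PE layout and the PV → VG → LV hierarchy can be inspected at any time with the standard LVM reporting tools; a minimal sketch, assuming a volume group named vg0 (placeholder name):
vgdisplay vg0 | grep -E "PE Size|Total PE|Alloc PE|Free"   # extent size and usage for the VG
pvdisplay -m                                               # PVs in the VG, plus the PE-to-LV mapping
lvdisplay vg0                                              # the LVs carved out of those extents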
2. How it works
- The way LVM works is actually quite simple: it abstracts and wraps the underlying physical disks, then presents them to the upper layers as logical volumes. With traditional disk management the upper-layer application goes through the filesystem straight to the underlying physical disk, and everything is managed per partition. With LVM the underlying disks are encapsulated, so management operations target a logical volume rather than a partition. If a physical disk is added, for example, the upper-layer services never notice, because what they see is still the same logical volume (the device-mapper sketch below shows this extra layer)
- LVM's biggest selling point is dynamic disk management: the size of a logical volume can be adjusted on the fly without losing existing data, and adding a new disk does not change the logical volumes the upper layers already use. As a dynamic disk management mechanism, logical volumes greatly increase the flexibility of disk administration
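Under the hood each logical volume is a device-mapper block device sitting between the filesystem and the physical disks. A quick way to see this extra layer (the yaya-Moon001 name matches the volume group built in the next section; dmsetup ships with the device-mapper package):
ls -l /dev/mapper/            # LV nodes such as yaya-Moon001 -> ../dm-0
dmsetup ls                    # device-mapper targets backing each LV
dmsetup table yaya-Moon001    # the underlying segments the LV is mapped onto
lsblk                         # the lvm entries appear as children of their PVs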
3. Advantages
- Compared with traditional partition management, LVM is far more flexible: several disks can be treated as one large disk, and a single volume can span many disks (see the sketch below)
- When resizing a logical volume (LV) you do not need to care where it sits on the disk, nor worry about finding contiguous free space
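A hypothetical sketch of the first advantage, two disks pooled into one volume group and a single logical volume created across both (device and volume names are placeholders):
pvcreate /dev/sdx /dev/sdy          # label both disks as physical volumes
vgcreate data /dev/sdx /dev/sdy     # pool them into one volume group
lvcreate -l 100%FREE -n big data    # one LV larger than either disk on its own
mkfs.xfs /dev/data/big              # put a filesystem on the spanning LV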
II. Hands-on examples
1. Creating from scratch
yum -y install lvm2
[root@lvm ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
└─sda1 8:1 0 20G 0 part /
sdb 8:16 0 40G 0 disk
sdc 8:32 0 10G 0 disk
sr0 11:0 1 973M 0 rom
[root@lvm ~]# pvcreate /dev/sdb /dev/sdc
Physical volume "/dev/sdb" successfully created.
Physical volume "/dev/sdc" successfully created.
[root@lvm ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sdb lvm2 --- 40.00g 40.00g
/dev/sdc lvm2 --- 10.00g 10.00g
pvscan
[root@lvm ~]# vgcreate yaya /dev/sdb /dev/sdc
Volume group "yaya" successfully created
[root@lvm ~]# vgs
VG   #PV #LV #SN Attr   VSize  VFree
yaya 2 0 0 wz--n- 49.99g 49.99g
[root@lvm ~]# lvcreate -L 10G -n Moon001 yaya
Logical volume "Moon001" created.
[root@lvm ~]# lvcreate -L 5G -n Moon002 yaya
Logical volume "Moon002" created.
[root@lvm ~]# lvcreate -L 5G -n Moon003 yaya
Logical volume "Moon003" created.
[root@lvm ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
Moon001 yaya -wi-a----- 10.00g
Moon002 yaya -wi-a----- 5.00g
Moon003 yaya -wi-a----- 5.00g
[root@lvm ~]# lvscan
ACTIVE '/dev/yaya/Moon001' [10.00 GiB] inherit
ACTIVE '/dev/yaya/Moon002' [5.00 GiB] inherit
ACTIVE '/dev/yaya/Moon003' [5.00 GiB] inherit
[root@lvm ~]# vgdisplay
--- Volume group ---
VG Name yaya
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 49.99 GiB
PE Size 4.00 MiB
Total PE 12798
Alloc PE / Size 5120 / 20.00 GiB
Free PE / Size 7678 / 29.99 GiB
VG UUID P52FYJ-i06q-rifK-W7Nf-HjCN-4gmb-ZVj7KC
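The extent arithmetic here adds up: 12798 PE × 4 MiB ≈ 49.99 GiB in total, of which 5120 PE × 4 MiB = 20 GiB is allocated to Moon001 (10 G) + Moon002 (5 G) + Moon003 (5 G), leaving 7678 PE ≈ 29.99 GiB free.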
[root@lvm ~]# vgs
VG   #PV #LV #SN Attr   VSize  VFree
yaya 2 3 0 wz--n- 49.99g 29.99g
[root@lvm ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
└─sda1 8:1 0 20G 0 part /
sdb 8:16 0 40G 0 disk
├─yaya-Moon001 253:0 0 10G 0 lvm
├─yaya-Moon002 253:1 0 5G 0 lvm
└─yaya-Moon003 253:2 0 5G 0 lvm
sdc 8:32 0 10G 0 disk
sr0 11:0 1 973M 0 rom
[root@lvm ~]# ls -l /dev/yaya/
total 0
lrwxrwxrwx 1 root root 7 Mar 14 22:13 Moon001 -> ../dm-0
lrwxrwxrwx 1 root root 7 Mar 14 22:13 Moon002 -> ../dm-1
lrwxrwxrwx 1 root root 7 Mar 14 22:13 Moon003 -> ../dm-2
mkfs.ext4 /dev/yaya/Moon001
mkfs.xfs /dev/yaya/Moon002
mkfs.vfat /dev/yaya/Moon003
[root@lvm ~]# mkfs.ext4 /dev/yaya/Moon001
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
655360 inodes, 2621440 blocks
131072 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2151677952
80 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
mkdir /Moon-{a,b,c}
mount /dev/yaya/Moon001 /Moon-a
mount /dev/yaya/Moon002 /Moon-b
mount /dev/yaya/Moon003 /Moon-c
[root@lvm ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 943M 0 943M 0% /dev
tmpfs tmpfs 954M 0 954M 0% /dev/shm
tmpfs tmpfs 954M 10M 944M 2% /run
tmpfs tmpfs 954M 0 954M 0% /sys/fs/cgroup
/dev/sda1 xfs 22G 2.0G 20G 9% /
tmpfs tmpfs 191M 0 191M 0% /run/user/0
/dev/mapper/yaya-Moon001 ext4 11G 38M 9.9G 1% /Moon-a
/dev/mapper/yaya-Moon002 xfs 5.4G 34M 5.4G 1% /Moon-b
/dev/mapper/yaya-Moon003 vfat 5.4G 4.1k 5.4G 1% /Moon-c
vim /etc/fstab
UUID=85fd024b-5296-406d-a63b-50f19e292839 / xfs defaults 0 0
/dev/yaya/Moon001 /Moon-a ext4 defaults 0 0
/dev/yaya/Moon002 /Moon-b xfs defaults 0 0
/dev/yaya/Moon003 /Moon-c vfat defaults 0 0
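Before trusting these fstab entries across a reboot, it is worth re-running the mounts from the file itself; a minimal check:
umount /Moon-a /Moon-b /Moon-c   # drop the manual mounts
mount -a                         # remount everything listed in /etc/fstab
df -Th | grep Moon               # confirm all three LVs came back with the right filesystem types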
2. Adding a new disk
[root@lvm ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
└─sda1 8:1 0 20G 0 part /
sdb 8:16 0 40G 0 disk
├─yaya-Moon001 253:0 0 12G 0 lvm
├─yaya-Moon002 253:1 0 7G 0 lvm
└─yaya-Moon003 253:2 0 7.5G 0 lvm
sdc 8:32 0 10G 0 disk
sdd 8:48 0 20G 0 disk
sr0 11:0 1 973M 0 rom
pvcreate /dev/sdd
[root@lvm ~]# pvcreate /dev/sdd
Physical volume "/dev/sdd" successfully created.
[root@lvm ~]# pvscan
PV /dev/sdb VG yaya lvm2 [<40.00 GiB / <13.50 GiB free]
PV /dev/sdc VG yaya lvm2 [<10.00 GiB / <10.00 GiB free]
PV /dev/sdd lvm2 [20.00 GiB]
Total: 3 [69.99 GiB] / in use: 2 [49.99 GiB] / in no VG: 1 [20.00 GiB]
vgextend yaya /dev/sdd
[root@lvm ~]# vgextend yaya /dev/sdd
Volume group "yaya" successfully extended
[root@lvm ~]# pvscan
PV /dev/sdb VG yaya lvm2 [<40.00 GiB / <13.50 GiB free]
PV /dev/sdc VG yaya lvm2 [<10.00 GiB / <10.00 GiB free]
PV /dev/sdd VG yaya lvm2 [<20.00 GiB / <20.00 GiB free]
Total: 3 [<69.99 GiB] / in use: 3 [<69.99 GiB] / in no VG: 0 [0 ]
lvextend -l +100%FREE /dev/yaya/Moon002
xfs_growfs /dev/yaya/Moon002
resize2fs /dev/yaya/Moon001
3. Extending a logical volume
[root@lvm ~]# lvextend -L 7G /dev/yaya/Moon002
Size of logical volume yaya/Moon002 changed from 5.00 GiB (1280 extents) to 7.00 GiB (1792 extents).
Logical volume yaya/Moon002 successfully resized.
[root@lvm ~]# xfs_growfs /dev/yaya/Moon002
meta-data=/dev/mapper/yaya-Moon002 isize=512 agcount=4, agsize=327680 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=1310720, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@lvm ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 900M 0 900M 0% /dev
tmpfs 910M 0 910M 0% /dev/shm
tmpfs 910M 9.5M 901M 2% /run
tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/sda1 20G 1.8G 19G 9% /
tmpfs 182M 0 182M 0% /run/user/0
/dev/mapper/yaya-Moon001 12G 41M 12G 1% /Moon-a
/dev/mapper/yaya-Moon002 7.0G 33M 7.0G 1% /Moon-b
/dev/mapper/yaya-Moon003 5.0G 4.0K 5.0G 1% /Moon-c
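Moon002 is xfs, which is why xfs_growfs is used above; Moon001 is ext4, so growing it would end with resize2fs instead (as listed in the command summary before this section). A hedged sketch, where the +2G amount is only illustrative:
lvextend -L +2G /dev/yaya/Moon001    # grow the LV by 2 GiB
resize2fs /dev/yaya/Moon001          # grow the ext4 filesystem into the new space
# or grow LV and filesystem in one step with lvextend's --resizefs flag:
lvextend -r -L +2G /dev/yaya/Moon001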