Installing ZFS on CentOS 7

ZFS is used here to stand in for a RAID controller card; it performs remarkably well, and it is what TrueNAS is built on.

Official installation docs: https://openzfs.github.io/openzfs-docs/Getting%20Started/RHEL%20and%20CentOS.html

Install on CentOS 7

yum install -y epel-release
yum install -y https://zfsonlinux.org/epel/zfs-release.el7_9.noarch.rpm
yum install -y kernel-devel zfs

Load the module

[root@localhost ~]# lsmod|grep zfs

[root@localhost ~]# modprobe zfs

[root@localhost ~]# lsmod|grep zfs
zfs                  3986850  0 
zunicode              331170  1 zfs
zlua                  151525  1 zfs
zcommon                89551  1 zfs
znvpair                94388  2 zfs,zcommon
zavl                   15167  1 zfs
icp                   301854  1 zfs
spl                   104299  5 icp,zfs,zavl,zcommon,znvpair
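
On a fresh install the module may not be loaded automatically after a reboot. One way to make it persistent is the standard systemd modules-load.d mechanism; a minimal sketch (the zfs packages ship their own services that may already take care of this, so treat it as optional):

# load the zfs kernel module at every boot via systemd-modules-load
echo zfs > /etc/modules-load.d/zfs.conf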

Check the file systems

[root@localhost ~]#  zfs list
no datasets available

On 192.168.85.100 the local disks run from sdb through sdg. First create a pool with zpool (this plays the role of the RAID card), then create file systems with zfs.

[root@localhost ~]# zpool create -f zfspool sdb sdc sdd sde sdf sdg

[root@localhost ~]# zpool status
  pool: zfspool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          sdb       ONLINE       0     0     0
          sdc       ONLINE       0     0     0
          sdd       ONLINE       0     0     0
          sde       ONLINE       0     0     0
          sdf       ONLINE       0     0     0
          sdg       ONLINE       0     0     0

errors: No known data errors

[root@localhost ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                  63G     0   63G    0% /dev
tmpfs                     63G     0   63G    0% /dev/shm
tmpfs                     63G  9.9M   63G    1% /run
tmpfs                     63G     0   63G    0% /sys/fs/cgroup
/dev/mapper/centos-root   50G  1.7G   49G    4% /
/dev/sda1               1014M  189M  826M   19% /boot
/dev/mapper/centos-home  392G   33M  392G    1% /home
tmpfs                     13G     0   13G    0% /run/user/0
zfspool                   53T  128K   53T    1% /zfspool

The pool above is useless for production; a configuration with no redundancy at all simply will not do there.

Destroy it first

zpool destroy zfspool

A plain stripe is basically pointless in a data center, so build a mirror instead, i.e. RAID 1

[root@localhost ~]# zpool create -f zfspool mirror sdb sdc

[root@localhost ~]# zpool status
  pool: zfspool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sdb     ONLINE       0     0     0
            sdc     ONLINE       0     0     0

errors: No known data errors

Disks must be added to a mirror in pairs; adding a single disk is not allowed

[root@localhost ~]# zpool add -f zfspool mirror sde
invalid vdev specification: mirror requires at least 2 devices

Add another pair of disks and the pool becomes RAID 10

[root@localhost ~]# zpool add -f zfspool mirror sde sdf

[root@localhost ~]# zpool status
  pool: zfspool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sdb     ONLINE       0     0     0
            sdc     ONLINE       0     0     0
          mirror-1  ONLINE       0     0     0
            sde     ONLINE       0     0     0
            sdf     ONLINE       0     0     0

errors: No known data errors

ZFS also has its own raidz1, raidz2, and raidz3 layouts, which tolerate 1, 2, and 3 failed disks respectively; the minimum number of disks they require is 2, 3, and 4 respectively.
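
As a sketch, a raidz1 pool over three of the disks used above would be created like this (same idea as the raidz2 example that follows):

# raidz1: single-parity, tolerates 1 failed disk
zpool create -f zfspool raidz1 sdb sdc sde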

For production, a sensible choice is to tolerate 2 failed disks (raidz2) and add 1 hot spare

[root@localhost /]# zpool create -f zfspool raidz2 sdb sdc sde sdf

[root@localhost /]# zpool status
  pool: zfspool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          raidz2-0  ONLINE       0     0     0
            sdb     ONLINE       0     0     0
            sdc     ONLINE       0     0     0
            sde     ONLINE       0     0     0
            sdf     ONLINE       0     0     0

errors: No known data errors

Add a hot spare

[root@localhost /]# zpool add zfspool spare sdg

[root@localhost /]# zpool status
  pool: zfspool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          raidz2-0  ONLINE       0     0     0
            sdb     ONLINE       0     0     0
            sdc     ONLINE       0     0     0
            sde     ONLINE       0     0     0
            sdf     ONLINE       0     0     0
        spares
          sdg       AVAIL   

errors: No known data errors

If a disk goes bad, replace the failed disk sde with the good disk sdf

[root@localhost /]# zpool replace zfspool sde sdf

[root@localhost /]# zpool status
  pool: zfspool
 state: ONLINE
  scan: resilvered 1.17M in 0 days 00:00:00 with 0 errors on Thu Mar 11 20:02:19 2021
config:

        NAME        STATE     READ WRITE CKSUM
        zfspool     ONLINE       0     0     0
          raidz2-0  ONLINE       0     0     0
            sdb     ONLINE       0     0     0
            sdc     ONLINE       0     0     0
            sdf     ONLINE       0     0     0
        spares
          sdg       AVAIL   

errors: No known data errors

Check the integrity of the pool

zpool scrub zfspool
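
Scrubs are normally scheduled rather than run by hand; a minimal sketch using cron (the schedule and the file name are only examples):

# scrub zfspool at 02:00 on the 1st of every month
echo '0 2 1 * * root /sbin/zpool scrub zfspool' > /etc/cron.d/zfs-scrub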

Adding a cache device improves read speed, similar to the hot-data SSD tiering feature of storage arrays

zpool create zfspool mirror /dev/sda /dev/sdb cache /dev/sdk /dev/sdl
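
A cache device can also be attached to an existing pool; a sketch assuming sdk is an unused SSD:

zpool add zfspool cache sdk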

Adding a log device improves write speed, similar to the hot-data SSD tiering feature of storage arrays

zpool create zfspool mirror /dev/sda /dev/sdb log /dev/sdk /dev/sdl
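
Log devices can likewise be added to an existing pool, and mirroring them is common so that a single SSD failure does not take out the log; a sketch assuming sdk and sdl are unused SSDs:

zpool add zfspool log mirror sdk sdl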

Once the zfspool pool has been created with zpool, you can create file systems on it

zfs create zfspool/dba-vol

[root@localhost /]# zfs list
NAME              USED  AVAIL     REFER  MOUNTPOINT
zfspool          1.47M  35.2T      153K  /zfspool
zfspool/dba-vol   262K  35.2T      160K  /zfspool/dba-vol
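
Properties such as compression or a quota can then be set per dataset; a quick sketch (the values are only examples):

# enable lz4 compression and cap the dataset at 1 TiB
zfs set compression=lz4 zfspool/dba-vol
zfs set quota=1T zfspool/dba-vol
zfs get compression,quota zfspool/dba-vol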

Change the mount point

zfs set mountpoint=/path/to/mount zpool-name/dataset-name
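
For example, to mount the dataset created above under a hypothetical /data/dba directory:

zfs set mountpoint=/data/dba zfspool/dba-vol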

Snapshots

zfs snapshot [pool]/[dataset name]@[snapshot name]
zfs snapshot zfspool/dba-vol@dba-vol-20210315

zfs list -t snapshot

# Note: rolling back to a snapshot destroys all snapshots taken after that point in time
zfs rollback -r [pool]/[dataset]@[snapshot name]
zfs rollback -r zfspool/dba-vol@dba-vol-20210315
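
Snapshots can also be shipped to another machine with zfs send/receive; a minimal sketch, where backuphost and backuppool are placeholders:

# copy a snapshot to a remote pool over ssh
zfs send zfspool/dba-vol@dba-vol-20210315 | ssh backuphost zfs receive backuppool/dba-vol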
