Table of Contents
I. GFS Deployment
1.1. Environment
1.2. Change the node hostnames
1.3. Mount the disks on each node and set up the local repository
1.4. Add nodes and create the cluster
1.5. Create the volumes according to the plan
1.6. Deploy the Gluster client
1.7. Test the Gluster file system
1.8. Destructive testing
Suspend the node2 node or stop the glusterd service to simulate a failure
Replicated volume (bricks on node3 and node4): shut down node4
I. GFS Deployment
1.1. Environment
Node1: node1 / 192.168.190.101
    /dev/sdb1  mounted at  /data/sdb1
    /dev/sdc1  mounted at  /data/sdc1
    /dev/sdd1  mounted at  /data/sdd1
    /dev/sde1  mounted at  /data/sde1
Node2: node2 / 192.168.190.105
    /dev/sdb1  mounted at  /data/sdb1
    /dev/sdc1  mounted at  /data/sdc1
    /dev/sdd1  mounted at  /data/sdd1
    /dev/sde1  mounted at  /data/sde1
Node3: node3 / 192.168.190.103
    /dev/sdb1  mounted at  /data/sdb1
    /dev/sdc1  mounted at  /data/sdc1
    /dev/sdd1  mounted at  /data/sdd1
    /dev/sde1  mounted at  /data/sde1
Node4: node4 / 192.168.190.104
    /dev/sdb1  mounted at  /data/sdb1
    /dev/sdc1  mounted at  /data/sdc1
    /dev/sdd1  mounted at  /data/sdd1
    /dev/sde1  mounted at  /data/sde1
===== Client node: 192.168.190.102 =====
1.2. Change the node hostnames
# On node1
hostnamectl set-hostname node1
bash
# On node2
hostnamectl set-hostname node2
bash
# On node3
hostnamectl set-hostname node3
bash
# On node4
hostnamectl set-hostname node4
bash
1.3. Mount the disks on each node and set up the local repository
1. Turn off the firewall and SELinux
[root@node1 ~] # systemctl stop firewalld
[root@node1 ~] # setenforce 0
2. Partition the disks and mount them
[root@node1 ~] # vim /opt/fdisk.sh
#!/bin/bash
# Find the new data disks (sdb-sdz); the system disk sda is excluded
NEWDEV=`ls /dev/sd* | grep -o 'sd[b-z]' | uniq`
for VAR in $NEWDEV
do
    # Create one primary partition spanning the whole disk
    echo -e "n\np\n\n\n\nw\n" | fdisk /dev/$VAR &> /dev/null
    # Format it as XFS and create the matching mount point
    mkfs.xfs /dev/${VAR}1 &> /dev/null
    mkdir -p /data/${VAR}1 &> /dev/null
    # Make the mount persistent across reboots
    echo "/dev/${VAR}1 /data/${VAR}1 xfs defaults 0 0" >> /etc/fstab
done
mount -a &> /dev/null
:wq
[root@node1 ~] # chmod +x /opt/fdisk.sh
[root@node1 ~] # cd /opt/
[root@node1 /opt] # ./fdisk.sh
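To verify that the four partitions were created and mounted, a quick check (a minimal sketch; the output depends on your disk sizes) could be:
[root@node1 /opt] # df -hT | grep /data
[root@node1 /opt] # tail -4 /etc/fstab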
3. Add the node hostnames to /etc/hosts
[root@node1 /opt] # echo "192.168.190.101 node1" >> /etc/hosts
[root@node1 /opt] # echo "192.168.190.105 node2" >> /etc/hosts
[root@node1 /opt] # echo "192.168.190.103 node3" >> /etc/hosts
[root@node1 /opt] # echo "192.168.190.104 node4" >> /etc/hosts
[root@node1 /opt] # ls
fdisk.sh rh
[root@node1 /opt] # rz -E
rz waiting to receive.
[root@node1 /opt] # ls
fdisk.sh gfsrepo.zip rh
[root@node1 /opt] # unzip gfsrepo.zip
4. Install and start GlusterFS (perform on all node machines)
[root@node1 /opt] # cd /etc/yum.repos.d/
[root@node1 /etc/yum.repos.d] # ls
local.repo repos.bak
[root@node1 /etc/yum.repos.d] # mv *.repo repos.bak/
[root@node1 /etc/yum.repos.d] # ls
repos.bak
[root@node1 /etc/yum.repos.d] # vim glfs.repo
[glfs]
name=glfs
baseurl=file:///opt/gfsrepo
gpgcheck=0
enabled=1
:wq
[root@node1 /etc/yum.repos.d] # yum clean all && yum makecache
Loaded plugins: fastestmirror, langpacks
Cleaning repos: glfs
Cleaning up everything
Maybe you want: rm -rf /var/cache/yum, to also free up space taken by orphaned data from disabled or removed repos
Loaded plugins: fastestmirror, langpacks
glfs | 2.9 kB 00:00:00
(1/3): glfs/filelists_db | 62 kB 00:00:00
(2/3): glfs/other_db | 46 kB 00:00:00
(3/3): glfs/primary_db | 92 kB 00:00:00
Determining fastest mirrors
Metadata Cache Created
[root@node1 /etc/yum.repos.d] # yum -y install glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
[root@node1 /etc/yum.repos.d] # systemctl start glusterd.service
[root@node1 /etc/yum.repos.d] # systemctl enable glusterd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterd.service to /usr/lib/systemd/system/glusterd.service.
[root@node1 /etc/yum.repos.d] # systemctl status glusterd.service
● glusterd.service - GlusterFS, a clustered file-system server
Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-03-02 19:45:10 CST; 764ms ago
Main PID: 51664 (glusterd)
CGroup: /system.slice/glusterd.service
└─51664 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
1.4. Add nodes and create the cluster
Add the nodes to the trusted storage pool (this only needs to be done on one node; here it is done on node1 again)
[root@node1 ~] # gluster peer probe node1
peer probe: success. Probe on localhost not needed
[root@node1 ~] # gluster peer probe node2
peer probe: success.
[root@node1 ~] # gluster peer probe node3
peer probe: success.
[root@node1 ~] # gluster peer probe node4
peer probe: success.
[root@node1 ~] # gluster peer status
Number of Peers: 3
Hostname: node2
Uuid: af72a77e-7e11-457d-9706-76d467bd583c
State: Peer in Cluster (Connected)
Hostname: node3
Uuid: b9734bc8-f293-4705-9363-d4802149f8a5
State: Peer in Cluster (Connected)
Hostname: node4
Uuid: ad7032d2-774f-4d7c-b480-43ddf299ff1a
State: Peer in Cluster (Connected)
1.5. Create the volumes according to the plan
Create a distributed volume
#When no volume type is specified, a distributed volume is created by default
[root@node1 yum.repos.d]#gluster volume create fenbushi node1:/data/sdb1 node2:/data/sdb1 force
volume create: fenbushi: success: please start the volume to access data
[root@node1 yum.repos.d]#gluster volume list
fenbushi
[root@node1 yum.repos.d]#gluster volume start fenbushi
volume start: fenbushi: success
[root@node1 yum.repos.d]#gluster volume info fenbushi
Volume Name: fenbushi
Type: Distribute
Volume ID: 1c0a63e6-4da6-4d99-885d-670c613b35b2
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/data/sdb1
Brick2: node2:/data/sdb1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
Create a striped volume
#The type is stripe with a count of 2, followed by 2 brick servers, so a striped volume is created
[root@node1 ~]#gluster volume create tiaodaijun stripe 2 node1:/data/sdc1 node2:/data/sdc1 force
volume create: tiaodaijun: success: please start the volume to access data
[root@node1 ~]#gluster volume list
fenbushi
tiaodaijun
[root@node1 ~]#gluster volume start tiaodaijun
volume start: tiaodaijun: success
[root@node1 ~]#gluster volume info tiaodaijun
Volume Name: tiaodaijun
Type: Stripe
Volume ID: 0f6aae69-17d3-4057-b540-64c077283e0d
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node1:/data/sdc1
Brick2: node2:/data/sdc1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
Create a replicated volume
#The type is replica with a count of 2, followed by 2 brick servers, so a replicated volume is created
[root@node1 ~]#gluster volume create fuzhi replica 2 node3:/data/sdb1 node4:/data/sdb1 force
volume create: fuzhi: success: please start the volume to access data
[root@node1 ~]#gluster volume list
fenbushi
fuzhi
tiaodaijun
[root@node1 ~]#gluster volume start fuzhi
volume start: fuzhi: success
[root@node1 ~]#gluster volume list
fenbushi
fuzhi
tiaodaijun
[root@node1 ~]#gluster volume info fuzhi
Volume Name: fuzhi
Type: Replicate
Volume ID: 2b934144-45a5-4978-8724-2580f7696f55
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node3:/data/sdb1
Brick2: node4:/data/sdb1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
Create a distributed-striped volume
#The type is stripe with a count of 2, followed by 4 brick servers (twice the stripe count), so a distributed-striped volume is created
[root@node1 ~]#gluster volume create fenbushi-tiaodai stripe 2 node1:/data/sdd1/ node2:/data/sdd1 node3:/data/sdd1 node4:/data/sdd1 force
volume create: fenbushi-tiaodai: success: please start the volume to access data
[root@node1 ~]#gluster volume list
fenbushi
fenbushi-tiaodai
fuzhi
tiaodaijun
[root@node1 ~]#gluster volume start fenbushi-tiaodai
volume start: fenbushi-tiaodai: success
[root@node1 ~]#gluster volume info fenbushi-tiaodai
Volume Name: fenbushi-tiaodai
Type: Distributed-Stripe
Volume ID: ba04645b-86e7-419f-b5fa-ab6569958f22
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: node1:/data/sdd1
Brick2: node2:/data/sdd1
Brick3: node3:/data/sdd1
Brick4: node4:/data/sdd1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
Create a distributed-replicated volume
#The type is replica with a count of 2, followed by 4 brick servers (twice the replica count), so a distributed-replicated volume is created
[root@node1 ~]#gluster volume create fbs-fz replica 2 node1:/data/sde1 node2:/data/sde1 node3:/data/sde1 node4:/data/sde1 force
volume create: fbs-fz: success: please start the volume to access data
[root@node1 ~]#gluster volume list
fbs-fz
fenbushi
fenbushi-tiaodai
fuzhi
tiaodaijun
[root@node1 ~]#gluster volume start fbs-fz
volume start: fbs-fz: success
[root@node1 ~]#gluster volume info fbs-fz
Volume Name: fbs-fz
Type: Distributed-Replicate
Volume ID: 2c539124-02dc-42e8-98e6-9eccc735d95d
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: node1:/data/sde1
Brick2: node2:/data/sde1
Brick3: node3:/data/sde1
Brick4: node4:/data/sde1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
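Optionally, before moving on to the client, the state of every brick can be confirmed from any node; a minimal check (commands only, output omitted) looks like:
[root@node1 ~]#gluster volume status
[root@node1 ~]#gluster volume info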
1.6. Deploy the Gluster client
Deploy the Gluster client (192.168.190.102)
1. Install the client software
[root@promote ~]#systemctl stop firewalld
[root@promote ~]#setenforce 0
[root@promote ~]#cd /opt
[root@promote opt]#ls
rh
[root@promote opt]#rz -E
rz waiting to receive.
[root@promote opt]#ls
gfsrepo.zip rh
[root@promote opt]#unzip gfsrepo.zip
[root@promote opt]#cd /etc/yum.repos.d/
[root@promote yum.repos.d]#ls
local.repo repos.bak
[root@promote yum.repos.d]#mv *.repo repos.bak/
[root@promote yum.repos.d]#ls
repos.bak
[root@promote yum.repos.d]#vim glfs.repo
[glfs]
name=glfs
baseurl=file:///opt/gfsrepo
gpgcheck=0
enabled=1
[root@promote yum.repos.d]#yum clean all && yum makecache
[root@promote yum.repos.d]#yum -y install glusterfs glusterfs-fuse
2. Create the mount directories
[root@promote yum.repos.d]# mkdir -p /test/{fengbushi,fenbushi-tiaodai,fbs-fz,tiaodaijun,fuzhi}
[root@localhost test]# ls
fbs-fz fengbushi tiaodaijun fenbushi-tiaodai fuzhi
3. Configure the /etc/hosts file
[root@localhost test]# echo "192.168.190.101 node1" >> /etc/hosts
[root@localhost test]# echo "192.168.190.105 node2" >> /etc/hosts
[root@localhost test]# echo "192.168.190.103 node3" >> /etc/hosts
[root@localhost test]# echo "192.168.190.104 node4" >> /etc/hosts
4. Mount the Gluster file systems
#Temporary mounts
[root@localhost test]# mount.glusterfs node1:fbs-fz /test/fbs-fz
[root@localhost test]# mount.glusterfs node1:tiaodaijun /test/tiaodaijun/
[root@localhost test]# mount.glusterfs node1:fuzhi /test/fuzhi/
[root@localhost test]# mount.glusterfs node1:fenbushi-tiaodai /test/fenbushi-tiaodai/
[root@localhost test]# mount.glusterfs node1:fenbushi /test/fengbushi/
[root@localhost test]# df -h
Filesystem               Size  Used  Avail  Use%  Mounted on
/dev/mapper/centos-root 50G 3.7G 47G 8% /
devtmpfs 897M 0 897M 0% /dev
tmpfs 912M 0 912M 0% /dev/shm
tmpfs 912M 26M 887M 3% /run
tmpfs 912M 0 912M 0% /sys/fs/cgroup
/dev/sda1 5.0G 179M 4.9G 4% /boot
tmpfs 183M 4.0K 183M 1% /run/user/42
tmpfs 183M 48K 183M 1% /run/user/0
node1:fbs-fz 40G 65M 40G 1% /test/fbs-fz
node1:tiaodaijun 40G 65M 40G 1% /test/tiaodaijun
node1:fuzhi 20G 33M 20G 1% /test/fuzhi
node1:fenbushi-tiaodai 80G 130M 80G 1% /test/fenbushi-tiaodai
node1:fenbushi 40G 65M 40G 1% /test/fengbushi
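These mounts are temporary and disappear after a reboot. For permanent mounts, entries like the following can be added to /etc/fstab on the client (a sketch using the volume names above; _netdev delays the mount until the network is up):
node1:fenbushi   /test/fengbushi   glusterfs   defaults,_netdev   0 0
node1:fuzhi      /test/fuzhi       glusterfs   defaults,_netdev   0 0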
1.7. Test the Gluster file system
1. Write files into the volumes (on the client)
cd /opt
# Create five 40 MB test files
dd if=/dev/zero of=/opt/demo1.log bs=1M count=40
dd if=/dev/zero of=/opt/demo2.log bs=1M count=40
dd if=/dev/zero of=/opt/demo3.log bs=1M count=40
dd if=/dev/zero of=/opt/demo4.log bs=1M count=40
dd if=/dev/zero of=/opt/demo5.log bs=1M count=40
ls -lh /opt
cp /opt/demo* /test/fengbushi/
cp /opt/demo* /test/fenbushi-tiaodai/
cp /opt/demo* /test/fuzhi/
cp /opt/demo* /test/fbs-fz/
cp /opt/demo* /test/tiaodaijun/
View the file distribution on the distributed volume
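A simple way to see the distribution is to list the brick directories on the servers. For the distributed volume fenbushi (bricks node1:/data/sdb1 and node2:/data/sdb1), each demo file should appear whole on exactly one of the two bricks:
[root@node1 ~]# ls -lh /data/sdb1
[root@node2 ~]# ls -lh /data/sdb1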
View the file distribution on the striped volume
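For the striped volume tiaodaijun (bricks node1:/data/sdc1 and node2:/data/sdc1), every file should show up on both bricks, each brick holding roughly half of the 40M of data:
[root@node1 ~]# ls -lh /data/sdc1
[root@node2 ~]# ls -lh /data/sdc1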
View the file distribution on the replicated volume
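For the replicated volume fuzhi (bricks node3:/data/sdb1 and node4:/data/sdb1), every file should be stored in full on both bricks:
[root@node3 ~]# ls -lh /data/sdb1
[root@node4 ~]# ls -lh /data/sdb1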
View the file distribution on the distributed-striped volume
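For the distributed-striped volume fenbushi-tiaodai (/data/sdd1 on all four nodes), each file should be placed on one stripe pair (node1+node2 or node3+node4) and split across that pair:
[root@node1 ~]# ls -lh /data/sdd1
[root@node2 ~]# ls -lh /data/sdd1
[root@node3 ~]# ls -lh /data/sdd1
[root@node4 ~]# ls -lh /data/sdd1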
View the file distribution on the distributed-replicated volume
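For the distributed-replicated volume fbs-fz (/data/sde1 on all four nodes), each file should land on one replica pair and be stored in full on both members of that pair:
[root@node1 ~]# ls -lh /data/sde1
[root@node2 ~]# ls -lh /data/sde1
[root@node3 ~]# ls -lh /data/sde1
[root@node4 ~]# ls -lh /data/sde1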
1.8. Destructive testing
Suspend the node2 node or stop the glusterd service to simulate a failure
[root@node2 ~]# systemctl stop glusterd.service
The striped volume is no longer accessible; it has no redundancy.
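With node2 unavailable, this can be checked from the client; listing the striped volume's mount point no longer returns usable data, since half of every file lived on node2 (a sketch of the check, output omitted):
[root@localhost test]# ls /test/tiaodaijun/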
Replicated volume (bricks on node3 and node4): shut down node4
[root@node4 ~]#init 0
On the client, the data is still available.
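From the client this can be confirmed by listing the replicated volume; node3 still holds a full copy of every file:
[root@localhost test]# ls -lh /test/fuzhi/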
The distributed-replicated volume also has redundancy.
Volumes that keep replicated copies of the data are the safest.
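The same kind of check can be run against the distributed-replicated volume; with only one member of a replica pair down, the surviving member still serves the data:
[root@localhost test]# ls -lh /test/fbs-fz/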