Zuletzt aktiv 1710702440

mount.sh Originalformat
# Assumptions:
#   - /dev/sdb is a new, empty disk on every node
#   - 10.0.0.* is your private network

# Install and configure GlusterFS (run on ALL nodes).
apt-get install glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd
8
# Format the brick disk and mount it (run on ALL nodes).
# NOTE(review): mkfs.xfs refuses to overwrite an existing filesystem;
# only add -f if you are certain /dev/sdb is disposable.
mkfs.xfs /dev/sdb
mkdir -p /var/no-direct-write-here/gluster-bricks
echo '/dev/sdb /var/no-direct-write-here/gluster-bricks xfs defaults 0 0' >> /etc/fstab
mount /var/no-direct-write-here/gluster-bricks
14
# Add the peers to the trusted storage pool (run on node1 only).
gluster peer probe node2
gluster peer probe node3
# Verify that node2 and node3 show up as connected peers.
gluster peer status
gluster pool list
20
# Create a 3-way replicated volume across the three bricks (run on node1 only).
gluster volume create swarm-vol replica 3 \
  node1:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node2:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node3:/var/no-direct-write-here/gluster-bricks/swarm-vol
# Restrict client access to the private network declared at the top of this
# script. (Was 10.64.50.*, which contradicted the stated 10.0.0.* assumption;
# quoted so the shell cannot glob-expand the pattern.)
gluster volume set swarm-vol auth.allow '10.0.0.*'
gluster volume start swarm-vol
gluster volume status
gluster volume info swarm-vol
30
# Mount the Gluster volume via fstab (run on ALL nodes).
# _netdev + the x-systemd options make systemd wait for glusterd and the
# network before attempting the mount at boot.
echo 'localhost:/swarm-vol /swarm-vol glusterfs defaults,_netdev,x-systemd.requires=glusterd.service,x-systemd.automount 0 0' >> /etc/fstab

mkdir -p /swarm-vol
mount /swarm-vol
# Give the docker group write access — assumes the 'docker' group exists
# (i.e. Docker is already installed on the node); verify before running.
chown -R root:docker /swarm-vol

# Final check: confirm /swarm-vol is a glusterfs mount with the expected size.
cd /swarm-vol/ || exit
df -Th .

# READ: https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/