mount.sh
# Assuming /dev/sdb is your new empty disk
# Assuming 10.0.0.* is your private network
# Install and configure GlusterFS. (Run on all nodes)
apt-get install glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd
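
# (Optional) Sanity check that the daemon came up; this assumes the
# glusterd service name used above.
systemctl --no-pager status glusterd
gluster --version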
# Format the disk and mount it (Run on all nodes)
mkfs.xfs /dev/sdb
echo '/dev/sdb /var/no-direct-write-here/gluster-bricks xfs defaults 0 0' >> /etc/fstab
mkdir -p /var/no-direct-write-here/gluster-bricks
mount /var/no-direct-write-here/gluster-bricks
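
# (Optional) Verify the brick filesystem: lsblk should show xfs on /dev/sdb
# and findmnt should list the brick directory (paths as assumed above).
lsblk -f /dev/sdb
findmnt /var/no-direct-write-here/gluster-bricks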
# Add the peers (Run on node1)
gluster peer probe node2
gluster peer probe node3
gluster peer status
gluster pool list
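
# (Optional) Rough wait loop until all three nodes appear in the pool.
# Assumes the node1/node2/node3 hostnames used above resolve from this host
# and that 'gluster pool list' prints one header line plus one line per peer.
until [ "$(gluster pool list | tail -n +2 | wc -l)" -ge 3 ]; do
  echo 'Waiting for peers to join the pool...'
  sleep 2
done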
# Create the volume (Run on node1)
gluster volume create swarm-vol replica 3 \
node1:/var/no-direct-write-here/gluster-bricks/swarm-vol \
node2:/var/no-direct-write-here/gluster-bricks/swarm-vol \
node3:/var/no-direct-write-here/gluster-bricks/swarm-vol
gluster volume set swarm-vol auth.allow 10.0.0.*
gluster volume start swarm-vol
gluster volume status
gluster volume info swarm-vol
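
# (Optional) Quick replication health check: on a healthy replica 3 volume,
# heal info should report zero pending entries for each brick.
gluster volume heal swarm-vol info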
# Mount the volume (Run on all nodes)
echo 'localhost:/swarm-vol /swarm-vol glusterfs defaults,_netdev,x-systemd.requires=glusterd.service,x-systemd.automount 0 0' >> /etc/fstab
mkdir -p /swarm-vol
mount /swarm-vol
chown -R root:docker /swarm-vol
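
# (Optional) The chown above assumes the docker group already exists
# (i.e. Docker is installed); 'getent group docker' prints nothing otherwise.
getent group docker
findmnt /swarm-vol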
# Final check
cd /swarm-vol/
df -Th .
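
# (Optional) Replication smoke test: write a file on one node and list it on
# the others; the file name below is just an example.
touch "/swarm-vol/hello-from-$(hostname)"
ls -l /swarm-vol/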
# READ: https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/
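
# (Optional) Example only: how a Swarm service might bind-mount the shared
# path. The service name, subdirectory and image below are placeholders, not
# part of the original setup; run on a manager node of an existing swarm.
mkdir -p /swarm-vol/example
docker service create --name swarm-vol-example \
  --mount type=bind,source=/swarm-vol/example,target=/data \
  nginx:alpine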