# GlusterFS replicated-volume setup for a 3-node Docker Swarm cluster.
# Assumptions:
#   - /dev/sdb is a new, empty disk on every node
#   - 10.0.0.* is the private network between the nodes
#   - the nodes resolve each other as node1, node2, node3

# Install and configure GlusterFS. (Run on all nodes)
apt-get install glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd

# Format the disk and mount the brick directory. (Run on all nodes)
mkfs.xfs /dev/sdb
echo '/dev/sdb /var/no-direct-write-here/gluster-bricks xfs defaults 0 0' >> /etc/fstab
mkdir -p /var/no-direct-write-here/gluster-bricks
mount /var/no-direct-write-here/gluster-bricks

# Add the peers. (Run on node1 only)
gluster peer probe node2
gluster peer probe node3
gluster peer status
gluster pool list

# Create and start the replicated volume. (Run on node1 only)
gluster volume create swarm-vol replica 3 \
  node1:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node2:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node3:/var/no-direct-write-here/gluster-bricks/swarm-vol
# Restrict access to the private network stated above (the original used
# 10.64.50.*, contradicting the 10.0.0.* assumption). Quote the glob so the
# shell cannot expand it against files in the current directory.
gluster volume set swarm-vol auth.allow '10.0.0.*'
gluster volume start swarm-vol
gluster volume status
gluster volume info swarm-vol

# Mount the volume. (Run on all nodes)
echo 'localhost:/swarm-vol /swarm-vol glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
mkdir -p /swarm-vol
mount /swarm-vol
chown -R root:docker /swarm-vol

# Hack to fix gluster not mounting on boot: the fstab-generated mount unit
# races glusterd, so add a drop-in making it wait for the daemon and the
# network. (Run on all nodes)
# NOTE(review): the daemon unit is glusterd.service on Debian/Ubuntu — it must
# match the 'systemctl start glusterd' used above, not glusterfs-server.service.
# Restart=/RestartSec= were removed: they are [Service] directives and are
# invalid in a .mount unit. '[Intall]' typo fixed to '[Install]'.
sudo mkdir -p /etc/systemd/system/swarm-vol.mount.d
sudo tee /etc/systemd/system/swarm-vol.mount.d/override.conf >/dev/null <<'EOF'
[Unit]
Requires=glusterd.service
After=glusterd.service network-online.target
Wants=network-online.target

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
# systemd escapes '-' in mount paths as \x2d in unit names; single-quote the
# name so the shell passes the backslash through literally.
sudo systemctl start 'swarm\x2dvol.mount'

# Final check: the filesystem type shown should be fuse.glusterfs.
cd /swarm-vol/
df -Th .