# Assuming /dev/sdb is your new empty disk
# Assuming 10.0.0.* is your private network

# Install and configure GlusterFS (Run on all nodes)
apt-get install glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd

# Format the disk and mount it as the brick directory (Run on all nodes)
mkfs.xfs /dev/sdb
echo '/dev/sdb /var/no-direct-write-here/gluster-bricks xfs defaults 0 0' >> /etc/fstab
mkdir -p /var/no-direct-write-here/gluster-bricks
mount /var/no-direct-write-here/gluster-bricks

# Add the peers (Run on node1)
gluster peer probe node2
gluster peer probe node3
gluster peer status
gluster pool list

# Create a replica-3 volume from the three bricks (Run on node1)
gluster volume create swarm-vol replica 3 \
  node1:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node2:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node3:/var/no-direct-write-here/gluster-bricks/swarm-vol
# Restrict client access to the private network (matches the 10.0.0.* assumption above)
gluster volume set swarm-vol auth.allow 10.0.0.*
gluster volume start swarm-vol
gluster volume status
gluster volume info swarm-vol

# Mount the volume (Run on all nodes)
echo 'localhost:/swarm-vol /swarm-vol glusterfs defaults,_netdev,x-systemd.requires=glusterd.service,x-systemd.automount 0 0' >> /etc/fstab
mkdir -p /swarm-vol
mount /swarm-vol
chown -R root:docker /swarm-vol

# Final check
cd /swarm-vol/
df -Th .

# READ: https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/
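
# Optional smoke test: a minimal sketch, not part of the walkthrough above.
# It assumes passwordless SSH from node1 to node2/node3; the file name
# smoke-test.txt is arbitrary. Write through the mount on one node and read
# it back on the others to confirm replication end to end. (Run on node1)
echo "hello from $(hostname)" > /swarm-vol/smoke-test.txt
ssh node2 cat /swarm-vol/smoke-test.txt   # should print the same line
ssh node3 cat /swarm-vol/smoke-test.txt
rm /swarm-vol/smoke-test.txt              # clean up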
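
# Optional health check, useful after a node was rebooted or briefly offline:
# on a replica volume, `gluster volume heal <volname> info` lists files still
# pending self-heal on each brick.
gluster volume heal swarm-vol info   # expect "Number of entries: 0" per brick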