create_volume.sh
forked from rimusz/glusterfs-gce
#!/bin/bash
# Create GlusterFS cluster volume
if [ "$1" = "" ]
then
echo "Usage: create_volume.sh volume_name"
exit
fi
VOLUME=$1
# Get settings from the file
source cluster/settings
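# The settings file is assumed to define the following variables
# (example values are illustrative only; the real ones live in cluster/settings):
#   COUNT  - number of GlusterFS nodes, e.g. 3
#   SERVER - instance name prefix, e.g. "gluster"
#   REGION - GCE region, e.g. "us-central1"
#   ZONES  - array of zone suffixes, one per node, e.g. ("b" "c" "f")
#   SPACE  - a single space (" ") used when building the brick list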
# Create the brick folder for the volume on each node
for (( i=1; i<=${COUNT}; i++ ))
do
echo "Create the folder ${VOLUME} on ${SERVER}-${i} ..."
gcloud compute ssh --zone ${REGION}-${ZONES[$i-1]} ${SERVER}-${i} --command "sudo mkdir /data/brick1/${VOLUME} && sudo chmod 777 /data/brick1/${VOLUME}"
# append this node's brick to the list passed to 'gluster volume create'
CLUSTER=$CLUSTER${SPACE}${SERVER}-${i}:/data/brick1/${VOLUME}
done
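# After the loop, CLUSTER holds the space-separated brick list, e.g.
# (illustrative, assuming SERVER=gluster and COUNT=3):
#   gluster-1:/data/brick1/${VOLUME} gluster-2:/data/brick1/${VOLUME} gluster-3:/data/brick1/${VOLUME}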
echo " "
# run the remaining gluster commands on the first node (${SERVER}-1)
# create the replicated volume across all bricks
gcloud compute ssh --zone ${REGION}-${ZONES[0]} ${SERVER}-1 --command "sudo gluster volume create ${VOLUME} replica ${COUNT} ${CLUSTER}"
# start volume
gcloud compute ssh --zone ${REGION}-${ZONES[0]} ${SERVER}-1 --command "sudo gluster volume start ${VOLUME}"
# check volumes
gcloud compute ssh --zone ${REGION}-${ZONES[0]} ${SERVER}-1 --command "sudo gluster volume info"
# enable bitrot detection (scrubbing runs biweekly by default)
gcloud compute ssh --zone ${REGION}-${ZONES[0]} ${SERVER}-1 --command "sudo gluster volume bitrot ${VOLUME} enable"