forked from redhat-cop/ocp4-helpernode
-
Notifications
You must be signed in to change notification settings - Fork 0
/
checker.sh.j2
225 lines (196 loc) · 6.52 KB
/
checker.sh.j2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
#!/bin/bash
##################################################
# FUNCTIONS
##################################################
##
# Print usage for this script. The sub-command list must stay in sync with
# the case statement in MAIN SCRIPT below; nfs-setup was previously missing
# from this list even though the dispatcher accepts it.
show_help () {
echo "Usage:"
echo "$(basename "$0") {dns-masters|dns-workers|dns-etcd|dns-other|install-info|haproxy|services|nfs-info|nfs-setup}"
echo ""
}
##
# Show the zonefile entries for the master nodes, then verify that forward
# and reverse lookups for each master resolve against the local named.
dns-masters () {
echo "======================"
echo "DNS Config for Masters"
echo "======================"
echo ""
# grep -E replaces the deprecated egrep; -A prints the master records that
# follow the marker comment baked into the generated zonefile.
grep -E --color=none -A {{ (masters | length) + 1 }} '^; Create entries for the master hosts' /var/named/zonefile.db
echo ""
echo "======================"
echo "DNS Lookup for Masters"
echo "======================"
{% for m in masters %}
echo ""
echo "{{ m.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }}"
echo "-------------------------------------------------"
echo "IP: $(dig @localhost {{ m.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }} +short)"
echo "Reverse: $(dig @localhost -x $(dig @localhost {{ m.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }} +short) +short)"
{% endfor %}
}
###
# Show the zonefile entries for the worker nodes, then verify that forward
# and reverse lookups for each worker resolve against the local named.
dns-workers () {
echo "======================"
echo "DNS Config for Workers"
echo "======================"
echo ""
# grep -E replaces the deprecated egrep; -A prints the worker records that
# follow the marker comment baked into the generated zonefile.
grep -E --color=none -A {{ (workers | length) + 1 }} '^; Create entries for the worker' /var/named/zonefile.db
echo ""
echo "======================"
echo "DNS Lookup for Workers"
echo "======================"
{% for w in workers %}
echo ""
echo "{{ w.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }}"
echo "-------------------------------------------------"
echo "IP: $(dig @localhost {{ w.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }} +short)"
echo "Reverse: $(dig @localhost -x $(dig @localhost {{ w.name | lower }}.{{ dns.clusterid }}.{{ dns.domain | lower }} +short) +short)"
{% endfor %}
}
###
# Show the zonefile A records and SRV records for the etcd cluster and
# verify each resolves against the local named instance.
dns-etcd () {
echo "==================="
echo "DNS Config for ETCD"
echo "==================="
echo ""
# grep -E replaces the deprecated egrep. The pattern must match the marker
# comment written into the generated zonefile verbatim (including "ETCd").
grep -E --color=none -A 4 '^; The ETCd cluster lives' /var/named/zonefile.db
echo ""
echo "==================="
echo "DNS lookup for ETCD"
echo "==================="
# NOTE(review): etcd host count is hard-coded at 3 (etcd-0..etcd-2);
# confirm this matches the number of masters in the inventory.
for i in etcd-{0..2}
do
dig @localhost "${i}".{{ dns.clusterid }}.{{ dns.domain | lower }} +short
done
echo ""
echo "==================="
echo "SRV config for ETCD"
echo "==================="
echo ""
grep -E --color=none -A 4 '^; The SRV' /var/named/zonefile.db
echo ""
echo "==================="
echo "SRV lookup for ETCD"
echo "==================="
dig @localhost _etcd-server-ssl._tcp.{{ dns.clusterid }}.{{ dns.domain | lower }} SRV +short
echo ""
}
###
# Show the zonefile entries for any "other" (non-cluster) hosts and verify
# forward/reverse lookups. The "other" inventory var is optional, hence the
# Jinja guard around the lookup loop.
dns-other () {
echo "======================"
echo "DNS Config for Others"
echo "======================"
echo ""
# grep -E replaces the deprecated egrep.
grep -E --color=none -A {{ (other | default([]) | length) + 1 }} '^; Create entries for the other' /var/named/zonefile.db
echo ""
echo "======================"
echo "DNS Lookup for Others"
echo "======================"
{% if other is defined %}
{% for o in other %}
echo ""
# NOTE(review): unlike the master/worker checks, names here are not passed
# through "| lower" — confirm whether that asymmetry is intentional.
echo "{{ o.name }}.{{ dns.clusterid }}.{{ dns.domain }}"
echo "-------------------------------------------------"
echo "IP: $(dig @localhost {{ o.name }}.{{ dns.clusterid }}.{{ dns.domain }} +short)"
echo "Reverse: $(dig @localhost -x $(dig @localhost {{ o.name }}.{{ dns.clusterid }}.{{ dns.domain }} +short) +short)"
{% endfor %}
{% endif %}
}
###
# Print install-node guidance: where Apache serves artifacts from and the
# quickstart sequence for generating and publishing ignition files.
install-info () {
printf '%s\n' \
'This server should also be used as the install node. Apache is running on http://{{ helper.ipaddr }}:8080 You can put your openshift-install artifacts (bios images and ignition files) in /var/www/html' \
'Quickstart Notes:' \
'mkdir ~/install' \
'cd ~/install' \
'vi install-config.yaml' \
'openshift-install create ignition-configs' \
'cp *.ign /var/www/html/ignition/' \
'chmod o+r /var/www/html/ignition/*.ign' \
'restorecon -vR /var/www/html/' \
'(See https://docs.openshift.com/container-platform/4.2/installing/installing_bare_metal/installing-bare-metal.html for more details)'
}
###
# Point the user at the HAProxy statistics page for monitoring the install.
haproxy () {
printf '%s\n' 'HAProxy stats are on http://{{ helper.ipaddr }}:9000 and you should use it to monitor the install when you start.'
}
###
# Report the "Active:" line from systemctl status for each helper service.
services () {
echo "Status of services:"
echo "==================="
for svc in dhcpd named haproxy httpd tftp; do
  # printf expands \t in the format string exactly as echo -e did.
  printf 'Status of %s svc \t\t-> %s\n' "$svc" "$(systemctl status --no-pager "$svc" | grep --color Active)"
done
}
###
# Describe the NFS auto-provisioner setup steps and warn when /export is
# backed by a disk smaller than ~50GB.
nfs-info () {
# BUGFIX: "df -h" emits mixed human units (M/G/T), so a 900M filesystem
# became "900" and passed the > 50 check, while "1.5G" became the
# non-integer "1.5" and broke the [ -le ] test. -BG forces whole-GiB
# units so the comparison below is always an integer number of GiB.
availablesize=$(df -BG --output=avail /export 2>/dev/null | tail -1 | tr -dc '0-9')
availablesize=${availablesize:-0}   # df failure (no /export) -> 0, warning still fires
warningsize=50
#
cat <<EOF
An NFS server has been installed and the entire /export directory has been shared out. To set up the nfs-auto-provisioner; you just need to run the following command after "openshift-install wait-for bootstrap-complete --log-level debug" has finished...
$(basename "$0") nfs-setup
Thats it! Right now, this is an "opinionated" setup (there is no "how do I set this up for..."). For now, this is what you get.
Once it's setup, create a PVC for the registry (an example of one has been provided)
oc create -f /usr/local/src/registry-pvc.yaml -n openshift-image-registry
Check that with "oc get pv" and "oc get pvc -n openshift-image-registry".
Then set the registry to use this NFS volume. (you may need to remove the emptyDir if you set it beforehand)
oc patch configs.imageregistry.operator.openshift.io cluster --type=json -p '[{"op": "remove", "path": "/spec/storage/emptyDir" }]'
oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"pvc":{ "claim": "registry-pvc"}}}}'
Check the status by watching "oc get pods -n openshift-image-registry"
EOF
#
if [ "${availablesize}" -le "${warningsize}" ]; then
cat <<EOW
!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!
I've detected that you're using as small disk for your /export directory. OpenShift uses a LOT of storage and it's recommended to use a disk of AT LEAST 100GB. You can do one of two things...
1) Add a disk to this server and mount it on /export (RECOMMENDED)
2) Grow the / volume (not ideal but okay)
If you proceed to use this server how it is, you void ANY chance of help from me on slack or github :)
Make sure your permissions are right once you've mounted over /export (EL8 shown below, on EL7 the owner is: nfsnobody)
chown nobody:nobody /export
chmod 775 /export
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
EOW
fi
}
##
##################################################
# MAIN SCRIPT
##################################################
# Dispatch: the first CLI argument names the helper to run. Every known
# sub-command except nfs-setup maps to the shell function of the same
# name, so one pattern list can invoke "$1" directly.
case "$1" in
  dns-masters|dns-workers|dns-etcd|dns-other|install-info|haproxy|services|nfs-info)
    "$1"
    ;;
  nfs-setup)
    /usr/local/bin/nfs-provisioner-setup.sh
    ;;
  *)
    show_help
    exit 1
    ;;
esac
#
exit 0
###