-
Notifications
You must be signed in to change notification settings - Fork 6
/
resources.yaml
232 lines (213 loc) · 4.97 KB
/
resources.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
---
# Global configuration of computing nodes.
# Declared state reconciled against the cloud by `synchronize.py` (see
# `graceful` below).
images:
  # Image names keyed by worker class; `deployment` entries below reference
  # these via their `image:` field (`default` or `gpu`).
  default: vgcn~rockylinux-9-latest-x86_64~+generic+workers+internal~20240321~66731~HEAD~60854e6
  gpu: vgcn~rockylinux-9-latest-x86_64~+generic+workers-gpu+internal~20240327~34401~HEAD~2a80ce3
  secure: vggp-v60-secure-j322-692e75a7c101-main
# Network all nodes are attached to.
network: bioinf
# Security groups applied to every node.
secgroups:
  - ufr-ingress
  # interactive-egress: A reduced more stringent egress rule for all nodes
  - interactive-egress
# Name of the keypair injected into new instances.
sshkey: cloud3
pubkeys:
  # The public key(s) that will be accepted when SSHing to a machine.
  - "AAAAB3NzaC1yc2EAAAABIwAAAQEAuSG1VOumQhbJfOyalJjS4lPC8eeeL02ld/VI2BFApyMfwbB1QxehY3lMBXt/cBTzqU3MxgJQVzNr/AgjHa5rKn2hSGMfKAdaX2tG686k715bBjRm9rJNhmc8KSH9tVS35M0HwRXMfiGvSmb5qJ6utWRZe6RM2JMIbqqI5Oc4tbzPPVKk1+yvT1JdYuyqAOp2yvQbOqKaXmqOhPdPNaJZMI4o+UHmmb8FH6OTDY27G7X7u07ZPwVi1j+6ZFVMQZqg1RhUdg9kmHpHwMX7P6NcD4G9GsISHIh92eva9xgWYGiS0wUsmOWTNgAzzsfRZjMFep+jB2wup6QN7XpMw97eTw=="
# Behavior of `synchronize.py`
graceful: false
# 18/6/2024: Updated the max number of possible workers
# Hardware capacity per flavor: the maximum number of instances available.
# The `deployment` section below draws from these totals; the sum of `count`
# per flavor there should not exceed the figure here.
nodes_inventory:
  c1.c28m225d50: 5  # (16.04.2024: RZ swapped the underlying servers for a 4 in 1 node and this will be of a different flavor and we need to wait to get the hardware)
  c1.c28m475d50: 19
  c1.c36m100d50: 30
  c1.c36m225d50: 15
  c1.c36m900d50: 1
  c1.c36m975d50: 8
  c1.c60m1975d50: 1
  c1.c120m205d50: 10
  c1.c120m405d50: 22
  c1.c125m425d50: 16
  c1.c28m935d50: 4
  c1.c28m875d50: 2
  g1.c14m40g1d50: 4
  g1.c8m40g1d50: 14
deployment:
  # Each entry is one named set of identical workers:
  #   count:  number of instances to run
  #   flavor: must be a key of `nodes_inventory` above
  #   group:  group the workers are assigned to (compute / interactive /
  #           compute_gpu / training-*)
  #   docker: presumably enables Docker on the node -- confirm in synchronize.py
  #   image:  key into the top-level `images` map
  #   volume: attached volume; size presumably in GB -- confirm
  # worker-fetch:
  #   count: 0
  #   flavor: c1.c36m100d50
  #   group: upload
  #   image: default
  worker-interactive:
    count: 2  # reduced from 3
    flavor: c1.c36m100d50
    group: interactive
    docker: true
    image: default
    volume:
      size: 1024
      type: default
  worker-c28m475:
    count: 19
    flavor: c1.c28m475d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c28m225:
    count: 5
    flavor: c1.c28m225d50
    group: compute  # compute_test
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c36m100:
    count: 28
    flavor: c1.c36m100d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c36m225:
    count: 14
    flavor: c1.c36m225d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c36m900:
    count: 1  # it's a c1.c36m975d50 host with probably a faulty memory bank
    flavor: c1.c36m900d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c36m975:
    count: 8
    flavor: c1.c36m975d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  # 18/06/24: Hardware is still connected to the old cloud
  # worker-c28m935:
  #   count: 4
  #   flavor: c1.c28m935d50
  #   group: compute
  #   docker: true
  #   volume:
  #     size: 1024
  #     type: default
  #   image: default
  # 18/06/24: Hardware is still connected to the old cloud
  # worker-c28m875:
  #   count: 2
  #   flavor: c1.c28m875d50
  #   group: compute
  #   docker: true
  #   volume:
  #     size: 1024
  #     type: default
  #   image: default
  worker-c64m2:
    count: 1
    flavor: c1.c60m1975d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c120m205:
    count: 6
    flavor: c1.c120m205d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c120m405:
    count: 22
    flavor: c1.c120m405d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c125m425:
    count: 16
    flavor: c1.c125m425d50
    group: compute
    docker: true
    volume:
      size: 1024
      type: default
    image: default
  worker-c14m40g1:
    count: 4
    flavor: g1.c14m40g1d50
    group: compute_gpu
    docker: true
    volume:
      size: 1024
      type: default
    image: gpu
  worker-c8m40g1:
    count: 14
    flavor: g1.c8m40g1d50
    group: compute_gpu
    docker: true
    volume:
      size: 1024
      type: default
    image: gpu
  # Trainings
  # Temporary workers for training events, bounded by `start`/`end`.
  # NOTE(review): start/end are unquoted ISO dates, so most YAML loaders
  # resolve them to date objects rather than strings (YAML 1.1 timestamp
  # tag) -- confirm synchronize.py expects date objects; if it compares
  # strings, these values should be quoted.
  # These entries have no `docker`/`image`/`volume` keys -- presumably
  # synchronize.py applies defaults for them; verify.
  training-kmb6:
    count: 0
    flavor: c1.c120m205d50
    start: 2024-10-28
    end: 2024-12-19
    group: training-kmb613-2
  training-hand:
    count: 1
    flavor: c1.c120m205d50
    start: 2024-11-18
    end: 2024-12-20
    group: training-hands-on-plus
  training-rnr-:
    count: 1
    flavor: c1.c120m205d50
    start: 2024-11-25
    end: 2024-12-20
    group: training-rnr-sekoskaita
  training-heh-:
    count: 1
    flavor: c1.c120m205d50
    start: 2024-12-16
    end: 2024-12-20
    group: training-heh-mag2024
  training-gdaw:
    count: 1
    flavor: c1.c120m205d50
    start: 2024-12-10
    end: 2024-12-30
    group: training-gdawg2024
  training-ga-f:
    count: 1
    flavor: c1.c120m205d50  # Maybe change?
    start: 2025-02-17
    end: 2025-03-01
    group: training-ga-fcen-uba-2025