#!/bin/sh
#SBATCH --partition=cms-uhh ## or allgpu / cms / maxgpu
#SBATCH --time=24:00:00
#SBATCH --nodes=1
#SBATCH --job-name interpol_mpi # give the job a unique name
#SBATCH --output ./interpol_log/%j-std.out # stdout log (%j = job ID)
#SBATCH --error ./interpol_log/%j-std.err # stderr log
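# NOTE: Slurm does not create the log directory for you; if ./interpol_log is
# missing, the job output may be lost. Create it once before submitting:
#   mkdir -p ./interpol_log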
# #SBATCH --mail-type END
# #SBATCH --mail-user [email protected]
# #SBATCH --constraint=GPU # any GPU node (superseded by the specific constraint below)
# #SBATCH --nodelist=max-cmsg004 # you can select specific nodes, if necessary
#SBATCH --constraint="P100|V100" # ask for specific types of GPUs
# Environment modules (GPU drivers, anaconda, .bashrc, etc.) --> likely not necessary if you're installing your own cudatoolkit & cuDNN versions
#source /etc/profile.d/modules.sh
#module load maxwell
#module load cuda
#module load anaconda/3
#module load mpi/mpich-x86_64
module load mpi/openmpi3-x86_64 # provides the mpirun used below
. ~/.bashrc
export PATH="/home/kaemmlel/miniconda3/envs/torch/bin:$PATH"
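# Alternatively (a sketch, assuming the conda env is named "torch" and conda
# is initialized by the ~/.bashrc sourced above), activate the environment
# instead of prepending its bin directory to PATH:
#   conda activate torch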
set -x # echo commands as they run
export PYTHONPATH="" # the openmpi module adds Python 2 packages to PYTHONPATH
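# Optional sanity check that the intended interpreter comes first on PATH:
#   which python && python --version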
# go to the folder containing your Python scripts
cd ~/Projects/amazing_ai
# dataset labels substituted into the input/output file names below
START=b
END=b
# Run the interpolation under MPI.
# MPI4PY_RC_THREADS=0 disables mpi4py's threaded MPI initialization;
# --use-hwthread-cpus lets Open MPI schedule one rank per hardware thread.
MPI4PY_RC_THREADS=0 time mpirun --use-hwthread-cpus \
python -m amazing_ai.scripts.interpol_images \
--file-start @events_${START}_sr.h5 \
--file-end @events_${END}_sr.h5 \
-o @interpol_${START}${END}{flags}_{n_starts}_{n_per_start}.h5 \
--steps 100 \
-n 6000 \
--n-per-start 100 \
--blur \
--mpi
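
# Submit from the login node with:
#   sbatch interpol_job
# and monitor the queue with:
#   squeue -u $USER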