From 450fc540ddc9101cff79772f4053637c423b7858 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 23 Jun 2022 17:15:57 -0400 Subject: [PATCH 01/25] Revert "Merge pull request #410 from carpentries-incubator/revert-407-amdahl-code" This reverts commit bdf8529a958af90400f7a651accb84c2f6b41b81, reversing changes made to 86229ebc05571ca9d9dbdd4cc6f9faceacc56cbd. --- _episodes/15-transferring-files.md | 209 ++--- _episodes/16-parallel.md | 777 ++++++------------ .../parallel/eight-tasks-jobscript.snip | 14 + .../parallel/four-tasks-jobscript.snip | 5 +- ...jobscript.snip => one-task-jobscript.snip} | 5 +- .../parallel/eight-tasks-jobscript.snip} | 6 +- .../parallel/four-tasks-jobscript.snip | 6 +- ...jobscript.snip => one-task-jobscript.snip} | 6 +- ...script.snip => eight-tasks-jobscript.snip} | 7 +- .../parallel/four-tasks-jobscript.snip | 5 +- .../parallel/one-task-jobscript.snip} | 7 +- .../NIST_CTCMS_slurm/_config_options.yml | 4 +- .../parallel/eight-tasks-jobscript.snip | 11 + .../parallel/four-tasks-jobscript.snip | 5 +- .../parallel/one-task-jobscript.snip | 11 + .../parallel/eight-tasks-jobscript.snip | 14 + .../parallel/four-tasks-jobscript.snip | 5 +- ...jobscript.snip => one-task-jobscript.snip} | 5 +- .../parallel/eight-tasks-jobscript.snip | 13 + .../parallel/four-tasks-jobscript.snip | 6 +- .../parallel/one-task-jobscript.snip | 13 + files/hpc-intro-code.tar.gz | Bin 0 -> 3391 bytes files/hpc-intro-data.tar.gz | Bin 36535 -> 0 bytes files/hpc-intro-data.zip | Bin 41253 -> 0 bytes files/jargon.html | 2 +- 25 files changed, 432 insertions(+), 704 deletions(-) create mode 100644 _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip rename _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (70%) rename _includes/snippets_library/{UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip => EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip} (62%) rename _includes/snippets_library/EPCC_Cirrus_pbs/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (62%) rename _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/{one-task-with-memory-jobscript.snip => eight-tasks-jobscript.snip} (68%) rename _includes/snippets_library/{NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip => Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip} (65%) create mode 100644 _includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip create mode 100644 _includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip create mode 100644 _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip rename _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (70%) create mode 100644 _includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip create mode 100644 _includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip create mode 100644 files/hpc-intro-code.tar.gz delete mode 100644 files/hpc-intro-data.tar.gz delete mode 100644 files/hpc-intro-data.zip diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 042d59af..5856e2dc 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -16,25 +16,27 @@ Performing work on a remote computer is not very useful if we cannot get files to or from the cluster. 
There are several options for transferring data between computing resources using CLI and GUI utilities, a few of which we will cover. -## Download Files From the Internet +## Download Lesson Files From the Internet One of the most straightforward ways to download files is to use either `curl` or `wget`. One of these is usually installed in most Linux shells, on Mac OS terminal and in GitBash. Any file that can be downloaded in your web browser -through a direct link can be downloaded using `curl -O` or `wget`. This is a -quick way to download datasets or source code. +through a direct link can be downloaded using `curl` or `wget`. This is a +quick way to download datasets or source code. The syntax for these commands is -The syntax for these commands is: `curl -O https://some/link/to/a/file` -and `wget https://some/link/to/a/file`. Try it out by downloading -some material we'll use later on, from a terminal on your local machine. +* `curl -O https://some/link/to/a/file` +* `wget https://some/link/to/a/file` + +Try it out by downloading some material we'll use later on, from a terminal on +your local machine. ``` -{{ site.local.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +{{ site.local.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz ``` {: .language-bash} or ``` -{{ site.local.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +{{ site.local.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz ``` {: .language-bash} @@ -44,8 +46,8 @@ or > This is an archive file format, just like `.zip`, commonly used and supported > by default on Linux, which is the operating system the majority of HPC > cluster machines run. You may also see the extension `.tgz`, which is exactly -> the same. We'll talk more about "tarballs," since "tar-dot-g-z" is a -> mouthful, later on. +> the same. We'll talk more about "tarballs" later, since "tar-dot-g-z" is a +> mouthful. {: .discussion} ## Transferring Single Files and Folders With `scp` @@ -59,47 +61,29 @@ mechanism. To _upload to_ another computer: ``` -{{ site.local.prompt }} scp path/to/local/file.txt {{ site.remote.user }}@{{ site.remote.login }}:/path/on/{{ site.remote.name }} -``` -{: .language-bash} - -To _download from_ another computer: - -``` -{{ site.local.prompt }} scp {{ site.remote.user }}@{{ site.remote.login }}:/path/on/{{ site.remote.name }}/file.txt path/to/local/ +{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_path ``` {: .language-bash} Note that everything after the `:` is relative to our home directory on the -remote computer. We can leave it at that if we don't care where the file goes. +remote computer. We can leave it at that if we don't have a more specific +destination in mind. + +Upload the lesson material to your remote home directory like so: ``` -{{ site.local.prompt }} scp local-file.txt {{ site.remote.user }}@{{ site.remote.login }}: +{{ site.local.prompt }} scp hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: ``` {: .language-bash} -> ## Upload a File -> -> Copy the file you just downloaded from the Internet to your home directory on -> {{ site.remote.name }}. -> -> > ## Solution -> > -> > ``` -> > {{ site.local.prompt }} scp hpc-intro-data.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:~/ -> > ``` -> > {: .language-bash} -> {: .solution} -{: .challenge} - -Most computer clusters are protected from the open internet by a _firewall_. 
-This means that the `curl` command will fail, as an address outside the -firewall is unreachable from the inside. To get around this, run the `curl` or -`wget` command from your local machine to download the file, then use the `scp` -command to upload it to the cluster. - > ## Why Not Download on {{ site.remote.name }} Directly? > +> Most computer clusters are protected from the open internet by a _firewall_. +> This means that the `curl` command will fail, as an address outside the +> firewall is unreachable from the inside. To get around this, run the `curl` +> or `wget` command from your local machine to download the file, then use the +> `scp` command to upload it to the cluster. +> > Try downloading the file directly. Note that it may well fail, and that's > OK! > @@ -107,25 +91,29 @@ command to upload it to the cluster. > > > > ``` > > {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -> > {{ site.remote.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz -> > or -> > {{ site.remote.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +> > {{ site.remote.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz +> > # or +> > {{ site.remote.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz > > ``` > > {: .language-bash} > {: .solution} > > Did it work? If not, what does the terminal output tell you about what > happened? -> {: .challenge} {: .discussion} -To copy a whole directory, we add the `-r` flag, for "**r**ecursive": copy the +## Transferring a Directory + +If you went ahead and extracted the tarball, don't worry! `scp` can handle +entire directories as well as individual files. + +To copy a whole directory, we add the `-r` flag for "**r**ecursive": copy the item specified, and every item below it, and every item below those... until it reaches the bottom of the directory tree rooted at the folder name you provided. ``` -{{ site.local.prompt }} scp -r some-local-folder {{ site.remote.user }}@{{ site.remote.login }}:target-directory/ +{{ site.local.prompt }} scp -r hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ ``` {: .language-bash} @@ -157,7 +145,7 @@ A trailing slash on the target directory is optional, and has no effect for > ## A Note on `rsync` > > As you gain experience with transferring files, you may find the `scp` -> command limiting. The [rsync][rsync] utility provides +> command limiting. The [rsync] utility provides > advanced features for file transfer and is typically faster compared to both > `scp` and `sftp` (see below). It is especially useful for transferring large > and/or many files and creating synced backup folders. 
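>
> If you are not sure `rsync` is installed on both machines, a quick check is
> to ask each one for its version (a sketch; the exact output varies from
> system to system):
>
> ```
> {{ site.local.prompt }} rsync --version
> ```
> {: .language-bash}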
@@ -166,13 +154,15 @@ A trailing slash on the target directory is optional, and has no effect for > commonly used options: > > ``` -> {{ site.local.prompt }} rsync -avzP path/to/local/file.txt {{ site.remote.user }}@{{ site.remote.login }}:directory/path/on/{{ site.remote.name }}/ +> {{ site.local.prompt }} rsync -avzP hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > > The options are: -> * `a` (archive) to preserve file timestamps and permissions among other things -> * `v` (verbose) to get verbose output to help monitor the transfer +> +> * `a` (**a**rchive) to preserve file timestamps, permissions, and folders, +> among other things; implies recursion +> * `v` (**v**erbose) to get verbose output to help monitor the transfer > * `z` (compression) to compress the file during transit to reduce size and > transfer time > * `P` (partial/progress) to preserve partially transferred files in case @@ -181,27 +171,25 @@ A trailing slash on the target directory is optional, and has no effect for > To recursively copy a directory, we can use the same options: > > ``` -> {{ site.local.prompt }} rsync -avzP path/to/local/dir {{ site.remote.user }}@{{ site.remote.login }}:directory/path/on/{{ site.remote.name }}/ +> {{ site.local.prompt }} rsync -avzP hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ > ``` > {: .language-bash} > -> As written, this will place the local directory and its contents under the -> specified directory on the remote system. If the trailing slash is omitted on +> As written, this will place the local directory and its contents under your +> home directory on the remote system. If the trailing slash is omitted on > the destination, a new directory corresponding to the transferred directory -> ('dir' in the example) will not be created, and the contents of the source +> will not be created, and the contents of the source > directory will be copied directly into the destination directory. > -> The `a` (archive) option implies recursion. -> > To download a file, we simply change the source and destination: > > ``` -> {{ site.local.prompt }} rsync -avzP {{ site.remote.user }}@{{ site.remote.login }}:path/on/{{ site.remote.name }}/file.txt path/to/local/ +> {{ site.local.prompt }} rsync -avzP {{ site.remote.user }}@{{ site.remote.login }}:hpc-intro-code ./ > ``` > {: .language-bash} {: .callout} -All file transfers using the above methods use SSH to encrypt data sent through +File transfers using both `scp` and `rsync` use SSH to encrypt data sent through the network. So, if you can connect via SSH, you will be able to transfer files. By default, SSH uses network port 22. If a custom SSH port is in use, you will have to specify it using the appropriate flag, often `-p`, `-P`, or @@ -213,17 +201,18 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > modify this command? 
> > ``` -> {{ site.local.prompt }} rsync test.txt {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} rsync hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > > > ## Solution > > > > ``` +> > {{ site.local.prompt }} man rsync > > {{ site.local.prompt }} rsync --help | grep port > > --port=PORT specify double-colon alternate port number > > See http://rsync.samba.org/ for updates, bug reports, and answers -> > {{ site.local.prompt }} rsync --port=768 test.txt {{ site.remote.user }}@{{ site.remote.login }}: +> > {{ site.local.prompt }} rsync --port=768 hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > > ``` > > {: .language-bash} > {: .solution} @@ -279,57 +268,40 @@ The most common archiving command you will use on a (Linux) HPC cluster is optionally, compress it. Let's start with the file we downloaded from the lesson site, -`hpc-lesson-data.tar.gz`. The "gz" part stands for _gzip_, which is a -compression library. Reading this file name, it appears somebody took a folder -named "hpc-lesson-data," wrapped up all its contents in a single file with -`tar`, then compressed that archive with `gzip` to save space. Let's check -using `tar` with the `-t` flag, which prints the "**t**able of contents" -without unpacking the file, specified by `-f `, on the remote -computer. Note that you can concatenate the two flags, instead of writing -`-t -f` separately. +`hpc-intro-code.tar.gz`. The "gz" part stands for _gzip_, which is a +compression library. This kind of file can usually be interpreted by reading +its name: it appears somebody took a folder named "hpc-intro-code," wrapped up +all its contents in a single file with `tar`, then compressed that archive with +`gzip` to save space. Let's check using `tar` with the `-t` flag, which prints +the "**t**able of contents" without unpacking the file, specified by +`-f `, on the remote computer. Note that you can concatenate the two +flags, instead of writing `-t -f` separately. ``` {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -{{ site.remote.prompt }} tar -tf hpc-lesson-data.tar.gz -hpc-intro-data/ -hpc-intro-data/north-pacific-gyre/ -hpc-intro-data/north-pacific-gyre/NENE01971Z.txt -hpc-intro-data/north-pacific-gyre/goostats -hpc-intro-data/north-pacific-gyre/goodiff -hpc-intro-data/north-pacific-gyre/NENE02040B.txt -hpc-intro-data/north-pacific-gyre/NENE01978B.txt -hpc-intro-data/north-pacific-gyre/NENE02043B.txt -hpc-intro-data/north-pacific-gyre/NENE02018B.txt -hpc-intro-data/north-pacific-gyre/NENE01843A.txt -hpc-intro-data/north-pacific-gyre/NENE01978A.txt -hpc-intro-data/north-pacific-gyre/NENE01751B.txt -hpc-intro-data/north-pacific-gyre/NENE01736A.txt -hpc-intro-data/north-pacific-gyre/NENE01812A.txt -hpc-intro-data/north-pacific-gyre/NENE02043A.txt -hpc-intro-data/north-pacific-gyre/NENE01729B.txt -hpc-intro-data/north-pacific-gyre/NENE02040A.txt -hpc-intro-data/north-pacific-gyre/NENE01843B.txt -hpc-intro-data/north-pacific-gyre/NENE01751A.txt -hpc-intro-data/north-pacific-gyre/NENE01729A.txt -hpc-intro-data/north-pacific-gyre/NENE02040Z.txt +{{ site.remote.prompt }} tar -tf hpc-intro-code.tar.gz +hpc-intro-code/ +hpc-intro-code/amdahl +hpc-intro-code/README.md +hpc-intro-code/LICENSE.txt ``` {: .language-bash} -This shows a folder containing another folder, which contains a bunch of files. -If you've taken The Carpentries' Shell lesson recently, these might look -familiar. 
Let's see about that compression, using `du` for "**d**isk -**u**sage". +This shows a folder which contains a few files. Let's see about that +compression, using `du` for "**d**isk **u**sage". ``` -{{ site.remote.prompt }} du -sh hpc-lesson-data.tar.gz -36K hpc-intro-data.tar.gz +{{ site.remote.prompt }} du -sh hpc-intro-code.tar.gz +3.4K hpc-intro-code.tar.gz ``` {: .language-bash} > ## Files Occupy at Least One "Block" > -> If the filesystem block size is larger than 36 KB, you'll see a larger +> If the filesystem block size is larger than 3.4 KB, you'll see a larger > number: files cannot be smaller than one block. +> You can use the `--apparent-size` flag to see the exact size, although the +> unoccupied space in that filesystem block can't be used for anything else. {: .callout} Now let's unpack the archive. We'll run `tar` with a few common flags: @@ -351,49 +323,34 @@ When it's done, check the directory size with `du` and compare. > > ## Commands > > > > ``` -> > {{ site.remote.prompt }} tar -xvzf hpc-lesson-data.tar.gz +> > {{ site.remote.prompt }} tar -xvzf hpc-intro-code.tar.gz > > ``` > > {: .language-bash} > > > > ``` -> > hpc-intro-data/ -> > hpc-intro-data/north-pacific-gyre/ -> > hpc-intro-data/north-pacific-gyre/NENE01971Z.txt -> > hpc-intro-data/north-pacific-gyre/goostats -> > hpc-intro-data/north-pacific-gyre/goodiff -> > hpc-intro-data/north-pacific-gyre/NENE02040B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01978B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02043B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02018B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01843A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01978A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01751B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01736A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01812A.txt -> > hpc-intro-data/north-pacific-gyre/NENE02043A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01729B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02040A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01843B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01751A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01729A.txt -> > hpc-intro-data/north-pacific-gyre/NENE02040Z.txt +> > hpc-intro-code/ +> > hpc-intro-code/amdahl +> > hpc-intro-code/README.md +> > hpc-intro-code/LICENSE.txt > > ``` > > {: .output} > > > > Note that we did not type out `-x -v -z -f`, thanks to the flag -> > concatenation, though the command works identically either way. +> > concatenation, though the command works identically either way -- +> > so long as the concatenated list ends with `f`, because the next string +> > must specify the name of the file to extract. > > > > ``` -> > {{ site.remote.prompt }} du -sh hpc-lesson-data -> > 144K hpc-intro-data +> > {{ site.remote.prompt }} du -sh hpc-intro-code +> > 16K hpc-intro-code > > ``` > > {: .language-bash} > {: .solution} > > > ## Was the Data Compressed? > > -> > Text files compress nicely: the "tarball" is one-quarter the total size of -> > the raw data! +> > Text files (including Python source code) compress nicely: the "tarball" is +> > one-quarter the total size of the raw data! 
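> >
> > As a quick check of that claim using the numbers above: `du` reported 3.4K
> > for the tarball and 16K for the unpacked directory, and 3.4 / 16 ≈ 0.21,
> > or roughly one-quarter once filesystem block-size rounding is taken into
> > account.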
> {: .discussion} {: .challenge} @@ -402,13 +359,13 @@ extracting it -- set a `c` flag instead of `x`, set the archive filename, then provide a directory to compress: ``` -{{ site.local.prompt }} tar -cvzf compressed_data.tar.gz hpc-intro-data +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz hpc-intro-code ``` {: .language-bash} > ## Working with Windows > -> When you transfer text files to from a Windows system to a Unix system (Mac, +> When you transfer text files from a Windows system to a Unix system (Mac, > Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files > slightly different than Unix, and adds an extra character to every line. > diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md index 32b02627..f826f6c7 100644 --- a/_episodes/16-parallel.md +++ b/_episodes/16-parallel.md @@ -7,16 +7,13 @@ questions: - "What benefits arise from parallel execution?" - "What are the limits of gains from execution in parallel?" objectives: -- "Construct a program that can execute in parallel." - "Prepare a job submission script for the parallel executable." - "Launch jobs with parallel execution." - "Record and summarize the timing and accuracy of jobs." - "Describe the relationship between job parallelism and performance." keypoints: - "Parallel programming allows applications to take advantage of - parallel hardware; serial code will not 'just work.'" -- "Distributed memory parallelism is a common case, using the Message - Passing Interface (MPI)." + parallel hardware." - "The queuing system facilitates executing parallel tasks." - "Performance improvements from parallel execution do not scale linearly." --- @@ -25,632 +22,338 @@ We now have the tools we need to run a multi-processor job. This is a very important aspect of HPC systems, as parallelism is one of the primary tools we have to improve the performance of computational tasks. -Our example implements a stochastic algorithm for estimating the value of -π, the ratio of the circumference to the diameter of a circle. -The program generates a large number of random points on a 1×1 square -centered on (½,½), and checks how many of these points fall -inside the unit circle. -On average, π/4 of the randomly-selected points should fall in the -circle, so π can be estimated from 4*f*, where _f_ is the observed -fraction of points that fall in the circle. -Because each sample is independent, this algorithm is easily implemented -in parallel. +If you disconnected, log back in to the cluster. -{% include figure.html url="" caption="" max-width="40%" - file="/fig/pi.png" - alt="Algorithm for computing pi through random sampling" %} - -## A Serial Solution to the Problem - -We start from a Python script using concepts taught in Software Carpentry's -[Programming with Python][inflammation] workshops. -We want to allow the user to specify how many random points should be used -to calculate π through a command-line parameter. -This script will only use a single CPU for its entire run, so it's classified -as a serial process. - -Let's write a Python program, `pi.py`, to estimate π for us. -Start by importing the `numpy` module for calculating the results, -and the `sys` module to process command-line parameters: - -``` -import numpy as np -import sys -``` -{: .language-python} - -We define a Python function `inside_circle` that accepts a single parameter -for the number of random points used to calculate π. -See [Programming with Python: Creating Functions][python-func] -for a review of Python functions. 
-It randomly samples points with both _x_ and _y_ on the half-open interval -[0, 1). -It then computes their distances from the origin (i.e., radii), and returns -how many of those distances were less than or equal to 1.0. -All of this is done using _vectors_ of double-precision (64-bit) -floating-point values. - -``` -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count ``` -{: .language-python} - -Next, we create a main function to call the `inside_circle` function and -calculate π from its returned result. -See [Programming with Python: Command-Line Programs][cmd-line] -for a review of `main` functions and parsing command-line parameters. - -``` -def main(): - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - print(my_pi) - -if __name__ == '__main__': - main() -``` -{: .language-python} - -If we run the Python script locally with a command-line parameter, as in -`python pi-serial.py 1024`, we should see the script print its estimate of -π: - -``` -{{ site.local.prompt }} python pi-serial.py 1024 -3.10546875 +{{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} ``` {: .language-bash} -> ## Random Number Generation -> -> In the preceding code, random numbers are conveniently generated using the -> built-in capabilities of NumPy. In general, random-number generation is -> difficult to do well, it's easy to accidentally introduce correlations into -> the generated sequence. -> -> * Discuss why generating high quality random numbers might be difficult. -> * Is the quality of random numbers generated sufficient for estimating π -> in this implementation? -> -> > ## Solution -> > -> > * Computers are deterministic and produce pseudo random numbers using -> > an algorithm. The choice of algorithm and its parameters determines -> > how random the generated numbers are. Pseudo random number generation -> > algorithms usually produce a sequence numbers taking the previous output -> > as an input for generating the next number. At some point the sequence of -> > pseudo random numbers will repeat, so care is required to make sure the -> > repetition period is long and that the generated numbers have statistical -> > properties similar to those of true random numbers. -> > * Yes. -> {: .solution } -{: .discussion } - -## Measuring Performance of the Serial Solution - -The stochastic method used to estimate π should converge on the true -value as the number of random points increases. -But as the number of points increases, creating the variables `x`, `y`, and -`radii` requires more time and more memory. -Eventually, the memory required may exceed what's available on our local -laptop or desktop, or the time required may be too long to meet a deadline. -So we'd like to take some measurements of how much memory and time the script -requires, and later take the same measurements after creating a parallel -version of the script to see the benefits of parallelizing the calculations -required. - -### Estimating Memory Requirements - -Since the largest variables in the script are `x`, `y`, and `radii`, each -containing `n_samples` points, we'll modify the script to report their -total memory required. -Each point in `x`, `y`, or `radii` is stored as a NumPy `float64`, we can -use NumPy's [`dtype`][np-dtype] function to calculate the size of a `float64`. 
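
As a sanity check on what to expect: a `float64` occupies 8 bytes, so for
100,000,000 samples the three arrays together need about
3 × 100,000,000 × 8 bytes ≈ 2.24 GiB, and the modified script below should
report a number in that neighborhood.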
- -Replace the `print(my_pi)` line with the following: - -``` -size_of_float = np.dtype(np.float64).itemsize -memory_required = 3 * n_samples * size_of_float / (1024**3) -print("Pi: {}, memory: {} GiB".format(my_pi, memory_required)) -``` -{: .language-python} - -The first line calculates the bytes of memory required for a single -64-bit floating point number using the `dtype` function. -The second line estimates the total amount of memory required to store three -variables containing `n_samples` `float64` values, converting the value into -units of [gibibytes][units]. -The third line prints both the estimate of π and the estimated amount of -memory used by the script. - -The updated Python script is: - -``` -import numpy as np -import sys - -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count - -def main(): - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**3) - print("Pi: {}, memory: {} GiB".format(my_pi, memory_required)) - -if __name__ == '__main__': - main() -``` -{: .language-python} - -Run the script again with a few different values for the number of samples, -and see how the memory required changes: - -``` -{{ site.local.prompt }} python pi-serial.py 1000 -Pi: 3.144, memory: 2.2351741790771484e-05 GiB -{{ site.local.prompt }} python pi-serial.py 2000 -Pi: 3.18, memory: 4.470348358154297e-05 GiB -{{ site.local.prompt }} python pi-serial.py 1000000 -Pi: 3.140944, memory: 0.022351741790771484 GiB -{{ site.local.prompt }} python pi-serial.py 100000000 -Pi: 3.14182724, memory: 2.2351741790771484 GiB -``` -{: .language-bash } - -Here we can see that the estimated amount of memory required scales linearly -with the number of samples used. -In practice, there is some memory required for other parts of the script, -but the `x`, `y`, and `radii` variables are by far the largest influence -on the total amount of memory required. - -### Estimating Calculation Time - -Most of the calculations required to estimate π are in the -`inside_circle` function: - -1. Generating `n_samples` random values for `x` and `y`. -1. Calculating `n_samples` values of `radii` from `x` and `y`. -1. Counting how many values in `radii` are under 1.0. +## Help! -There's also one multiplication operation and one division operation required -to convert the `counts` value to the final estimate of π in the main -function. +Many command-line programs include a "help" message. Navigate to the directory +of the decompressed files, then print the `amdahl` program's help message: -A simple way to measure the calculation time is to use Python's `datetime` -module to store the computer's current date and time before and after the -calculations, and calculate the difference between those times. 
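
Putting those pieces together, the overall pattern looks like this (a sketch
using only the standard library):

```
import datetime

start_time = datetime.datetime.now()
# ... the calculation being measured goes here ...
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
```
{: .language-python}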
- -To add the time measurement to the script, add the following line below the -`import sys` line: - -``` -import datetime ``` -{: .language-python} - -Then, add the following line immediately above the line calculating `counts`: - -``` -start_time = datetime.datetime.now() -``` -{: .language-python} - -Add the following two lines immediately below the line calculating `counts`: - +{{ site.remote.prompt }} cd hpc-intro-code +{{ site.remote.prompt }} ./amdahl --help ``` -end_time = datetime.datetime.now() -elapsed_time = (end_time - start_time).total_seconds() -``` -{: .language-python} - -And finally, modify the `print` statement with the following: +{: .language-bash} ``` -print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required, - elapsed_time)) -``` -{: .language-python} - -The final Python script for the serial solution is: +usage: amdahl [-h] [-p [PARALLEL_PROPORTION]] [-w [WORK_SECONDS]] +optional arguments: + -h, --help show this help message and exit + -p [PARALLEL_PROPORTION], --parallel-proportion [PARALLEL_PROPORTION] + Parallel proportion should be a float between 0 and 1 + -w [WORK_SECONDS], --work-seconds [WORK_SECONDS] + Total seconds of workload, should be an integer greater than 0 ``` -import numpy as np -import sys -import datetime +{: .output} -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count +This message doesn't tell us much about what the program _does_, but it does +tell us the important flags we might want to use when launching it. -def main(): - n_samples = int(sys.argv[1]) - start_time = datetime.datetime.now() - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - end_time = datetime.datetime.now() - elapsed_time = (end_time - start_time).total_seconds() - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**3) - print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required, - elapsed_time)) +## Running the Job on a Compute Node -if __name__ == '__main__': - main() -``` -{: .language-python} - -Run the script again with a few different values for the number of samples, -and see how the solution time changes: +Create a submission file, requesting one task on a single node, then launch it. ``` -{{ site.local.prompt }} python pi-serial.py 1000000 -Pi: 3.139612, memory: 0.022351741790771484 GiB, time: 0.034872 s -{{ site.local.prompt }} python pi-serial.py 10000000 -Pi: 3.1425492, memory: 0.22351741790771484 GiB, time: 0.351212 s -{{ site.local.prompt }} python pi-serial.py 100000000 -Pi: 3.14146608, memory: 2.2351741790771484 GiB, time: 3.735195 s +{{ site.remote.prompt }} nano serial-job.sh +{{ site.remote.prompt }} cat serial-job.sh ``` -{: .language-bash } - -Here we can see that the amount of time required scales approximately linearly -with the number of samples used. -There could be some variation in additional runs of the script with the same -number of samples, since the elapsed time is affected by other programs -running on the computer at the same time. -But if the script is the most computationally-intensive process running at the -time, its calculations are the largest influence on the elapsed time. - -Now that we've developed our initial script to estimate π, we can see -that as we increase the number of samples: - -1. The estimate of π tends to become more accurate. -1. 
The amount of memory required scales approximately linearly. -1. The amount of time to calculate scales approximately linearly. - -In general, achieving a better estimate of π requires a greater number of -points. -Take a closer look at `inside_circle`: should we expect to get high accuracy -on a single machine? - -Probably not. -The function allocates three arrays of size _N_ equal to the number of points -belonging to this process. -Using 64-bit floating point numbers, the memory footprint of these arrays can -get quite large. -Each 100,000,000 points sampled consumes 2.24 GiB of memory. -Sampling 400,000,000 points consumes 8.94 GiB of memory, -and if your machine has less RAM than that, it will grind to a halt. -If you have 16 GiB installed, you won't quite make it to 750,000,000 points. - -## Running the Serial Job on a Compute Node +{: .language-bash} -Create a submission file, requesting one task on a single node and enough -memory to prevent the job from running out of memory: +{% include {{ site.snippets }}/parallel/one-task-jobscript.snip %} ``` -{{ site.remote.prompt }} nano serial-pi.sh -{{ site.remote.prompt }} cat serial-pi.sh +{{ site.remote.prompt }} {{ site.sched.submit.name }} serial-job.sh ``` {: .language-bash} -{% include {{ site.snippets }}/parallel/one-task-with-memory-jobscript.snip %} - -Then submit your job. We will use the batch file to set the options, -rather than the command line. +As before, use the {{ site.sched.name }} status commands to check whether your job +is running and when it ends: ``` -{{ site.remote.prompt }} {{ site.sched.submit.name }} serial-pi.sh +{{ site.remote.prompt }} {{ site.sched.status }} {{ site.sched.flag.user }} ``` {: .language-bash} -As before, use the status commands to check when your job runs. -Use `ls` to locate the output file, and examine it. Is it what you expected? - -* How good is the value for π? -* How much memory did it need? -* How long did the job take to run? +Use `ls` to locate the output file. The `-t` flag sorts in +reverse-chronological order: newest first. What was the output? -Modify the job script to increase both the number of samples and the amount -of memory requested (perhaps by a factor of 2, then by a factor of 10), -and resubmit the job each time. +> ## Read the Job Output +> +> The cluster output should be written to a file in the folder you launched the +> job from. +> +> ``` +> {{ site.remote.prompt }} ls -t +> ``` +> {: .language-bash} +> ``` +> slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt +> ``` +> {: .output} +> ``` +> {{ site.remote.prompt }} cat slurm-347087.out +> ``` +> {: .language-bash} +> ``` +> Doing 30.000000 seconds of 'work' on 1 processor, +> which should take 30.000000 seconds with 0.850000 parallel proportion of the workload. +> +> Hello, World! I am process 0 of 1 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. +> Hello, World! I am process 0 of 1 on {{ site.remote.node }}. I will do parallel 'work' for 25.500000 seconds. +> +> Total execution time (according to rank 0): 30.033140 seconds +> ``` +> {: .output} +{: .solution} -* How good is the value for π? -* How much memory did it need? -* How long did the job take to run? +`amdahl` takes two optional parameters as input: the amount of work and the +proportion of that work that is parallel in nature. Based on the output, we can +see that the code uses a default of 30 seconds of work that is 85% +parallel. 
The program ran for just over 30 seconds in total, and if we run the +numbers, it is true that 15% of it was marked 'serial' and 85% was 'parallel'. -Even with sufficient memory for necessary variables, -a script could require enormous amounts of time to calculate on a single CPU. -To reduce the amount of time required, -we need to modify the script to use multiple CPUs for the calculations. -In the largest problem scales, -we could use multiple CPUs in multiple compute nodes, -distributing the memory requirements across all the nodes used to -calculate the solution. +Since we only gave the job one CPU, this job wasn't really parallel: the +processor performed the 'serial' work for 4.5 seconds, then the 'parallel' part +for 25.5 seconds, and no time was saved. The cluster can do better, if we ask. ## Running the Parallel Job -We will run an example that uses the Message Passing Interface (MPI) for -parallelism -- this is a common tool on HPC systems. +The `amdahl` program uses the Message Passing Interface (MPI) for parallelism +-- this is a common tool on HPC systems. > ## What is MPI? > -> The Message Passing Interface is a set of tools which allow multiple parallel -> jobs to communicate with each other. +> The Message Passing Interface is a set of tools which allow multiple tasks +> running simultaneously to communicate with each other. > Typically, a single executable is run multiple times, possibly on different > machines, and the MPI tools are used to inform each instance of the -> executable about how many instances there are, which instance it is. -> MPI also provides tools to allow communication and coordination between -> instances. +> executable about its sibling processes, and which instance it is. +> MPI also provides tools to allow communication between instances to +> coordinate work, exchange information about elements of the task, or to +> transfer data. > An MPI instance typically has its own copy of all the local variables. {: .callout} -While MPI jobs can generally be run as stand-alone executables, in order for -them to run in parallel they must use an MPI _run-time system_, which is a -specific implementation of the MPI _standard_. -To do this, they should be started via a command such as `mpiexec` (or -`mpirun`, or `srun`, etc. depending on the MPI run-time you need to use), -which will ensure that the appropriate run-time support for parallelism is -included. +While MPI-aware executables can generally be run as stand-alone programs, in +order for them to run in parallel they must use an MPI _run-time environment_, +which is a specific implementation of the MPI _standard_. +To activate the MPI environment, the program should be started via a command +such as `mpiexec` (or `mpirun`, or `srun`, etc. depending on the MPI run-time +you need to use), which will ensure that the appropriate run-time support for +parallelism is included. > ## MPI Runtime Arguments > > On their own, commands such as `mpiexec` can take many arguments specifying > how many machines will participate in the execution, > and you might need these if you would like to run an MPI program on your -> laptop (for example). +> own (for example, on your laptop). 
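> A stand-alone launch might look like `mpiexec -n 4 ./amdahl`, where `-n`
> sets the number of processes -- a sketch, since the flags accepted depend
> on which MPI implementation is installed.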
> In the context of a queuing system, however, it is frequently the case that -> we do not need to specify this information as the MPI run-time will have been -> configured to obtain it from the queuing system, +> MPI run-time will obtain the necessary parameters from the queuing system, > by examining the environment variables set when the job is launched. {: .callout} -> ## What Changes Are Needed for an MPI Version of the π Calculator? -> -> First, we need to import the `MPI` object from the Python module `mpi4py` by -> adding an `from mpi4py import MPI` line immediately below the `import -> datetime` line. -> -> Second, we need to modify the "main" function to perform the overhead and -> accounting work required to: -> -> * subdivide the total number of points to be sampled, -> * _partition_ the total workload among the various parallel processors -> available, -> * have each parallel process report the results of its workload back -> to the "rank 0" process, -> which does the final calculations and prints out the result. -> -> The modifications to the serial script demonstrate four important concepts: -> -> * COMM_WORLD: the default MPI Communicator, providing a channel for all the -> processes involved in this `mpiexec` to exchange information with one -> another. -> * Scatter: A collective operation in which an array of data on one MPI rank -> is divided up, with separate portions being sent out to the partner ranks. -> Each partner rank receives data from the matching index of the host array. -> * Gather: The inverse of scatter. One rank populates a local array, -> with the array element at each index assigned the value provided by the -> corresponding partner rank -- including the host's own value. -> * Conditional Output: since every rank is running the _same code_, the -> partitioning, the final calculations, and the `print` statement are -> wrapped in a conditional so that only one rank performs these operations. -{: .discussion} - -We add the lines: +Let's modify the job script to request more cores and use the MPI run-time. +```bash +{{ site.remote.prompt }} cp serial-job.sh parallel-job.sh +{{ site.remote.prompt }} nano parallel-job.sh +{{ site.remote.prompt }} cat parallel-job.sh ``` -comm = MPI.COMM_WORLD -cpus = comm.Get_size() -rank = comm.Get_rank() -``` -{: .language-python} -immediately before the `n_samples` line to set up the MPI environment for -each process. +{% include {{ site.snippets }}/parallel/four-tasks-jobscript.snip %} -We replace the `start_time` and `counts` lines with the lines: +Then submit your job. Note that the submission command has not really changed +from how we submitted the serial job: all the parallel settings are in the +batch file rather than the command line. ``` -if rank == 0: - start_time = datetime.datetime.now() - partitions = [ int(n_samples / cpus) ] * cpus - counts = [ int(0) ] * cpus -else: - partitions = None - counts = None +{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-job.sh ``` -{: .language-python} - -This ensures that only the rank 0 process measures times and coordinates -the work to be distributed to all the ranks, while the other ranks -get placeholder values for the `partitions` and `counts` variables. - -Immediately below these lines, let's - -* distribute the work among the ranks with MPI `scatter`, -* call the `inside_circle` function so each rank can perform its share - of the work, -* collect each rank's results into a `counts` variable on rank 0 using MPI - `gather`. 
+{: .language-bash} -by adding the following three lines: +As before, use the status commands to check when your job runs. ``` -partition_item = comm.scatter(partitions, root=0) -count_item = inside_circle(partition_item) -counts = comm.gather(count_item, root=0) +{{ site.remote.prompt }} ls -t ``` -{: .language-python} - -Illustrations of these steps are shown below. +{: .language-bash} +``` +slurm-347178.out parallel-job.sh slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt +``` +{: .output} +``` +{{ site.remote.prompt }} cat slurm-347178.out +``` +{: .language-bash} +``` +Doing 30.000000 seconds of 'work' on 4 processors, +which should take 10.875000 seconds with 0.850000 parallel proportion of the workload. ---- + Hello, World! I am process 0 of 4 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. + Hello, World! I am process 2 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 1 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 3 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 0 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. -Setup the MPI environment and initialize local variables -- including the -vector containing the number of points to generate on each parallel processor: +Total execution time (according to rank 0): 10.887713 seconds +``` +{: .output} -{% include figure.html url="" caption="" max-width="50%" - file="/fig/initialize.png" - alt="MPI initialize" %} +> ## Is it 4× faster? +> +> The parallel job received 4× more processors than the serial job: +> does that mean it finished in ¼ the time? +> +> > ## Solution +> > +> > The parallel job did take _less_ time: 11 seconds is better than 30! +> > But it is only a 2.7× improvement, not 4×. +> > +> > Look at the job output: +> > +> > * While "process 0" did serial work, processes 1 through 3 did their +> > parallel work. +> > * While process 0 caught up on its parallel work, +> > the rest did nothing at all. +> > +> > Process 0 always has to finish its serial task before it can start on the +> > parallel work. This sets a lower limit on the amount of time this job will +> > take, no matter how many cores you throw at it. +> > +> > This is the basic principle behind [Amdahl's Law][amdahl], which is one way +> > of predicting improvements in execution time for a __fixed__ workload that +> > can be subdivided and run in parallel to some extent. +> {: .solution} +{: .challenge} -Distribute the number of points from the originating vector to all the parallel -processors: +## How Much Does Parallel Execution Improve Performance? -{% include figure.html url="" caption="" max-width="50%" - file="/fig/scatter.png" - alt="MPI scatter" %} +In theory, dividing up a perfectly parallel calculation among _n_ MPI processes +should produce a decrease in total run time by a factor of _n_. +As we have just seen, real programs need some time for the MPI processes to +communicate and coordinate, and some types of calculations can't be subdivided: +they only run effectively on a single CPU. -Perform the computation in parallel: +Additionally, if the MPI processes operate on different physical CPUs in the +computer, or across multiple compute nodes, even more time is required for +communication than it takes when all processes operate on a single CPU. 
-{% include figure.html url="" caption="" max-width="50%" - file="/fig/compute.png" - alt="MPI compute" %} +In practice, it's common to evaluate the parallelism of an MPI program by -Retrieve counts from all the parallel processes: +* running the program across a range of CPU counts, +* recording the execution time on each run, +* comparing each execution time to the time when using a single CPU. -{% include figure.html url="" caption="" max-width="50%" - file="/fig/gather.png" - alt="MPI gather" %} +Since "more is better" -- improvement is easier to interpret from increases in +some quantity than decreases -- comparisons are made using the speedup factor +_S_, which is calculated as the single-CPU execution time divided by the multi-CPU +execution time. For a perfectly parallel program, a plot of the speedup _S_ +versus the number of CPUs _n_ would give a straight line, _S_ = _n_. -Print out the report: +Let's run one more job, so we can see how close to a straight line our `amdahl` +code gets. -{% include figure.html url="" caption="" max-width="50%" - file="/fig/finalize.png" - alt="MPI finalize" %} +```bash +{{ site.remote.prompt }} nano parallel-job.sh +{{ site.remote.prompt }} cat parallel-job.sh +``` ---- +{% include {{ site.snippets }}/parallel/eight-tasks-jobscript.snip %} -Finally, we'll ensure the `my_pi` through `print` lines only run on rank 0. -Otherwise, every parallel processor will print its local value, -and the report will become hopelessly garbled: +Then submit your job. Note that the submission command has not really changed +from how we submitted the serial job: all the parallel settings are in the +batch file rather than the command line. ``` -if rank == 0: - my_pi = 4.0 * sum(counts) / sum(partitions) - end_time = datetime.datetime.now() - elapsed_time = (end_time - start_time).total_seconds() - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * sum(partitions) * size_of_float / (1024**3) - print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required, - elapsed_time)) +{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-job.sh ``` -{: .language-python} - -A fully commented version of the final MPI parallel python code is available -[here](/files/pi-mpi.py). - -Our purpose here is to exercise the parallel workflow of the cluster, not to -optimize the program to minimize its memory footprint. -Rather than push our local machines to the breaking point (or, worse, the login -node), let's give it to a cluster node with more resources. +{: .language-bash} -Create a submission file, requesting more than one task on a single node: +As before, use the status commands to check when your job runs. ``` -{{ site.remote.prompt }} nano parallel-pi.sh -{{ site.remote.prompt }} cat parallel-pi.sh +{{ site.remote.prompt }} ls -t ``` {: .language-bash} - -{% include {{ site.snippets }}/parallel/four-tasks-jobscript.snip %} - -Then submit your job. We will use the batch file to set the options, -rather than the command line. - ``` -{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-pi.sh +slurm-347271.out parallel-job.sh slurm-347178.out slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt +``` +{: .output} +``` +{{ site.remote.prompt }} cat slurm-347178.out ``` {: .language-bash} +``` +which should take 7.687500 seconds with 0.850000 parallel proportion of the workload. -As before, use the status commands to check when your job runs. -Use `ls` to locate the output file, and examine it. -Is it what you expected? 
- -* How good is the value for π? -* How much memory did it need? -* How much faster was this run than the serial run with 100000000 points? - -Modify the job script to increase both the number of samples and the amount -of memory requested (perhaps by a factor of 2, then by a factor of 10), -and resubmit the job each time. -You can also increase the number of CPUs. - -* How good is the value for π? -* How much memory did it need? -* How long did the job take to run? - -## How Much Does MPI Improve Performance? - -In theory, by dividing up the π calculations among _n_ MPI processes, -we should see run times reduce by a factor of _n_. -In practice, some time is required to start the additional MPI processes, -for the MPI processes to communicate and coordinate, and some types of -calculations may only be able to run effectively on a single CPU. - -Additionally, if the MPI processes operate on different physical CPUs -in the computer, or across multiple compute nodes, additional time is -required for communication compared to all processes operating on a -single CPU. - -[Amdahl's Law][amdahl] is one way of predicting improvements in execution time -for a __fixed__ parallel workload. If a workload needs 20 hours to complete on -a single core, and one hour of that time is spent on tasks that cannot be -parallelized, only the remaining 19 hours could be parallelized. Even if an -infinite number of cores were used for the parallel parts of the workload, the -total run time cannot be less than one hour. + Hello, World! I am process 4 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 0 of 8 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. + Hello, World! I am process 2 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 1 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 3 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 5 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 6 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 7 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 0 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. -In practice, it's common to evaluate the parallelism of an MPI program by +Total execution time (according to rank 0): 7.697227 seconds +``` +{: .output} -* running the program across a range of CPU counts, -* recording the execution time on each run, -* comparing each execution time to the time when using a single CPU. +> ## Non-Linear Output +> +> When we ran the job with 4 parallel workers, the serial job wrote its output +> first, then the parallel processes wrote their output, with process 0 coming +> in first and last. +> +> With 8 workers, this is not the case: since the parallel workers take less +> time than the serial work, it is hard to say which process will write its +> output first, except that it will _not_ be process 0! 
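>
> The timings in the log bear this out: each worker's parallel chunk is now
> 25.5 s ÷ 8 ≈ 3.19 s, which finishes well before rank 0's 4.5 s of serial
> work does.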
+{: .discussion} + +Now, let's summarize the amount of time it took each job to run: + +| Number of CPUs | Runtime (sec) | +| --- | --- | +| 1 | 30.033140 | +| 4 | 10.887713 | +| 8 | 7.697227 | + +Then, use the first row to compute speedups _S_, using Python as a command-line calculator: + +``` +{{ site.remote.prompt }} for n in 30.033 10.888 7.6972; do python3 -c "print(30.033 / $n)"; done +``` +{: .language-bash} -The speedup factor _S_ is calculated as the single-CPU execution time divided -by the multi-CPU execution time. -For a laptop with 8 cores, the graph of speedup factor versus number of cores -used shows relatively consistent improvement when using 2, 4, or 8 cores, but -using additional cores shows a diminishing return. - -{% include figure.html url="" caption="" max-width="50%" - file="/fig/laptop-mpi_Speedup_factor.png" - alt="MPI speedup factors on an 8-core laptop" %} - -For a set of HPC nodes containing 28 cores each, the graph of speedup factor -versus number of cores shows consistent improvements up through three nodes -and 84 cores, but __worse__ performance when adding a fourth node with an -additional 28 cores. -This is due to the amount of communication and coordination required among -the MPI processes requiring more time than is gained by reducing the amount -of work each MPI process has to complete. This communication overhead is not -included in Amdahl's Law. - -{% include figure.html url="" caption="" max-width="50%" - file="/fig/hpc-mpi_Speedup_factor.png" - alt="MPI speedup factors on an 8-core laptop" %} - -In practice, MPI speedup factors are influenced by: - -* CPU design, -* the communication network between compute nodes, -* the MPI library implementations, and -* the details of the MPI program itself. +| Number of CPUs | Speedup | Ideal | +| --- | --- | --- | +| 1 | 1.0 | 1.0 | +| 4 | 2.75 | 4.0 | +| 8 | 3.90 | 8.0 | + +The job output files have been telling us that this program is performing 85% +of its work in parallel, leaving 15% to run in serial. This seems reasonably +high, but our quick study of speedup shows that in order to get a 4× speedup, +we have to use 8 or 9 processors in parallel. In real programs, the speedup +factor is influenced by + +* CPU design +* communication network between compute nodes +* MPI library implementations +* details of the MPI program itself + +Using Amdahl's Law, you can prove that with this program, it is _impossible_ +to reach 8× speedup, no matter how many processors you have on hand. Details of +that analysis, with results to back it up, are left for the next class in the +HPC Carpentry workshop, _HPC Workflows_. 
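
You can preview that result with the same command-line calculator trick: the
15% serial remainder puts a hard ceiling on the speedup, however many CPUs
join in.

```
{{ site.remote.prompt }} python3 -c "print(round(1 / (1 - 0.85), 2))"
```
{: .language-bash}

```
6.67
```
{: .output}

A hair under 7×, so 8× is indeed out of reach for this workload.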
In an HPC environment, we try to reduce the execution time for all types of jobs, and MPI is an extremely common way to combine dozens, hundreds, or diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..ad8a8eee --- /dev/null +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,14 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip index ac8effab..dfa00e6b 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip similarity index 70% rename from _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip index 5838157f..91ebd101 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip similarity index 62% rename from _includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip index 56aee37a..9caa7145 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip +++ 
b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=1:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=8 # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip index b1d90eb9..04a6fb3a 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=4:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=4 # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip similarity index 62% rename from _includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip index 56aee37a..d267e8cd 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=1:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=1 # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip similarity index 68% rename from _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip index 13418f34..b052e666 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip @@ -1,10 +1,9 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 -{{ site.sched.comment }} -n 1 -{{ 
site.sched.comment }} --mem=3G +{{ site.sched.comment }} -n 8 # Load the computing environment we need # (mpi4py and numpy are in SciPy-bundle) @@ -12,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip index 1512adde..b24c7153 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip @@ -1,10 +1,9 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need # (mpi4py and numpy are in SciPy-bundle) @@ -12,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip similarity index 65% rename from _includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip index 5838157f..4c149443 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need -module load python3 +module load Python # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml index 393d5580..0f55d3f0 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml +++ b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml @@ -21,7 +21,7 @@ snippets: "/snippets_library/NIST_CTCMS_slurm" local: prompt: "[user@laptop ~]$" - bash_shebang: "#!/usr/bin/env bash" + bash_shebang: "#!/usr/bin/bash" remote: name: "ruth" @@ -32,7 +32,7 @@ remote: homedir: "/users" user: "yourUsername" prompt: "501 ruth%" - bash_shebang: "#!/usr/bin/env bash" + bash_shebang: "#!/bin/bash" sched: name: "Slurm" diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..09ab213e --- /dev/null +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,11 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} 
parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip index 5eb930b4..af8f4653 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip @@ -1,12 +1,11 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip new file mode 100644 index 00000000..984f5740 --- /dev/null +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip @@ -0,0 +1,11 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 1 + +# Execute the task +./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..ad8a8eee --- /dev/null +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,14 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip index ac8effab..dfa00e6b 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip 
similarity index 70% rename from _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip index 5838157f..91ebd101 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..9caa7145 --- /dev/null +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,13 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -l nodes=1:ppn=8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip index b1d90eb9..04a6fb3a 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=4:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=4 # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip new file mode 100644 index 00000000..d267e8cd --- /dev/null +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip @@ -0,0 +1,13 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -l nodes=1:ppn=1 + +# Load the computing environment we need +module load python3 + +# Execute the task +./amdahl +``` +{: .language-bash} diff --git a/files/hpc-intro-code.tar.gz b/files/hpc-intro-code.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..1b2e3d9e60c002bbe0492992be0c15fe0eb5fb03 GIT binary patch literal 3391 zcmV-F4Z!jriwFP!000001MOPfa^gl7o@+hD3HH>0gE3${PF!(PjLnRx#oz)Q?^I0{ zX{5#|7HX|pGA7wnZS6DU5%zNLbCat)N}goT>6SqJ`EST?Z#>DNU^>JXDIN9 
zxf9YY_{_bLkX8E0p(!|w`8R5yw140zHX#+>@t{Y)v$+W4YAAq>piVXjh748D1Cmm~ z>G^b-oKL?K^GRn4!4F**O{yokY($F^=QIb!GP#t(@6m&3wpMRKWSmec6k&IjIc} z(!dccnAs}_JIkC;0j8YKL1b58tdKDWd&*iyLb6NAXI|~FF^OR^#|#}7Xj%B3$ISo* zjCajI;7%H%57T%_Hd-u_a0=EJ7tAkK3~Oc%xpX@H)C6dgModA@Ro5GE}atpfLaRA4}s>iK1q(~H^s#QotRZYO~+hWkZU1Vi=|?k;w!siKpuP{g5zTDzS#4C{{oz$xJLHac zoVkdNlZduayzZlq=;Vu+!I*OwimKcyMeF4rBhYwWAaa4-$li~T1P?*Wa$v!>AVpWz zgNYOsXNr7M7z}A{z^~DW*DA_Umd2aKIdQh2&6zbb8mL-2@){toQNxH|^o2-x?OJp% z@&b%iu22=ZEU4@e^Y(QzcEz)Oc z6{aqT7kF7Jjax1TI*QtqMW%5IEp9nnpD&VDUD&2;3tc>yqFuqHR7ml4#F-5J_(-Bo zW>aOTvBx?*(SO-QiI#Mrg`hMW6dzXRvdrw{vgBT?4>Ie_z9Q{l)>vRTY1w=7><3rx zL4L>0Q2CTf@mXf#iAcpm)Rod`F?WGjA=nO<+i zY`53T=+UvC8UO9W{4;2w|I@BjPtR+Gk+sP+sLAuXQd!ggDV2+_OL70Fynk3x{hz~9 z+W+|!MCW=;yOvOa3JW}snK$g3f3I9SF-Tu?T%geo7W$6tiTh~rXXaVP4L zL4hoEqjfR)3bibTNE>4@t}d`drM%3o0?C03E0 z2R1`TP)-dYUHNO-FkcBwrl>0iM(oZWg`;W1ftf>_->RM$71xqZg3O3(AY8*Rwzm!H zy?q$?%>1$R(~nWp?I(H=HQu&6+tbEd1nhG%^DS46LGB~CY8ujUC~+j8u}Xp(+s!G? ztZz8g3QgcX9uTU!)rik%Vd^r{L=j#O16s}z;sz7*4L4{5z9GhW6G$e-yHU z=4+ro4mpC}U$tds$WKyn0{U0OTFLcC@~pWIS3!R?@>(-~Jn}lf-T?9%XpQ_l>rLtJ z%fOr9cs0+PL0$#DA>;_UoY*j%LSD`>8$(`F2qJ%^TdafL9M6l;n?#<6-YD`M^k$Kh z&>KchKyMm3f-dd+84f<5_R+scrKMO8<@jPhzNo|(2l2)0_~J0W_#(deGWuI8F1<=# zOIP|3O~+ls$kZ+B%1DzrcsS4C(O7+0u$@zH2IIFMbR1WG?(GRay~7hU9jK`%(A>`D z4eI}J$zA%6L3IQAx_54gfNs&uLezP>_L-+%>W|eSAIH+mW8E{rfg?LmhjsNfGVvmY zdLR~kbY#mR@@eQd0}@n!$`@m5EQ|!Ad5Kq2`qKi04|M;e@9>doO6jjFf?@pCi@_ih zX#ivpT9XVl5g5Ir*00A0%m+4Rlc>p)V0L6v%c0Ms?>4w`ZQpWaipd+@HmdeT7{qNi zU!eRW7&8eLis%Rrflyghhq=&tb;58E0fjPZSe;#*zzOp`)U7`$7@GHi&$%-JcFSx> z^=(2|eSd`-Kd*P8VVm4V0BR^1Uk!u6lSg}d*9aH(3#J(D2@}_0(T9Ow7ycJ}+$pW6 zh5mn|eo||8YK7n~*xVZT_y3FK;z33C|110WgTDW)l*{F`|Nki{5d#e4M0gWl#i*>A zgL1K4P6ivpxZwWCmNM#~Q$WUWgh7g8d0s^t_2K4_`PV$JqM|#YZmy&ctNQfXK=pLN z#B~hS)ON17|lz9iXUM5 z!X>AlSxKQ*{GOYtflb5pSFt1-z3jBr8!-C-$W%M{ zJCld2diSh#*##WYt~R^xq4gH3&G+z^dh;|7wZC4pYn=|X+D85SqEWBmy?XPcad}#A zzJoV7uh~KeuA>_xXtxElU<6gKbqMUd);>AIpVc?@M!ow!Z@jH{n}qqc)rKluRNLM9 z$z`M3hKtMgMXOW8$-1)T|bwv<(`jxr3{-8r`Dx zs`&4uTW>Wm5?a+ji?bZ;(21MoWRc8;I4R$s}StI8FU2rIb=iDW#NBN-3q3Qc5YMlu}A5rIb=iDW#NB VN-3q3Qc8bj`VV=+WO@Kl008V6sLlWY literal 0 HcmV?d00001 diff --git a/files/hpc-intro-data.tar.gz b/files/hpc-intro-data.tar.gz deleted file mode 100644 index 8ccc37cf1856388b61c1cf217eb44eb30d63978c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36535 zcmV({K+?Y-iwFSe!x>-z1MR&_lXb~?oyT$giZ3V*g?8RA135wtYiK#bLpeNv!3F`F z1Z1FT$r1kXUe8)tO^Kk*mW*zOps$EvcXd^rv(Mg{Z@!mx`PIMu#m|2Io9}-2+n@dA z&%gWmfAeQQ##hnTo|AvpJtForul}#!efvjP z5h6`_bFf3YfZtywV-DDuV3BE9t$f{#VYrx*{DEWhz*lJ@muYkKVAH zqCen;@0#PT)0bZPV|>%voV5?VEWh0ESxc9Db>$zO&sOzsMITOYI$EFl0Bx z+OCH`T(jRXuda#@w;cU@n`?J|x@Vp39JYjhc7v$Hylm%!x&py%JQcls zD|_t1BfD$q5$V>h&DGAUr>mQ;3--g;r_iI&WpV{oue!8S^iMt~cT=B7_onf#XXts> z+I4%cUa31e^iqc}NgumtS&UP78$InhwaWGxeqE&xyyZS#L|u#R^sATWgLbcItqs4c zZ=TlGUHdJYc6V9j^1FU&x<&iw-f3lM*%pqZIsMbpwAk9=sH%QxJ+Z6X+IqYnvmSZe<8`mg%%_h{MjsI)T9UbEHXwcV?pPUm6jDwgiiJf|K% zr|TkWN-$Xy&~?6$P-C(BUpp}V+ypZ;mTdNf+g+6&h!zv{COEsNzRdG6Bk^@14M z9m{vvx8kgN@7X%M^M`u+Y@e+^<}vFhx-m^_kq>?p85GTRUwJlVj-j2c-KX!$u3J~3 z8$A4*dds=4zH<8&Mzki_WPGcQuWi?KTZTS&(Gwm;4|4IcdifgCQEq~-vq@wg$l)+HGS##{W-O)%G5*f3e`I(^h_dHmZUzK zLc#D-Xxc4(Y7=Mm>R8nNB6M*7bUx}r^?@e0PYZLjrafEz!!@QB(ACYnOV`JfFSCwY zKNsAXe7NPcTPHX3=rT9AOy6!+oB}8hjbvP>^7iX0lxvgLqHS7zNw;ZTb5FUSe9)_h zTKuxSw=PTJ)sLy@f)s5P;)`#-0)sY}K2m#eujwKbw^>Cz^`aoiisV-tQMdBPEY+#Q zMVo#!KkhLWmv{Xp+8$cD+=t@$q^sB46T|%US*E&yxA~rNY1(8}tCL^W7VJaU&W*qN zBYmRW? 
zmbI={zwMXKLf-n|x^@Q*9bEe0+5%ciT-2VrTo&NqUA4d!zZZu9A6#djPB^W(!fB}u zspC=4QLn3GqfNycCd;>C)5Q78c65o3~VW<4zwA5anz+B!*sPDH1#M zXva@2vUcb2*-mV1lT$}8z$+b{YkJfQalUrqu$C$j*dd%MZ|V((Hj%DJx4(Nk==;J^wsYjwJ9M{nB07v-Z6bY)+nb{yQJk38kMD!_Z`ae+ z4)H?P8a`|Xt#U6)8B4&jGHKjB}v*_A6HLfP42*tD9VX zL@Rjl)6M0M{Z8;YS--5M=0LwlB6RL194&T?PDVWh?x@Zzg&Qs5eY`%2B3db4+Im+d zIP9NsyhHSk3T@t7m2*=EsqR{~NOv;Sm$yXKv7{Y89ouyVad2^(djDwuD(U0HlTU7L zp;2!!THgRQ{E3cdSkh+VL zC(P;4^dE9M*IV>5Ecxu$ zPTU+}`Zq-ZjtsB)!Q#|)bzWABTpPKtL!Asy4qJ{BuVhxt(CaF-ZVu7pOWcX-qmwgaU04R}jVpNYdx~^dNtco)f0LslWZBE2= z<@z+dqZ2^ItnJrX(-CEPRq9l%%IlPFI%XYe0U83{=1TIkLdk~tZt~IZOa7JX3{rnR z7>7kgVVx?M!097iA4=QraH8t`-y9ZtSua|x`%B`ok}m1vDXCXDE53H!86E3OpTK*n zE`<+{Kd0h~@_Om5rBVHG{c!ZlL;KaQrMFd3Qx@yghsQw@%`SCR&|1`CqVvrMqawT_ zzLsnPG|^5AzH?wSl0BVv(@S2rk77ovdQ3lh9fB&{w1xebdJ|=0MKw%KGNIoj$~yMc z&RMPGU3q}2qOOIo$cOzD^*kF9TFcSjDvi}asT*+;`*ns8T=cBHlm{8Oa;oJ=pli~j z>TD9eRbHXZLQcAlKdkPuHsZ_@)GbOBbn?y#w|y@7(_ODy1xPP*-EG}N1@wFx?yVlr zI4o@CW~WmTI)-S`D&8tx(lUKVrE*E_h(vI0xlMAr{o(X3r*n$ce5+`n_@ElsD^Ka# z;$**B6v~jue)>#t#F3#jr1g~)Q7_ST=khqT^|(Spw1c4HuOf<`my1H3+&U9oC-bP3 zy6aHTj(QbNqbAj@aayG(sdpxiW}kR3u=l;*_4pLmRmwP)DhbpPlhCCfc4~^HaPF(u z-)Rvy62q)BZMUwgygnAl-IPP92=W=C56+3Lt-|l>ZYXM$MHrzj)&i*a9eITukQJe4 z>TGQvG9wOg-%q`$YAjj?{7^ZBj$P_cr;|G6Rk7$XZfA@-gtP{=U|0^m@Vj-gaY#=} zHP+$caM!Ib?7w8NIy3czF8Ppa09|e!T4~;V`h&urN%ul$QQ<&T%%DUfO&_c|XWyOt zMO#>B;Kx68Pv)om`p5f!fA!nn{{Fk4fA{HFW>#@+b_TS^%acC`v12-{||oti=Y4XUw`@jm;T+azx&mf@BZCy|Lz^$@BaH=fA^(+{PNcyAHVMwU)sm7zWw=M>TSRDFMmFMR)dtz7-%Z-4Xa|KrPl z`{ghG>)OTt`CmV1|NrH$fBDP*AMf#x?f=>9-{1eE@2~s+r}+9Y_CL$zAKU)^ogc9K zKmOSle)ZemefyhVe9Hy#>$xai`}h3(+rRnkm;dwI-~ImAzx~ZW|MI`-fqwbjx8MEk z?|$>;=U;yJ^WXgCZ~x}|&-1%4f9}818-Dxc*MF&Z{IxFV&-1DN{4eUCfB6^xLjP=k zu21&&KG#3^ci;Z?@4x-K-znex#sBm#|Mh?WKmHF~`9Jl8{efRUaQ@eng8p|u+WfEX zzd!#gbA8?aKgAdQhozE=fyeX=O;y{Rs{YGLTY9M?Vvyp_27PlHNK|@hqc2q_ccy2w zRGh1jQ=wE?~D z?&qT7aL;KUl_Yb`N=3C*EOixu={3;2RR!a|EuXGV6;OM2w@0;C4!7nqb)%(BPwX;z zIxaKE)lI9T&b068V5m-Fj!Pw}Ev%C6xHr^QB~q_a&ieN2W9tE=ioZ+GaSRp4C$-~I zSyX0gce}u%iXQGp)^9G21y%LOYe%0&513wx2cD{>eM@xfT(B|*TU1=`kE>*!)caZ< zp6qlc%gkK4z&mI^Pu8aUPL%L;Ia|6WJhVRepSup1nWj>V!b0OsRF}8r!m`%cC=FG= zw)A7l?M!RBn@yDdXwLsHzQub$BvY2h_8dO_PKlDlC#Os5Vx0jwwNq4$zt;<4x z(98SM@#6{4jI?NTs{Xwm_2?FE=6ZLlCf7>qEd9cC+AUlazl(QuJ5b9}Yt*~WBR9RP zZcUrsQ=4C1Q}4O`*;LkRVQTv{+UJd~Btr(bYFunfQ=!c; znawUH{?#|OH#dJ5J!86A?!BGNTrx`54%_;1oo+U7Cc3(f`WSAL7n%=xm-l{b=S4rt zJ)}eTPpw z&Z!sex%bRl_veI<3DjiTkWjQc{d;$Q6os_D4`b$kuL~+TzDE2&)P>)G;q07CU ztPsX@>v!M{`IS-eKdYxks9|016tj!s(%p z){g=(&20X*(naHY-(BXadb}mw-(CT$xFMGTzgvP`7kaW1n=Y9Dy4}apve@*|bC>7< z>NwojO{K!fSm7^FfvqT@yP&hl%Uz4+b_GlwmY(7&W_6G??!&UMbgy;nauWy&oSiz| z?#ftH_mEj*8mqcgPr~0cq@78O-q!;Ig0aHe+?k0huy^ZNY=6YQK1 zjtt()<(%2?JYd4EFsHlhfi6vCZLvW!lxth$ov?%rTJl@z!Wd?`x_5eytV*;M7$0zU9ozW6zi2zd2BIu0jcMh(?nV}E8{Lsq+ z=+f#NRq1rpYZPwY<_=WG5ZpTj6Rp3YVBWp2$r+nLLfOe$%M5_ea+Y)H!80Gdezr%y zr!cwJF!q7F&$H9gNQ9j_p|#SSmJOEjCLO5caAzqa)lGaUh3<}?6+LDanoep304b0_lkhC%D5PTOV+N4_qG&OdDoW0xszSNvjV$um{^h z*Xd#CI80_;M|n+ily#@qKA*QiXgiZG(iCm1l6jRjrdFOZ#DHyd$PPepS}#G+=%SZ) zWcNiL_DL-Ok{rOu?u=|Ynfo^1{j$=sC$DAz6l_p;$kEa4i_n{Pet$a}>#)+Y7$mpO z3U2ek!*Bvrp+2kj3#mWjcx4Q4a0MLVQYG9h@^09OK1fe30gd51n3@FpKh z4BnaMWh}Iq`_x&PZ<8xCAW;~}Ak`8Eu;hVJahsR=lP=b-oT^vQ$Wb3pkxHZj};fM^Jv zjuWMw>&@$m2VS%5A^Oy$gqRMHNm+`g%PL)Thq;b9IR(M-I1N9BFs%1ly&BrITPEa1 zw@vTe{npH8K=|n6to*j#n3u1hL!ABqoGMP@xI7i8x8z{d8v-d&dRQLJt5bmtT8=(8 z29JcfbdumjB+|#Po-^sZo}By5b2`NgwF(jllHL3VpZ3nv|DT6 z8L8?E~_320J*Vqm@cUHvvzOP}iIlS_s<8 zy=7trBv1+Q448*PZDix>^)Emzr61LpDn_EJndkCwmE%fTnClDRbXr%sPo61u?PlHW zVaVYjjws*jD>)S#mMa_H)QvA495d)Ats6b$;;e4^qyQQ0lT!(RvQAQXmxX{srh-{Z 
zZM5|FI-+xuGFo(Mm+}nkfgVVI>wp5_C5^w|4ZrG{qeqjIR5|qc>TXI{{a7A+SmTyPw_?e zqbyS=77;h?2Qp@WMFf&Yu$&J&r$bRSR;RejiMkC4bq{HC7Jx)|syN-;;Y4$EAcNW@ zLjj=BxB<&PlY!Fs)6ra8+A37H^mA_No&r*{4MP>2C(JfMd^oGr;i}k9U%}|04GM!y z6NIJC7ZQTyk{-Mu(VIlsnKmb}#+a>r}a$D&QB?1dVRf@2y9&cAmWvVA3uT!25$46+bVp%USe>jQ+S z6qz#1{X^r5_1f)qrO4WQx+M$vYAZ=Um2*?=7q@XJhyj4A{&;}9#;I`KbEyV1>W9Z4 zx&m0&;X8t&(`lnp_ zO1~`K1|@rX@P1jnx2}Ebbe;zCX_0o;;<@>lI*RrqQn1`q9_q#$VAHRcknwFwu=EDB zu3Fkcdc#I@bM=rX((@gG)z?wEx%>_<<%X(T8~$B=&cfMQ+_zP_O1)JA7=seRwz|Q0 zmVJPsvm@xR!$nZNczj8Rf{?~+bsvCKP6v7R@z8VBy#e-|^`m=GDX{z=G`_S|bP^jG zb0={46Y!Gi=nlYG>AssXTD_$Fi~v52In<5R^^G6`0YK}&rB6jKPHBlMVc#LD4y{Vg zZ!gnf(#7^t)LPOSQNgAq?Pe4E>CT{mbe{_+x%MNMr%w&7=B(6QR5T0@l7Q2sQly!r z=WqZ>iO8Na%eY#!Ku8l>$i8WogjsN%uNWP~BGIa?y;1DG#G}#2+|F%*IPpQ+2^?SL zD%Ir}<|NT=;c2Gs1N5fj4&IV;Lw2>dsw)Kk1PVZR|BKs2`I(wtyA0HN2hK#Us#LR^ z!;nOmROrx3dmVk1KH9D`{DDbFkj>kjWiEpwxyTxvKY8u=M86;MY`yXgrfaj3yYFi1 z6u3T6KAdkaMj%=#UP#LSMyDhSG*1sn`7wK)s|qngxE9aOQ~=|=V! zO4l?awXe7$1uy?HhtFzcukNfeF1qhbFmMwPd=_kZlr}mLZZom9wAo1Id6FH_HN=YM zG1PJkd8OX%)Y$u`@M|y~W?HuFTKFrpRJ?u%B?hNd$wJc`Dy}GGPdB>s(dmgEH`B}r zqko+Ba2Yz#n(#i=0&M*2yAAHb0ZhGaa)7>t2B;O`1Q~3lmVdg$^h#(KbkzcZv@(F2 zhhZ3-d~^KheH|Y3g4#9OqFNV@L44>Pe7Dwig{Gn>7vfZ9QV4D7ylR7NLrKB%uhS9P zN))K#xfPT@ipQBz(et0M+!QTcxafipF*k!f@Rr3pEks3KtMU{276UZlWj|!yvYfT{6nvbMnH&*%bv?9xeUMBlX;7_fWMvcO`wd1v z<2I7FbS7wN>y)FX-9S9+DVPE0YX!Eet+3MABZ#wcdM2!jx13|XJPvmq$hAHa6NqHf z+k6FsmiE|6z}#kotVd!sQhb9{=kg#$@wZSDuX$qWYUor08M|7zh?iD$FHR%$he3x1 zu?oiL+$#D#je=K)6+}@#oL)U|2vk*CG;s?RpNykaAJ-Pr2trLWM?rh<3f@!g5e9SbG z{C%@{Iv@7HETXZ+V}|>s&Gtqw460xrBv*yvQT4#G@O2v$+VtufKry~f9fVH8m+ zF^(JB3oKSGSksY`^}i5GF+p6Jy=Qs27CiicZ?{(CrY6k12e>tR;iTILlMS9gXdPYa z&W*055AaWy`>njLUQ>GtzS0ea7~+~9x-uEZ87&95V6NMzm1pc&njXldbqP(R_Z(_; z{PY2;``X$E{5DYP^tBahsxuCsv&?bzm59(NM-B_oL6$U=Bq&7J3DZW&+hl&MNST+_|Ja8Ad3i6^;-qGgh1s-;a!&;3-=VrExKzb8#Lg$Ny6Zci=_kV>b- zvAR=gjWAu77CJw@?ql;(@>B`=_D<-X&879|*uKF11#k1VlR^ zBWYG@1#^6F!nyYijB@k4KiuflD=)wk*;US)$#C7ws+>Y3|9&H_BHG;WQ=IH62~#v& zS!?v>2wPgjsp9mqB=ZnS(+Dga=AIgHP%GxruS|*1;oNDl2B^R;h1sWw?^YlKdb7zh z4)k1{4zR%;S`Z30JU?fr(~oBM4A}=~kV58sygA-Oi!Mod+t9T2JI=cngbs+B$9W5f zI^Dla9Teh$h3Fn9h|&1fT08;QkercZn%Whr1tpRaX}>R_f0-s-x_g<*WuRsxC^Pay z4&ausELEz|EzEQXdNu%p1aPoOrgW5QX1uoNq@R1;YNVR%j9+Jn7;w6mnLj7@iL9b# z_NSEwl4SLa?JE2LohvN`CL|eZ0`}ZLb(7{t{QBYezy9O(|7QI|{NGpn?@#i@iJ?OW zrfX|qsk?Ndo$hWufEiNKbPg2Ap-9r^OC7Noq_J<(kf*@|z9W-J=!p~yQT;kO`lXBRKHZ0D+ok;+@k5Ab!knTt$E{ zU~o?Z!7l{vO3A{ULCc_F0yi*7JpySb&EJ_VG2ZCsvvH)bPolMtL(T05T2V&79F+6o zc&<+8&)~Y#UcCk5Q9-W<{YbUV8T4dJE{6n#eK>=KV;WrmCCa8%=~*cA-PdhbIv%QE z%C-grAGXmznC>4aD?xk2oL$a3x}#}DB7nd!C+z~Vgibhie%B}#m(yNf9#d9C07BYz z#;lV!Wm)4L4htJgh9*PjIhl;tpml+b@+sA5J(5e6Fw_cbiyY9W2~@B~ zVdGB1U}VIszh-~~(;t143~+Qz++(1ruMbF}vb7Z?-Ud24Nnf%C8HpYLw(~}oxt?pE z(WO|XZC4);aOF7-qf()2O`i)Y{(T20B*^JDiQ9&Z=l2fz4(t(}kt+)Ba8ccQU&A5gBO|?J(b;30S#P~ct zj*vKMr|THcNE3KT zkp0nZ z&k=zO(gB&QmDNgsswJ@428yf$)Zkk!oWt}MJ6a=XVAe>fimdn zBjupgUB{dBG$P`7SW$ndSpfNTJ&7&8+keT>5%yAF$uT7#qBX9Ics z07p~b;oEdW)(E&=&vgM7>*-E3

y?S!D+VjmqhEPk;y7BwW*&Pm<|6?>~=)tS8FU z%A-9n*|k&fd1)|{fP~w2HodM5SW2eyN-PG4x@(%jVs6zG>i4?j8G3X*t6}&#^z0I# z`oL4^X%n)@NOaU>Ov!|yA!L&-`l9EoA96~5WU8ZR0W!cB1xEWAwv`qoogzgWg^OU` z5EKIa)!i?y?n-UiVa%XV=1|lJz@^8k&8Aqv?8cvNGT2hKV*~=p$^Z-9(>yYks}sgJ z?~!0b$jTpm@ktx3>FAy??4!YciPh%51KU5`?q%ebGMk`#4<{6tTQcJG+iSt@U?QsC zfFVrAMT1{OBg>(Vcv)yU1b)OIw^FbAr`q3h?CqLNACwL)t7ny5E`mJ_6f%ziXUF{5 z9ZKLSgR(t=@Ki(kx0j9oNZtI+=oQ^z+F89z-e;Ngh7l#$k(tc-TOm{O0LPJ^#m}Pg1myb=q z<`Yt;gax#0(oi}sh3;m=iY8PTy0ycT64JNo!LU2s^g%&uM&Zu=F+FvMd<8(&#q8<` zh+Q`T!L>Paqr1b*_jEm^ZxM57hq(f>l~kBOWoqhvP>(=!p;pw7UT6*syn^8kRw3I7E(0dUv3o#0zd z-Ebldk7q^#L>IYPtoIGu7P#<|GvzW+&XdV`zet1-CCo5jKyCuA74GAx_lKfMFNlYpLN-&;W`$-=u}{4r%B+hzny+WpE`$o%Kt!L#ENZBt92; zxjiiwp-7=*@ClRMNa=9lPSb#8a{{c_C^qM7hRlIh7Z`)_`cTtaL_pbn8Z4TAlnXa~ zLiQmeL6F0oXAm(InvcT)>j3dLM)#}^s4V9BJjI<340peR%QS~=;Y}Bdg+a*E_d?Q6 zB|?9&Lntx_$wS9pI`d>xX%a$IsvvzIa++L{_f4`6D5L@z3;~Fpr)t&{IL~UdQY^m| zpd{T?#&h2P6HyYCuoQ*;#jhp#8piK2911*PmU&0Qol0>%rR$B{&KA6eQqRKrZ~L_5goy|F8a7_&@kGe}Mnf zQ~28d`zgL4wiv6t`+iD}6zMv^jlU}#2D3tcD-T^^(iH?3LjXN@IcYk8D+4X9Anx;9 zQ40nZ8L>MiKvw3b0HeLoe_jB(g3_ zSAHEGRPc`G@hN=a0xvgwqWEjrwT>OYW-Z&Ds)2?5+^x`fse6C>t^ z2Ffo;T?*q4!_=Rk+-QIB;m}gjakMh4tQ5cWy>3s@HX$CGBgy+#s~|O^o3jZ1I}NUt ze2zRP`kYR+!BCc3EeTs@vc`vU05Zk=g}Uj|tRK*gQS0YAj$fRCp_ClVde5DXjBK1vs-Gswl!@vXTLr~uN&3sn&r zG!B)K_7<|5bbvX3^VDf1)W?S@)&ryC>LUp30(iyhasafZ&L%xxA6`(kH#4LXf~t-~ z&+17B0Bj(+l?E1#Eg-DQ&O$-cbD>XAl3~I2K{%O7kQaYvA~ek@f_)9&ZEpj9_Hh}Z z>-Pk4f@@)JXaTDNe?hDAu}hU9>+l*#<@uh0Ep4V!ky0&Gy+Ch9LKLF|xMzWyIJb5^P>26!O-0nt7goq6zJ?2#Tk2x0erq;zIj7fKMQS3LjDa zMT(Z0yGwPbA0m9oe(XWKVi8$H-;!HiFTfH-eBJpb!xTfeeTeD*&|>)BU*Bb73kOZ6Q(f`kpQ?Jk#UQ<>e0A7Yz&Q| zK4rHqLwT=x<=mzQzNx~n@`?o-rDqqFf?|~luqQkifH=qd@8kQ{KDBL1Up71dkmZ2f z82g*R+`8 z0=c4}%K}o;0{lGeC1SvA=_4n-UqB(M^_)NEH<2R&z7gd}tp?y5kr@f-5dgIj2`nAJ z@OqS#5C^@?5xfN%5V8gHVY`TwAQAg~j4mSJDiM)o;$)y`j>E=j($lZMjF5QcPq0*h zeYIA7GRo{KD)?$18yHRC=81mb!x$NiOl8nE1cdc^BfuZQ;5iRF=&m3}P}5@p(m}&0 zZ40Z300Rq!qJoL=l+48(ff}CVo3nBf_O~Dua$pYh7nQpE+h`Sk1ZV{PE4{CITr3<3UEkdPeda(!-$*WTZSjzQX_K`Jt1`Rt?shD1?i}r+n~XW{I%t#0df( zCXr}Zjh5|out<0^Fi?1{Q12nI2W5)z(}FU_0K*mY)g0hen{^Ca$<4GGey>88r5A1l zL6C$YmCKgh2-qVto{7~!m=h)a%zwd_5#pKudQme{-g^Yw8U^-=dPPcK1~(OLf=K&s zS_}0Fk1#bu!M7Nxt4IyT^LVmE3b8DE;fPpl8-YTwb1!7$;Q&RbWdIPiq74fZ-;Z-f z2KBT;VDRnqnV1Z+?RQkV(eq$<5@v`p5D-G;0RT{$Z5F@D8fhSkYIaejt~kiah8=4K zafsECq+y;}tz{oXviUg^_;0XB5UT;yJ^6fwUNmxxab@m7cVcaJcIWPmKz!ka&*F)z z_m%!DNp-s7tTu4eky~M`h@er8Tm$O@Hh5wUhZZxu-9Fje!^q;}yG{GtDF56E`ku#v z9zL~=o+}3CaYg9mus~~logCiJNqph7O3>FHYU}m{Nj8gs}hw^e|PJ?3&t{_G5|2e>e)7DfDK-eNu zc$J%NOqDRcfu%ze_xfiOu^$e1CqtHUB0Ju(9oigwj3XQh+;qoHCSY%7g2+n6P=)dc zjBun6=J43CJ_Qpw(RreCDEUosRQlNJ!ZbZVQ7e%I`BW-vfhvfJk#g zY0~PlG;_yH^Me zub2X8e^O&sw81LcB8O_5WQ=@074xLYq_A;~I3ff>zfPTe(!m=`bKj9}ZYg_=^H8^%- zw9CyDp8*RLBQ2;1Fp&5%(Cs;*{P8FAxD_Idc5$PSOleE;0;XaHIJ><&%j*w8_+ni6 zsi@P<1tncsJSqG&?p&X=6KuXlFcNrdO++WW0>BnR=BhJTaV1NXZCD6}y;rRvv(vMh z_>FEf6l#nY&QDyVd{J&Yr5ou1D4IWw&uIAerLa_zG8 z20)EPH%L?B+QN2f2+W1Y$=s5ul+$BSCft^u!3WkkSeIPOV?q2-edSTMjkK&~6{!o9 zi|#`~Y)}uyZBgecFr+ZVddf!dB}YB+ae@pA4j@??$<%;+{Fy3SXoSF&8U^JY3`xQ< zZNNM08_iYbUl*=QGCcT)u)bfH17;Z7U>VdYX@Lk_V)FooQQMHdsgd(~K_CWUe}ImJ z#er$M3f2K=r6IKX;el!h@h}tKvGi?Vs7w7|cPYRuf)G;oHxEf?U)+#VRpKKpaess#?rFPzM z&B}p3n1PPm;47v(d@uTL2u5sFi_H`;X1m7NgTh$xmv(AK&<&ZI+^?ZKtwIaNZzQT{ zxmBQHKDgN|gvIvQeFcyf*KZOu#H@YN7gA6#W$@gm)~7$h$Ri{BX1OLiaz%?pFY2WH zW?<1gwSO@ku{nywOMY+h8k0eTIDuRVYB3$qi_D~U>0Zxz#p{6iYTEXLnc}h2m!1k0 z9R=?5JUW&n)Z&kImbq5**0;2`@m^226El!vSw0t_*9HeV!AkUy?y=faTI6^#QTAt3 zxbF@I<>|K4RGv>X2a%j%8BWIn^kDA%p72P&aNJKuW`SVFS>L~LYvKS6jS&hsQTqSU zH^3T>{lLJ&54c(oYKO0n@J^ytqzok5 
zA{Yr!f<2U_Qkyl7AeKfM4Pq-EIsU^&c33_KWSIQvtRA0pmdnSo!aUjXu|fFI5s-URyp}F9F#Xk z6C!D*RVW8&(>yhruxG#f0u+%kOt@N?kY8*u>97NU7w10s87HYa(^zNQ;QOqf>^C9(LCqsX zIl#HjIP*CvmF&ryXF(0Lty~_K3GLaMA=wM!M#0piR|lP4)N+ECyLO-JR&J=){RBa^ zJ-4-R=d4hEBQ{#inn|5u;a%$OK*xxn)%H38mhWzbWWcr^_Z_-;HCzy9LBnudUy-8! zMHL}tqX}B!V-;XC@ks>Dfd%2Y<_nGnzy7MA~9eoHeu8N&LxHAun)M0kN`LRPZI(3pP>Ks}Xgz;?RY!)u>*7G9_wyX(a@E2Aln93Y*eqzghr*MJwSXl z<{k5cp7AoYzs<3<9-4{qtM8(PSxCZ|=2iC&X;m@+H`)r*Ic8na41xYn1>$;n;L|+7 z3%Hgdhb*=$SDHIj}*^T`=n;jca zfikNsyvoh7>pm7kTZWIlni<|K>ou<7=Q%5NHlt7)eF!71OMki_1LP@q=Sa;L0=aT> z^jTvsivSl;>61n*=lLx!52X|7Etl$=3W?@hKL<(_M5DL zFuqA`MpA!e8%nc6Jz-|2?1}P>kJhcvSv?*CE?Zc~=Sdp5N3b%ZjsG+)@`$!s=n}!T zq(-y=aO|!oEVyGFJ zUK~7-BkSo4kt(h7@LJ?Mj4W3w1iw%V_@x_I^e=72_m zrqrl+H}qp_{q&l}lNNeU+T2LBLBxq`I?Wsl_4jCD06WI$H7#bY;7Pg|ib8gZSc4zB zat~8el3`X9cHuX)tR4)gA=-6Mw|Pd4kuX?2HNsi6T>VH{O`j zQ|^24J^3uk6haHW+rpze*2BlLy)cKJqB%T9Nn&uH2f0v$R~b03;>Fy_oUX+deZSCv zqS{zd58~M_6DBiU$AbdkrBPE1{j^D*@G|f}9s=5e;X~4d+EfD`VOj|o9pMqVRlCFj z-U{fE`Y?1D11)JK)n^Z&iy?u-B;X?@`%Qi3w?)9gEyQqGXsS_JA6l>BwgM6HnA5*r z@pMGwq#0U`9yk(_pOSN4c&N>$<{(?Wk5Qy;Jhx@O&+nM(9*HdagGkq zi@U|U1Z^dnDbKJOkLQVkSLR}s*bjmejoU`^Z}{KMfkK3YadSlGt;YgLp`{q0;-rup zu6uw#nL!h|Mw>O$2VGBKgO0+k5qJWI4~!w?bh{PP|L-qh?zSP|%q#&!fiMM8a9tDKwCO3>&LgQ7FyY_-%Gn3E`V<^ z{b^`J8Fe*iJ`0C-594Th^tjgYL7TO`8rT6NzmW0cAmcPdV9Jv6A+uZ?57Ako^a3iR zswkB2hKRM|H9%si#9DaXQTUgz@*|QBI07Q^aGQn!npIzGNUzrUwzk}d{ii{j z)pUlb$RKp@2wzhpA4;|i=8$}G))svyif>&?5qB2T>2@CIU>^>2toTkxZL^4< zgEODOTQg~7$DVk+!7ChXwzQ9j;l?_uX7gcMhkZYG&jE|A43M1p*7bR!G7*Sp`ilpT zuN_r?ZP|d0(ThZDu|#gbVLLcZUhf)azaLY!8@9fBhEl*6hcR2MQ_#LZ>kV~r7p-Ri z!w8)ac4uw|*)8(j=?F7?h4klU43ih^-%F@DXbWXm8NauU!$o@3reDxr=LuRL3y?k_ z!DU1Rsu`qGet50^g*H`YcBmOKn@x8X1dn#>6Z7}5avQ$n54lhQhO_W2=&?6KI@b9R zJfe!;{}asvs(1ma$6UD4DPtt-$%hG4wA<5c)dND0Z@Th8t=}zUXNJfVTHZg5DGbEH zu8$m^ru)HI4B-=?DkG>!pwr^(9?&hfHwN$~*rVCJ6^JYY{MeBLSpahIfb(O2C%|HrX1tlY5)pZJ4$A@sfmdVaAEkMejlfl{HbFe|8C{ipo3k%gCKW$u?!HK zQDg4cgWdfko-r`6lC6g)_!(aXykt-cG`k9gcjQo>lex~@y6w6SZyE#ym&xL^;vvAf;c=ojwk zpJvDJKmPim{XZohKc4>A{zLt*^_BnklYCiH05nDIk1bE_!qIb{P1!ixLc6g50C>f; zC|EqaHE{^W}`!U zhYn};(hqk(7`rmZx1*TK92oJL=tFO?&r%#o4Kbty*bya!YkCw!y3hCpyXF&JWqG^S-qjGY>e8nt(U=zr^N6a4ecfkBqZqAgkFgqStS>aW8O_$(vm#|5d zvN9ZYz#g_+%S~W(gw;<5nn3>O%dK2R!Tryt%u5D<-gwgM`-|Gb1bGx@05jliBTtvc zpa&HDV>`(epa&c_9!(fTAI>|&X@wq6*L11L8p-|*?evOav>h8Jx<|-d?o9Fz&U**j zy5yNazeu8LMuffFqi5yW46f*`w`#k!@G-%PT+njKhyvPDS=ep%P+K&&P__on^(0I1 zU35;Eta+kSZ$AMq>E$K9Zg@s@JHsd643&&N;%3asE^GGJ!F0(dF$(ZVO(g4dDuw^5 z8^z@J=&43Zph5MZa&^f8H;FvLq#WLJ13F=EJLd#U%E*5OYC^J#a9*@H7I`hg!ojS- zN9@%q1>Ai&g(Ma=u6~z!@s4&3C?kZkR?W6F}5B080jbd zMg|PigUEu>>>ci?zm6n{H20>&-2|3eGwmW^x^A)<+(3sHrn6v&ocGhsK|QIs%)_P4 zism_P2m?*Mp(nV$v{5+g^wEgk}&Q1znhdJP0%~oN!`1n|BHx$XDmV^ew2_+oArMDmo~n@bR&XUG{{rP^HT_}7n12?x6=Zr z2#WyxF~}ZPbfmQz{&P5nEVK~J)D|yR$ew0<)n`(IFCml0XJ?>Y&9m{z{ZZZwtUL!F z^eZha2UXtFMLY(Wa)t;C_v{)S2Q?Hy?PCI@H?OO}V<8q=t=#PlXg2s}#z5P3;3DkW zJh!GgK>{B>YK$vElEP*_ujvOBynae7Tx4)8l|zU_EeX-D@Ib+s!F76MZ}2M&Nz*SW z$4KWMH0rYKAci|xb%X)L>LSeph?NH5gMS0JLnx-kEXkT&6M4DZqV1btZ)@}P8&Ihm zxhFd!i~tcEUMOZhMc4shC5pzfVqA;sJTR_UU2vuy`q9q`7(NJw9OaY5kCPCHSvMss zdU^J7rIC*l9UH?|eU_$Y%9=EY*{D5W6Q*FaoBMViOb;?<{WJhkC}FQxuQ@Us8~p63 z3?i9}A7ZEm(*{#L11$?~E;O9tEW{xjGsvh_IYgtk^E~fxn?|DaWyoE|BAYz$H=%xq?a=$sw*16R~nQFF5n z!r}WFqpc{#vrSeW4lSE%gL@BNfiEpI(*3&(R!@865xAUKde#T{hrZq1x=!x`*CcD2 zR_LBqj=2bzdMV+J2x2j`Y!C_y5@%-l1!yYfpx(tX+M;)pmN7mY`xzSHgfqcJ_M3QS zwub4*2~vv0jKRZ*9nlU3&zIR6gX8tRAo^NVQ|mE*$%J54`wpUuh1b>_WOI8uOzfmr zN+%F_6^F=+qgFp6$e->Du_(}z-AR6^-t9i6UarQD@0QFEl;Q}U`_EG3#6JX zn!$tCYh0A7M>xx=hl#V8`shIC%+dKt{Z)*SV|Flr(ZyHl7y)Q;f+%eQNw0zMx9B^i 
z?V5cKJIBOm6Gos%JUWt#4(nsT6}vcL+vlKpTmhk!;G@OTxCo_2N)idxft*mJ zP>4s!gAl!#!7QuT@zJc)W58|(PmIbVD_3-{G2pmbFoPm{#1H{Ke+$b*N=z)YySZMcYf{v{S;rQ1VLFtLN|5W zGLY7oVz??0zsPvV$)eSxkQw@Bfpm`*wIdhBt9=e#*}?;&U|vs-d;*K1Cl(M=Iaa$3 z+NQgh;?#&tjTINENR3WsDxwIuq9T6N(mOe=>6)<}XXr(k=H9?|F5`T+YQdoF z5~@oX9&)w$oOBL{A{tp1a-$$&aG4Su!U^Dvj5Vq77?3^_gbAVNGKdSbjuIPO+sVm& zNnH^v4c?K;eMM_w#)G1vAWyCWNQRMNoVl@i9xO(CD5BJa303-ca3=v{xOc(46y=$1pnf~EaRdp7oCV0MixdV)H7bdTSjo-AHq@PQv zrruHja?4PaIjHMKI}Sp4u4yDeGS1kjHs$ zyEte6a6@4XV|lCMnR`;J1YXR{kh$ZK+9w6RdvcoR?k znbRgGy>4qa@IprIP(dO*(84mOw?{EkfDzO;W4QN|p@OE~eu85iCAMSndplwPkuudO z8;!;zRBXlA| zlz_GO1gQ_8X)ucczX7fxvk3**@UKg6_9dweEk~sMa!(xrP~UUu1+sIw&>WA*LxM%P zbFu4&1e#d|7rI3S2d^uPHc&GMrT?fFGH7jN^6|~1JZL<-<>wA7nyyM%voPUoy77oT zEjuZ^e&#Y96lwp$GlkWajFge{0e$pDaj>vquSc*P92H{0QRy-@m`;q7Iy`Z$V40r2 zNhuwV2N%?KzODu%ewZcq*upREM4azn8fg7D1YBSOW}uJIB(2v(utC$N(+vOAtbqwf zYCN*BDUGa*^v)HW&X~lpS;>!qR6q9cMpIwDP} zpkUC(v~$}-q@dXhbcD@nGB3cM`)Rdiik+tpJI%Rc@ggLtvKwxrFHhBCBMk(wfIqq8 z0!JxMrDhju*@N4%lWhMm3)+Z1KNRuwPzM~m7w$01@EzM0J z2v1Ak0@x&21>tyYi2}VBgd;q)IiIzPySJ>okr4({62R=CQ7^Q_T>u$E!L@2<1b>C{ zdc~hFLIXx1H6y$;>>6NJ@}y8sA-|fB4(DkoaERzSpwMmA5C=R26A&1U#rFn^V;pg2 zr^eJRjyNYw5O!%Ao?MZY%i1$f9`^M^mfG6{u}d`Fh5?Y)TqRZXQ}a9C`!Ism$q4t# zof)5CC1L_ROuD8sd~DW?1zleW4!#oH)3$A@jU+ES&Ygy7wh8l2Z%m5J9Sz%SkNS@E zn$y+}79}!vI$iNiUQZ4jOm4$4A?Q`Af79No2_k0ov0+vYb*5u9ue3onP6FVNcj|cZ zFp&Qx^k5pKH0{>A?Zy{^;J9v|B#Hl`bY~KdLpo4yXtzX1Y7mb;cNkV-qyC!4A&<77qo+uai}Lny^_6 z$EV}nIm?Y7CS-LRLSaaES%~fdP5{g+qsw9W zpZQc*&}RW>6>Dfbx2vT9(nq2eVGtY7h(%z*PpL*dSN}xg=e$pT>?krL8xOezv!w_4 zWi)H0fOm|MBC>P*oz^t|Q&#shRTR(}hbstu)N%Cct~w!;j1bcCj-ad=_i zy^xSbaH+WKM#qeYLdF&hRs~97Wf$+WYr)*`N}$-#jZS2wygOF!O99wyj0UbFeN_eq z%uC`1Nv@0~b8K#~RN-m+L?#wu)5SITTmo(ifUx`V8+zs@)<`!J%|F~pd1%@9J!v5) zSrTy{JXWx43|KNiy>?Q+!O?D9b8KPyVG;o%i2e1#I_Ah5eae(;NJBo%e& zWQ|OJQJDyiow*OnjC%O43b{hIX*oWunU=WK=7aGt{)H>x8ku~4JWnfpJ3xsuly{AI zEZIzk4x@l6-l_MAO>nLg$=auSAMA}t7Iq+i+wjL4zzEE;5&|(U1eg1n87Sd}aa>R8 zcA5D#ASHoy!lgjWA$>lkc7>3BakX zA=ZP{bLbJrZl9*2{i$^Je%RL!*8i*R$Ls&Ce*pj2zV`oqiZ3cxMrC*)hV^B`c{uRT z&jT&Jy*@$j6EHJ2DUlaOMj6SG61qj97qy~*sz~Jy%8ibMEljrMHauTs;=C-5IizjDFwm=(b)8X8{ zqz#JRo-{jPdDN)5;ut4e8*P}Km616b%{TKq=HKp;mKS~zK&2S>eYh5{SGauR^-0rU z#<752-Le3BWKT%&;A&yb?h`||f{LP)0GT{==*)HA=cNe*=Q0gHkn@V#>s7@AiH-Zr zT4^*?6!)T{WW{hY&vd(JworA%<{W!vM5=E3w+IbQ%hpAHi#E-rXOvrWfzU0y&;vk6 z1&zMj^P03@Xwsbl`^`AN@ccO5;I>Al+uI*4!B-1fBWOGx;|4f}rA-tJF;`$7v?6GS zP(&M6SAXjE+j@I79cL7@k+CdEfaowRQ18hx(Qbu%2|t{JV<54I@Nt-E3jv<<-0p|r zFUEOL&1^Wp-Rwp`P3wk_e#X-~2Q-9#;s~KyC)NSjO1?oIh(W9SQr0A#^+D*!c0taJ z@(JnJEkotVYaYxgazNGdcF+xIyX=SDv5*Lu_6YY)#^@y5p`xQ?4u@?<(79OLBXl$R zQJjmT#Bc#l+YWGNfhuJ*8y{`7GB-oLjK!GG0bd!G-kVnAm@O<&2JxMZZ7ar#&<5a_ zFoy^-V9XwM(B5m>U%MoIq!U^na{ zhK0)%^X?CKaiys7vOo4Dk1t7VQuz#Fn-w=JTaBwD69vl*7IW3L6q%qu4mQ%8@N&xo zhtej3>=A~~vJw+G$Kbe{9R>;vOx6+cuF);sPe^Xrpty`E&p2{J7fNQ0(l&DI;kq|` z1`a08>3fjNVmx|Y6B`I6;KY^$)K~wye5<+N3brk{L0lWGdyt(wXjvUR<@|X)dR|tK zBtcs!hbP&mE5aP)ri_6k1^1|@djOd=n&W&!E{&Gdcsg0P*!C$ldd#`4fcCI2=Gd;i zKBfFnk)X^D(iByOi5F_*Xd`lNjYwW9iU7L?4+j<)s1-U}ZYeMyBcf~z;&jqbl6bxE zw5;Z^1fGLqG;EF`;#St~7K02}sxp=~Nza{9D{>VddmY{?nygQ;oDL*_yc+duCfx^m z=gleLg{CVubyjtMW#U(3&<%+YVV|)5x5ev+UA`(Rs|)PZJC+a*ShgFa#IUd~D=6|9 z8=Eo{A2WGFrAo`vtxftX;PF-*^+1oE*9zyf!9U~XSUH$1`$YC8L5Lm_=84&zRwwf6 zagv35!~x*K6P?3DkHlB*0ZF+f1W}*>z%%23z$*Yor>T)X{lt=a8&kAV0gl)uKSN82 z#&8L34Sp15f5?-asSOwk*S#PjEsE$>2{wgXCv_5!6X__I<@C!6r-?%ky?YXYo%sTl zs$CWx|5)mtpO(*%fp6|z&&C#?4UUB4$6-xme2b$1K-Pv4=S-1jjz3v~HwrQgGy_tm zcd)HE%Ege7`2elo_DSfZOeRJ%w!|FT-V9L8KCDznasi$=xeAI>&?F1|`btZJ8x{vV za}VwJn>C9tCAJ-Ada)zu#l&U>A3H>+V^p#MmopQq&$-x8^Pc{uO@s*EOYIq9%F5FL+03a(m2m 
zJQ2yX$4)qgpY1Rcu`sDFq%*CejJEArYeS*s^QiI=9o0&QybULhU^!W(SCevo5-0;e6AYrWF=?4Q;c=NFrT$D3@ns zUTs}R6oDG!Mytdi&;_|9JySgqq$pA%I5MCBkWN!)bz57`F~;rRfqdtO#g7b6;|xqc zuGn$p-nd0+KcVKb><$iH98SV2!M$0p`tDdbVn~kM=`wr@ZasjuAi$V%44j(6pM{@+ z$afm{$3s{gtTH?!na8WS1nG*OByq+W2wDJizcN^>p)(<1lo6yMvfo>;!lj0i)zX|p z2Llnw=_o*BiBCDYSybq)0O4*v0om@OgNQtF1z{)$zBBlIkc=1r4Ra3=G9{#xCjefn zeT}EFR*26I(^H*d;HJGfJ0i$#FgW#FEM^ctv>B>D9m?oHHPJ`#a%TDu9W=HfSZI~~ zDXNFc>81>MsB;#%@?mz$34Vypm*AMpp{4xDMM*D;Q-~7t| z{Yk#MZQ?z`h?^MG(gL`{16m-s)Db1eJCiK3`-rNc{4hL{P3?o1;vv^+iT(6?mf1Jn zjWFdbZ}0tg5R~9pHs%fo;cCvN4+2FXo00_nHr?kzGRf)Hwgl?P0L?u_n;2(?VvIc! za}gON$>Gza2;45&b%cKEfFhnjS^KytD87)e1RqC$MZNq1q9`5H?}FZhZbJ20&shG@ zul>Pc#P^WJCDlKzx|N;-SiVW3r7}(Y$H3O&2$gpL5{5!Uy*H* z4n~dfHY#@Nrr{SdlXaWgQyKyoGHj-jF%g-7{TAY7Wpdk~L(As}_M#g#pNJAGZ?)(f z;VA$naD>;2qzR?O+|Pgmx)S4UV@}BTkV`v(?(5A+@=JI-7dUEr*agZ#1+Xn+X#LVOCEnK``QxEQrWMAv8@ zId1^^gTP0)VV5!23RX-n39^>OY3I=6vLvdDh4C}|Dxa_ysjuBXk5u$1%_vX059Nvi zI+O#}XsPSLJTU&FFiYombM3HL0vc;p+oL(`B9yH^os6>mVY42iY`D+1y+b^I9&5NO zH@LAl_YgelS~hHf8^L7JH?ANtI?SWCNdVQFJ6z8CAS@lDcRU!zb(f;fvvX|4&{4Nq z7iA~6wR;(ojx-zHrHRDrS*^}mkPzQX6pJY&>}UMy4hzX!hbn~zU<|~y?q|J4qJZ@{ zc`_DixY?$=if*`-g2Q>G`$y%|p5@9ig=PRr%Dn{5(q_C05%;2beRtqt)(Zs{V3rRd zs)HXk5u!l(rY#gf#NDT-4*~0fogs6h4;T!U@_O3-<3UDLLNUB}c)f`fSMqA4_K{YD ztvyNV$_?MbHmAs^mZ#)&Ts)A58}P-^(>%=qYs^Y()BM0nA9{S?14yi z$0=}}3bCMX~xk-tsILQ(~dEK{Xo`X^uZmBN5mW|AAuxAC7 zA@x*jCs)LYBdiRcnh&y5bFkzmUBeXf+)DbQBbE^a5kNdob)B0gnyug?Qg7hiybRmZx0_Zije*2MCdZ%;X;yfb9o(D&jXHvL5&VfY#|DOIU5ynXP*%;5-4mhPV&52a*~&SRL{lb{_(e+)q!2xmXSP(Vz*R&3 z-ByBAq)3#&*9JuQjOYckRqoW-kFH!Wsw_Le_ho}6YV4(c6ia1=A&v!BME7tB%$fLvi0jn;H!ejA9AB zocqve9*_JTwCQ*rdC0Z#cwQ%q7toet1eM+&i0A&!YZKg&TtU=w5`E3~~(P7$-?kfDcK1lpvPv<+imfou-+0{^@Fk zkSd_j`s{6C)({r&3l`As5o?ZDFia2S%C85DP7zEO=9AZ=%_sU6jIr1*8Je~X%A#XQ(-<-DVmJnR!D#T)Ea(&@9PKif7|`>^#4}* zgZ|&HU;BSQ#TPNv^eZO5iFei-!z3fZ!}bq{kP`wh3_1MIKHNG1UWFWiHElUKd@526 zT^Jwuge!+z%qcku(T6g^&oXY+81iF9BQx=cUx;IwDj5VzF&YCo-}H|bC8XaqNe#wX zPQJWtyG3naB_tFAGP%*sMfC;MFTfdYc@2x|zUoA?-0-7}RxE9P`i z1PMz?ay(#Lj*LztP-z9XQ693GsW-5pW_Jt3@|l7>u@Mj}Rt4+BO_Ck&swa75BpjK3 zk{|%)5N&rFU{o=Ds0b~ApW*bHD<^bpcKbF zPV>`U*^MMO=U?E2bVoD#i+W!OcO;-8YUl!~7RCd2AwWjWn!c!I3%7lHpS_+A)Oa|f z1qVybekqLPjDX@MrBtRn<8#yJ15$k;npX5#kX>iu>j@UrO|qgLrDFY(M?^JhPQV<% z0K6U@Vm&CZcs-E!Tb=sJb}{?0aiveY#sf5SfJ-<$Qjk04>HMKjzB4dXs){j**;y%OZaXxWk#!7zofYmULR4id1mko`e% z^(eiw#xc(1^bg8tAO~UrZdZxGp)B|uDC}Urf(Ff+Vgw8e?40n_2}s)wazF2FH6{wF zV05qD+i+Y#!MNLpd7#8b5Tutx{+B*Hjw*w$eUcq`Zq_YI?Wa4t%o>ddr&Z-S=AI7! 
zFuo3KhIbI-&2CNU4@2xw#2hPqJ*_EPHv#K$54gW03?8)hiXB42)w?ha4eNHUgAU=W zJ3Jy>OcvPAn8+>IJJ)UAbPusHrnH4s*=ger92-L^)`JCzQ$~O73=%-?jVM+d@3({M zv-!{U_Dt1Fc~4JeG#ie)C)vUxr!7R~$_PBtliQ-=k*YJ`nQ0NcWy}>S&8@MYYq#4$ zz(g|6#^2*S9S1sQ6Nu8&@P}{4{3%>%)?E62?rcIksjfevj!-Bd+!MD?CifTme2Wkw z5-`_-Zrf_E49Kd{hTdUJpipTGcU_MjP84#g>?xZ5Ad`djrq8J`5Kkv#MnE>XB|>Y~ zwj3ENnfQHUDq;I9TtVC8_tD_sni18=PhnBt5d;7NJm5)=#YHI1s{^E);m`)XM}P(o^*p9+N}GIriBOm$jdI?#6ld@?pkxtP9S7agG67Uf&MI~ zvDZ19bJ!{-OWSd%A0VSCB>k1jX zMw;Xt+Vj|!qoF5wA?fv}tGhw_^e2)+2O49=l^!-7BO8;n;81m};(}pgW_RXjNtF1; zfV`xYi4dE?kppsK%?}xT!8F5wH0?|vdKhJeV8`^wt*?wInNcEHAJH%ih=CYg=`?Dg z;U~~S`vNlGfn^(TQE16w+Oji7g#L+gLGFLnYrIzJ)RtA^ESh*>-v<*BLn7#|rPn7f zjgCEl>7+u8d#}Aa9qxeh0)~42rAU?FxSBQ}z)#rT!%1p`Ge$U!An~>|J(2mFq#669|fIHgx5U#BV$^%yqF2Q5=lw}!x)y~6CUYVs@n~f;l_>2$hgB8=)-0I zvFTGrHI!NCsswk8N7zIdv?m*KmG+Qa3gJkPYjZLxD$H4u$ zL=cZL6`b4Fp8pHF7pM=1a{_A2qN3rpQY0HB*Q*Hvi}np@ z!Nd#V9@ebGT4m1RkS}!Oj^mao_%WdyjUmYIaW2LRiJ?QFqa8Iq4iLX8;nqXaqTV17r!Sr#xset;gPabd|x=zU^L&GKOF2qXNMg z1Zho_xNf@gh=K~7>@3>yfdNbRLqu!K;u$XQpZ+$a!Z4U6b4=2Ut71MP0u5n=Qr8jHL3&TMqz za1U0zM%q-BqtXRE0-yUPiE>6{3DDwYK}XD)Cs_-^FS1Z2%oP3wQSEE2pkYj*zj?0?h!G@f5(UEHB2TX4)DNyyoF<4b6#GMtNJu$ zV>$&?@@+Dp4n*_&Bo&};6Rv_OQ5s6=3;ZC0QSi=|o`Z!v&|;u>IX|WJsp$HB15E2m zGZ)K;cg9iN!^|F_wtb}`fDt`SXuSKGswj)2Qi}38DJj#R#}6JTLd-}#5klBoPu(0Z zNc59pUwOVC~1`|F3}|H_Yu|J#4a|NZL!`;&ZC?9`OE-2f4ci9>bfnTa9ThA~y@ zZ^Cf5MD~Y=e-zQmEpx8TVy8HAi3~Pj=uGrX)2TPj2G)IoDWhVHik?&W#lI3UiHHO- zjYK2n^Bg1@iHofRGaY|tj*(MAKNnP1qB3e)=lS4W3&rUyPTh$zgK_Z*y2qfXb|TKh zO!1-rP7Fm$a9<3d%0V>xb+$Mc+#(H%-7>^GegM|Ka%U_s;|tu#TB^>p7}Omqh4whO z05Dpt92i&8{E5+ui5j+%bQ;5S9iM^qiWhWJu||p-Ekw#;IVn$zaJRBuo8M!SQl8LS zK)x8T^~QAYRIVSPZ^*KebQ~%utr^DbPqG~iQzlZD+V7}cF~@vz@)zo+57*~cMdykH zA%knuHT>M4d^hS9-IP+Ui4i0id@=>-Q-pE)oHI$|KvLt$asZE7HcYOs^JSgBYk-9* zfYTD)H^dxwWhrm+;8x%uB@&1p0JVEQrGG9^_M4e5AjIxGE;?}_A3C1PM!wKOWJGZ- z-`5irj6|_aeFkAn=JvH=gd>UR z@kjuMCS-dPbSxh60AxcB!pV&sTGvi2RO7~tVz=&|=I5ZHq0ho`mZ4K?kC@`O-f@FUo_ zA8B=aHTCv%?v2~&)X`3yPK#zv1jQJ~lJ}*MU`EVX<)+zXdFtJ5Rg!uJ0@b2e3WLVx z3+KLW0@~Ib8Z&I0W(dOy?zq^BVS}=$uS>}Ie${xnZ{3awI`zStAe_?1KF_^(H&;tR zcECmkDIS$G9GHg{b&Q>8@hD37U|9}q&8!&oEt(tn0ZTF6icC*1oRcSJ25Z^{Cq%hm zO)z1fpHpr?>=o8}iDDWVas?n5_nK=!#)#iN{GE&Pci1wtW56iC#2XYS(3oezMFSu7 zNy|(L1py>qnNWu;NwaDiLggif=`{EJ1|J2cWueyJa&}q}aHQo-0ojH=WDULxvHSa3{84@b zJ#q+Rq&XQx`S1jCLoqxeam@B%RwDa{q;EMDBf`{>{Nw=ajvawPtBoE_2u=?6-#d(y0zNuHGK?k#AA@XG0B`Zn1AvYX%ZRHgk0&(X0lc3j9T_~|9&9t z`(f2iu$?V)Ys?&4u@7|yDcIU>dmR&rATR8=1<*H)YHPHO3Bm5O_?%}kq6g=L(1dTg zBCfDeytnvdOESR}{eHSVP@Lhvyq~EEKvWRqfMvY4yEQiPa*6E_ZaMa5%`F>N3GF@e zdy+He@LC?5B7wN~Vl5xcK5Zj?UEt+w*sQ}pwO%!%2IpM{^E@`IF-1(N*+#Xct%iRZ zCvqeJ=&Ish7xp2bbjPnm5{i5qVCQ`SMMG3UCB&USe53YDN0EF*Qx)@7pI}Zu017?X z7Ssbz3_v`(MGY<%-AIlxUp3OU(XX>dSDZyjOOfRxJ@dQ-)df7JJ_Qo7>!3*&oD%{p zZ9LxZHN+Ce%k|mkM$tyd$fq5%VU*4@0!P;b7}~1Q6{X$PBk^fH-!&|#XbEFXX3kkp zLXe!62DxPnhRv|2Lyg-%>_!G5z92Is7Y{f3;Z#w&`dY(p@j;GUB4|HcSPeOJCz`C# zG@i71Ic)J9ORV0g2s^PM5CSk%e9E#j0drW7jy^Ob3s|h>lNqvHgm@f^vMnr@7Hpl7 zlKQC8GGf+*xNC{L3W6A@p0?-Iql?pm11Znv3VpPELMJfsvkggvD%mgo3;&E+!hG~( zjxj5*hnF@A0+ro^;iMo`A^TXAhZ>6(S8y|>S(IR=>5uG_MSR1ND&}cmZnDEvZ5D{6 z0~Uh{xT?&MpN|Ykg%W*35RS$bKUEXx+)!R3oDam967~^UP;R6Y$uefiBkmFYJY{=@ zrXLnd;buWyaf;ng(Fbz;DE$iN=&rs3w^`8y`B_(i)4I9{67vMH0xGM0s)GTk7y-tT zd4!)aTb!&&dl@cyVH%BTwJ|QpVr3Q-jD9*JoSOvF7WdUI+u@pQ3v-W*SO@YG3mL~u zeIZUzSW>$F$+i8y|N6oDf203+_`mjt{lBmHzn|udmJqp{4!yJ1XAg+^PiUkbH!|3= zqeO~pic|~xm=tOQQkN1)Kr{b~)Q1xj8@=g%-rZR2IhX+2V~6#acxOAxhCTxsmV>-j z*9@>nI<>&-mqA5frei$)v5hPyU6~+LS3!E)>q+gvua_~7_!!Eh#u%3_dj@rkdE+MfTbnp)2?iz@x5+=ioLdYr6U{(qq zlq0iwtGT>N98xu6lfKp9-tdE7=lQPz8G)GLgaOqT&^6IRJ`Akk((M#rZV>Qp;jP%% 
z{$_~T?6hD~yg&Mdb45l!KGV#BN>#iakr=`Kwbx_}l^k_zg|-J*ozGg>T?*?!fz*5e zJP@*`x&sUbGIL3jzACWz1NK==5|xi}hYacDe;xu?_TV8(zW~JhFZ#sx=TS zKjN~@M>plu&LtosZrUZd+7winVvInJMlB6#>ccD=y5bd=aTk6Jv;jM94{f%$;EfA3 z2`6&UD{uu0u9XbjKy_MY_bH$kQ`Iw}Ac52|$IW#yVK_BR-{>I#hb4OCzHC$?8a|Xp z?H2orV{qkZFe}-8V-7v`ogLtVHn?sUjL0g~iE-&8XeEg{B z(t({ow;5d>e@dnHC zL|vR4<_Qe2E;M-^{OumYKr4QfWm)eYYIXa@WijB`r?+MUfEu=?q9ol7ZdaJQkP{WS z{0JsfN6MNJ-d=ahEcLE&B*ieUkiZUIm^6)Q?0p8I-)9d_(x5e~g~ko1Ly$B&nlzT0 zf!hw&htuT5bHMFGgoYj;%Y0+lo>s?YE@Sc73}2yi*_-1fK0I}(@W$HV9bJvq$xcQ~ zz`eldi)L5D=ODUmwFazuMLZSpOMF+R8Gwf!#*$9U4(TP+10tx0rw+XIho)=DINEtS zod*y@HRDtxU@hO+&B#Zd2|?Bn3nm%KJ#N9_mCJHzkMw##i~4bR0U-f|4PXswpDpse zc@lXHL}#NOCzdJ9l-)gG(6w2ozEoOsU>qUN`F!mlByaGZ02OzMz9TDPTCUOzU7BU^ zMTyFhpa9whiu6AC1q68}DM>n3Iks3ut1GR#QA9 zX3UT|ai|-PTd4J`b@ zPoRhvb)a~?aiKA5-3GUZ2|8?-D|8G?cuw3=_UKmpB@;5_OYdMpUqxP-I+%tT09YR#t9lS`y z_+~L~?T-&)(?BF*o%J)1g(H&8p25^L=EA*O>tnJPU+s-VWPbwm2wH<(2(JWfJ$n34 zq6s4oCVV@C2|Fh;DL8M#K}I;nUuI6>I35iz1iBat0o?hB)c5p|C_3OUV=Rw1gRSnU zq}&$59W@dBHX5dxW1sN$?@xc0 z=rpiXsZwL{vJmG~)q#|l>n5WiVe%#Ay2kf0W&ViRDqN@$2TD@I0vEc^2oJj9eksrX zsoOF?+1C%||IYg3;s5qO;Qzb7_Wyp0FWX@w>p-78gOt7K2IOq8XC+p1Ic6BD*>WuN z8H-s^Dnt&yJ2V8`M#a@_5_D43HD)SIS^J4}D)zRm#iyeZYAg9~v}%#{0R$ACF*JZl zsuu-M2M0SGtdi;zT=D_!_-;lb4NA}n^hV(gEEb*z zNLdEkpHH9*6qUX`3?XKMD{t;7Ak(K@kZkJ1x?>GxStkSlS!-dZY`CDrGi7N{8fd8**fltiQT<1Y6`7HI>>`fwZ9JA~%fGpIl^ufQl_rO~Q}ACl2VfN(xM@_x!Y(qXqXq>=I8#%fOpl z8aQ;rWh6*pT!>NN4(pz|yesdPeL#!M`mJdXF`}lnkBqd5B1}*2v#=lPtV zr3G>AG4%ll(${<%{^;q6oC={3@16CX-Gx~pI$u^YO%JY5mbwx+qUUPsC)Bm_p5oBNB_K)2!qgPAg^Soxgi?)b5Q#D$tEe+P=eoz98 z3H78RRy1?vPETT0lV?VS;@!36 zn!m-j3pd_Csw)R8*rm{M0TT~!Z;$Xsj-#`++bQ)m>9G)bDu2T=RkVg58P{XhIAo&^ z(CwCutz5%~8rC4+?dJQ{V-f(h#J0NEJ44AEgc-eRAl~@K^3=;oSC<0=`yK;x_OKXq z$?%I^%Q4+^WGm~1_RP+I)XtmfgD9+lfFkR6qHlX5$*H^;qqa=17(|0u zeq8D497&fL-Vh!}+b(}B;uBZsnY(sa6V-xYmf=Af8c5l*NjtW(>apDdUogpbV}+9r zE#Bx}1GFV!In0P4H{0t~FHS=?m@#mo`{Bi3vH+`(AykfcaV8nMHqBfN$n|2a5Pb^wBD!H1xO z$c)_>l!HtOT<^Frv2q!w_J|ijjlXtw3}=V(Z^0KxFgn8}OSkgA88{7hpOD6p;e-ri zVNiOxm@5aNx^U;)zz*I}9@Ibi{Xcc<=8y9ApP>KuBl`cYKg9oi_5b)uzO3*!vEMi= z3K=jJRP#n+q78e&*B5tacj=4(dS-|Unl!+$>I%Qp-2z{iU!BCrl*{2n}_(4M=W_XV9 zBUsO+&Uh=#)}3^MSm2hwxe2m8!1RLFp%G;6Q}kh)t;LRACrBNgnyd#3>p{=Q;c`>k zA8_ZTzGQ52mL}38wQ_JP43V0I^TvmPnV^zE_;Jsb71qOY&EGmFaPw&wW2E!Cp91=P17mA3fO6Z;66HMX8cx4)o z6fA<}wgJjJ1wz8kHd_&~pkcV|y3NtF2ldaV3ktUk39J4dG2&(&rJCy^ipFKd?!!z* zz#){suFvsj!xXHZ^n20chsBlcS3Pn)Rz&j|h=C>=ye^|i;};a}+md6pVyl|=)(2!z z+p{DiWfn08|oG=J5x&;AE4zlqT7gjONUj~v0$>%i8)HTI77!!L1vqi(9L<|y=Q0zjeOsKbee=6*#`S2%ZlaQI?xDXVi7Ad8S za*erRIA|4NZ0CV6GVP(oysEwx6En~PGNlh;|=#|`**M#>18+$$; z2g6`;V+VBVAL`*#{mNe?no=t%@K<; zU8B^T?nA{)jT8zfEphgd;|_GLW-Ulhc_N@STd=~qK*l%>uFZO#mV{sFRK>3wjhv9` zrUeG(eF4mNR(5*+FwGF;jmgWdDd6lGjqGJ^oyx%ffyRKc3dd}Hf@%i^lT`6sUV0DNx-?GhUjQ3XXB;Qr&>Op z5wc_{QnN&2iSK~tG6hTnpT^RO?Ba)kod9$In9;}vSlX@k8?_~P?`ErgSTP<*Hm+3; zgWF=b3?|Z6X(J$fBWD8z?&g(jMf& z{m}I-RO&m0Y!o8zLzEOt2G|n8Z;U_$>JRX^`FLpV6K~vwKnfP@`pgsrgax#7n%y2H zp{2@v=t(%zG(`O_#Vd9`;3n+>ijYBDmN#}zgTO;~pXDRUOMX<3L{E=T-;I>~G}WPX zvZ86&F`zUbGbJ*{qp?-(fFjTZ|{ORtIQRZBAj8DX|niJxV~ppTYapRo@@E#~ex zg7Pqk8r9{`|7KOh8>y@sXWt!@`R}oj-s6&`zlgLGn|bK;n0I)Xk%H|@(u4?} z0{DIhCR5ky9bjL#4iUn_^&c5_IjW9#hQqOjXcOWU$cRNx&f}K~AQ@VnpA@!q&XoNy zh81^E$8I1FDE9#Bmj{LQejhE8nz&CR#YlK@MC%YBCIkteTT~5}pAS-x-H7^7O7#P# zf$6TpD8l@44|nKBqJ@EguZhq?HqxWV#T@hi%9xp@Lpm5Bx??~M((ukq4|RFW2B<^V zpGND?U;hdEfB)u3>;Lun_xpc348QjOeu^*S_BvqO9%mmMW(x`O(>P)zxfoy@@PY!$ z@JKj2Z@Wx*Mp)JA^i<`^3BhJr6!ayrSi?d}^(+U1Fsd>wHon))mu>bYXE@Z@LYzjzu&C^G z>HzJAK_NLe$s=^N@0kWTa{xVBrZd>2qX6l4u(e2{IW5jvE4t^m7-spAjz$GVF^ju& 
z*hwxEVtgJbZn>xD_{sh2HZaVr>_)R>y5&Sg<0?72D|Vi`w09og1|itnjUa5cwF6`5 z83vioq-hwH6=s#kITy zyF5Cxlgk{#I$9tBc1b3glv%-@6gJ8k1$B6aevys^8Gxn6r*wm|HTjUz5!q_=P za_&1`9fr6;RiILRoNWlVAlLSj{-snJ7U98)P0#<@j>ShW4i6P2wz#WY`(BjK3=}XE zdj94P;b2F6e4<|<`sgJ`DM!a9oZn?UUTG#+&!iZr_~M+?PEMOfv&V8`PUl)E2hYcZ zoaQ|J9ZT@i@_W=;mI&s4=~gwmdYHIeFQQ{_H`jyN0`!M?7MZm%h{NqQU=AGU;FQJU zquC>Qhnq?-3i3zn`k=Q$O`R{f=X0U<-=1PPF2b~Hoa$-WsgBg@NO`N>HyK=>Ffl!} zgH5sQq=-DRu?Z`_B6w%cmUbtPa&DU(ye7(ZEmfR5Uy!_5b_VgF5HM)$pv?JU>@r#@ zXaEIcYsxH|gDqNLcyG|g&nzDqBJh&??RU7Sbm=ntq_jOi_sXZEG+3tl45l}TL6BT; zOj#jOVj+?_FV3Z4ZBofCIwcklZ{bN_HwmBxJMRRH(=oubU*Mr@3o;;c1xxH$*u|nL zj1kash_g#^iM@jo>>!aT5V9`KeVd;28hn7%U|Z=7Q>H<=^Ssk8! zv6@s62BUh!p{3M7DNI)~0B~0cLJo-<_J6&xvPDyPwf)d0UC<4o8Tzx?#p_-i11Q)SEMut+_jrtw^c7wcO8b-9$DkD?Ra(x=+8EcjkoxP0@lPS^$ci^by>Bd1MNhyN2FQwhF zx=_jx22E^Qe@1(cDK5t#ptJB$(IAmLuI>zWUGPiuC+bAVnt3ka2cJ1^4UKm;Tx z2BvGw7&a{pq~R~;v(qSqALyjfo^Tu2AODH8b6_n1=#dY>*c6|f8tIV&V*x!#20LCT z=#X9`=bb8{mw}FaV9tm*dG8pth`a552E*`!u^X;E&^X8dKymzVk>ihnd7)%Qxh=HPZ0eOKm$iItQ~H}2PwO%xzNWRyN`F4=M&ct!;AF|xLoD7MYcgALTtsFw;MVph{eAsV zAY0QamLD-u0pYZ>g(xj>VR-KJt$GpSmK25x+(M?!Db`&Rpp-CQ?x$U=1293R9BWF9 zU=y?*X#FwhpB~pTF~CVQ6IdYFtb%0OP`dlZo*;1KFomD1Y_SY<%!vbu=5NdHpKUCV zoGp|DtIDuQPi7&-q8FrGFSt%?`_9+v(GFugKcXmbpq)UM_rn8wPA5O|(;y9walGlf zGQ`E1mCbprH$K8-ul!X z(wKXmuZ(T;$+!@yJQEHA1ThDnre;|Cp5?&j`mknuiDQeq zt~~8-6JLht9C?Jl7}>IJ(A+L0jHEdPYGOGjT#` zLBdLn{_&tQhUH}cL1_l@LRcV2q6JnyY54H1b<=reRB!B*JbjMI}zgTz{i(#C{ zdtXda24dCWow)OSXPlEv5Z(q#-V^}30&g65Dj8ij15JqjOk|R|z%V049YL&EC7R6O zby6$CVj#;q@FEn!js^a#?0?5~c3=Z6)WzsDtjEnOro?}G+}I0rO~-hYJ{!Q9*w&-C zeYw#vvj3@@Ghe^HetrG=`t|ke>(|$>uU}ukzJ7iE`ug=p{Q7?Yy2?OZ0B8XKP)2lz diff --git a/files/hpc-intro-data.zip b/files/hpc-intro-data.zip deleted file mode 100644 index 7efd5d6db1695f90a7db5c8096a0494dbf540c90..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 41253 zcmb5#Q;;t0wl?UuY}>YN+qP}nwr$&0vut~oZQE6|FpK^DyF0pL?e6~9UK#OZ$i&6Lq)qSY>H1$?#JB!`cEOgZm5j#) zDfZKw2`v~n^3Ab&mfWxte?B#pI^qan`qBJ?@#@y=^11g_vzF8V-~WXXP@zBe#CldV zcKD~s>v`_yU6+N2Ywl-b@51ZhakMe!Crs>VQU0pTC&6Rgr!4K&)l+YJSw@D39B@ce4R8~j{tWmh1$-Z0#Qa#o$ z5Z#ZN=HQ}Q;53g}@I=~s1vmUUKk`w>PW!+XfNbwzw`dlcBf)WeF3}Nm<@N|{jq6+E z`gZD-ia#pxCID^btgY62+JH7sRVv^1auX4)qjd;H!BxVUTUyQfWV~qrP4xgMLuqxk6}nT&)gs z^Vp5B80JZL$N2-r?I{k}Z}f!7?tj!hWga#yc6-(Kgu58tbm zfLf#~J+!EBBVsuW+BW8_3(!d#&(;(8LcuLB4UM9W(g<a_94xSX-+!G#)z(@L5vQUg<|haHC6cn05@J9D>BtKm zBJ=|FO5z1p`lBmTAycOz7@C9TKKpHG0W2v`Ss$vN)bo#iK7tU3ja)zP2#G8X1|BC0@bUjtf)l6%eC&PTcDsncG)ol3)>}mZXJ@qCdQ;5 zR`>`Zp0_EM*5eRUie4@yUUs3wREKp6zQ@{mnA7xZh!jFhsNvZ@N1b!&W4G;K$WrZ( ztjZP%F+=3NV?yVXx}*7Pj(`O2ksHN8h2KUvW#wXrFjtau+5Rp^ysCvS;c7&c*{Bt& zpNb-ykb*i(5ksksN^kJELYT-HiSiQzIU;tV1f-%a(+!~kR4RZS|8$!HOXJ=R9}MJ9 z(h-rqND^#dY()ycmi8KVnXE_7Rq=`Hx7c5h7ar^4MfFrIDu6F$6-beGvr6|Yf*ipl zj;hwT%ixq8bK4f?;rEHxBz45;al@*X4bU7AqL5G-M2JQ)5`I>s&t)mg1lNkhg{wO1 zVONt&X&^bq5lmr26>0zRm5Q=0Hb1v~ne*7w^{Rt*c!XIRq)zPcR;E0HOp5fR{L$jR zxBd0&Ln~iFnGV;<%#{$y>UWfyNL2^xO-~)jC!Z@uIBnwKJqn)S-=P!;)CkqvRa=fPDs$g5)(`FEwHJk1EDXNg#rsn|ZThm<}!PBfHEQoH%7cSUEgG&H~_F z(~&-q`4&)z0Ljx?>Ez*y!X2InU@p_x zEOjuZnKY8|FWT4wMIfy)V}3CCL+(xVL=jUI5f4oIth#H{B-#6TM6wV^fc zQJ|^N=7(c%y68XDtc68-ZX31=D-#j?f1}6nf$=SWg=5w#i0)>gJ(S~ki@C{J%=|GW_Xo}wzT%ink|Xm31m$mn20HG@;X2W zv)w!j>eW*9RS%0nr+hzinZYKqnl6Y$z0#xdXl@7=?%N?KNP$rYfdk zD`z)XO@ei|3-9ZX5y_d3;E(ng2Q!p6R`3NLt;0P|0rp}Lge|8BH^g7CRIHBM5@VXN z$-+Gf9}kF8FunSe737ZQ)w}43D0SS#QTSOjgfZre{yDU2;gQcmiO7D`9mCzYxAhFR z?#&%ygczr)IoQPK2JF>zHQOC`jC?7;L|=ffu2!|NWH}nzr>Wz z*Yu&C?0KE;Cauv*c*+4K{v5gUd8v!nnqm}OS}rY~zjP5_G45Ir^vDGnVtI0?^F+hQHbC0qt 
zg@{559@^asDM_-vSCi%2O@9=?y(eLVZZ5w&k4Wj~;=^Dvo+7$FToBj9}LXUS9O z^;SZM(>qnXQGWWM(C11;O1v!Z0HIe zL!SHnKY;&*r2hvpaZi5!Y5%Zh^PffiKZ1;fgM*8!k*mx9jxciaOz=aDh+9w4H8+wG z;du#!;mP{hC|1hJF;fFaQb@q)I_Y*Bbg?8ZA#qTLkWm&CkY=6A!Y%ua=!2Q3zZ4T| z*AHXs#Rwp=F~WM$sDVs%MOnr(%~D~f^TRnVPvJu>f&r2b)v>!MsU8GnHbx>;@CJLj@E`Dyj4+NelJI{FUde+r3 zXq%>o(wUHwfrupiCJXg&@S)CBwktKxSU#K&f5D?8HWWz#dJ7 z&b4V^!%;`mmj!Yb=$rWT#qcMD1^j3ObE&AP6cw*k5KbV>x?N>!iQJPyj`jhVfy|8Z zq!jtmk3t$r#X|x)&%5ZPg7n^0afhs4=ZN3 zer-G2t%QM3ddaG!UR$0Mmw2w={??>_oJI&+9 z(VgtyJYH`1qxfg`v%5XKD7jurj$IZGVoC>9umpr!(It5$c7G0Vy_cLcFLEcn zZF#Aw4&5IWUY;+C3HlwN)xJ>6S9xDf0Dn0?ay_iJSB9Dhh z+!e_e`0i=sr2lH%oNK}Ym_0ngxomg8>f+UNIUQgAuJFdVs%>4t%Ez~i9Dj(#n;iP?qOSZI|}l_59=XBRI<^ckV;dF#za!76uyz5n!};G?YbR$CO4eFU7!QCSq! z9kKgmk4+%Fi$&h7Ze8RyKxSgWHkWx;0_EeF`^(tiys^h#nTEvELM*ri`HvR(W>qOx z9#6R{qa$dEd0v~9dl}ot>GFa?#m3W{Em$o35|9$@w5v;W{ocI}KB4!v<`6spENQHp zI(CNCzQ9jbJsrpf0k*POX9)RTNvs>eG&0!5n~V6(hawISycP89qGp^CNTY|nOGqiw zWzrEW0~Qo`W0JWE2!hYpG2C*Va9!JN2$C}p$6B%L&0FiJ72TDg;EL~GUv2#_&WUc_JNM+Oo(rZO%|4F>*${+kKORSLw~34fx%g8FU%C2%?dQQgvMI0rN|V(KmpF~;K$ z2*lj#6JX~UeSW9|h2wyM`Hcmq=JY``J(R#+U%QJch^4Uoa|B`!Hg40?-NOLik;!Jo z6Lr8O(QC{o4eWWaN@lk3Rci3vpc6%wpvvQTtt^R&n2lAQ<#fL=Qpu()1bQB{@x$XB(4_zF;!n0Ov zQagXL+5{jD-z+om5hd!o*^i+~K~2lWiUlzd>U(~W=ksfYjabXgsaRWJ3t1s0TxZ&; z8*T#p3*+`HeC1eVQPK#DY$P#Dnm!s!i5QD767+fv*iqSS8o>=7ivO)kJ$ZJBn{}kI z_)xDD)^!77AqsKWF;nyHQ<4W4s#22Q#os^Bj%ei4dA;+MPce4yeJQ0F_vJCPD9lRo zYE1MDK@H@}q<28@psU@}HzZnb)1{vkfK{ZvE)ZhzJEeY;9eN|VYL9-LCRxHLs(qYxdE}958D@uzUCNbiC-j#RK9qSI51C5tQw+Lmxb~&(?>$=oPN2A> zH!R041w^cU>R@J^4phtjwF4={tx?zedXfjyp9#vfS~P-82*+2+H`+v5*uxliRmTYP zVyS0|g224_Td8(c+-Q;%_(Z7*KWE43!b*gqsc;(`!YLx)cu;C6w*g?hc=SAUx=I(- zpS~sJS~Kg}P4CON>lFEZN>re%e!(IFMpP0VTRY?C^BZkHQyv&nDMXuW zZef-%#~50WF7xR%FnH>CJI+GY*5i)#LD0titRlg)_^<$m%Nb_uK#F z7ipbzUI9{-{3RC`?Zzur_r#AZ7UlOF?2^zu#E(Mx7ULL&Ip+xMK-NRdxR3o{cLWYc z_*XM1 zBc^nOcN(O?c-u~@G)U$kyS_RBvT7tIudlldG`>5_fe7;=LohxJhCOmO5Gnl|;$Kk% z@kN#9_z!Bx{xj74GsWZlx2WOEP>LtxffV`iVbYKaM11LQ^0>zCRjR3wNs}H}53k+R zswsb={r=hN_q)qh>AgL$lJC=5;YhCF&AJ zTGeX(o60gPs-FLW(SPf)hrhUxvEXpKIeAuw_;TxG)8M)5ai)dzVeYj@n6lk{ii?IUtv zlQR`xNNM5s0E7St{;hOvnxmT7J0}5yXvknUe7BAyc&g!(Y-%V>*|7gftSihsZ!3Uc z%~>~iLLP+gw+Vg8x9eD-`~aD&|3mIvG#m$&zIU*rm$kYH6{T8SP7 z7&_QoiEJS=?|~>Yss$|RLm3<}myqvbXBhCDUu!sa0+R5Ma~Mp6+!E<1IUNf&yz(73 zB^{dGg%!0R=yCTd8jB4ozLtmL-z&YM-!oxCdL~i}P*U4!ysF4C1HSb&122VAa{M+I zq00@AU5_sNtWi2n5w+$J>ZX=WqUT_Zma+$CZOB~i9GIbN`<_*KjWGPGY)Wm_#4sTk zoRRW)Rh!Cu>GTDKwmlR8S;r>+_anP)y5xbO={A6 zSlkE=FPStz3)(s62_U4kzV;55-%HPvz)?=l$Q4^H$m5o+!mPlB=lNh59hG`V29?ww zQY?w{5qBgq=wB2W1D1hT-#F|-k3$WP1$2AAscvcx;AdXg^rgYEZf^b~+zhan{MbOx z{OwaYLLOX*l1AFkZ_#cnwJo?$5Gc4z)HhXjA=1&C#h^z;{ChfP#g`tI$eJoCY~cMF z6f!V7@}1;bShl1!QCmq@3Z@C^RfP=Xj9W1Q9C_l9UOSh&>RdMO{|*J7KU+8k z$Dd~3-@$3w@}_)XD25X_RPePP)0H=46-?HPxfW#meh5q=3>ueY;o4KUIij?f+3&|6 zm;?ADPa_Cy$YW7qhB^|+x^+KbV~jwptUX*^(~yEtNVVLEHI=|>IIk|gL49%8dKsxv zGMq{Vjmw#uc=_2pa9NJEI-8g@URm(~Y^Db`kW6Z5I%(w?Rx*rJ+ppILLb4{K>asSo ztMZxyo;R6|t_bfXVm|SEA~1W~=grTFY^xxqvu5WwU()7nkY!JfONkk)=8`le2;Eua zajD~p)D@EPMX$?#%2G|hfG1ozF-8{*RDtTO2&USOo(sbIiS&z{HQuHN zpXB$RE27b@)j?H=kUPR=^p~~2LlQ+Q?Wm}7r=GcN!s~6SB)^zK9A3T}%%UCi3lafl zBfI-v+oU=EU{t-=oT?bl#v>;X_V4<}oMcPEpYrbj^y=wQU$91g@7l8RLIhY2TTmA5 zi%s*Ga&5}b)T&G7BA0Y9gu8ojbCv`IvDxfyJ(zOpjwygDX4=6RTqTT?HdZAl$Ur-m zfNk8qDFa)C5n4LS0O|u9+pZ)Y>delm6h z(bhFER&TFDjgU6Rv=xMXIEyS!a|t$%rKUQGZ{4F!b>UvTI^{fF;fBQH9Q@T$XTO~M zH9%cO>k!W^uvEczg(>#zupp)JOh3AY=P}rRxvu+B!<`lv# znUgaeM&>j~Yh*U?9TY8^lTdBbMTI}a(o>wv-JA$HH0u6K2B5|==sm~wVGg!Q*WvC$MKI2eKG%R z%+2VFFYA7_G&~%3t}U7DU_JHhl%>6UxKt4o5bs!F4f=q$@g${-#>#ww=dPb8}xaAip>%)nS9Awf+h@C 
zefS&^rJKTd=Rs!Pa>}Ri8kI)n9twVD@ZPq}k~z+IKTw{*5}6X^6@tfVF)^XBJS*!U zw%LHi$DEwvOEjU9>^gfL!`r*jF$B}8c_)Q<1+Zo%VR7lDkIl8=@*5u@-&AjUz8Ey8 zbI$K9_ps*EfFjaLU)_52iu2fW?c-{%_`a@;=TpME_qXwmaxNhhv>q|soHWWb@;f4G z%d(H{A_hEREVYxaF>L@2wtnA7t=IrD=rHjBKVOBxGFrb4ew~N_RKG@70PE5}ht^K! zH*6f-TpVvaFm*HK(b2{RU|xSmec#;^9{|KcVgdlaoz5H0|BF$k3r3axZq&ii#m)ML zlc~9lrH%OwD=!y||8f}rABPNO0|YSike*VcQ`%|bl=P&PmQr80=aQ_yv|KXjLqob>vshjI> z(L{q6>noSthq8Yc1>v~BvqH_|U#94P^B&k(TB3^^F?DyaMFo}UZ$VALM+f&?P=98< zzY4L?3o6bjN3s)&%KA5|Eh_T=?BwrL`MVq_den=$>gRtza%|h-^Hf1Mw5jI`V$t*UVwuW$D##Cg z>Yqg`=us~|IXX{WgZ&HYAJ37{qh4ImIZu5bgEsZ&y#7P_fQ=Hi1^~!WhYKn}$WHzF F=|4f7P%i)g diff --git a/files/jargon.html b/files/jargon.html index 6a47ef45..9eb3d295 100644 --- a/files/jargon.html +++ b/files/jargon.html @@ -70,7 +70,7 @@ # Shared Computing Resources .center[ -![An HPC resource (img: [Julian Herzog](https://commons.wikimedia.org/wiki/File:High_Performance_Computing_Center_Stuttgart_HLRS_2015_07_Cray_XC40_Hazel_Hen_IO.jpg))](/fig/HPCCStuttgart_Hazel_Hen_XC40.png) +![An HPC resource (img: [Julian Herzog](https://commons.wikimedia.org/wiki/File:High_Performance_Computing_Center_Stuttgart_HLRS_2015_07_Cray_XC40_Hazel_Hen_IO.jpg))](../fig/HPCCStuttgart_Hazel_Hen_XC40.png) ] From 11ad96c9da885a72184cb2430e84eaddd576da52 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 23 Jun 2022 17:15:57 -0400 Subject: [PATCH 02/25] Revert "Merge pull request #410 from carpentries-incubator/revert-407-amdahl-code" This reverts commit bdf8529a958af90400f7a651accb84c2f6b41b81, reversing changes made to 86229ebc05571ca9d9dbdd4cc6f9faceacc56cbd. --- _episodes/15-transferring-files.md | 209 ++--- _episodes/16-parallel.md | 771 ++++++------------ .../parallel/eight-tasks-jobscript.snip | 14 + .../parallel/four-tasks-jobscript.snip | 5 +- ...jobscript.snip => one-task-jobscript.snip} | 5 +- .../parallel/eight-tasks-jobscript.snip} | 6 +- .../parallel/four-tasks-jobscript.snip | 6 +- ...jobscript.snip => one-task-jobscript.snip} | 6 +- ...script.snip => eight-tasks-jobscript.snip} | 7 +- .../parallel/four-tasks-jobscript.snip | 5 +- .../parallel/one-task-jobscript.snip} | 7 +- .../NIST_CTCMS_slurm/_config_options.yml | 4 +- .../parallel/eight-tasks-jobscript.snip | 11 + .../parallel/four-tasks-jobscript.snip | 5 +- .../parallel/one-task-jobscript.snip | 11 + .../parallel/eight-tasks-jobscript.snip | 14 + .../parallel/four-tasks-jobscript.snip | 5 +- ...jobscript.snip => one-task-jobscript.snip} | 5 +- .../parallel/eight-tasks-jobscript.snip | 13 + .../parallel/four-tasks-jobscript.snip | 6 +- .../parallel/one-task-jobscript.snip | 13 + files/hpc-intro-code.tar.gz | Bin 0 -> 3391 bytes files/hpc-intro-data.tar.gz | Bin 36535 -> 0 bytes files/hpc-intro-data.zip | Bin 41253 -> 0 bytes files/jargon.html | 2 +- 25 files changed, 433 insertions(+), 697 deletions(-) create mode 100644 _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip rename _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (70%) rename _includes/snippets_library/{UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip => EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip} (62%) rename _includes/snippets_library/EPCC_Cirrus_pbs/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (62%) rename _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/{one-task-with-memory-jobscript.snip => eight-tasks-jobscript.snip} (68%) rename 
_includes/snippets_library/{NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip => Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip} (65%) create mode 100644 _includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip create mode 100644 _includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip create mode 100644 _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip rename _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/{one-task-with-memory-jobscript.snip => one-task-jobscript.snip} (70%) create mode 100644 _includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip create mode 100644 _includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip create mode 100644 files/hpc-intro-code.tar.gz delete mode 100644 files/hpc-intro-data.tar.gz delete mode 100644 files/hpc-intro-data.zip diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 042d59af..5856e2dc 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -16,25 +16,27 @@ Performing work on a remote computer is not very useful if we cannot get files to or from the cluster. There are several options for transferring data between computing resources using CLI and GUI utilities, a few of which we will cover. -## Download Files From the Internet +## Download Lesson Files From the Internet One of the most straightforward ways to download files is to use either `curl` or `wget`. One of these is usually installed in most Linux shells, on Mac OS terminal and in GitBash. Any file that can be downloaded in your web browser -through a direct link can be downloaded using `curl -O` or `wget`. This is a -quick way to download datasets or source code. +through a direct link can be downloaded using `curl` or `wget`. This is a +quick way to download datasets or source code. The syntax for these commands is -The syntax for these commands is: `curl -O https://some/link/to/a/file` -and `wget https://some/link/to/a/file`. Try it out by downloading -some material we'll use later on, from a terminal on your local machine. +* `curl -O https://some/link/to/a/file` +* `wget https://some/link/to/a/file` + +Try it out by downloading some material we'll use later on, from a terminal on +your local machine. ``` -{{ site.local.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +{{ site.local.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz ``` {: .language-bash} or ``` -{{ site.local.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +{{ site.local.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz ``` {: .language-bash} @@ -44,8 +46,8 @@ or > This is an archive file format, just like `.zip`, commonly used and supported > by default on Linux, which is the operating system the majority of HPC > cluster machines run. You may also see the extension `.tgz`, which is exactly -> the same. We'll talk more about "tarballs," since "tar-dot-g-z" is a -> mouthful, later on. +> the same. We'll talk more about "tarballs" later, since "tar-dot-g-z" is a +> mouthful. {: .discussion} ## Transferring Single Files and Folders With `scp` @@ -59,47 +61,29 @@ mechanism. 
To _upload to_ another computer: ``` -{{ site.local.prompt }} scp path/to/local/file.txt {{ site.remote.user }}@{{ site.remote.login }}:/path/on/{{ site.remote.name }} -``` -{: .language-bash} - -To _download from_ another computer: - -``` -{{ site.local.prompt }} scp {{ site.remote.user }}@{{ site.remote.login }}:/path/on/{{ site.remote.name }}/file.txt path/to/local/ +{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_path ``` {: .language-bash} Note that everything after the `:` is relative to our home directory on the -remote computer. We can leave it at that if we don't care where the file goes. +remote computer. We can leave it at that if we don't have a more specific +destination in mind. + +Upload the lesson material to your remote home directory like so: ``` -{{ site.local.prompt }} scp local-file.txt {{ site.remote.user }}@{{ site.remote.login }}: +{{ site.local.prompt }} scp hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: ``` {: .language-bash} -> ## Upload a File -> -> Copy the file you just downloaded from the Internet to your home directory on -> {{ site.remote.name }}. -> -> > ## Solution -> > -> > ``` -> > {{ site.local.prompt }} scp hpc-intro-data.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:~/ -> > ``` -> > {: .language-bash} -> {: .solution} -{: .challenge} - -Most computer clusters are protected from the open internet by a _firewall_. -This means that the `curl` command will fail, as an address outside the -firewall is unreachable from the inside. To get around this, run the `curl` or -`wget` command from your local machine to download the file, then use the `scp` -command to upload it to the cluster. - > ## Why Not Download on {{ site.remote.name }} Directly? > +> Most computer clusters are protected from the open internet by a _firewall_. +> This means that the `curl` command will fail, as an address outside the +> firewall is unreachable from the inside. To get around this, run the `curl` +> or `wget` command from your local machine to download the file, then use the +> `scp` command to upload it to the cluster. +> > Try downloading the file directly. Note that it may well fail, and that's > OK! > @@ -107,25 +91,29 @@ command to upload it to the cluster. > > > > ``` > > {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -> > {{ site.remote.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz -> > or -> > {{ site.remote.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-data.tar.gz +> > {{ site.remote.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz +> > # or +> > {{ site.remote.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz > > ``` > > {: .language-bash} > {: .solution} > > Did it work? If not, what does the terminal output tell you about what > happened? -> {: .challenge} {: .discussion} -To copy a whole directory, we add the `-r` flag, for "**r**ecursive": copy the +## Transferring a Directory + +If you went ahead and extracted the tarball, don't worry! `scp` can handle +entire directories as well as individual files. + +To copy a whole directory, we add the `-r` flag for "**r**ecursive": copy the item specified, and every item below it, and every item below those... until it reaches the bottom of the directory tree rooted at the folder name you provided. 
``` -{{ site.local.prompt }} scp -r some-local-folder {{ site.remote.user }}@{{ site.remote.login }}:target-directory/ +{{ site.local.prompt }} scp -r hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ ``` {: .language-bash} @@ -157,7 +145,7 @@ A trailing slash on the target directory is optional, and has no effect for > ## A Note on `rsync` > > As you gain experience with transferring files, you may find the `scp` -> command limiting. The [rsync][rsync] utility provides +> command limiting. The [rsync] utility provides > advanced features for file transfer and is typically faster compared to both > `scp` and `sftp` (see below). It is especially useful for transferring large > and/or many files and creating synced backup folders. @@ -166,13 +154,15 @@ A trailing slash on the target directory is optional, and has no effect for > commonly used options: > > ``` -> {{ site.local.prompt }} rsync -avzP path/to/local/file.txt {{ site.remote.user }}@{{ site.remote.login }}:directory/path/on/{{ site.remote.name }}/ +> {{ site.local.prompt }} rsync -avzP hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > > The options are: -> * `a` (archive) to preserve file timestamps and permissions among other things -> * `v` (verbose) to get verbose output to help monitor the transfer +> +> * `a` (**a**rchive) to preserve file timestamps, permissions, and folders, +> among other things; implies recursion +> * `v` (**v**erbose) to get verbose output to help monitor the transfer > * `z` (compression) to compress the file during transit to reduce size and > transfer time > * `P` (partial/progress) to preserve partially transferred files in case @@ -181,27 +171,25 @@ A trailing slash on the target directory is optional, and has no effect for > To recursively copy a directory, we can use the same options: > > ``` -> {{ site.local.prompt }} rsync -avzP path/to/local/dir {{ site.remote.user }}@{{ site.remote.login }}:directory/path/on/{{ site.remote.name }}/ +> {{ site.local.prompt }} rsync -avzP hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ > ``` > {: .language-bash} > -> As written, this will place the local directory and its contents under the -> specified directory on the remote system. If the trailing slash is omitted on +> As written, this will place the local directory and its contents under your +> home directory on the remote system. If the trailing slash is omitted on > the destination, a new directory corresponding to the transferred directory -> ('dir' in the example) will not be created, and the contents of the source +> will not be created, and the contents of the source > directory will be copied directly into the destination directory. > -> The `a` (archive) option implies recursion. -> > To download a file, we simply change the source and destination: > > ``` -> {{ site.local.prompt }} rsync -avzP {{ site.remote.user }}@{{ site.remote.login }}:path/on/{{ site.remote.name }}/file.txt path/to/local/ +> {{ site.local.prompt }} rsync -avzP {{ site.remote.user }}@{{ site.remote.login }}:hpc-intro-code ./ > ``` > {: .language-bash} {: .callout} -All file transfers using the above methods use SSH to encrypt data sent through +File transfers using both `scp` and `rsync` use SSH to encrypt data sent through the network. So, if you can connect via SSH, you will be able to transfer files. By default, SSH uses network port 22. 
If a custom SSH port is in use, you will have to specify it using the appropriate flag, often `-p`, `-P`, or @@ -213,17 +201,18 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > modify this command? > > ``` -> {{ site.local.prompt }} rsync test.txt {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} rsync hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > > > ## Solution > > > > ``` +> > {{ site.local.prompt }} man rsync > > {{ site.local.prompt }} rsync --help | grep port > > --port=PORT specify double-colon alternate port number > > See http://rsync.samba.org/ for updates, bug reports, and answers -> > {{ site.local.prompt }} rsync --port=768 test.txt {{ site.remote.user }}@{{ site.remote.login }}: +> > {{ site.local.prompt }} rsync --port=768 hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > > ``` > > {: .language-bash} > {: .solution} @@ -279,57 +268,40 @@ The most common archiving command you will use on a (Linux) HPC cluster is optionally, compress it. Let's start with the file we downloaded from the lesson site, -`hpc-lesson-data.tar.gz`. The "gz" part stands for _gzip_, which is a -compression library. Reading this file name, it appears somebody took a folder -named "hpc-lesson-data," wrapped up all its contents in a single file with -`tar`, then compressed that archive with `gzip` to save space. Let's check -using `tar` with the `-t` flag, which prints the "**t**able of contents" -without unpacking the file, specified by `-f `, on the remote -computer. Note that you can concatenate the two flags, instead of writing -`-t -f` separately. +`hpc-intro-code.tar.gz`. The "gz" part stands for _gzip_, which is a +compression library. This kind of file can usually be interpreted by reading +its name: it appears somebody took a folder named "hpc-intro-code," wrapped up +all its contents in a single file with `tar`, then compressed that archive with +`gzip` to save space. Let's check using `tar` with the `-t` flag, which prints +the "**t**able of contents" without unpacking the file, specified by +`-f `, on the remote computer. Note that you can concatenate the two +flags, instead of writing `-t -f` separately. 
``` {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -{{ site.remote.prompt }} tar -tf hpc-lesson-data.tar.gz -hpc-intro-data/ -hpc-intro-data/north-pacific-gyre/ -hpc-intro-data/north-pacific-gyre/NENE01971Z.txt -hpc-intro-data/north-pacific-gyre/goostats -hpc-intro-data/north-pacific-gyre/goodiff -hpc-intro-data/north-pacific-gyre/NENE02040B.txt -hpc-intro-data/north-pacific-gyre/NENE01978B.txt -hpc-intro-data/north-pacific-gyre/NENE02043B.txt -hpc-intro-data/north-pacific-gyre/NENE02018B.txt -hpc-intro-data/north-pacific-gyre/NENE01843A.txt -hpc-intro-data/north-pacific-gyre/NENE01978A.txt -hpc-intro-data/north-pacific-gyre/NENE01751B.txt -hpc-intro-data/north-pacific-gyre/NENE01736A.txt -hpc-intro-data/north-pacific-gyre/NENE01812A.txt -hpc-intro-data/north-pacific-gyre/NENE02043A.txt -hpc-intro-data/north-pacific-gyre/NENE01729B.txt -hpc-intro-data/north-pacific-gyre/NENE02040A.txt -hpc-intro-data/north-pacific-gyre/NENE01843B.txt -hpc-intro-data/north-pacific-gyre/NENE01751A.txt -hpc-intro-data/north-pacific-gyre/NENE01729A.txt -hpc-intro-data/north-pacific-gyre/NENE02040Z.txt +{{ site.remote.prompt }} tar -tf hpc-intro-code.tar.gz +hpc-intro-code/ +hpc-intro-code/amdahl +hpc-intro-code/README.md +hpc-intro-code/LICENSE.txt ``` {: .language-bash} -This shows a folder containing another folder, which contains a bunch of files. -If you've taken The Carpentries' Shell lesson recently, these might look -familiar. Let's see about that compression, using `du` for "**d**isk -**u**sage". +This shows a folder which contains a few files. Let's see about that +compression, using `du` for "**d**isk **u**sage". ``` -{{ site.remote.prompt }} du -sh hpc-lesson-data.tar.gz -36K hpc-intro-data.tar.gz +{{ site.remote.prompt }} du -sh hpc-intro-code.tar.gz +3.4K hpc-intro-code.tar.gz ``` {: .language-bash} > ## Files Occupy at Least One "Block" > -> If the filesystem block size is larger than 36 KB, you'll see a larger +> If the filesystem block size is larger than 3.4 KB, you'll see a larger > number: files cannot be smaller than one block. +> You can use the `--apparent-size` flag to see the exact size, although the +> unoccupied space in that filesystem block can't be used for anything else. {: .callout} Now let's unpack the archive. We'll run `tar` with a few common flags: @@ -351,49 +323,34 @@ When it's done, check the directory size with `du` and compare. 
> > ## Commands > > > > ``` -> > {{ site.remote.prompt }} tar -xvzf hpc-lesson-data.tar.gz +> > {{ site.remote.prompt }} tar -xvzf hpc-intro-code.tar.gz > > ``` > > {: .language-bash} > > > > ``` -> > hpc-intro-data/ -> > hpc-intro-data/north-pacific-gyre/ -> > hpc-intro-data/north-pacific-gyre/NENE01971Z.txt -> > hpc-intro-data/north-pacific-gyre/goostats -> > hpc-intro-data/north-pacific-gyre/goodiff -> > hpc-intro-data/north-pacific-gyre/NENE02040B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01978B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02043B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02018B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01843A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01978A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01751B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01736A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01812A.txt -> > hpc-intro-data/north-pacific-gyre/NENE02043A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01729B.txt -> > hpc-intro-data/north-pacific-gyre/NENE02040A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01843B.txt -> > hpc-intro-data/north-pacific-gyre/NENE01751A.txt -> > hpc-intro-data/north-pacific-gyre/NENE01729A.txt -> > hpc-intro-data/north-pacific-gyre/NENE02040Z.txt +> > hpc-intro-code/ +> > hpc-intro-code/amdahl +> > hpc-intro-code/README.md +> > hpc-intro-code/LICENSE.txt > > ``` > > {: .output} > > > > Note that we did not type out `-x -v -z -f`, thanks to the flag -> > concatenation, though the command works identically either way. +> > concatenation, though the command works identically either way -- +> > so long as the concatenated list ends with `f`, because the next string +> > must specify the name of the file to extract. > > > > ``` -> > {{ site.remote.prompt }} du -sh hpc-lesson-data -> > 144K hpc-intro-data +> > {{ site.remote.prompt }} du -sh hpc-intro-code +> > 16K hpc-intro-code > > ``` > > {: .language-bash} > {: .solution} > > > ## Was the Data Compressed? > > -> > Text files compress nicely: the "tarball" is one-quarter the total size of -> > the raw data! +> > Text files (including Python source code) compress nicely: the "tarball" is +> > one-quarter the total size of the raw data! > {: .discussion} {: .challenge} @@ -402,13 +359,13 @@ extracting it -- set a `c` flag instead of `x`, set the archive filename, then provide a directory to compress: ``` -{{ site.local.prompt }} tar -cvzf compressed_data.tar.gz hpc-intro-data +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz hpc-intro-code ``` {: .language-bash} > ## Working with Windows > -> When you transfer text files to from a Windows system to a Unix system (Mac, +> When you transfer text files from a Windows system to a Unix system (Mac, > Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files > slightly different than Unix, and adds an extra character to every line. > diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md index 53b1db4f..8c70d96b 100644 --- a/_episodes/16-parallel.md +++ b/_episodes/16-parallel.md @@ -7,16 +7,13 @@ questions: - "What benefits arise from parallel execution?" - "What are the limits of gains from execution in parallel?" objectives: -- "Construct a program that can execute in parallel." - "Prepare a job submission script for the parallel executable." - "Launch jobs with parallel execution." - "Record and summarize the timing and accuracy of jobs." - "Describe the relationship between job parallelism and performance." 
keypoints: - "Parallel programming allows applications to take advantage of - parallel hardware; serial code will not 'just work.'" -- "Distributed memory parallelism is a common case, using the Message - Passing Interface (MPI)." + parallel hardware." - "The queuing system facilitates executing parallel tasks." - "Performance improvements from parallel execution do not scale linearly." --- @@ -25,536 +22,262 @@ We now have the tools we need to run a multi-processor job. This is a very important aspect of HPC systems, as parallelism is one of the primary tools we have to improve the performance of computational tasks. -Our example implements a stochastic algorithm for estimating the value of -π, the ratio of the circumference to the diameter of a circle. -The program generates a large number of random points on a 1×1 square -centered on (½,½), and checks how many of these points fall -inside the unit circle. -On average, π/4 of the randomly-selected points should fall in the -circle, so π can be estimated from 4*f*, where _f_ is the observed -fraction of points that fall in the circle. -Because each sample is independent, this algorithm is easily implemented -in parallel. - -{% include figure.html url="" caption="" max-width="40%" - file="/fig/pi.png" - alt="Algorithm for computing pi through random sampling" %} - -## A Serial Solution to the Problem - -We start from a Python script using concepts taught in Software Carpentry's -[Programming with Python][inflammation] workshops. -We want to allow the user to specify how many random points should be used -to calculate π through a command-line parameter. -This script will only use a single CPU for its entire run, so it's classified -as a serial process. - -Let's write a Python program, `pi.py`, to estimate π for us. -Start by importing the `numpy` module for calculating the results, -and the `sys` module to process command-line parameters: - -``` -import numpy as np -import sys -``` -{: .language-python} - -We define a Python function `inside_circle` that accepts a single parameter -for the number of random points used to calculate π. -See [Programming with Python: Creating Functions][python-func] -for a review of Python functions. -It randomly samples points with both _x_ and _y_ on the half-open interval -[0, 1). -It then computes their distances from the origin (i.e., radii), and returns -how many of those distances were less than or equal to 1.0. -All of this is done using _vectors_ of double-precision (64-bit) -floating-point values. - -``` -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count -``` -{: .language-python} - -Next, we create a main function to call the `inside_circle` function and -calculate π from its returned result. -See [Programming with Python: Command-Line Programs][cmd-line] -for a review of `main` functions and parsing command-line parameters. - -``` -def main(): - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - print(my_pi) - -if __name__ == '__main__': - main() -``` -{: .language-python} - -If we run the Python script locally with a command-line parameter, as in -`python pi-serial.py 1024`, we should see the script print its estimate of -π: +If you disconnected, log back in to the cluster. 
``` -{{ site.local.prompt }} python pi-serial.py 1024 -3.10546875 +{{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} ``` {: .language-bash} -> ## Random Number Generation -> -> In the preceding code, random numbers are conveniently generated using the -> built-in capabilities of NumPy. In general, random-number generation is -> difficult to do well, it's easy to accidentally introduce correlations into -> the generated sequence. -> -> * Discuss why generating high quality random numbers might be difficult. -> * Is the quality of random numbers generated sufficient for estimating π -> in this implementation? -> -> > ## Solution -> > -> > * Computers are deterministic and produce pseudo random numbers using -> > an algorithm. The choice of algorithm and its parameters determines -> > how random the generated numbers are. Pseudo random number generation -> > algorithms usually produce a sequence numbers taking the previous output -> > as an input for generating the next number. At some point the sequence of -> > pseudo random numbers will repeat, so care is required to make sure the -> > repetition period is long and that the generated numbers have statistical -> > properties similar to those of true random numbers. -> > * Yes. -> {: .solution } -{: .discussion } - -## Measuring Performance of the Serial Solution - -The stochastic method used to estimate π should converge on the true -value as the number of random points increases. -But as the number of points increases, creating the variables `x`, `y`, and -`radii` requires more time and more memory. -Eventually, the memory required may exceed what's available on our local -laptop or desktop, or the time required may be too long to meet a deadline. -So we'd like to take some measurements of how much memory and time the script -requires, and later take the same measurements after creating a parallel -version of the script to see the benefits of parallelizing the calculations -required. - -### Estimating Memory Requirements - -Since the largest variables in the script are `x`, `y`, and `radii`, each -containing `n_samples` points, we'll modify the script to report their -total memory required. -Each point in `x`, `y`, or `radii` is stored as a NumPy `float64`, we can -use NumPy's [`dtype`][np-dtype] function to calculate the size of a `float64`. - -Replace the `print(my_pi)` line with the following: - -``` -size_of_float = np.dtype(np.float64).itemsize -memory_required = 3 * n_samples * size_of_float / (1024**3) -print("Pi: {}, memory: {} GiB".format(my_pi, memory_required)) -``` -{: .language-python} - -The first line calculates the bytes of memory required for a single -64-bit floating point number using the `dtype` function. -The second line estimates the total amount of memory required to store three -variables containing `n_samples` `float64` values, converting the value into -units of [gibibytes][units]. -The third line prints both the estimate of π and the estimated amount of -memory used by the script. 
- -The updated Python script is: - -``` -import numpy as np -import sys - -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count - -def main(): - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**3) - print("Pi: {}, memory: {} GiB".format(my_pi, memory_required)) +## Help! -if __name__ == '__main__': - main() -``` -{: .language-python} - -Run the script again with a few different values for the number of samples, -and see how the memory required changes: - -``` -{{ site.local.prompt }} python pi-serial.py 1000 -Pi: 3.144, memory: 2.2351741790771484e-05 GiB -{{ site.local.prompt }} python pi-serial.py 2000 -Pi: 3.18, memory: 4.470348358154297e-05 GiB -{{ site.local.prompt }} python pi-serial.py 1000000 -Pi: 3.140944, memory: 0.022351741790771484 GiB -{{ site.local.prompt }} python pi-serial.py 100000000 -Pi: 3.14182724, memory: 2.2351741790771484 GiB -``` -{: .language-bash } - -Here we can see that the estimated amount of memory required scales linearly -with the number of samples used. -In practice, there is some memory required for other parts of the script, -but the `x`, `y`, and `radii` variables are by far the largest influence -on the total amount of memory required. - -### Estimating Calculation Time - -Most of the calculations required to estimate π are in the -`inside_circle` function: - -1. Generating `n_samples` random values for `x` and `y`. -1. Calculating `n_samples` values of `radii` from `x` and `y`. -1. Counting how many values in `radii` are under 1.0. - -There's also one multiplication operation and one division operation required -to convert the `counts` value to the final estimate of π in the main -function. - -A simple way to measure the calculation time is to use Python's `datetime` -module to store the computer's current date and time before and after the -calculations, and calculate the difference between those times. - -To add the time measurement to the script, add the following line below the -`import sys` line: - -``` -import datetime -``` -{: .language-python} - -Then, add the following line immediately above the line calculating `counts`: - -``` -start_time = datetime.datetime.now() -``` -{: .language-python} - -Add the following two lines immediately below the line calculating `counts`: +Many command-line programs include a "help" message. 
Navigate to the directory +of the decompressed files, then print the `amdahl` program's help message: ``` -end_time = datetime.datetime.now() -elapsed_time = (end_time - start_time).total_seconds() +{{ site.remote.prompt }} cd hpc-intro-code +{{ site.remote.prompt }} ./amdahl --help ``` -{: .language-python} - -And finally, modify the `print` statement with the following: +{: .language-bash} ``` -print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required, - elapsed_time)) -``` -{: .language-python} - -The final Python script for the serial solution is: +usage: amdahl [-h] [-p [PARALLEL_PROPORTION]] [-w [WORK_SECONDS]] +optional arguments: + -h, --help show this help message and exit + -p [PARALLEL_PROPORTION], --parallel-proportion [PARALLEL_PROPORTION] + Parallel proportion should be a float between 0 and 1 + -w [WORK_SECONDS], --work-seconds [WORK_SECONDS] + Total seconds of workload, should be an integer greater than 0 ``` -import numpy as np -import sys -import datetime +{: .output} -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x * x + y * y) - count = len(radii[np.where(radii<=1.0)]) - return count +This message doesn't tell us much about what the program _does_, but it does +tell us the important flags we might want to use when launching it. -def main(): - n_samples = int(sys.argv[1]) - start_time = datetime.datetime.now() - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - end_time = datetime.datetime.now() - elapsed_time = (end_time - start_time).total_seconds() - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**3) - print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required, - elapsed_time)) - -if __name__ == '__main__': - main() -``` -{: .language-python} +## Running the Job on a Compute Node -Run the script again with a few different values for the number of samples, -and see how the solution time changes: +Create a submission file, requesting one task on a single node, then launch it. ``` -{{ site.local.prompt }} python pi-serial.py 1000000 -Pi: 3.139612, memory: 0.022351741790771484 GiB, time: 0.034872 s -{{ site.local.prompt }} python pi-serial.py 10000000 -Pi: 3.1425492, memory: 0.22351741790771484 GiB, time: 0.351212 s -{{ site.local.prompt }} python pi-serial.py 100000000 -Pi: 3.14146608, memory: 2.2351741790771484 GiB, time: 3.735195 s +{{ site.remote.prompt }} nano serial-job.sh +{{ site.remote.prompt }} cat serial-job.sh ``` -{: .language-bash } - -Here we can see that the amount of time required scales approximately linearly -with the number of samples used. -There could be some variation in additional runs of the script with the same -number of samples, since the elapsed time is affected by other programs -running on the computer at the same time. -But if the script is the most computationally-intensive process running at the -time, its calculations are the largest influence on the elapsed time. - -Now that we've developed our initial script to estimate π, we can see -that as we increase the number of samples: - -1. The estimate of π tends to become more accurate. -1. The amount of memory required scales approximately linearly. -1. The amount of time to calculate scales approximately linearly. - -In general, achieving a better estimate of π requires a greater number of -points. -Take a closer look at `inside_circle`: should we expect to get high accuracy -on a single machine? 
- -Probably not. -The function allocates three arrays of size _N_ equal to the number of points -belonging to this process. -Using 64-bit floating point numbers, the memory footprint of these arrays can -get quite large. -Each 100,000,000 points sampled consumes 2.24 GiB of memory. -Sampling 400,000,000 points consumes 8.94 GiB of memory, -and if your machine has less RAM than that, it will grind to a halt. -If you have 16 GiB installed, you won't quite make it to 750,000,000 points. - -## Running the Serial Job on a Compute Node +{: .language-bash} -Create a submission file, requesting one task on a single node and enough -memory to prevent the job from running out of memory: +{% include {{ site.snippets }}/parallel/one-task-jobscript.snip %} ``` -{{ site.remote.prompt }} nano serial-pi.sh -{{ site.remote.prompt }} cat serial-pi.sh +{{ site.remote.prompt }} {{ site.sched.submit.name }} serial-job.sh ``` {: .language-bash} -{% include {{ site.snippets }}/parallel/one-task-with-memory-jobscript.snip %} - -Then submit your job. We will use the batch file to set the options, -rather than the command line. +As before, use the {{ site.sched.name }} status commands to check whether your job +is running and when it ends: ``` -{{ site.remote.prompt }} {{ site.sched.submit.name }} serial-pi.sh +{{ site.remote.prompt }} {{ site.sched.status }} {{ site.sched.flag.user }} ``` {: .language-bash} -As before, use the status commands to check when your job runs. -Use `ls` to locate the output file, and examine it. Is it what you expected? - -* How good is the value for π? -* How much memory did it need? -* How long did the job take to run? +Use `ls` to locate the output file. The `-t` flag sorts in +reverse-chronological order: newest first. What was the output? -Modify the job script to increase both the number of samples and the amount -of memory requested (perhaps by a factor of 2, then by a factor of 10), -and resubmit the job each time. +> ## Read the Job Output +> +> The cluster output should be written to a file in the folder you launched the +> job from. +> +> ``` +> {{ site.remote.prompt }} ls -t +> ``` +> {: .language-bash} +> ``` +> slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt +> ``` +> {: .output} +> ``` +> {{ site.remote.prompt }} cat slurm-347087.out +> ``` +> {: .language-bash} +> ``` +> Doing 30.000000 seconds of 'work' on 1 processor, +> which should take 30.000000 seconds with 0.850000 parallel proportion of the workload. +> +> Hello, World! I am process 0 of 1 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. +> Hello, World! I am process 0 of 1 on {{ site.remote.node }}. I will do parallel 'work' for 25.500000 seconds. +> +> Total execution time (according to rank 0): 30.033140 seconds +> ``` +> {: .output} +{: .solution} -* How good is the value for π? -* How much memory did it need? -* How long did the job take to run? +`amdahl` takes two optional parameters as input: the amount of work and the +proportion of that work that is parallel in nature. Based on the output, we can +see that the code uses a default of 30 seconds of work that is 85% +parallel. The program ran for just over 30 seconds in total, and if we run the +numbers, it is true that 15% of it was marked 'serial' and 85% was 'parallel'. -Even with sufficient memory for necessary variables, -a script could require enormous amounts of time to calculate on a single CPU. -To reduce the amount of time required, -we need to modify the script to use multiple CPUs for the calculations. 
-In the largest problem scales,
-we could use multiple CPUs in multiple compute nodes,
-distributing the memory requirements across all the nodes used to
-calculate the solution.
+Since we only gave the job one CPU, this job wasn't really parallel: the
+processor performed the 'serial' work for 4.5 seconds, then the 'parallel' part
+for 25.5 seconds, and no time was saved. The cluster can do better, if we ask.

## Running the Parallel Job

-We will run an example that uses the Message Passing Interface (MPI) for
-parallelism -- this is a common tool on HPC systems.
+The `amdahl` program uses the Message Passing Interface (MPI) for parallelism
+-- this is a common tool on HPC systems.

> ## What is MPI?
>
-> The Message Passing Interface is a set of tools which allow multiple parallel
-> jobs to communicate with each other.
+> The Message Passing Interface is a set of tools which allow multiple tasks
+> running simultaneously to communicate with each other.
> Typically, a single executable is run multiple times, possibly on different
> machines, and the MPI tools are used to inform each instance of the
-> executable about how many instances there are, which instance it is.
-> MPI also provides tools to allow communication and coordination between
-> instances.
+> executable about its sibling processes, and which instance it is.
+> MPI also provides tools to allow communication between instances to
+> coordinate work, exchange information about elements of the task, or to
+> transfer data.
> An MPI instance typically has its own copy of all the local variables.
{: .callout}

-While MPI jobs can generally be run as stand-alone executables, in order for
-them to run in parallel they must use an MPI _run-time system_, which is a
-specific implementation of the MPI _standard_.
-To do this, they should be started via a command such as `mpiexec` (or
-`mpirun`, or `srun`, etc. depending on the MPI run-time you need to use),
-which will ensure that the appropriate run-time support for parallelism is
-included.
+While MPI-aware executables can generally be run as stand-alone programs, in
+order for them to run in parallel they must use an MPI _run-time environment_,
+which is a specific implementation of the MPI _standard_.
+To activate the MPI environment, the program should be started via a command
+such as `mpiexec` (or `mpirun`, or `srun`, etc. depending on the MPI run-time
+you need to use), which will ensure that the appropriate run-time support for
+parallelism is included.

> ## MPI Runtime Arguments
>
> On their own, commands such as `mpiexec` can take many arguments specifying
> how many machines will participate in the execution,
> and you might need these if you would like to run an MPI program on your
-> laptop (for example).
+> own (for example, on your laptop).
> In the context of a queuing system, however, it is frequently the case that
-> we do not need to specify this information as the MPI run-time will have been
-> configured to obtain it from the queuing system,
+> the MPI run-time will obtain the necessary parameters from the queuing
+> system,
> by examining the environment variables set when the job is launched.
{: .callout}

-> ## What Changes Are Needed for an MPI Version of the π Calculator?
->
-> First, we need to import the `MPI` object from the Python module `mpi4py` by
-> adding an `from mpi4py import MPI` line immediately below the `import
-> datetime` line.
-> -> Second, we need to modify the "main" function to perform the overhead and -> accounting work required to: -> -> * subdivide the total number of points to be sampled, -> * _partition_ the total workload among the various parallel processors -> available, -> * have each parallel process report the results of its workload back -> to the "rank 0" process, -> which does the final calculations and prints out the result. -> -> The modifications to the serial script demonstrate four important concepts: -> -> * COMM_WORLD: the default MPI Communicator, providing a channel for all the -> processes involved in this `mpiexec` to exchange information with one -> another. -> * Scatter: A collective operation in which an array of data on one MPI rank -> is divided up, with separate portions being sent out to the partner ranks. -> Each partner rank receives data from the matching index of the host array. -> * Gather: The inverse of scatter. One rank populates a local array, -> with the array element at each index assigned the value provided by the -> corresponding partner rank -- including the host's own value. -> * Conditional Output: since every rank is running the _same code_, the -> partitioning, the final calculations, and the `print` statement are -> wrapped in a conditional so that only one rank performs these operations. -{: .discussion} - -We add the lines: +Let's modify the job script to request more cores and use the MPI run-time. +```bash +{{ site.remote.prompt }} cp serial-job.sh parallel-job.sh +{{ site.remote.prompt }} nano parallel-job.sh +{{ site.remote.prompt }} cat parallel-job.sh ``` -comm = MPI.COMM_WORLD -cpus = comm.Get_size() -rank = comm.Get_rank() -``` -{: .language-python} -immediately before the `n_samples` line to set up the MPI environment for -each process. +{% include {{ site.snippets }}/parallel/four-tasks-jobscript.snip %} -We replace the `start_time` and `counts` lines with the lines: +Then submit your job. Note that the submission command has not really changed +from how we submitted the serial job: all the parallel settings are in the +batch file rather than the command line. ``` -if rank == 0: - start_time = datetime.datetime.now() - partitions = [ int(n_samples / cpus) ] * cpus - counts = [ int(0) ] * cpus -else: - partitions = None - counts = None +{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-job.sh ``` -{: .language-python} - -This ensures that only the rank 0 process measures times and coordinates -the work to be distributed to all the ranks, while the other ranks -get placeholder values for the `partitions` and `counts` variables. - -Immediately below these lines, let's - -* distribute the work among the ranks with MPI `scatter`, -* call the `inside_circle` function so each rank can perform its share - of the work, -* collect each rank's results into a `counts` variable on rank 0 using MPI - `gather`. +{: .language-bash} -by adding the following three lines: +As before, use the status commands to check when your job runs. ``` -partition_item = comm.scatter(partitions, root=0) -count_item = inside_circle(partition_item) -counts = comm.gather(count_item, root=0) +{{ site.remote.prompt }} ls -t ``` -{: .language-python} - -Illustrations of these steps are shown below. 
+{: .language-bash} +``` +slurm-347178.out parallel-job.sh slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt +``` +{: .output} +``` +{{ site.remote.prompt }} cat slurm-347178.out +``` +{: .language-bash} +``` +Doing 30.000000 seconds of 'work' on 4 processors, +which should take 10.875000 seconds with 0.850000 parallel proportion of the workload. ---- + Hello, World! I am process 0 of 4 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. + Hello, World! I am process 2 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 1 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 3 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. + Hello, World! I am process 0 of 4 on {{ site.remote.node }}. I will do parallel 'work' for 6.375000 seconds. -Setup the MPI environment and initialize local variables -- including the -vector containing the number of points to generate on each parallel processor: +Total execution time (according to rank 0): 10.887713 seconds +``` +{: .output} -{% include figure.html url="" caption="" max-width="50%" - file="/fig/initialize.png" - alt="MPI initialize" %} +> ## Is it 4× faster? +> +> The parallel job received 4× more processors than the serial job: +> does that mean it finished in ¼ the time? +> +> > ## Solution +> > +> > The parallel job did take _less_ time: 11 seconds is better than 30! +> > But it is only a 2.7× improvement, not 4×. +> > +> > Look at the job output: +> > +> > * While "process 0" did serial work, processes 1 through 3 did their +> > parallel work. +> > * While process 0 caught up on its parallel work, +> > the rest did nothing at all. +> > +> > Process 0 always has to finish its serial task before it can start on the +> > parallel work. This sets a lower limit on the amount of time this job will +> > take, no matter how many cores you throw at it. +> > +> > This is the basic principle behind [Amdahl's Law][amdahl], which is one way +> > of predicting improvements in execution time for a __fixed__ workload that +> > can be subdivided and run in parallel to some extent. +> {: .solution} +{: .challenge} -Distribute the number of points from the originating vector to all the parallel -processors: +## How Much Does Parallel Execution Improve Performance? -{% include figure.html url="" caption="" max-width="50%" - file="/fig/scatter.png" - alt="MPI scatter" %} +In theory, dividing up a perfectly parallel calculation among _n_ MPI processes +should produce a decrease in total run time by a factor of _n_. +As we have just seen, real programs need some time for the MPI processes to +communicate and coordinate, and some types of calculations can't be subdivided: +they only run effectively on a single CPU. -Perform the computation in parallel: +Additionally, if the MPI processes operate on different physical CPUs in the +computer, or across multiple compute nodes, even more time is required for +communication than it takes when all processes operate on a single CPU. -{% include figure.html url="" caption="" max-width="50%" - file="/fig/compute.png" - alt="MPI compute" %} +In practice, it's common to evaluate the parallelism of an MPI program by -Retrieve counts from all the parallel processes: +* running the program across a range of CPU counts, +* recording the execution time on each run, +* comparing each execution time to the time when using a single CPU. 
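+
+As a rough sketch of that procedure (an illustration, not part of the lesson
+setup: it assumes an interactive allocation with at least 8 cores and the same
+modules loaded as in the job scripts, so that `mpiexec` can launch processes
+directly instead of going through the queue), a single loop can re-run
+`amdahl` at several CPU counts and let the program report its own timing:
+
+```
+{{ site.remote.prompt }} for n in 1 2 4 8; do mpiexec -n $n ./amdahl; done
+```
+{: .language-bash}
+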
-{% include figure.html url="" caption="" max-width="50%"
-   file="/fig/gather.png"
-   alt="MPI gather" %}
+Since "more is better" -- improvement is easier to interpret from increases in
+some quantity than decreases -- comparisons are made using the speedup factor
+_S_, which is calculated as the single-CPU execution time divided by the multi-CPU
+execution time. For a perfectly parallel program, a plot of the speedup _S_
+versus the number of CPUs _n_ would give a straight line, _S_ = _n_.
 
-Print out the report:
+Let's run one more job, so we can see how close to a straight line our `amdahl`
+code gets.
 
-{% include figure.html url="" caption="" max-width="50%"
-   file="/fig/finalize.png"
-   alt="MPI finalize" %}
+```bash
+{{ site.remote.prompt }} nano parallel-job.sh
+{{ site.remote.prompt }} cat parallel-job.sh
+```
 
----
+{% include {{ site.snippets }}/parallel/eight-tasks-jobscript.snip %}
 
-Finally, we'll ensure the `my_pi` through `print` lines only run on rank 0.
-Otherwise, every parallel processor will print its local value,
-and the report will become hopelessly garbled:
+Then submit your job. Note that the submission command has not really changed
+from how we submitted the serial job: all the parallel settings are in the
+batch file rather than the command line.
 
 ```
-if rank == 0:
-    my_pi = 4.0 * sum(counts) / sum(partitions)
-    end_time = datetime.datetime.now()
-    elapsed_time = (end_time - start_time).total_seconds()
-    size_of_float = np.dtype(np.float64).itemsize
-    memory_required = 3 * sum(partitions) * size_of_float / (1024**3)
-    print("Pi: {}, memory: {} GiB, time: {} s".format(my_pi, memory_required,
-                                                      elapsed_time))
+{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-job.sh
 ```
-{: .language-python}
+{: .language-bash}
 
-A fully commented version of the final MPI parallel python code is available:
-[pi-mpi.py]({{ site.url }}{{ site.baseurl }}/files/pi-mpi.py).
+A fully commented version of the final MPI parallel python code is available
+[here](/files/pi-mpi.py).
 
 Our purpose here is to exercise the parallel workflow of the cluster, not
 to optimize the program to minimize its memory footprint.
@@ -564,93 +287,81 @@ node), let's give it to a cluster node with more resources.
 Create a submission file, requesting more than one task on a single node:
 
 ```
-{{ site.remote.prompt }} nano parallel-pi.sh
-{{ site.remote.prompt }} cat parallel-pi.sh
+{{ site.remote.prompt }} ls -t
 ```
 {: .language-bash}
-
-{% include {{ site.snippets }}/parallel/four-tasks-jobscript.snip %}
-
-Then submit your job. We will use the batch file to set the options,
-rather than the command line.
-
 ```
-{{ site.remote.prompt }} {{ site.sched.submit.name }} parallel-pi.sh
+slurm-347271.out parallel-job.sh slurm-347178.out slurm-347087.out serial-job.sh amdahl README.md LICENSE.txt
+```
+{: .output}
+```
+{{ site.remote.prompt }} cat slurm-347271.out
 ```
 {: .language-bash}
+```
+Doing 30.000000 seconds of 'work' on 8 processors,
+which should take 7.687500 seconds with 0.850000 parallel proportion of the workload.
 
-As before, use the status commands to check when your job runs.
-Use `ls` to locate the output file, and examine it.
-Is it what you expected?
-
-* How good is the value for π?
-* How much memory did it need?
-* How much faster was this run than the serial run with 100000000 points?
-
-Modify the job script to increase both the number of samples and the amount
-of memory requested (perhaps by a factor of 2, then by a factor of 10),
-and resubmit the job each time.
-You can also increase the number of CPUs.
-
-* How good is the value for π?
-* How much memory did it need? -* How long did the job take to run? - -## How Much Does MPI Improve Performance? - -In theory, by dividing up the π calculations among _n_ MPI processes, -we should see run times reduce by a factor of _n_. -In practice, some time is required to start the additional MPI processes, -for the MPI processes to communicate and coordinate, and some types of -calculations may only be able to run effectively on a single CPU. - -Additionally, if the MPI processes operate on different physical CPUs -in the computer, or across multiple compute nodes, additional time is -required for communication compared to all processes operating on a -single CPU. - -[Amdahl's Law][amdahl] is one way of predicting improvements in execution time -for a __fixed__ parallel workload. If a workload needs 20 hours to complete on -a single core, and one hour of that time is spent on tasks that cannot be -parallelized, only the remaining 19 hours could be parallelized. Even if an -infinite number of cores were used for the parallel parts of the workload, the -total run time cannot be less than one hour. + Hello, World! I am process 4 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 0 of 8 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500000 seconds. + Hello, World! I am process 2 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 1 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 3 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 5 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 6 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 7 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. + Hello, World! I am process 0 of 8 on {{ site.remote.node }}. I will do parallel 'work' for 3.187500 seconds. -In practice, it's common to evaluate the parallelism of an MPI program by +Total execution time (according to rank 0): 7.697227 seconds +``` +{: .output} -* running the program across a range of CPU counts, -* recording the execution time on each run, -* comparing each execution time to the time when using a single CPU. +> ## Non-Linear Output +> +> When we ran the job with 4 parallel workers, the serial job wrote its output +> first, then the parallel processes wrote their output, with process 0 coming +> in first and last. +> +> With 8 workers, this is not the case: since the parallel workers take less +> time than the serial work, it is hard to say which process will write its +> output first, except that it will _not_ be process 0! +{: .discussion} + +Now, let's summarize the amount of time it took each job to run: + +| Number of CPUs | Runtime (sec) | +| --- | --- | +| 1 | 30.033140 | +| 4 | 10.887713 | +| 8 | 7.697227 | + +Then, use the first row to compute speedups _S_, using Python as a command-line calculator: + +``` +{{ site.remote.prompt }} for n in 30.033 10.888 7.6972; do python3 -c "print(30.033 / $n)"; done +``` +{: .language-bash} -The speedup factor _S_ is calculated as the single-CPU execution time divided -by the multi-CPU execution time. 
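+
+That loop divides the single-CPU runtime from the first row by each runtime in
+turn, so it should print values close to the following (trailing digits
+truncated here):
+
+```
+1.0
+2.758...
+3.901...
+```
+{: .output}
+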
-For a laptop with 8 cores, the graph of speedup factor versus number of cores -used shows relatively consistent improvement when using 2, 4, or 8 cores, but -using additional cores shows a diminishing return. - -{% include figure.html url="" caption="" max-width="50%" - file="/fig/laptop-mpi_Speedup_factor.png" - alt="MPI speedup factors on an 8-core laptop" %} - -For a set of HPC nodes containing 28 cores each, the graph of speedup factor -versus number of cores shows consistent improvements up through three nodes -and 84 cores, but __worse__ performance when adding a fourth node with an -additional 28 cores. -This is due to the amount of communication and coordination required among -the MPI processes requiring more time than is gained by reducing the amount -of work each MPI process has to complete. This communication overhead is not -included in Amdahl's Law. - -{% include figure.html url="" caption="" max-width="50%" - file="/fig/hpc-mpi_Speedup_factor.png" - alt="MPI speedup factors on an 8-core laptop" %} - -In practice, MPI speedup factors are influenced by: - -* CPU design, -* the communication network between compute nodes, -* the MPI library implementations, and -* the details of the MPI program itself. +| Number of CPUs | Speedup | Ideal | +| --- | --- | --- | +| 1 | 1.0 | 1.0 | +| 4 | 2.75 | 4.0 | +| 8 | 3.90 | 8.0 | + +The job output files have been telling us that this program is performing 85% +of its work in parallel, leaving 15% to run in serial. This seems reasonably +high, but our quick study of speedup shows that in order to get a 4× speedup, +we have to use 8 or 9 processors in parallel. In real programs, the speedup +factor is influenced by + +* CPU design +* communication network between compute nodes +* MPI library implementations +* details of the MPI program itself + +Using Amdahl's Law, you can prove that with this program, it is _impossible_ +to reach 8× speedup, no matter how many processors you have on hand. Details of +that analysis, with results to back it up, are left for the next class in the +HPC Carpentry workshop, _HPC Workflows_. 
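+
+If you would like a preview of that analysis, the arithmetic is short: Amdahl's
+Law predicts a speedup of 1 / ((1 - p) + p / _n_) on _n_ CPUs for a workload
+whose parallel proportion is _p_. A minimal sketch (not part of the lesson
+code), reusing Python as the calculator:
+
+```
+{{ site.remote.prompt }} python3 -c "
+p = 0.85  # parallel proportion reported by amdahl
+for n in (1, 4, 8, 10**6):  # a million CPUs stands in for 'infinitely many'
+    print(n, 1 / ((1 - p) + p / n))
+"
+```
+{: .language-bash}
+
+The predictions for 4 and 8 CPUs land within a percent of the measured
+speedups above, and even a million CPUs cannot push the speedup past
+1 / 0.15, or about 6.7× -- comfortably short of 8×.
+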
In an HPC environment, we try to reduce the execution time for all types of jobs, and MPI is an extremely common way to combine dozens, hundreds, or diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..ad8a8eee --- /dev/null +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,14 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip index ac8effab..dfa00e6b 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip similarity index 70% rename from _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip index 5838157f..91ebd101 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip similarity index 62% rename from _includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip index 56aee37a..9caa7145 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-with-memory-jobscript.snip +++ 
b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=1:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=8 # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip index b1d90eb9..04a6fb3a 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=4:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=4 # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip similarity index 62% rename from _includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip index 56aee37a..d267e8cd 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=1:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=1 # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip similarity index 68% rename from _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip index 13418f34..b052e666 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip @@ -1,10 +1,9 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 -{{ site.sched.comment }} -n 1 -{{ 
site.sched.comment }} --mem=3G +{{ site.sched.comment }} -n 8 # Load the computing environment we need # (mpi4py and numpy are in SciPy-bundle) @@ -12,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip index 1512adde..b24c7153 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip @@ -1,10 +1,9 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need # (mpi4py and numpy are in SciPy-bundle) @@ -12,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip similarity index 65% rename from _includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip index 5838157f..4c149443 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need -module load python3 +module load Python # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml index 393d5580..0f55d3f0 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml +++ b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml @@ -21,7 +21,7 @@ snippets: "/snippets_library/NIST_CTCMS_slurm" local: prompt: "[user@laptop ~]$" - bash_shebang: "#!/usr/bin/env bash" + bash_shebang: "#!/usr/bin/bash" remote: name: "ruth" @@ -32,7 +32,7 @@ remote: homedir: "/users" user: "yourUsername" prompt: "501 ruth%" - bash_shebang: "#!/usr/bin/env bash" + bash_shebang: "#!/bin/bash" sched: name: "Slurm" diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..09ab213e --- /dev/null +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,11 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} 
parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip index 5eb930b4..af8f4653 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip @@ -1,12 +1,11 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip new file mode 100644 index 00000000..984f5740 --- /dev/null +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip @@ -0,0 +1,11 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 1 + +# Execute the task +./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..ad8a8eee --- /dev/null +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,14 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -N 1 +{{ site.sched.comment }} -n 8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip index ac8effab..dfa00e6b 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 4 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip 
similarity index 70% rename from _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip rename to _includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip index 5838157f..91ebd101 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-with-memory-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip @@ -1,15 +1,14 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} serial-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} {{ site.sched.comment }} -N 1 {{ site.sched.comment }} -n 1 -{{ site.sched.comment }} --mem=3G # Load the computing environment we need module load python3 # Execute the task -python pi.py 100000000 +./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip new file mode 100644 index 00000000..9caa7145 --- /dev/null +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip @@ -0,0 +1,13 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -l nodes=1:ppn=8 + +# Load the computing environment we need +module load python3 + +# Execute the task +mpiexec ./amdahl +``` +{: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip index b1d90eb9..04a6fb3a 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip @@ -1,13 +1,13 @@ ``` {{ site.remote.bash_shebang }} -{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-pi +{{ site.sched.comment }} {{ site.sched.flag.name }} parallel-job {{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} -{{ site.sched.comment }} -l nodes=1:ppn=4:mem=3G +{{ site.sched.comment }} -l nodes=1:ppn=4 # Load the computing environment we need module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec ./amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip new file mode 100644 index 00000000..d267e8cd --- /dev/null +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip @@ -0,0 +1,13 @@ +``` +{{ site.remote.bash_shebang }} +{{ site.sched.comment }} {{ site.sched.flag.name }} solo-job +{{ site.sched.comment }} {{ site.sched.flag.queue }} {{ site.sched.queue.testing }} +{{ site.sched.comment }} -l nodes=1:ppn=1 + +# Load the computing environment we need +module load python3 + +# Execute the task +./amdahl +``` +{: .language-bash} diff --git a/files/hpc-intro-code.tar.gz b/files/hpc-intro-code.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..1b2e3d9e60c002bbe0492992be0c15fe0eb5fb03 GIT binary patch literal 3391 zcmV-F4Z!jriwFP!000001MOPfa^gl7o@+hD3HH>0gE3${PF!(PjLnRx#oz)Q?^I0{ zX{5#|7HX|pGA7wnZS6DU5%zNLbCat)N}goT>6SqJ`EST?Z#>DNU^>JXDIN9 
zxf9YY_{_bLkX8E0p(!|w`8R5yw140zHX#+>@t{Y)v$+W4YAAq>piVXjh748D1Cmm~ z>G^b-oKL?K^GRn4!4F**O{yokY($F^=QIb!GP#t(@6m&3wpMRKWSmec6k&IjIc} z(!dccnAs}_JIkC;0j8YKL1b58tdKDWd&*iyLb6NAXI|~FF^OR^#|#}7Xj%B3$ISo* zjCajI;7%H%57T%_Hd-u_a0=EJ7tAkK3~Oc%xpX@H)C6dgModA@Ro5GE}atpfLaRA4}s>iK1q(~H^s#QotRZYO~+hWkZU1Vi=|?k;w!siKpuP{g5zTDzS#4C{{oz$xJLHac zoVkdNlZduayzZlq=;Vu+!I*OwimKcyMeF4rBhYwWAaa4-$li~T1P?*Wa$v!>AVpWz zgNYOsXNr7M7z}A{z^~DW*DA_Umd2aKIdQh2&6zbb8mL-2@){toQNxH|^o2-x?OJp% z@&b%iu22=ZEU4@e^Y(QzcEz)Oc z6{aqT7kF7Jjax1TI*QtqMW%5IEp9nnpD&VDUD&2;3tc>yqFuqHR7ml4#F-5J_(-Bo zW>aOTvBx?*(SO-QiI#Mrg`hMW6dzXRvdrw{vgBT?4>Ie_z9Q{l)>vRTY1w=7><3rx zL4L>0Q2CTf@mXf#iAcpm)Rod`F?WGjA=nO<+i zY`53T=+UvC8UO9W{4;2w|I@BjPtR+Gk+sP+sLAuXQd!ggDV2+_OL70Fynk3x{hz~9 z+W+|!MCW=;yOvOa3JW}snK$g3f3I9SF-Tu?T%geo7W$6tiTh~rXXaVP4L zL4hoEqjfR)3bibTNE>4@t}d`drM%3o0?C03E0 z2R1`TP)-dYUHNO-FkcBwrl>0iM(oZWg`;W1ftf>_->RM$71xqZg3O3(AY8*Rwzm!H zy?q$?%>1$R(~nWp?I(H=HQu&6+tbEd1nhG%^DS46LGB~CY8ujUC~+j8u}Xp(+s!G? ztZz8g3QgcX9uTU!)rik%Vd^r{L=j#O16s}z;sz7*4L4{5z9GhW6G$e-yHU z=4+ro4mpC}U$tds$WKyn0{U0OTFLcC@~pWIS3!R?@>(-~Jn}lf-T?9%XpQ_l>rLtJ z%fOr9cs0+PL0$#DA>;_UoY*j%LSD`>8$(`F2qJ%^TdafL9M6l;n?#<6-YD`M^k$Kh z&>KchKyMm3f-dd+84f<5_R+scrKMO8<@jPhzNo|(2l2)0_~J0W_#(deGWuI8F1<=# zOIP|3O~+ls$kZ+B%1DzrcsS4C(O7+0u$@zH2IIFMbR1WG?(GRay~7hU9jK`%(A>`D z4eI}J$zA%6L3IQAx_54gfNs&uLezP>_L-+%>W|eSAIH+mW8E{rfg?LmhjsNfGVvmY zdLR~kbY#mR@@eQd0}@n!$`@m5EQ|!Ad5Kq2`qKi04|M;e@9>doO6jjFf?@pCi@_ih zX#ivpT9XVl5g5Ir*00A0%m+4Rlc>p)V0L6v%c0Ms?>4w`ZQpWaipd+@HmdeT7{qNi zU!eRW7&8eLis%Rrflyghhq=&tb;58E0fjPZSe;#*zzOp`)U7`$7@GHi&$%-JcFSx> z^=(2|eSd`-Kd*P8VVm4V0BR^1Uk!u6lSg}d*9aH(3#J(D2@}_0(T9Ow7ycJ}+$pW6 zh5mn|eo||8YK7n~*xVZT_y3FK;z33C|110WgTDW)l*{F`|Nki{5d#e4M0gWl#i*>A zgL1K4P6ivpxZwWCmNM#~Q$WUWgh7g8d0s^t_2K4_`PV$JqM|#YZmy&ctNQfXK=pLN z#B~hS)ON17|lz9iXUM5 z!X>AlSxKQ*{GOYtflb5pSFt1-z3jBr8!-C-$W%M{ zJCld2diSh#*##WYt~R^xq4gH3&G+z^dh;|7wZC4pYn=|X+D85SqEWBmy?XPcad}#A zzJoV7uh~KeuA>_xXtxElU<6gKbqMUd);>AIpVc?@M!ow!Z@jH{n}qqc)rKluRNLM9 z$z`M3hKtMgMXOW8$-1)T|bwv<(`jxr3{-8r`Dx zs`&4uTW>Wm5?a+ji?bZ;(21MoWRc8;I4R$s}StI8FU2rIb=iDW#NBN-3q3Qc5YMlu}A5rIb=iDW#NB VN-3q3Qc8bj`VV=+WO@Kl008V6sLlWY literal 0 HcmV?d00001 diff --git a/files/hpc-intro-data.tar.gz b/files/hpc-intro-data.tar.gz deleted file mode 100644 index 8ccc37cf1856388b61c1cf217eb44eb30d63978c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36535 zcmV({K+?Y-iwFSe!x>-z1MR&_lXb~?oyT$giZ3V*g?8RA135wtYiK#bLpeNv!3F`F z1Z1FT$r1kXUe8)tO^Kk*mW*zOps$EvcXd^rv(Mg{Z@!mx`PIMu#m|2Io9}-2+n@dA z&%gWmfAeQQ##hnTo|AvpJtForul}#!efvjP z5h6`_bFf3YfZtywV-DDuV3BE9t$f{#VYrx*{DEWhz*lJ@muYkKVAH zqCen;@0#PT)0bZPV|>%voV5?VEWh0ESxc9Db>$zO&sOzsMITOYI$EFl0Bx z+OCH`T(jRXuda#@w;cU@n`?J|x@Vp39JYjhc7v$Hylm%!x&py%JQcls zD|_t1BfD$q5$V>h&DGAUr>mQ;3--g;r_iI&WpV{oue!8S^iMt~cT=B7_onf#XXts> z+I4%cUa31e^iqc}NgumtS&UP78$InhwaWGxeqE&xyyZS#L|u#R^sATWgLbcItqs4c zZ=TlGUHdJYc6V9j^1FU&x<&iw-f3lM*%pqZIsMbpwAk9=sH%QxJ+Z6X+IqYnvmSZe<8`mg%%_h{MjsI)T9UbEHXwcV?pPUm6jDwgiiJf|K% zr|TkWN-$Xy&~?6$P-C(BUpp}V+ypZ;mTdNf+g+6&h!zv{COEsNzRdG6Bk^@14M z9m{vvx8kgN@7X%M^M`u+Y@e+^<}vFhx-m^_kq>?p85GTRUwJlVj-j2c-KX!$u3J~3 z8$A4*dds=4zH<8&Mzki_WPGcQuWi?KTZTS&(Gwm;4|4IcdifgCQEq~-vq@wg$l)+HGS##{W-O)%G5*f3e`I(^h_dHmZUzK zLc#D-Xxc4(Y7=Mm>R8nNB6M*7bUx}r^?@e0PYZLjrafEz!!@QB(ACYnOV`JfFSCwY zKNsAXe7NPcTPHX3=rT9AOy6!+oB}8hjbvP>^7iX0lxvgLqHS7zNw;ZTb5FUSe9)_h zTKuxSw=PTJ)sLy@f)s5P;)`#-0)sY}K2m#eujwKbw^>Cz^`aoiisV-tQMdBPEY+#Q zMVo#!KkhLWmv{Xp+8$cD+=t@$q^sB46T|%US*E&yxA~rNY1(8}tCL^W7VJaU&W*qN zBYmRW? 
zmbI={zwMXKLf-n|x^@Q*9bEe0+5%ciT-2VrTo&NqUA4d!zZZu9A6#djPB^W(!fB}u zspC=4QLn3GqfNycCd;>C)5Q78c65o3~VW<4zwA5anz+B!*sPDH1#M zXva@2vUcb2*-mV1lT$}8z$+b{YkJfQalUrqu$C$j*dd%MZ|V((Hj%DJx4(Nk==;J^wsYjwJ9M{nB07v-Z6bY)+nb{yQJk38kMD!_Z`ae+ z4)H?P8a`|Xt#U6)8B4&jGHKjB}v*_A6HLfP42*tD9VX zL@Rjl)6M0M{Z8;YS--5M=0LwlB6RL194&T?PDVWh?x@Zzg&Qs5eY`%2B3db4+Im+d zIP9NsyhHSk3T@t7m2*=EsqR{~NOv;Sm$yXKv7{Y89ouyVad2^(djDwuD(U0HlTU7L zp;2!!THgRQ{E3cdSkh+VL zC(P;4^dE9M*IV>5Ecxu$ zPTU+}`Zq-ZjtsB)!Q#|)bzWABTpPKtL!Asy4qJ{BuVhxt(CaF-ZVu7pOWcX-qmwgaU04R}jVpNYdx~^dNtco)f0LslWZBE2= z<@z+dqZ2^ItnJrX(-CEPRq9l%%IlPFI%XYe0U83{=1TIkLdk~tZt~IZOa7JX3{rnR z7>7kgVVx?M!097iA4=QraH8t`-y9ZtSua|x`%B`ok}m1vDXCXDE53H!86E3OpTK*n zE`<+{Kd0h~@_Om5rBVHG{c!ZlL;KaQrMFd3Qx@yghsQw@%`SCR&|1`CqVvrMqawT_ zzLsnPG|^5AzH?wSl0BVv(@S2rk77ovdQ3lh9fB&{w1xebdJ|=0MKw%KGNIoj$~yMc z&RMPGU3q}2qOOIo$cOzD^*kF9TFcSjDvi}asT*+;`*ns8T=cBHlm{8Oa;oJ=pli~j z>TD9eRbHXZLQcAlKdkPuHsZ_@)GbOBbn?y#w|y@7(_ODy1xPP*-EG}N1@wFx?yVlr zI4o@CW~WmTI)-S`D&8tx(lUKVrE*E_h(vI0xlMAr{o(X3r*n$ce5+`n_@ElsD^Ka# z;$**B6v~jue)>#t#F3#jr1g~)Q7_ST=khqT^|(Spw1c4HuOf<`my1H3+&U9oC-bP3 zy6aHTj(QbNqbAj@aayG(sdpxiW}kR3u=l;*_4pLmRmwP)DhbpPlhCCfc4~^HaPF(u z-)Rvy62q)BZMUwgygnAl-IPP92=W=C56+3Lt-|l>ZYXM$MHrzj)&i*a9eITukQJe4 z>TGQvG9wOg-%q`$YAjj?{7^ZBj$P_cr;|G6Rk7$XZfA@-gtP{=U|0^m@Vj-gaY#=} zHP+$caM!Ib?7w8NIy3czF8Ppa09|e!T4~;V`h&urN%ul$QQ<&T%%DUfO&_c|XWyOt zMO#>B;Kx68Pv)om`p5f!fA!nn{{Fk4fA{HFW>#@+b_TS^%acC`v12-{||oti=Y4XUw`@jm;T+azx&mf@BZCy|Lz^$@BaH=fA^(+{PNcyAHVMwU)sm7zWw=M>TSRDFMmFMR)dtz7-%Z-4Xa|KrPl z`{ghG>)OTt`CmV1|NrH$fBDP*AMf#x?f=>9-{1eE@2~s+r}+9Y_CL$zAKU)^ogc9K zKmOSle)ZemefyhVe9Hy#>$xai`}h3(+rRnkm;dwI-~ImAzx~ZW|MI`-fqwbjx8MEk z?|$>;=U;yJ^WXgCZ~x}|&-1%4f9}818-Dxc*MF&Z{IxFV&-1DN{4eUCfB6^xLjP=k zu21&&KG#3^ci;Z?@4x-K-znex#sBm#|Mh?WKmHF~`9Jl8{efRUaQ@eng8p|u+WfEX zzd!#gbA8?aKgAdQhozE=fyeX=O;y{Rs{YGLTY9M?Vvyp_27PlHNK|@hqc2q_ccy2w zRGh1jQ=wE?~D z?&qT7aL;KUl_Yb`N=3C*EOixu={3;2RR!a|EuXGV6;OM2w@0;C4!7nqb)%(BPwX;z zIxaKE)lI9T&b068V5m-Fj!Pw}Ev%C6xHr^QB~q_a&ieN2W9tE=ioZ+GaSRp4C$-~I zSyX0gce}u%iXQGp)^9G21y%LOYe%0&513wx2cD{>eM@xfT(B|*TU1=`kE>*!)caZ< zp6qlc%gkK4z&mI^Pu8aUPL%L;Ia|6WJhVRepSup1nWj>V!b0OsRF}8r!m`%cC=FG= zw)A7l?M!RBn@yDdXwLsHzQub$BvY2h_8dO_PKlDlC#Os5Vx0jwwNq4$zt;<4x z(98SM@#6{4jI?NTs{Xwm_2?FE=6ZLlCf7>qEd9cC+AUlazl(QuJ5b9}Yt*~WBR9RP zZcUrsQ=4C1Q}4O`*;LkRVQTv{+UJd~Btr(bYFunfQ=!c; znawUH{?#|OH#dJ5J!86A?!BGNTrx`54%_;1oo+U7Cc3(f`WSAL7n%=xm-l{b=S4rt zJ)}eTPpw z&Z!sex%bRl_veI<3DjiTkWjQc{d;$Q6os_D4`b$kuL~+TzDE2&)P>)G;q07CU ztPsX@>v!M{`IS-eKdYxks9|016tj!s(%p z){g=(&20X*(naHY-(BXadb}mw-(CT$xFMGTzgvP`7kaW1n=Y9Dy4}apve@*|bC>7< z>NwojO{K!fSm7^FfvqT@yP&hl%Uz4+b_GlwmY(7&W_6G??!&UMbgy;nauWy&oSiz| z?#ftH_mEj*8mqcgPr~0cq@78O-q!;Ig0aHe+?k0huy^ZNY=6YQK1 zjtt()<(%2?JYd4EFsHlhfi6vCZLvW!lxth$ov?%rTJl@z!Wd?`x_5eytV*;M7$0zU9ozW6zi2zd2BIu0jcMh(?nV}E8{Lsq+ z=+f#NRq1rpYZPwY<_=WG5ZpTj6Rp3YVBWp2$r+nLLfOe$%M5_ea+Y)H!80Gdezr%y zr!cwJF!q7F&$H9gNQ9j_p|#SSmJOEjCLO5caAzqa)lGaUh3<}?6+LDanoep304b0_lkhC%D5PTOV+N4_qG&OdDoW0xszSNvjV$um{^h z*Xd#CI80_;M|n+ily#@qKA*QiXgiZG(iCm1l6jRjrdFOZ#DHyd$PPepS}#G+=%SZ) zWcNiL_DL-Ok{rOu?u=|Ynfo^1{j$=sC$DAz6l_p;$kEa4i_n{Pet$a}>#)+Y7$mpO z3U2ek!*Bvrp+2kj3#mWjcx4Q4a0MLVQYG9h@^09OK1fe30gd51n3@FpKh z4BnaMWh}Iq`_x&PZ<8xCAW;~}Ak`8Eu;hVJahsR=lP=b-oT^vQ$Wb3pkxHZj};fM^Jv zjuWMw>&@$m2VS%5A^Oy$gqRMHNm+`g%PL)Thq;b9IR(M-I1N9BFs%1ly&BrITPEa1 zw@vTe{npH8K=|n6to*j#n3u1hL!ABqoGMP@xI7i8x8z{d8v-d&dRQLJt5bmtT8=(8 z29JcfbdumjB+|#Po-^sZo}By5b2`NgwF(jllHL3VpZ3nv|DT6 z8L8?E~_320J*Vqm@cUHvvzOP}iIlS_s<8 zy=7trBv1+Q448*PZDix>^)Emzr61LpDn_EJndkCwmE%fTnClDRbXr%sPo61u?PlHW zVaVYjjws*jD>)S#mMa_H)QvA495d)Ats6b$;;e4^qyQQ0lT!(RvQAQXmxX{srh-{Z 
zZM5|FI-+xuGFo(Mm+}nkfgVVI>wp5_C5^w|4ZrG{qeqjIR5|qc>TXI{{a7A+SmTyPw_?e zqbyS=77;h?2Qp@WMFf&Yu$&J&r$bRSR;RejiMkC4bq{HC7Jx)|syN-;;Y4$EAcNW@ zLjj=BxB<&PlY!Fs)6ra8+A37H^mA_No&r*{4MP>2C(JfMd^oGr;i}k9U%}|04GM!y z6NIJC7ZQTyk{-Mu(VIlsnKmb}#+a>r}a$D&QB?1dVRf@2y9&cAmWvVA3uT!25$46+bVp%USe>jQ+S z6qz#1{X^r5_1f)qrO4WQx+M$vYAZ=Um2*?=7q@XJhyj4A{&;}9#;I`KbEyV1>W9Z4 zx&m0&;X8t&(`lnp_ zO1~`K1|@rX@P1jnx2}Ebbe;zCX_0o;;<@>lI*RrqQn1`q9_q#$VAHRcknwFwu=EDB zu3Fkcdc#I@bM=rX((@gG)z?wEx%>_<<%X(T8~$B=&cfMQ+_zP_O1)JA7=seRwz|Q0 zmVJPsvm@xR!$nZNczj8Rf{?~+bsvCKP6v7R@z8VBy#e-|^`m=GDX{z=G`_S|bP^jG zb0={46Y!Gi=nlYG>AssXTD_$Fi~v52In<5R^^G6`0YK}&rB6jKPHBlMVc#LD4y{Vg zZ!gnf(#7^t)LPOSQNgAq?Pe4E>CT{mbe{_+x%MNMr%w&7=B(6QR5T0@l7Q2sQly!r z=WqZ>iO8Na%eY#!Ku8l>$i8WogjsN%uNWP~BGIa?y;1DG#G}#2+|F%*IPpQ+2^?SL zD%Ir}<|NT=;c2Gs1N5fj4&IV;Lw2>dsw)Kk1PVZR|BKs2`I(wtyA0HN2hK#Us#LR^ z!;nOmROrx3dmVk1KH9D`{DDbFkj>kjWiEpwxyTxvKY8u=M86;MY`yXgrfaj3yYFi1 z6u3T6KAdkaMj%=#UP#LSMyDhSG*1sn`7wK)s|qngxE9aOQ~=|=V! zO4l?awXe7$1uy?HhtFzcukNfeF1qhbFmMwPd=_kZlr}mLZZom9wAo1Id6FH_HN=YM zG1PJkd8OX%)Y$u`@M|y~W?HuFTKFrpRJ?u%B?hNd$wJc`Dy}GGPdB>s(dmgEH`B}r zqko+Ba2Yz#n(#i=0&M*2yAAHb0ZhGaa)7>t2B;O`1Q~3lmVdg$^h#(KbkzcZv@(F2 zhhZ3-d~^KheH|Y3g4#9OqFNV@L44>Pe7Dwig{Gn>7vfZ9QV4D7ylR7NLrKB%uhS9P zN))K#xfPT@ipQBz(et0M+!QTcxafipF*k!f@Rr3pEks3KtMU{276UZlWj|!yvYfT{6nvbMnH&*%bv?9xeUMBlX;7_fWMvcO`wd1v z<2I7FbS7wN>y)FX-9S9+DVPE0YX!Eet+3MABZ#wcdM2!jx13|XJPvmq$hAHa6NqHf z+k6FsmiE|6z}#kotVd!sQhb9{=kg#$@wZSDuX$qWYUor08M|7zh?iD$FHR%$he3x1 zu?oiL+$#D#je=K)6+}@#oL)U|2vk*CG;s?RpNykaAJ-Pr2trLWM?rh<3f@!g5e9SbG z{C%@{Iv@7HETXZ+V}|>s&Gtqw460xrBv*yvQT4#G@O2v$+VtufKry~f9fVH8m+ zF^(JB3oKSGSksY`^}i5GF+p6Jy=Qs27CiicZ?{(CrY6k12e>tR;iTILlMS9gXdPYa z&W*055AaWy`>njLUQ>GtzS0ea7~+~9x-uEZ87&95V6NMzm1pc&njXldbqP(R_Z(_; z{PY2;``X$E{5DYP^tBahsxuCsv&?bzm59(NM-B_oL6$U=Bq&7J3DZW&+hl&MNST+_|Ja8Ad3i6^;-qGgh1s-;a!&;3-=VrExKzb8#Lg$Ny6Zci=_kV>b- zvAR=gjWAu77CJw@?ql;(@>B`=_D<-X&879|*uKF11#k1VlR^ zBWYG@1#^6F!nyYijB@k4KiuflD=)wk*;US)$#C7ws+>Y3|9&H_BHG;WQ=IH62~#v& zS!?v>2wPgjsp9mqB=ZnS(+Dga=AIgHP%GxruS|*1;oNDl2B^R;h1sWw?^YlKdb7zh z4)k1{4zR%;S`Z30JU?fr(~oBM4A}=~kV58sygA-Oi!Mod+t9T2JI=cngbs+B$9W5f zI^Dla9Teh$h3Fn9h|&1fT08;QkercZn%Whr1tpRaX}>R_f0-s-x_g<*WuRsxC^Pay z4&ausELEz|EzEQXdNu%p1aPoOrgW5QX1uoNq@R1;YNVR%j9+Jn7;w6mnLj7@iL9b# z_NSEwl4SLa?JE2LohvN`CL|eZ0`}ZLb(7{t{QBYezy9O(|7QI|{NGpn?@#i@iJ?OW zrfX|qsk?Ndo$hWufEiNKbPg2Ap-9r^OC7Noq_J<(kf*@|z9W-J=!p~yQT;kO`lXBRKHZ0D+ok;+@k5Ab!knTt$E{ zU~o?Z!7l{vO3A{ULCc_F0yi*7JpySb&EJ_VG2ZCsvvH)bPolMtL(T05T2V&79F+6o zc&<+8&)~Y#UcCk5Q9-W<{YbUV8T4dJE{6n#eK>=KV;WrmCCa8%=~*cA-PdhbIv%QE z%C-grAGXmznC>4aD?xk2oL$a3x}#}DB7nd!C+z~Vgibhie%B}#m(yNf9#d9C07BYz z#;lV!Wm)4L4htJgh9*PjIhl;tpml+b@+sA5J(5e6Fw_cbiyY9W2~@B~ zVdGB1U}VIszh-~~(;t143~+Qz++(1ruMbF}vb7Z?-Ud24Nnf%C8HpYLw(~}oxt?pE z(WO|XZC4);aOF7-qf()2O`i)Y{(T20B*^JDiQ9&Z=l2fz4(t(}kt+)Ba8ccQU&A5gBO|?J(b;30S#P~ct zj*vKMr|THcNE3KT zkp0nZ z&k=zO(gB&QmDNgsswJ@428yf$)Zkk!oWt}MJ6a=XVAe>fimdn zBjupgUB{dBG$P`7SW$ndSpfNTJ&7&8+keT>5%yAF$uT7#qBX9Ics z07p~b;oEdW)(E&=&vgM7>*-E3

y?S!D+VjmqhEPk;y7BwW*&Pm<|6?>~=)tS8FU z%A-9n*|k&fd1)|{fP~w2HodM5SW2eyN-PG4x@(%jVs6zG>i4?j8G3X*t6}&#^z0I# z`oL4^X%n)@NOaU>Ov!|yA!L&-`l9EoA96~5WU8ZR0W!cB1xEWAwv`qoogzgWg^OU` z5EKIa)!i?y?n-UiVa%XV=1|lJz@^8k&8Aqv?8cvNGT2hKV*~=p$^Z-9(>yYks}sgJ z?~!0b$jTpm@ktx3>FAy??4!YciPh%51KU5`?q%ebGMk`#4<{6tTQcJG+iSt@U?QsC zfFVrAMT1{OBg>(Vcv)yU1b)OIw^FbAr`q3h?CqLNACwL)t7ny5E`mJ_6f%ziXUF{5 z9ZKLSgR(t=@Ki(kx0j9oNZtI+=oQ^z+F89z-e;Ngh7l#$k(tc-TOm{O0LPJ^#m}Pg1myb=q z<`Yt;gax#0(oi}sh3;m=iY8PTy0ycT64JNo!LU2s^g%&uM&Zu=F+FvMd<8(&#q8<` zh+Q`T!L>Paqr1b*_jEm^ZxM57hq(f>l~kBOWoqhvP>(=!p;pw7UT6*syn^8kRw3I7E(0dUv3o#0zd z-Ebldk7q^#L>IYPtoIGu7P#<|GvzW+&XdV`zet1-CCo5jKyCuA74GAx_lKfMFNlYpLN-&;W`$-=u}{4r%B+hzny+WpE`$o%Kt!L#ENZBt92; zxjiiwp-7=*@ClRMNa=9lPSb#8a{{c_C^qM7hRlIh7Z`)_`cTtaL_pbn8Z4TAlnXa~ zLiQmeL6F0oXAm(InvcT)>j3dLM)#}^s4V9BJjI<340peR%QS~=;Y}Bdg+a*E_d?Q6 zB|?9&Lntx_$wS9pI`d>xX%a$IsvvzIa++L{_f4`6D5L@z3;~Fpr)t&{IL~UdQY^m| zpd{T?#&h2P6HyYCuoQ*;#jhp#8piK2911*PmU&0Qol0>%rR$B{&KA6eQqRKrZ~L_5goy|F8a7_&@kGe}Mnf zQ~28d`zgL4wiv6t`+iD}6zMv^jlU}#2D3tcD-T^^(iH?3LjXN@IcYk8D+4X9Anx;9 zQ40nZ8L>MiKvw3b0HeLoe_jB(g3_ zSAHEGRPc`G@hN=a0xvgwqWEjrwT>OYW-Z&Ds)2?5+^x`fse6C>t^ z2Ffo;T?*q4!_=Rk+-QIB;m}gjakMh4tQ5cWy>3s@HX$CGBgy+#s~|O^o3jZ1I}NUt ze2zRP`kYR+!BCc3EeTs@vc`vU05Zk=g}Uj|tRK*gQS0YAj$fRCp_ClVde5DXjBK1vs-Gswl!@vXTLr~uN&3sn&r zG!B)K_7<|5bbvX3^VDf1)W?S@)&ryC>LUp30(iyhasafZ&L%xxA6`(kH#4LXf~t-~ z&+17B0Bj(+l?E1#Eg-DQ&O$-cbD>XAl3~I2K{%O7kQaYvA~ek@f_)9&ZEpj9_Hh}Z z>-Pk4f@@)JXaTDNe?hDAu}hU9>+l*#<@uh0Ep4V!ky0&Gy+Ch9LKLF|xMzWyIJb5^P>26!O-0nt7goq6zJ?2#Tk2x0erq;zIj7fKMQS3LjDa zMT(Z0yGwPbA0m9oe(XWKVi8$H-;!HiFTfH-eBJpb!xTfeeTeD*&|>)BU*Bb73kOZ6Q(f`kpQ?Jk#UQ<>e0A7Yz&Q| zK4rHqLwT=x<=mzQzNx~n@`?o-rDqqFf?|~luqQkifH=qd@8kQ{KDBL1Up71dkmZ2f z82g*R+`8 z0=c4}%K}o;0{lGeC1SvA=_4n-UqB(M^_)NEH<2R&z7gd}tp?y5kr@f-5dgIj2`nAJ z@OqS#5C^@?5xfN%5V8gHVY`TwAQAg~j4mSJDiM)o;$)y`j>E=j($lZMjF5QcPq0*h zeYIA7GRo{KD)?$18yHRC=81mb!x$NiOl8nE1cdc^BfuZQ;5iRF=&m3}P}5@p(m}&0 zZ40Z300Rq!qJoL=l+48(ff}CVo3nBf_O~Dua$pYh7nQpE+h`Sk1ZV{PE4{CITr3<3UEkdPeda(!-$*WTZSjzQX_K`Jt1`Rt?shD1?i}r+n~XW{I%t#0df( zCXr}Zjh5|out<0^Fi?1{Q12nI2W5)z(}FU_0K*mY)g0hen{^Ca$<4GGey>88r5A1l zL6C$YmCKgh2-qVto{7~!m=h)a%zwd_5#pKudQme{-g^Yw8U^-=dPPcK1~(OLf=K&s zS_}0Fk1#bu!M7Nxt4IyT^LVmE3b8DE;fPpl8-YTwb1!7$;Q&RbWdIPiq74fZ-;Z-f z2KBT;VDRnqnV1Z+?RQkV(eq$<5@v`p5D-G;0RT{$Z5F@D8fhSkYIaejt~kiah8=4K zafsECq+y;}tz{oXviUg^_;0XB5UT;yJ^6fwUNmxxab@m7cVcaJcIWPmKz!ka&*F)z z_m%!DNp-s7tTu4eky~M`h@er8Tm$O@Hh5wUhZZxu-9Fje!^q;}yG{GtDF56E`ku#v z9zL~=o+}3CaYg9mus~~logCiJNqph7O3>FHYU}m{Nj8gs}hw^e|PJ?3&t{_G5|2e>e)7DfDK-eNu zc$J%NOqDRcfu%ze_xfiOu^$e1CqtHUB0Ju(9oigwj3XQh+;qoHCSY%7g2+n6P=)dc zjBun6=J43CJ_Qpw(RreCDEUosRQlNJ!ZbZVQ7e%I`BW-vfhvfJk#g zY0~PlG;_yH^Me zub2X8e^O&sw81LcB8O_5WQ=@074xLYq_A;~I3ff>zfPTe(!m=`bKj9}ZYg_=^H8^%- zw9CyDp8*RLBQ2;1Fp&5%(Cs;*{P8FAxD_Idc5$PSOleE;0;XaHIJ><&%j*w8_+ni6 zsi@P<1tncsJSqG&?p&X=6KuXlFcNrdO++WW0>BnR=BhJTaV1NXZCD6}y;rRvv(vMh z_>FEf6l#nY&QDyVd{J&Yr5ou1D4IWw&uIAerLa_zG8 z20)EPH%L?B+QN2f2+W1Y$=s5ul+$BSCft^u!3WkkSeIPOV?q2-edSTMjkK&~6{!o9 zi|#`~Y)}uyZBgecFr+ZVddf!dB}YB+ae@pA4j@??$<%;+{Fy3SXoSF&8U^JY3`xQ< zZNNM08_iYbUl*=QGCcT)u)bfH17;Z7U>VdYX@Lk_V)FooQQMHdsgd(~K_CWUe}ImJ z#er$M3f2K=r6IKX;el!h@h}tKvGi?Vs7w7|cPYRuf)G;oHxEf?U)+#VRpKKpaess#?rFPzM z&B}p3n1PPm;47v(d@uTL2u5sFi_H`;X1m7NgTh$xmv(AK&<&ZI+^?ZKtwIaNZzQT{ zxmBQHKDgN|gvIvQeFcyf*KZOu#H@YN7gA6#W$@gm)~7$h$Ri{BX1OLiaz%?pFY2WH zW?<1gwSO@ku{nywOMY+h8k0eTIDuRVYB3$qi_D~U>0Zxz#p{6iYTEXLnc}h2m!1k0 z9R=?5JUW&n)Z&kImbq5**0;2`@m^226El!vSw0t_*9HeV!AkUy?y=faTI6^#QTAt3 zxbF@I<>|K4RGv>X2a%j%8BWIn^kDA%p72P&aNJKuW`SVFS>L~LYvKS6jS&hsQTqSU zH^3T>{lLJ&54c(oYKO0n@J^ytqzok5 
zA{Yr!f<2U_Qkyl7AeKfM4Pq-EIsU^&c33_KWSIQvtRA0pmdnSo!aUjXu|fFI5s-URyp}F9F#Xk z6C!D*RVW8&(>yhruxG#f0u+%kOt@N?kY8*u>97NU7w10s87HYa(^zNQ;QOqf>^C9(LCqsX zIl#HjIP*CvmF&ryXF(0Lty~_K3GLaMA=wM!M#0piR|lP4)N+ECyLO-JR&J=){RBa^ zJ-4-R=d4hEBQ{#inn|5u;a%$OK*xxn)%H38mhWzbWWcr^_Z_-;HCzy9LBnudUy-8! zMHL}tqX}B!V-;XC@ks>Dfd%2Y<_nGnzy7MA~9eoHeu8N&LxHAun)M0kN`LRPZI(3pP>Ks}Xgz;?RY!)u>*7G9_wyX(a@E2Aln93Y*eqzghr*MJwSXl z<{k5cp7AoYzs<3<9-4{qtM8(PSxCZ|=2iC&X;m@+H`)r*Ic8na41xYn1>$;n;L|+7 z3%Hgdhb*=$SDHIj}*^T`=n;jca zfikNsyvoh7>pm7kTZWIlni<|K>ou<7=Q%5NHlt7)eF!71OMki_1LP@q=Sa;L0=aT> z^jTvsivSl;>61n*=lLx!52X|7Etl$=3W?@hKL<(_M5DL zFuqA`MpA!e8%nc6Jz-|2?1}P>kJhcvSv?*CE?Zc~=Sdp5N3b%ZjsG+)@`$!s=n}!T zq(-y=aO|!oEVyGFJ zUK~7-BkSo4kt(h7@LJ?Mj4W3w1iw%V_@x_I^e=72_m zrqrl+H}qp_{q&l}lNNeU+T2LBLBxq`I?Wsl_4jCD06WI$H7#bY;7Pg|ib8gZSc4zB zat~8el3`X9cHuX)tR4)gA=-6Mw|Pd4kuX?2HNsi6T>VH{O`j zQ|^24J^3uk6haHW+rpze*2BlLy)cKJqB%T9Nn&uH2f0v$R~b03;>Fy_oUX+deZSCv zqS{zd58~M_6DBiU$AbdkrBPE1{j^D*@G|f}9s=5e;X~4d+EfD`VOj|o9pMqVRlCFj z-U{fE`Y?1D11)JK)n^Z&iy?u-B;X?@`%Qi3w?)9gEyQqGXsS_JA6l>BwgM6HnA5*r z@pMGwq#0U`9yk(_pOSN4c&N>$<{(?Wk5Qy;Jhx@O&+nM(9*HdagGkq zi@U|U1Z^dnDbKJOkLQVkSLR}s*bjmejoU`^Z}{KMfkK3YadSlGt;YgLp`{q0;-rup zu6uw#nL!h|Mw>O$2VGBKgO0+k5qJWI4~!w?bh{PP|L-qh?zSP|%q#&!fiMM8a9tDKwCO3>&LgQ7FyY_-%Gn3E`V<^ z{b^`J8Fe*iJ`0C-594Th^tjgYL7TO`8rT6NzmW0cAmcPdV9Jv6A+uZ?57Ako^a3iR zswkB2hKRM|H9%si#9DaXQTUgz@*|QBI07Q^aGQn!npIzGNUzrUwzk}d{ii{j z)pUlb$RKp@2wzhpA4;|i=8$}G))svyif>&?5qB2T>2@CIU>^>2toTkxZL^4< zgEODOTQg~7$DVk+!7ChXwzQ9j;l?_uX7gcMhkZYG&jE|A43M1p*7bR!G7*Sp`ilpT zuN_r?ZP|d0(ThZDu|#gbVLLcZUhf)azaLY!8@9fBhEl*6hcR2MQ_#LZ>kV~r7p-Ri z!w8)ac4uw|*)8(j=?F7?h4klU43ih^-%F@DXbWXm8NauU!$o@3reDxr=LuRL3y?k_ z!DU1Rsu`qGet50^g*H`YcBmOKn@x8X1dn#>6Z7}5avQ$n54lhQhO_W2=&?6KI@b9R zJfe!;{}asvs(1ma$6UD4DPtt-$%hG4wA<5c)dND0Z@Th8t=}zUXNJfVTHZg5DGbEH zu8$m^ru)HI4B-=?DkG>!pwr^(9?&hfHwN$~*rVCJ6^JYY{MeBLSpahIfb(O2C%|HrX1tlY5)pZJ4$A@sfmdVaAEkMejlfl{HbFe|8C{ipo3k%gCKW$u?!HK zQDg4cgWdfko-r`6lC6g)_!(aXykt-cG`k9gcjQo>lex~@y6w6SZyE#ym&xL^;vvAf;c=ojwk zpJvDJKmPim{XZohKc4>A{zLt*^_BnklYCiH05nDIk1bE_!qIb{P1!ixLc6g50C>f; zC|EqaHE{^W}`!U zhYn};(hqk(7`rmZx1*TK92oJL=tFO?&r%#o4Kbty*bya!YkCw!y3hCpyXF&JWqG^S-qjGY>e8nt(U=zr^N6a4ecfkBqZqAgkFgqStS>aW8O_$(vm#|5d zvN9ZYz#g_+%S~W(gw;<5nn3>O%dK2R!Tryt%u5D<-gwgM`-|Gb1bGx@05jliBTtvc zpa&HDV>`(epa&c_9!(fTAI>|&X@wq6*L11L8p-|*?evOav>h8Jx<|-d?o9Fz&U**j zy5yNazeu8LMuffFqi5yW46f*`w`#k!@G-%PT+njKhyvPDS=ep%P+K&&P__on^(0I1 zU35;Eta+kSZ$AMq>E$K9Zg@s@JHsd643&&N;%3asE^GGJ!F0(dF$(ZVO(g4dDuw^5 z8^z@J=&43Zph5MZa&^f8H;FvLq#WLJ13F=EJLd#U%E*5OYC^J#a9*@H7I`hg!ojS- zN9@%q1>Ai&g(Ma=u6~z!@s4&3C?kZkR?W6F}5B080jbd zMg|PigUEu>>>ci?zm6n{H20>&-2|3eGwmW^x^A)<+(3sHrn6v&ocGhsK|QIs%)_P4 zism_P2m?*Mp(nV$v{5+g^wEgk}&Q1znhdJP0%~oN!`1n|BHx$XDmV^ew2_+oArMDmo~n@bR&XUG{{rP^HT_}7n12?x6=Zr z2#WyxF~}ZPbfmQz{&P5nEVK~J)D|yR$ew0<)n`(IFCml0XJ?>Y&9m{z{ZZZwtUL!F z^eZha2UXtFMLY(Wa)t;C_v{)S2Q?Hy?PCI@H?OO}V<8q=t=#PlXg2s}#z5P3;3DkW zJh!GgK>{B>YK$vElEP*_ujvOBynae7Tx4)8l|zU_EeX-D@Ib+s!F76MZ}2M&Nz*SW z$4KWMH0rYKAci|xb%X)L>LSeph?NH5gMS0JLnx-kEXkT&6M4DZqV1btZ)@}P8&Ihm zxhFd!i~tcEUMOZhMc4shC5pzfVqA;sJTR_UU2vuy`q9q`7(NJw9OaY5kCPCHSvMss zdU^J7rIC*l9UH?|eU_$Y%9=EY*{D5W6Q*FaoBMViOb;?<{WJhkC}FQxuQ@Us8~p63 z3?i9}A7ZEm(*{#L11$?~E;O9tEW{xjGsvh_IYgtk^E~fxn?|DaWyoE|BAYz$H=%xq?a=$sw*16R~nQFF5n z!r}WFqpc{#vrSeW4lSE%gL@BNfiEpI(*3&(R!@865xAUKde#T{hrZq1x=!x`*CcD2 zR_LBqj=2bzdMV+J2x2j`Y!C_y5@%-l1!yYfpx(tX+M;)pmN7mY`xzSHgfqcJ_M3QS zwub4*2~vv0jKRZ*9nlU3&zIR6gX8tRAo^NVQ|mE*$%J54`wpUuh1b>_WOI8uOzfmr zN+%F_6^F=+qgFp6$e->Du_(}z-AR6^-t9i6UarQD@0QFEl;Q}U`_EG3#6JX zn!$tCYh0A7M>xx=hl#V8`shIC%+dKt{Z)*SV|Flr(ZyHl7y)Q;f+%eQNw0zMx9B^i 
z?V5cKJIBOm6Gos%JUWt#4(nsT6}vcL+vlKpTmhk!;G@OTxCo_2N)idxft*mJ zP>4s!gAl!#!7QuT@zJc)W58|(PmIbVD_3-{G2pmbFoPm{#1H{Ke+$b*N=z)YySZMcYf{v{S;rQ1VLFtLN|5W zGLY7oVz??0zsPvV$)eSxkQw@Bfpm`*wIdhBt9=e#*}?;&U|vs-d;*K1Cl(M=Iaa$3 z+NQgh;?#&tjTINENR3WsDxwIuq9T6N(mOe=>6)<}XXr(k=H9?|F5`T+YQdoF z5~@oX9&)w$oOBL{A{tp1a-$$&aG4Su!U^Dvj5Vq77?3^_gbAVNGKdSbjuIPO+sVm& zNnH^v4c?K;eMM_w#)G1vAWyCWNQRMNoVl@i9xO(CD5BJa303-ca3=v{xOc(46y=$1pnf~EaRdp7oCV0MixdV)H7bdTSjo-AHq@PQv zrruHja?4PaIjHMKI}Sp4u4yDeGS1kjHs$ zyEte6a6@4XV|lCMnR`;J1YXR{kh$ZK+9w6RdvcoR?k znbRgGy>4qa@IprIP(dO*(84mOw?{EkfDzO;W4QN|p@OE~eu85iCAMSndplwPkuudO z8;!;zRBXlA| zlz_GO1gQ_8X)ucczX7fxvk3**@UKg6_9dweEk~sMa!(xrP~UUu1+sIw&>WA*LxM%P zbFu4&1e#d|7rI3S2d^uPHc&GMrT?fFGH7jN^6|~1JZL<-<>wA7nyyM%voPUoy77oT zEjuZ^e&#Y96lwp$GlkWajFge{0e$pDaj>vquSc*P92H{0QRy-@m`;q7Iy`Z$V40r2 zNhuwV2N%?KzODu%ewZcq*upREM4azn8fg7D1YBSOW}uJIB(2v(utC$N(+vOAtbqwf zYCN*BDUGa*^v)HW&X~lpS;>!qR6q9cMpIwDP} zpkUC(v~$}-q@dXhbcD@nGB3cM`)Rdiik+tpJI%Rc@ggLtvKwxrFHhBCBMk(wfIqq8 z0!JxMrDhju*@N4%lWhMm3)+Z1KNRuwPzM~m7w$01@EzM0J z2v1Ak0@x&21>tyYi2}VBgd;q)IiIzPySJ>okr4({62R=CQ7^Q_T>u$E!L@2<1b>C{ zdc~hFLIXx1H6y$;>>6NJ@}y8sA-|fB4(DkoaERzSpwMmA5C=R26A&1U#rFn^V;pg2 zr^eJRjyNYw5O!%Ao?MZY%i1$f9`^M^mfG6{u}d`Fh5?Y)TqRZXQ}a9C`!Ism$q4t# zof)5CC1L_ROuD8sd~DW?1zleW4!#oH)3$A@jU+ES&Ygy7wh8l2Z%m5J9Sz%SkNS@E zn$y+}79}!vI$iNiUQZ4jOm4$4A?Q`Af79No2_k0ov0+vYb*5u9ue3onP6FVNcj|cZ zFp&Qx^k5pKH0{>A?Zy{^;J9v|B#Hl`bY~KdLpo4yXtzX1Y7mb;cNkV-qyC!4A&<77qo+uai}Lny^_6 z$EV}nIm?Y7CS-LRLSaaES%~fdP5{g+qsw9W zpZQc*&}RW>6>Dfbx2vT9(nq2eVGtY7h(%z*PpL*dSN}xg=e$pT>?krL8xOezv!w_4 zWi)H0fOm|MBC>P*oz^t|Q&#shRTR(}hbstu)N%Cct~w!;j1bcCj-ad=_i zy^xSbaH+WKM#qeYLdF&hRs~97Wf$+WYr)*`N}$-#jZS2wygOF!O99wyj0UbFeN_eq z%uC`1Nv@0~b8K#~RN-m+L?#wu)5SITTmo(ifUx`V8+zs@)<`!J%|F~pd1%@9J!v5) zSrTy{JXWx43|KNiy>?Q+!O?D9b8KPyVG;o%i2e1#I_Ah5eae(;NJBo%e& zWQ|OJQJDyiow*OnjC%O43b{hIX*oWunU=WK=7aGt{)H>x8ku~4JWnfpJ3xsuly{AI zEZIzk4x@l6-l_MAO>nLg$=auSAMA}t7Iq+i+wjL4zzEE;5&|(U1eg1n87Sd}aa>R8 zcA5D#ASHoy!lgjWA$>lkc7>3BakX zA=ZP{bLbJrZl9*2{i$^Je%RL!*8i*R$Ls&Ce*pj2zV`oqiZ3cxMrC*)hV^B`c{uRT z&jT&Jy*@$j6EHJ2DUlaOMj6SG61qj97qy~*sz~Jy%8ibMEljrMHauTs;=C-5IizjDFwm=(b)8X8{ zqz#JRo-{jPdDN)5;ut4e8*P}Km616b%{TKq=HKp;mKS~zK&2S>eYh5{SGauR^-0rU z#<752-Le3BWKT%&;A&yb?h`||f{LP)0GT{==*)HA=cNe*=Q0gHkn@V#>s7@AiH-Zr zT4^*?6!)T{WW{hY&vd(JworA%<{W!vM5=E3w+IbQ%hpAHi#E-rXOvrWfzU0y&;vk6 z1&zMj^P03@Xwsbl`^`AN@ccO5;I>Al+uI*4!B-1fBWOGx;|4f}rA-tJF;`$7v?6GS zP(&M6SAXjE+j@I79cL7@k+CdEfaowRQ18hx(Qbu%2|t{JV<54I@Nt-E3jv<<-0p|r zFUEOL&1^Wp-Rwp`P3wk_e#X-~2Q-9#;s~KyC)NSjO1?oIh(W9SQr0A#^+D*!c0taJ z@(JnJEkotVYaYxgazNGdcF+xIyX=SDv5*Lu_6YY)#^@y5p`xQ?4u@?<(79OLBXl$R zQJjmT#Bc#l+YWGNfhuJ*8y{`7GB-oLjK!GG0bd!G-kVnAm@O<&2JxMZZ7ar#&<5a_ zFoy^-V9XwM(B5m>U%MoIq!U^na{ zhK0)%^X?CKaiys7vOo4Dk1t7VQuz#Fn-w=JTaBwD69vl*7IW3L6q%qu4mQ%8@N&xo zhtej3>=A~~vJw+G$Kbe{9R>;vOx6+cuF);sPe^Xrpty`E&p2{J7fNQ0(l&DI;kq|` z1`a08>3fjNVmx|Y6B`I6;KY^$)K~wye5<+N3brk{L0lWGdyt(wXjvUR<@|X)dR|tK zBtcs!hbP&mE5aP)ri_6k1^1|@djOd=n&W&!E{&Gdcsg0P*!C$ldd#`4fcCI2=Gd;i zKBfFnk)X^D(iByOi5F_*Xd`lNjYwW9iU7L?4+j<)s1-U}ZYeMyBcf~z;&jqbl6bxE zw5;Z^1fGLqG;EF`;#St~7K02}sxp=~Nza{9D{>VddmY{?nygQ;oDL*_yc+duCfx^m z=gleLg{CVubyjtMW#U(3&<%+YVV|)5x5ev+UA`(Rs|)PZJC+a*ShgFa#IUd~D=6|9 z8=Eo{A2WGFrAo`vtxftX;PF-*^+1oE*9zyf!9U~XSUH$1`$YC8L5Lm_=84&zRwwf6 zagv35!~x*K6P?3DkHlB*0ZF+f1W}*>z%%23z$*Yor>T)X{lt=a8&kAV0gl)uKSN82 z#&8L34Sp15f5?-asSOwk*S#PjEsE$>2{wgXCv_5!6X__I<@C!6r-?%ky?YXYo%sTl zs$CWx|5)mtpO(*%fp6|z&&C#?4UUB4$6-xme2b$1K-Pv4=S-1jjz3v~HwrQgGy_tm zcd)HE%Ege7`2elo_DSfZOeRJ%w!|FT-V9L8KCDznasi$=xeAI>&?F1|`btZJ8x{vV za}VwJn>C9tCAJ-Ada)zu#l&U>A3H>+V^p#MmopQq&$-x8^Pc{uO@s*EOYIq9%F5FL+03a(m2m 
[GIT binary patch data omitted]

diff --git a/files/hpc-intro-data.zip b/files/hpc-intro-data.zip
deleted file mode 100644
index 7efd5d6db1695f90a7db5c8096a0494dbf540c90..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 41253
[GIT binary patch data omitted]
zefS&^rJKTd=Rs!Pa>}Ri8kI)n9twVD@ZPq}k~z+IKTw{*5}6X^6@tfVF)^XBJS*!U zw%LHi$DEwvOEjU9>^gfL!`r*jF$B}8c_)Q<1+Zo%VR7lDkIl8=@*5u@-&AjUz8Ey8 zbI$K9_ps*EfFjaLU)_52iu2fW?c-{%_`a@;=TpME_qXwmaxNhhv>q|soHWWb@;f4G z%d(H{A_hEREVYxaF>L@2wtnA7t=IrD=rHjBKVOBxGFrb4ew~N_RKG@70PE5}ht^K! zH*6f-TpVvaFm*HK(b2{RU|xSmec#;^9{|KcVgdlaoz5H0|BF$k3r3axZq&ii#m)ML zlc~9lrH%OwD=!y||8f}rABPNO0|YSike*VcQ`%|bl=P&PmQr80=aQ_yv|KXjLqob>vshjI> z(L{q6>noSthq8Yc1>v~BvqH_|U#94P^B&k(TB3^^F?DyaMFo}UZ$VALM+f&?P=98< zzY4L?3o6bjN3s)&%KA5|Eh_T=?BwrL`MVq_den=$>gRtza%|h-^Hf1Mw5jI`V$t*UVwuW$D##Cg z>Yqg`=us~|IXX{WgZ&HYAJ37{qh4ImIZu5bgEsZ&y#7P_fQ=Hi1^~!WhYKn}$WHzF F=|4f7P%i)g diff --git a/files/jargon.html b/files/jargon.html index b7d03d65..11d35eeb 100644 --- a/files/jargon.html +++ b/files/jargon.html @@ -70,7 +70,7 @@ # Shared Computing Resources .center[ -![An HPC resource (img: [Julian Herzog](https://commons.wikimedia.org/wiki/File:High_Performance_Computing_Center_Stuttgart_HLRS_2015_07_Cray_XC40_Hazel_Hen_IO.jpg))](/fig/HPCCStuttgart_Hazel_Hen_XC40.png) +![An HPC resource (img: [Julian Herzog](https://commons.wikimedia.org/wiki/File:High_Performance_Computing_Center_Stuttgart_HLRS_2015_07_Cray_XC40_Hazel_Hen_IO.jpg))](../fig/HPCCStuttgart_Hazel_Hen_XC40.png) ] From 8a6f91b832ce6f4e02c37e0e899343d5b7580667 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 19 Jan 2023 14:10:49 -0500 Subject: [PATCH 03/25] Substitute Amdahl tarball for lesson files --- _episodes/15-transferring-files.md | 178 +++++++++++++++++------------ files/goostats.py | 136 ---------------------- files/hpc-intro-code.tar.gz | Bin 3391 -> 0 bytes 3 files changed, 106 insertions(+), 208 deletions(-) delete mode 100755 files/goostats.py delete mode 100644 files/hpc-intro-code.tar.gz diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 5856e2dc..563a2c27 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -24,32 +24,49 @@ terminal and in GitBash. Any file that can be downloaded in your web browser through a direct link can be downloaded using `curl` or `wget`. This is a quick way to download datasets or source code. The syntax for these commands is -* `curl -O https://some/link/to/a/file` -* `wget https://some/link/to/a/file` +* `curl -O https://some/link/to/a/file [-o new_name]` +* `wget https://some/link/to/a/file [-O new_name]` Try it out by downloading some material we'll use later on, from a terminal on -your local machine. +your local machine, using the URL of the current codebase: + -``` -{{ site.local.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz -``` +> ## Download the "Tarball" +> +> The word "tarball" in the above URL refers to a compressed archive format +> commonly used on Linux, which is the operating system the majority of HPC +> cluster machines run. +> A tarball is a lot like a `.zip` file. +> The actual file extension is `.tar.gz`, which reflects the two-stage process +> used to create the file: +> the files or folders are merged into a single file using `tar`, which is then +> compressed using `gzip`, so the file extension is "tar-dot-g-z." +> That's a mouthful, so people often say "the _xyz_ tarball" instead. +> +> You may also see the extension `.tgz`, which is just an abbreviation of +> `.tar.gz`. +> +> By default, `curl` and `wget` download files to the same name as the URL. +> In this case, that would be "main," which is not very clear. +> Use one of the above commands to save the tarball to "amdahl.tar.gz" instead. 
+>
+> > ## Solution
+> >
+> > ```
+> > {{ site.local.prompt }} curl -L https://github.com/hpc-carpentry/amdahl/tarball/main -o amdahl.tar.gz
+> > # or
+> > {{ site.local.prompt }} wget https://github.com/hpc-carpentry/amdahl/tarball/main -O amdahl.tar.gz
+> > ```
+> > {: .language-bash}
+> {: .solution}
+{: .challenge}
 
-or
+After downloading the file, use `ls` to see it in your working directory:
 
 ```
-{{ site.local.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz
+{{ site.local.prompt }} ls
 ```
 {: .language-bash}
 
 ## Transferring Single Files and Folders With `scp`
 
 To copy a single file to or from the cluster, we can use `scp` ("secure copy").
 
@@ -72,17 +89,18 @@ destination in mind.
 
 Upload the lesson material to your remote home directory like so:
 
 ```
-{{ site.local.prompt }} scp hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
+{{ site.local.prompt }} scp amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
 ```
 {: .language-bash}
 
 > ## Why Not Download on {{ site.remote.name }} Directly?
 >
 > Most computer clusters are protected from the open internet by a _firewall_.
-> This means that the `curl` command will fail, as an address outside the
-> firewall is unreachable from the inside. To get around this, run the `curl`
-> or `wget` command from your local machine to download the file, then use the
-> `scp` command to upload it to the cluster.
+> For enhanced security, some are configured to allow traffic _inbound_, but
+> not _outbound_.
+> This means that an authenticated user can send a file to a cluster machine,
+> but a cluster machine cannot retrieve files from a user's machine or the
+> open Internet.
 >
 > Try downloading the file directly. Note that it may well fail, and that's
 > OK!
 >
 > > ## Commands
 > >
 > > ```
 > > {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }}
-> > {{ site.remote.prompt }} curl -O {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz
+> > {{ site.remote.prompt }} curl -L https://github.com/hpc-carpentry/amdahl/tarball/main -o amdahl.tar.gz
 > > # or
-> > {{ site.remote.prompt }} wget {{ site.url }}{{ site.baseurl }}/files/hpc-intro-code.tar.gz
+> > {{ site.remote.prompt }} wget https://github.com/hpc-carpentry/amdahl/tarball/main -O amdahl.tar.gz
 > > ```
 > > {: .language-bash}
 > {: .solution}
 {: .challenge}
 
@@ -113,7 +131,7 @@ reaches the bottom of the directory tree rooted at the folder
 name you provided.
``` -{{ site.local.prompt }} scp -r hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ +{{ site.local.prompt }} scp -r hpc-carpentry-amdahl-46c9b4b {{ site.remote.user }}@{{ site.remote.login }}:~/ ``` {: .language-bash} @@ -154,24 +172,22 @@ A trailing slash on the target directory is optional, and has no effect for > commonly used options: > > ``` -> {{ site.local.prompt }} rsync -avzP hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} rsync -avP amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > > The options are: > -> * `a` (**a**rchive) to preserve file timestamps, permissions, and folders, +> * `-a` (**a**rchive) to preserve file timestamps, permissions, and folders, > among other things; implies recursion -> * `v` (**v**erbose) to get verbose output to help monitor the transfer -> * `z` (compression) to compress the file during transit to reduce size and -> transfer time -> * `P` (partial/progress) to preserve partially transferred files in case +> * `-v` (**v**erbose) to get verbose output to help monitor the transfer +> * `-P` (partial/progress) to preserve partially transferred files in case > of an interruption and also displays the progress of the transfer. > > To recursively copy a directory, we can use the same options: > > ``` -> {{ site.local.prompt }} rsync -avzP hpc-intro-code {{ site.remote.user }}@{{ site.remote.login }}:~/ +> {{ site.local.prompt }} rsync -avP hpc-carpentry-amdahl-46c9b4b {{ site.remote.user }}@{{ site.remote.login }}:~/ > ``` > {: .language-bash} > @@ -184,7 +200,7 @@ A trailing slash on the target directory is optional, and has no effect for > To download a file, we simply change the source and destination: > > ``` -> {{ site.local.prompt }} rsync -avzP {{ site.remote.user }}@{{ site.remote.login }}:hpc-intro-code ./ +> {{ site.local.prompt }} rsync -avP {{ site.remote.user }}@{{ site.remote.login }}:hpc-carpentry-amdahl-46c9b4b ./ > ``` > {: .language-bash} {: .callout} @@ -201,7 +217,7 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > modify this command? > > ``` -> {{ site.local.prompt }} rsync hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} rsync amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} > @@ -212,7 +228,7 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > > {{ site.local.prompt }} rsync --help | grep port > > --port=PORT specify double-colon alternate port number > > See http://rsync.samba.org/ for updates, bug reports, and answers -> > {{ site.local.prompt }} rsync --port=768 hpc-intro-code.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: +> > {{ site.local.prompt }} rsync --port=768 amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > > ``` > > {: .language-bash} > {: .solution} @@ -267,23 +283,33 @@ The most common archiving command you will use on a (Linux) HPC cluster is `tar`. `tar` can be used to combine files into a single archive file and, optionally, compress it. -Let's start with the file we downloaded from the lesson site, -`hpc-intro-code.tar.gz`. The "gz" part stands for _gzip_, which is a -compression library. This kind of file can usually be interpreted by reading -its name: it appears somebody took a folder named "hpc-intro-code," wrapped up -all its contents in a single file with `tar`, then compressed that archive with -`gzip` to save space. 
Let's check using `tar` with the `-t` flag, which prints -the "**t**able of contents" without unpacking the file, specified by -`-f `, on the remote computer. Note that you can concatenate the two -flags, instead of writing `-t -f` separately. +Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`. +The "gz" part stands for _gzip_, which is a compression library. +This kind of file can usually be interpreted by reading its name: +it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b," +wrapped up all its contents in a single file with `tar`, +then compressed that archive with `gzip` to save space. +Let's check using `tar` with the `-t` flag, which prints the "**t**able of +contents" without unpacking the file, specified by `-f `, +on the remote computer. +Note that you can concatenate the two flags, instead of writing `-t -f` separately. ``` {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -{{ site.remote.prompt }} tar -tf hpc-intro-code.tar.gz -hpc-intro-code/ -hpc-intro-code/amdahl -hpc-intro-code/README.md -hpc-intro-code/LICENSE.txt +{{ site.remote.prompt }} tar -tf amdahl.tar.gz +hpc-carpentry-amdahl-46c9b4b/ +hpc-carpentry-amdahl-46c9b4b/.github/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +hpc-carpentry-amdahl-46c9b4b/.gitignore +hpc-carpentry-amdahl-46c9b4b/LICENSE +hpc-carpentry-amdahl-46c9b4b/README.md +hpc-carpentry-amdahl-46c9b4b/amdahl/ +hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +hpc-carpentry-amdahl-46c9b4b/requirements.txt +hpc-carpentry-amdahl-46c9b4b/setup.py ``` {: .language-bash} @@ -291,8 +317,8 @@ This shows a folder which contains a few files. Let's see about that compression, using `du` for "**d**isk **u**sage". ``` -{{ site.remote.prompt }} du -sh hpc-intro-code.tar.gz -3.4K hpc-intro-code.tar.gz +{{ site.remote.prompt }} du -sh amdahl.tar.gz +8.0K amdahl.tar.gz ``` {: .language-bash} @@ -323,43 +349,51 @@ When it's done, check the directory size with `du` and compare. > > ## Commands > > > > ``` -> > {{ site.remote.prompt }} tar -xvzf hpc-intro-code.tar.gz +> > {{ site.remote.prompt }} tar -xvzf amdahl.tar.gz > > ``` > > {: .language-bash} > > > > ``` -> > hpc-intro-code/ -> > hpc-intro-code/amdahl -> > hpc-intro-code/README.md -> > hpc-intro-code/LICENSE.txt +> > hpc-carpentry-amdahl-46c9b4b/ +> > hpc-carpentry-amdahl-46c9b4b/.github/ +> > hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +> > hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +> > hpc-carpentry-amdahl-46c9b4b/.gitignore +> > hpc-carpentry-amdahl-46c9b4b/LICENSE +> > hpc-carpentry-amdahl-46c9b4b/README.md +> > hpc-carpentry-amdahl-46c9b4b/amdahl/ +> > hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +> > hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +> > hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +> > hpc-carpentry-amdahl-46c9b4b/requirements.txt +> > hpc-carpentry-amdahl-46c9b4b/setup.py > > ``` > > {: .output} > > -> > Note that we did not type out `-x -v -z -f`, thanks to the flag -> > concatenation, though the command works identically either way -- +> > Note that we did not type out `-x -v -z -f`, thanks to flag concatenation, +> > though the command works identically either way -- > > so long as the concatenated list ends with `f`, because the next string > > must specify the name of the file to extract. 
-> > -> > ``` -> > {{ site.remote.prompt }} du -sh hpc-intro-code -> > 16K hpc-intro-code -> > ``` -> > {: .language-bash} > {: .solution} -> -> > ## Was the Data Compressed? -> > -> > Text files (including Python source code) compress nicely: the "tarball" is -> > one-quarter the total size of the raw data! -> {: .discussion} {: .challenge} +Check the size of the extracted directory: + +``` +{{ site.remote.prompt }} du -sh hpc-carpentry-amdahl-46c9b4b +48K hpc-carpentry-amdahl-46c9b4b +``` +{: .language-bash} + +Text files (including Python source code) compress nicely: +the "tarball" is one-sixth the total size of the raw data! + If you want to reverse the process -- compressing raw data instead of extracting it -- set a `c` flag instead of `x`, set the archive filename, then provide a directory to compress: ``` -{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz hpc-intro-code +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz hpc-carpentry-amdahl-46c9b4b ``` {: .language-bash} diff --git a/files/goostats.py b/files/goostats.py deleted file mode 100755 index 654b44c0..00000000 --- a/files/goostats.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 - -""" -Parallel code to extract mean, min, and max of Nelle Nemo's assay results -""" - -import locale as l10n -from mpi4py import MPI -import numpy as np -import os -import sys -l10n.setlocale(l10n.LC_ALL, "") - -# Declare an MPI Communicator for the parallel processes to talk through -comm = MPI.COMM_WORLD - -# Read the number of parallel processes tied into the comm channel -cpus = comm.Get_size() - -# Find out the index ("rank") of *this* process -rank = comm.Get_rank() - - -def list_assay_files(path): - """ - Walk the specified path, using one rank *only*. - Record NENE*.txt files labeled A or B (not Z). - Return list of file paths. - """ - if rank != 0: - print("Rank {} tried scanning the directory.".format(rank)) - sys.exit() - - valid_names = [] - for root, dirs, files in os.walk(path): - for f in files: - if f.startswith("NENE") and f.endswith(("A.txt", "B.txt")): - fullpath = os.path.join(root, f) - valid_names.append(fullpath) - - return valid_names - - -def partition_files(list_of_files, number_of_parts): - """ - Split the provided list of files into a number of roughly-equal parts - """ - return np.array_split(list_of_files, number_of_parts) - - -def get_local_file_names(path): - if rank == 0: - # Let only one MPI process scan the directory for files. - all_files = list_assay_files(path) - partitions = partition_files(all_files, cpus) - else: - partitions = [] - - # Every rank gets its own chunk of the list of assay files. - # This function is *blocking*: no rank returns until all are able to. - return comm.scatter(partitions, root = 0) - - -def extract_concentrations(goo_file): - """ - Read file `goo_file` into NumPy array. - Return array if it contains 300 entries. 
-    """
-    concentrations = np.loadtxt(goo_file)
-    if len(concentrations) != 300:
-        return None
-    return concentrations
-
-
-def get_assay_results(files):
-    # Every rank reads their private list of files into NumPy arrays
-    concentrations = []
-    for f in files:
-        result = extract_concentrations(f)
-        if result is not None:
-            concentrations.append(result)
-
-    print("Rank {} crunched data from {} files.".format(comm.Get_rank(), len(concentrations)))
-
-    # Convert list of NumPy arrays into a 2-D NumPy array
-    return np.array(concentrations)
-
-
-# "Main" program
-
-if __name__ == '__main__':
-    """
-    This program is entered as many times as there are MPI processes.
-
-    Each process knows its index, called 'rank', and the number of
-    ranks, called 'cpus', from the MPI calls at the top of the module.
-    """
-
-    # Guard against improper invocations of the program
-
-    usage_string = "Usage:\n  mpirun -np {} {} directory_name"
-
-    if len(sys.argv) != 2 or sys.argv[1] == "--help":
-        if rank == 0:
-            print(usage_string.format(cpus, sys.argv[0]))
-        sys.exit()
-
-    # Distribute assay files in the specified directory to the parallel ranks
-    path = sys.argv[1]
-    files = get_local_file_names(path)
-
-    # Read local set of files into NumPy array -- ignoring partial results
-    concentrations = get_assay_results(files)
-
-    # Calculate the total number of valid assay results from local numbers
-    valid_results = len(concentrations)         # local
-    valid_results = comm.reduce(valid_results)  # global
-
-    # For each protein, collect the mean, min, and max values from all files
-    assay_avg = np.sum(concentrations, axis=0).tolist()
-    assay_min = np.amin(concentrations, axis=0).tolist()
-    assay_max = np.amax(concentrations, axis=0).tolist()
-
-    for i in range(len(assay_avg)):
-        assay_avg[i] = comm.reduce(assay_avg[i], op=MPI.SUM)
-        assay_min[i] = comm.reduce(assay_min[i], op=MPI.MIN)
-        assay_max[i] = comm.reduce(assay_max[i], op=MPI.MAX)
-
-    # Generate the global report using Rank 0, only
-    if rank == 0:
-        assay_avg = np.divide(assay_avg, valid_results)
-        csv_name = "{}.csv".format(path.rstrip("/"))  # prevent "path/.csv", which would be a hidden file
-        with open(csv_name, "w") as csv:
-            print("mean,min,max", file=csv)
-            for a, n, x in zip(assay_avg, assay_min, assay_max):
-                print("{},{},{}".format(a, n, x), file=csv)
diff --git a/files/hpc-intro-code.tar.gz b/files/hpc-intro-code.tar.gz
deleted file mode 100644
index 1b2e3d9e60c002bbe0492992be0c15fe0eb5fb03..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3391
[GIT binary patch data omitted]
z2R1`TP)-dYUHNO-FkcBwrl>0iM(oZWg`;W1ftf>_->RM$71xqZg3O3(AY8*Rwzm!H zy?q$?%>1$R(~nWp?I(H=HQu&6+tbEd1nhG%^DS46LGB~CY8ujUC~+j8u}Xp(+s!G? ztZz8g3QgcX9uTU!)rik%Vd^r{L=j#O16s}z;sz7*4L4{5z9GhW6G$e-yHU z=4+ro4mpC}U$tds$WKyn0{U0OTFLcC@~pWIS3!R?@>(-~Jn}lf-T?9%XpQ_l>rLtJ z%fOr9cs0+PL0$#DA>;_UoY*j%LSD`>8$(`F2qJ%^TdafL9M6l;n?#<6-YD`M^k$Kh z&>KchKyMm3f-dd+84f<5_R+scrKMO8<@jPhzNo|(2l2)0_~J0W_#(deGWuI8F1<=# zOIP|3O~+ls$kZ+B%1DzrcsS4C(O7+0u$@zH2IIFMbR1WG?(GRay~7hU9jK`%(A>`D z4eI}J$zA%6L3IQAx_54gfNs&uLezP>_L-+%>W|eSAIH+mW8E{rfg?LmhjsNfGVvmY zdLR~kbY#mR@@eQd0}@n!$`@m5EQ|!Ad5Kq2`qKi04|M;e@9>doO6jjFf?@pCi@_ih zX#ivpT9XVl5g5Ir*00A0%m+4Rlc>p)V0L6v%c0Ms?>4w`ZQpWaipd+@HmdeT7{qNi zU!eRW7&8eLis%Rrflyghhq=&tb;58E0fjPZSe;#*zzOp`)U7`$7@GHi&$%-JcFSx> z^=(2|eSd`-Kd*P8VVm4V0BR^1Uk!u6lSg}d*9aH(3#J(D2@}_0(T9Ow7ycJ}+$pW6 zh5mn|eo||8YK7n~*xVZT_y3FK;z33C|110WgTDW)l*{F`|Nki{5d#e4M0gWl#i*>A zgL1K4P6ivpxZwWCmNM#~Q$WUWgh7g8d0s^t_2K4_`PV$JqM|#YZmy&ctNQfXK=pLN z#B~hS)ON17|lz9iXUM5 z!X>AlSxKQ*{GOYtflb5pSFt1-z3jBr8!-C-$W%M{ zJCld2diSh#*##WYt~R^xq4gH3&G+z^dh;|7wZC4pYn=|X+D85SqEWBmy?XPcad}#A zzJoV7uh~KeuA>_xXtxElU<6gKbqMUd);>AIpVc?@M!ow!Z@jH{n}qqc)rKluRNLM9 z$z`M3hKtMgMXOW8$-1)T|bwv<(`jxr3{-8r`Dx zs`&4uTW>Wm5?a+ji?bZ;(21MoWRc8;I4R$s}StI8FU2rIb=iDW#NBN-3q3Qc5YMlu}A5rIb=iDW#NB VN-3q3Qc8bj`VV=+WO@Kl008V6sLlWY From 2309e99b1ccb9988292647db6f54382d5ecc1810 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 19 Jan 2023 15:51:58 -0500 Subject: [PATCH 04/25] pip install amdahl in parallel episode --- _episodes/15-transferring-files.md | 106 ++++++++++------ _episodes/16-parallel.md | 113 ++++++++++++++++-- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- .../parallel/eight-tasks-jobscript.snip | 2 +- .../parallel/four-tasks-jobscript.snip | 2 +- .../parallel/one-task-jobscript.snip | 2 +- 21 files changed, 188 insertions(+), 69 deletions(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 563a2c27..1583d33e 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -335,53 +335,65 @@ Now let's unpack the archive. We'll run `tar` with a few common flags: * `-x` to e**x**tract the archive * `-v` for **v**erbose output * `-z` for g**z**ip compression -* `-f` for the file to be unpacked +* `-f «tarball»` for the file to be unpacked -When it's done, check the directory size with `du` and compare. +The folder inside has an unfortunate name, so we'll change that as well using + +* `-C «folder»` to **c**hange directories before extracting + (note that the new directory must exist!) +* `--strip-components=«n»` to remove $n$ levels of depth from the extracted + file hierarchy > ## Extract the Archive > -> Using the four flags above, unpack the lesson data using `tar`. -> Then, check the size of the whole unpacked directory using `du`. 
+> Using the four flags above, unpack the source code tarball into a new +> directory named "amdahl" using `tar`. > -> Hint: `tar` lets you concatenate flags. +> ``` +> {{ site.remote.prompt }} mkdir amdahl +> {{ site.remote.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 +> ``` +> {: .language-bash} > -> > ## Commands -> > -> > ``` -> > {{ site.remote.prompt }} tar -xvzf amdahl.tar.gz -> > ``` -> > {: .language-bash} -> > -> > ``` -> > hpc-carpentry-amdahl-46c9b4b/ -> > hpc-carpentry-amdahl-46c9b4b/.github/ -> > hpc-carpentry-amdahl-46c9b4b/.github/workflows/ -> > hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml -> > hpc-carpentry-amdahl-46c9b4b/.gitignore -> > hpc-carpentry-amdahl-46c9b4b/LICENSE -> > hpc-carpentry-amdahl-46c9b4b/README.md -> > hpc-carpentry-amdahl-46c9b4b/amdahl/ -> > hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py -> > hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py -> > hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py -> > hpc-carpentry-amdahl-46c9b4b/requirements.txt -> > hpc-carpentry-amdahl-46c9b4b/setup.py -> > ``` -> > {: .output} -> > -> > Note that we did not type out `-x -v -z -f`, thanks to flag concatenation, -> > though the command works identically either way -- -> > so long as the concatenated list ends with `f`, because the next string -> > must specify the name of the file to extract. -> {: .solution} -{: .challenge} +> ``` +> hpc-carpentry-amdahl-46c9b4b/ +> hpc-carpentry-amdahl-46c9b4b/.github/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +> hpc-carpentry-amdahl-46c9b4b/.gitignore +> hpc-carpentry-amdahl-46c9b4b/LICENSE +> hpc-carpentry-amdahl-46c9b4b/README.md +> hpc-carpentry-amdahl-46c9b4b/amdahl/ +> hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +> hpc-carpentry-amdahl-46c9b4b/requirements.txt +> hpc-carpentry-amdahl-46c9b4b/setup.py +> ``` +> {: .output} +> +> Note that we did not need to type out `-x -v -z -f`, thanks to flag +> concatenation, though the command works identically either way -- +> so long as the concatenated list ends with `f`, because the next string +> must specify the name of the file to extract. +> +> We couldn't concatenate `-C` because the next string must name the directory. +> +> Long options (`--strip-components`) also can't be concatenated. 
+> +> Since order doesn't generally matter, an equivalent command would be +> ``` +> {{ site.remote.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 +> ``` +> {: .language-bash} +{: .discussion} -Check the size of the extracted directory: +Check the size of the extracted directory, and compare to the compressed +file size: ``` -{{ site.remote.prompt }} du -sh hpc-carpentry-amdahl-46c9b4b -48K hpc-carpentry-amdahl-46c9b4b +{{ site.remote.prompt }} du -sh amdahl +48K amdahl ``` {: .language-bash} @@ -393,9 +405,25 @@ extracting it -- set a `c` flag instead of `x`, set the archive filename, then provide a directory to compress: ``` -{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz hpc-carpentry-amdahl-46c9b4b +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl ``` {: .language-bash} +> ``` +> amdahl/ +> amdahl/.github/ +> amdahl/.github/workflows/ +> amdahl/.github/workflows/python-publish.yml +> amdahl/.gitignore +> amdahl/LICENSE +> amdahl/README.md +> amdahl/amdahl/ +> amdahl/amdahl/__init__.py +> amdahl/amdahl/__main__.py +> amdahl/amdahl/amdahl.py +> amdahl/requirements.txt +> amdahl/setup.py +> ``` +> {: .output} > ## Working with Windows > diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md index d7a8abb7..0ebce180 100644 --- a/_episodes/16-parallel.md +++ b/_episodes/16-parallel.md @@ -7,6 +7,7 @@ questions: - "What benefits arise from parallel execution?" - "What are the limits of gains from execution in parallel?" objectives: +- "Install a Python package using `pip`" - "Prepare a job submission script for the parallel executable." - "Launch jobs with parallel execution." - "Record and summarize the timing and accuracy of jobs." @@ -29,26 +30,116 @@ If you disconnected, log back in to the cluster. ``` {: .language-bash} +## Install the Amdahl Program + +With the Amdahl source code on the cluster, we can install it, which will +provide access to the `amdahl` executable. +Move into the extracted directory, then use the Package Installer for Python, +or `pip`, to install it in your ("user") home directory: + +``` +{{ site.remote.prompt }} cd amdahl +{{ site.remote.prompt }} python3 -m pip install --user . +``` +{: .language-bash} + +> ## Amdahl is Python Code +> +> The Amdahl program is written in Python, and installing or using it requires +> locating the `python3` executable on the login node. +> If it can't be found, try listing available modules using `module avail`, +> load the appropriate one, and try the command again. + +### MPI for Python + +The Amdahl code has one dependency: __mpi4py__. +If it hasn't already been installed on the cluster, `pip` will attempt to +collect mpi4py from the Internet and install it for you. +If this fails due to a one-way firewall, you must retrieve mpi4py on your +local machine and upload it, just as we did for Amdahl. 
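+
+To check whether the installation gave you a working mpi4py -- a quick sanity
+test, where the exact version string printed will vary with your cluster's
+MPI installation -- ask Python to import it and report the MPI library it
+was built against:
+
+```
+{{ site.remote.prompt }} python3 -c "from mpi4py import MPI; print(MPI.Get_library_version())"
+```
+{: .language-bash}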
+
+> ## Retrieve and Push mpi4py
+>
+> If installing Amdahl failed because mpi4py could not be installed,
+> retrieve the mpi4py tarball from
+> <https://github.com/mpi4py/mpi4py/tarball/master>,
+> then `scp` or `rsync` it to the cluster, extract, and install:
+>
+> ```
+> {{ site.local.prompt }} wget https://github.com/mpi4py/mpi4py/tarball/master -O mpi4py.tar.gz
+> {{ site.local.prompt }} scp mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
+> # or
+> {{ site.local.prompt }} rsync -avP mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
+> ```
+> {: .language-bash}
+> ```
+> {{ site.remote.prompt }} mkdir mpi4py
+> {{ site.remote.prompt }} tar -xvzf mpi4py.tar.gz -C mpi4py --strip-components=1
+> {{ site.remote.prompt }} cd mpi4py
+> {{ site.remote.prompt }} python3 -m pip install --user .
+> {{ site.remote.prompt }} cd ../amdahl
+> {{ site.remote.prompt }} python3 -m pip install --user .
+> ```
+> {: .language-bash}
+{: .discussion}
+
+`pip` may warn that your user package binaries are not in your PATH.
+
+> ## If Pip Raises a Warning...
+>
+> > ## Warning Message
+> >
+> > WARNING: The script amdahl is installed in "${HOME}/.local/bin" which is
+> > not on PATH. Consider adding this directory to PATH or, if you prefer to
+> > suppress this warning, use --no-warn-script-location.
+> {: .warning}
+>
+> To check whether this warning is a problem, use `which` to search for the
+> `amdahl` program:
+>
+> ```
+> {{ site.remote.prompt }} which amdahl
+> ```
+> {: .language-bash}
+>
+> If the command returns no output, displaying a new prompt, then you must
+> update the environment variable named `PATH` to include the missing folder.
+> Run the following command to update your shell configuration file, then log
+> off the cluster and back on again.
+>
+> ```
+> {{ site.remote.prompt }} echo "export PATH=${PATH}:${HOME}/.local/bin" >> ~/.bashrc
+> {{ site.remote.prompt }} logout
+> {{ site.local.prompt }} ...
+> ```
+>
+> `which` should now be able to find `amdahl` without difficulties.
+> If you had to load a Python module, load it again!
+> {: .language-bash}
+{: .discussion}
+
 ## Help!
 
 Many command-line programs include a "help" message. Navigate to the directory
 of the decompressed files, then print the `amdahl` program's help message:
 
 ```
-{{ site.remote.prompt }} cd hpc-intro-code
-{{ site.remote.prompt }} ./amdahl --help
+{{ site.remote.prompt }} amdahl --help
 ```
 {: .language-bash}
 
 ```
-usage: amdahl [-h] [-p [PARALLEL_PROPORTION]] [-w [WORK_SECONDS]]
+usage: amdahl [-h] [-p [PARALLEL_PROPORTION]] [-w [WORK_SECONDS]] [-t] [-e]
+              [-j [JITTER_PROPORTION]]
 
 optional arguments:
   -h, --help            show this help message and exit
   -p [PARALLEL_PROPORTION], --parallel-proportion [PARALLEL_PROPORTION]
-                        Parallel proportion should be a float between 0 and 1
+                        Parallel proportion: a float between 0 and 1
   -w [WORK_SECONDS], --work-seconds [WORK_SECONDS]
-                        Total seconds of workload, should be an integer greater than 0
+                        Total seconds of workload: an integer greater than 0
+  -t, --terse           Format output as a machine-readable object for easier analysis
+  -e, --exact           Exactly match requested timing by disabling random jitter
+  -j [JITTER_PROPORTION], --jitter-proportion [JITTER_PROPORTION]
+                        Random jitter: a float between -1 and +1
 ```
 {: .output}
 
@@ -101,7 +192,7 @@ reverse-chronological order: newest first. What was the output?
 > ```
 > {: .language-bash}
 > ```
-> Doing 30.0000 seconds of 'work' on 1 processor,
+> Doing 30.000 seconds of 'work' on 1 processor,
 > which should take 30.000 seconds with 0.850 parallel proportion of the workload.
 >
 > Hello, World!
I am process 0 of 1 on {{ site.remote.node }}. I will do all the serial 'work' for 4.500 seconds. @@ -118,7 +209,7 @@ see that the code uses a default of 30 seconds of work that is 85% parallel. The program ran for just over 30 seconds in total, and if we run the numbers, it is true that 15% of it was marked 'serial' and 85% was 'parallel'. -Since we only gave the job one CPU, this job wasn't really parallel: the +Since we only gave the job one CPU, this job wasn't really parallel: the same processor performed the 'serial' work for 4.5 seconds, then the 'parallel' part for 25.5 seconds, and no time was saved. The cluster can do better, if we ask. @@ -329,15 +420,15 @@ Now, let's summarize the amount of time it took each job to run: Then, use the first row to compute speedups _S_, using Python as a command-line calculator: ``` -{{ site.remote.prompt }} for n in 30.033 10.888 7.6972; do python3 -c "print(30.033 / $n)"; done +{{ site.remote.prompt }} for n in 30.033 10.888 7.697; do python3 -c "print(30.033 / $n)"; done ``` {: .language-bash} | Number of CPUs | Speedup | Ideal | | --- | --- | --- | -| 1 | 1.0 | 1.0 | -| 4 | 2.75 | 4.0 | -| 8 | 3.90 | 8.0 | +| 1 | 1.0 | 1 | +| 4 | 2.75 | 4 | +| 8 | 3.90 | 8 | The job output files have been telling us that this program is performing 85% of its work in parallel, leaving 15% to run in serial. This seems reasonably diff --git a/_includes/snippets_library/Birmingham_Baskerville_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Birmingham_Baskerville_slurm/parallel/four-tasks-jobscript.snip index ac8effab..83220026 100644 --- a/_includes/snippets_library/Birmingham_Baskerville_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Birmingham_Baskerville_slurm/parallel/four-tasks-jobscript.snip @@ -10,6 +10,6 @@ module load python3 # Execute the task -mpiexec python pi.py 100000000 +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip index ad8a8eee..38365d68 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/eight-tasks-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip index dfa00e6b..14046d46 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/four-tasks-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip index 91ebd101..0e2a9947 100644 --- a/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip +++ b/_includes/snippets_library/ComputeCanada_Graham_slurm/parallel/one-task-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -./amdahl +amdahl ``` {: .language-bash} diff --git 
a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip index 9caa7145..f0e8716f 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/eight-tasks-jobscript.snip @@ -8,6 +8,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip index 04a6fb3a..e1ba5c5c 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/four-tasks-jobscript.snip @@ -8,6 +8,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip index d267e8cd..42fdd337 100644 --- a/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip +++ b/_includes/snippets_library/EPCC_Cirrus_pbs/parallel/one-task-jobscript.snip @@ -8,6 +8,6 @@ module load python3 # Execute the task -./amdahl +amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip index b052e666..7f99b7fe 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/eight-tasks-jobscript.snip @@ -11,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip index b24c7153..a777f674 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/four-tasks-jobscript.snip @@ -11,6 +11,6 @@ module load Python module load SciPy-bundle # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip index 4c149443..fa665b10 100644 --- a/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip +++ b/_includes/snippets_library/Magic_Castle_EESSI_slurm/parallel/one-task-jobscript.snip @@ -9,6 +9,6 @@ module load Python # Execute the task -./amdahl +amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip index 09ab213e..7fa5d183 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/eight-tasks-jobscript.snip @@ -6,6 +6,6 @@ {{ site.sched.comment }} -n 8 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git 
a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip index af8f4653..0303186a 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/four-tasks-jobscript.snip @@ -6,6 +6,6 @@ {{ site.sched.comment }} -n 4 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip index 984f5740..e5fe4b59 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/parallel/one-task-jobscript.snip @@ -6,6 +6,6 @@ {{ site.sched.comment }} -n 1 # Execute the task -./amdahl +amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip index ad8a8eee..38365d68 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/eight-tasks-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip index dfa00e6b..14046d46 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/four-tasks-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip index 91ebd101..0e2a9947 100644 --- a/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip +++ b/_includes/snippets_library/Norway_SIGMA2_SAGA_slurm/parallel/one-task-jobscript.snip @@ -9,6 +9,6 @@ module load python3 # Execute the task -./amdahl +amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip index 9caa7145..f0e8716f 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/eight-tasks-jobscript.snip @@ -8,6 +8,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip index 04a6fb3a..e1ba5c5c 100644 --- a/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip +++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/four-tasks-jobscript.snip @@ -8,6 +8,6 @@ module load python3 # Execute the task -mpiexec ./amdahl +mpiexec amdahl ``` {: .language-bash} diff --git a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip 
b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip
index d267e8cd..42fdd337 100644
--- a/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip
+++ b/_includes/snippets_library/UCL_Myriad_sge/parallel/one-task-jobscript.snip
@@ -8,6 +8,6 @@
 module load python3
 
 # Execute the task
-./amdahl
+amdahl
 ```
 {: .language-bash}

From 971442d7e9d680686420e29e102e13944d07fe32 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Thu, 19 Jan 2023 16:22:22 -0500
Subject: [PATCH 05/25] fix missing blockquote tags

---
 _episodes/15-transferring-files.md | 37 +++++++++++++++---------------
 _episodes/16-parallel.md           |  4 +++-
 2 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index 1583d33e..92b98d31 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -50,15 +50,16 @@ your local machine, using the URL of the current codebase:
 > In this case, that would be "main," which is not very clear.
 > Use one of the above commands to save the tarball to "amdahl.tar.gz" instead.
 >
-> > ## Solution
+> > ## Curl & Wget Commands
 > >
 > > ```
 > > {{ site.local.prompt }} curl -L https://github.com/hpc-carpentry/amdahl/tarball/main -o amdahl.tar.gz
 > > # or
 > > {{ site.local.prompt }} wget https://github.com/hpc-carpentry/amdahl/tarball/main -O amdahl.tar.gz
 > > ```
+> > {: .language-bash}
 > {: .solution}
-{: .language-bash}
+{: .challenge}
 
 After downloading the file, use `ls` to see it in your working directory:
 
@@ -408,22 +409,22 @@ then provide a directory to compress:
 {{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl
 ```
 {: .language-bash}
-> ```
-> amdahl/
-> amdahl/.github/
-> amdahl/.github/workflows/
-> amdahl/.github/workflows/python-publish.yml
-> amdahl/.gitignore
-> amdahl/LICENSE
-> amdahl/README.md
-> amdahl/amdahl/
-> amdahl/amdahl/__init__.py
-> amdahl/amdahl/__main__.py
-> amdahl/amdahl/amdahl.py
-> amdahl/requirements.txt
-> amdahl/setup.py
-> ```
-> {: .output}
+```
+amdahl/
+amdahl/.github/
+amdahl/.github/workflows/
+amdahl/.github/workflows/python-publish.yml
+amdahl/.gitignore
+amdahl/LICENSE
+amdahl/README.md
+amdahl/amdahl/
+amdahl/amdahl/__init__.py
+amdahl/amdahl/__main__.py
+amdahl/amdahl/amdahl.py
+amdahl/requirements.txt
+amdahl/setup.py
+```
+{: .output}
 
 > ## Working with Windows
 >
diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md
index 0ebce180..cbf95c54 100644
--- a/_episodes/16-parallel.md
+++ b/_episodes/16-parallel.md
@@ -49,6 +49,7 @@ or `pip`, to install it in your ("user") home directory:
 > locating the `python3` executable on the login node.
 > If it can't be found, try listing available modules using `module avail`,
 > load the appropriate one, and try the command again.
+{: .callout}
 
 ### MPI for Python
 
@@ -111,10 +112,10 @@ local machine and upload it, just as we did for Amdahl.
 > {{ site.remote.prompt }} logout
 > {{ site.local.prompt }} ...
 > ```
+> {: .language-bash}
 >
 > `which` should now be able to find `amdahl` without difficulties.
 > If you had to load a Python module, load it again!
-> {: .language-bash}
 {: .discussion}
 
 ## Help!
 
@@ -257,6 +258,7 @@ Let's modify the job script to request more cores and use the MPI run-time.
 {{ site.remote.prompt }} nano parallel-job.sh
 {{ site.remote.prompt }} cat parallel-job.sh
 ```
+{: .language-bash}
 
 {% include {{ site.snippets }}/parallel/four-tasks-jobscript.snip %}
 

From 64e034aed1c492dba5ff5492d04e731f26673fe7 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Thu, 19 Jan 2023 15:58:27 -0500
Subject: [PATCH 06/25] see amdahl not cpi in history

---
 _episodes/17-resources.md                                  | 10 +++++-----
 .../NIST_CTCMS_slurm/resources/account-history.snip        |  8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/_episodes/17-resources.md b/_episodes/17-resources.md
index 493a66ab..61359428 100644
--- a/_episodes/17-resources.md
+++ b/_episodes/17-resources.md
@@ -47,9 +47,9 @@ finish and free up the resources needed to match what you asked for.
 
 ## Stats
 
-Since we already submitted `pi.py` to run on the cluster, we can query the
+Since we already submitted `amdahl` to run on the cluster, we can query the
 scheduler to see how long our job took and what resources were used. We will
-use `{{ site.sched.hist }}` to get statistics about `parallel-pi.sh`.
+use `{{ site.sched.hist }}` to get statistics about `parallel-job.sh`.
 
 ```
 {{ site.remote.prompt }} {{ site.sched.hist }}
@@ -87,12 +87,12 @@ keys to scroll through fields).
 
 ## Improving Resource Requests
 
-From the job history, we see that `pi.py` jobs finished executing in
+From the job history, we see that `amdahl` jobs finished executing in
 at most a few minutes, once dispatched. The time estimate we provided
 in the job script was far too long! This makes it harder for the queuing
 system to accurately estimate when resources will become free for other
 jobs. Practically, this means that the queuing system waits
-to dispatch our `pi.py` job until the full requested time slot opens,
+to dispatch our `amdahl` job until the full requested time slot opens,
 instead of "sneaking it in" a much shorter window where the job could
 actually finish. Specifying the expected runtime in the submission
 script more accurately will help alleviate cluster congestion and may
@@ -100,7 +100,7 @@ get your job dispatched earlier.
 
 > ## Narrow the Time Estimate
 >
-> Edit `parallel_pi.sh` to set a better time estimate. How close can
+> Edit `parallel-job.sh` to set a better time estimate. How close can
> you get?
>
> Hint: use `{{ site.sched.flag.time }}`.
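>
> As a minimal sketch (assuming a Slurm-style scheduler, where
> `{{ site.sched.flag.time }}` takes a value in `HH:MM:SS` format), a
> five-minute request in the job script would look like:
>
> ```
> {{ site.sched.comment }} {{ site.sched.flag.time }} 00:05:00
> ```
> {: .language-bash}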
diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/resources/account-history.snip b/_includes/snippets_library/NIST_CTCMS_slurm/resources/account-history.snip index 1fd637fa..61ac15e5 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/resources/account-history.snip +++ b/_includes/snippets_library/NIST_CTCMS_slurm/resources/account-history.snip @@ -6,11 +6,11 @@ 212341 env {{ site.sched.queue.debug }} 2 COMPLETED 212342 mpirun {{ site.sched.queue.testing }} 2 COMPLETED 212343 mpirun {{ site.sched.queue.testing }} 2 COMPLETED -212344 cpi {{ site.sched.queue.testing }} 2 COMPLETED -212345 cpi {{ site.sched.queue.testing }} 2 COMPLETED +212344 amdahl {{ site.sched.queue.testing }} 2 COMPLETED +212345 amdahl {{ site.sched.queue.testing }} 2 COMPLETED 212346 bash {{ site.sched.queue.testing }} 2 COMPLETED 212346.0 bash 2 COMPLETED -212346.1 cpi 2 COMPLETED -212347 cpi {{ site.sched.queue.testing }} 2 FAILED +212346.1 amdahl 2 COMPLETED +212347 amdahl {{ site.sched.queue.testing }} 2 FAILED ``` {: .output} From 0eb41953f9cb0c637b15924ee61b72b1a23ef549 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 19 Jan 2023 16:04:22 -0500 Subject: [PATCH 07/25] update Python version --- .github/workflows/test_and_build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 6c8ed1b0..892ec2bd 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -15,10 +15,10 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: '3.9' - name: Install codespell run: | - pip3 install codespell + python3 -m pip install codespell - name: Check spelling run: | make spellcheck @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: actions/setup-ruby@v1 + - uses: ruby/setup-ruby@v1 with: ruby-version: '2.7' - name: Install basic requirements @@ -64,7 +64,7 @@ jobs: - Magic_Castle_EESSI_slurm steps: - uses: actions/checkout@v2 - - uses: actions/setup-ruby@v1 + - uses: ruby/setup-ruby@v1 with: ruby-version: '2.7' - name: Install basic requirements From fb93453e710cd8d95d44ae910e2882ab7ddb05a7 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 19 Jan 2023 17:17:26 -0500 Subject: [PATCH 08/25] updated server name --- .../snippets_library/NIST_CTCMS_slurm/_config_options.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml index 0f55d3f0..d1a31a18 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml +++ b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml @@ -24,14 +24,14 @@ local: bash_shebang: "#!/usr/bin/bash" remote: - name: "ruth" - login: "ruth.nist.gov" - host: "ruth" + name: "mr-french" + login: "mr-french.nist.gov" + host: "mr-french" node: "r001" location: "National Institute of Standards and Technology" homedir: "/users" user: "yourUsername" - prompt: "501 ruth%" + prompt: "501 mr-french%" bash_shebang: "#!/bin/bash" sched: From 115f82d1abcf00f7f7b5face3235e9200ddf57cd Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Thu, 19 Jan 2023 18:20:00 -0500 Subject: [PATCH 09/25] a few readability fixes --- _episodes/15-transferring-files.md | 4 ++- _episodes/16-parallel.md | 26 +++++++++---------- _episodes/17-resources.md | 4 +-- .../NIST_CTCMS_slurm/_config_options.yml | 2 +- 4 files 
changed, 19 insertions(+), 17 deletions(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 92b98d31..055dc9b2 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -232,6 +232,8 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > > {{ site.local.prompt }} rsync --port=768 amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > > ``` > > {: .language-bash} +> (Note that this command will fail, as the correct port in this case is the +> default: 22.) > {: .solution} {: .challenge} @@ -347,7 +349,7 @@ The folder inside has an unfortunate name, so we'll change that as well using > ## Extract the Archive > -> Using the four flags above, unpack the source code tarball into a new +> Using the flags above, unpack the source code tarball into a new > directory named "amdahl" using `tar`. > > ``` diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md index cbf95c54..20984412 100644 --- a/_episodes/16-parallel.md +++ b/_episodes/16-parallel.md @@ -87,11 +87,11 @@ local machine and upload it, just as we did for Amdahl. > ## If Pip Raises a Warning... > -> > ## Warning Message -> > -> > WARNING: The script amdahl is installed in "${HOME}/.local/bin" which is -> > not on PATH. Consider adding this directory to PATH or, if you prefer to -> > suppress this warning, use --no-warn-script-location. +> ``` +> WARNING: The script amdahl is installed in "${HOME}/.local/bin" which is +> not on PATH. Consider adding this directory to PATH or, if you prefer to +> suppress this warning, use --no-warn-script-location. +> ``` > {: .warning} > > To check whether this warning is a problem, use `which` to search for the @@ -102,10 +102,11 @@ local machine and upload it, just as we did for Amdahl. > ``` > {: .language-bash} > -> If the command returns no output, displaying a new prompt, then you must -> update the environment variable named `PATH` to include the missing folder. +> If the command returns no output, displaying a new prompt, it means the file +> `amdahl` has not been found. You must update the environment variable named +> `PATH` to include the missing folder. > Run the following command to update your shell configuration file, then log -> off the cluster and back on again. +> off the cluster and back on again so it takes effect. > > ``` > {{ site.remote.prompt }} echo "export PATH=${PATH}:${HOME}/.local/bin" >> ~/.bashrc @@ -120,8 +121,7 @@ local machine and upload it, just as we did for Amdahl. ## Help! -Many command-line programs include a "help" message. Navigate to the directory -of the decompressed files, then print the `amdahl` program's help message: +Many command-line programs include a "help" message. Try it with `amdahl`: ``` {{ site.remote.prompt }} amdahl --help @@ -204,9 +204,9 @@ reverse-chronological order: newest first. What was the output? > {: .output} {: .solution} -`amdahl` takes two optional parameters as input: the amount of work and the -proportion of that work that is parallel in nature. Based on the output, we can -see that the code uses a default of 30 seconds of work that is 85% +As we saw before, two of the `amdahl` program flags set the amount of work and +the proportion of that work that is parallel in nature. Based on the output, we +can see that the code uses a default of 30 seconds of work that is 85% parallel. 
The program ran for just over 30 seconds in total, and if we run the numbers, it is true that 15% of it was marked 'serial' and 85% was 'parallel'. diff --git a/_episodes/17-resources.md b/_episodes/17-resources.md index 61359428..cee174dd 100644 --- a/_episodes/17-resources.md +++ b/_episodes/17-resources.md @@ -58,7 +58,7 @@ use `{{ site.sched.hist }}` to get statistics about `parallel-job.sh`. {% include {{ site.snippets }}/resources/account-history.snip %} -This shows all the jobs we ran recently (note that there are multiple entries +This shows all the jobs we ran today (note that there are multiple entries per job). To get info about a specific job, we change command slightly. ``` @@ -72,7 +72,7 @@ information to `less` to make it easier to view (use the left and right arrow keys to scroll through fields). ``` -{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} +{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} 1965 | less ``` {: .language-bash} diff --git a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml index d1a31a18..114b0fa4 100644 --- a/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml +++ b/_includes/snippets_library/NIST_CTCMS_slurm/_config_options.yml @@ -46,7 +46,7 @@ sched: flag: user: "-u yourUsername" interactive: "" - histdetail: "--format=JobID,JobName,Submit,Start,State,ReqCPUS,Reserved,Elapsed,MaxRSS" + histdetail: "--format=JobName,Submit,Start,State,ReqCPUS,Reserved,Elapsed,MaxRSS -j" name: "-J" time: "-t" queue: "-p" From decb12174647686e04b00e3ea794206f87e21139 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:45:13 -0500 Subject: [PATCH 10/25] remove unused tarball, consolidate pi code in new tarball --- files/hpc-intro-code.tar.gz | Bin 3391 -> 0 bytes files/hpc-intro-pi-code.tar.gz | Bin 0 -> 3492 bytes files/pi-mpi-minimal.py | 29 -------- files/pi-mpi.py | 125 ------------------------------- files/pi-serial-minimized.py | 16 ---- files/pi-serial.py | 80 -------------------- files/pi.py | 127 -------------------------------- files/simple-pi-illustration.py | 39 ---------- 8 files changed, 416 deletions(-) delete mode 100644 files/hpc-intro-code.tar.gz create mode 100644 files/hpc-intro-pi-code.tar.gz delete mode 100755 files/pi-mpi-minimal.py delete mode 100755 files/pi-mpi.py delete mode 100644 files/pi-serial-minimized.py delete mode 100755 files/pi-serial.py delete mode 100755 files/pi.py delete mode 100644 files/simple-pi-illustration.py diff --git a/files/hpc-intro-code.tar.gz b/files/hpc-intro-code.tar.gz deleted file mode 100644 index 1b2e3d9e60c002bbe0492992be0c15fe0eb5fb03..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3391 zcmV-F4Z!jriwFP!000001MOPfa^gl7o@+hD3HH>0gE3${PF!(PjLnRx#oz)Q?^I0{ zX{5#|7HX|pGA7wnZS6DU5%zNLbCat)N}goT>6SqJ`EST?Z#>DNU^>JXDIN9 zxf9YY_{_bLkX8E0p(!|w`8R5yw140zHX#+>@t{Y)v$+W4YAAq>piVXjh748D1Cmm~ z>G^b-oKL?K^GRn4!4F**O{yokY($F^=QIb!GP#t(@6m&3wpMRKWSmec6k&IjIc} z(!dccnAs}_JIkC;0j8YKL1b58tdKDWd&*iyLb6NAXI|~FF^OR^#|#}7Xj%B3$ISo* zjCajI;7%H%57T%_Hd-u_a0=EJ7tAkK3~Oc%xpX@H)C6dgModA@Ro5GE}atpfLaRA4}s>iK1q(~H^s#QotRZYO~+hWkZU1Vi=|?k;w!siKpuP{g5zTDzS#4C{{oz$xJLHac zoVkdNlZduayzZlq=;Vu+!I*OwimKcyMeF4rBhYwWAaa4-$li~T1P?*Wa$v!>AVpWz zgNYOsXNr7M7z}A{z^~DW*DA_Umd2aKIdQh2&6zbb8mL-2@){toQNxH|^o2-x?OJp% z@&b%iu22=ZEU4@e^Y(QzcEz)Oc z6{aqT7kF7Jjax1TI*QtqMW%5IEp9nnpD&VDUD&2;3tc>yqFuqHR7ml4#F-5J_(-Bo 
zW>aOTvBx?*(SO-QiI#Mrg`hMW6dzXRvdrw{vgBT?4>Ie_z9Q{l)>vRTY1w=7><3rx zL4L>0Q2CTf@mXf#iAcpm)Rod`F?WGjA=nO<+i zY`53T=+UvC8UO9W{4;2w|I@BjPtR+Gk+sP+sLAuXQd!ggDV2+_OL70Fynk3x{hz~9 z+W+|!MCW=;yOvOa3JW}snK$g3f3I9SF-Tu?T%geo7W$6tiTh~rXXaVP4L zL4hoEqjfR)3bibTNE>4@t}d`drM%3o0?C03E0 z2R1`TP)-dYUHNO-FkcBwrl>0iM(oZWg`;W1ftf>_->RM$71xqZg3O3(AY8*Rwzm!H zy?q$?%>1$R(~nWp?I(H=HQu&6+tbEd1nhG%^DS46LGB~CY8ujUC~+j8u}Xp(+s!G? ztZz8g3QgcX9uTU!)rik%Vd^r{L=j#O16s}z;sz7*4L4{5z9GhW6G$e-yHU z=4+ro4mpC}U$tds$WKyn0{U0OTFLcC@~pWIS3!R?@>(-~Jn}lf-T?9%XpQ_l>rLtJ z%fOr9cs0+PL0$#DA>;_UoY*j%LSD`>8$(`F2qJ%^TdafL9M6l;n?#<6-YD`M^k$Kh z&>KchKyMm3f-dd+84f<5_R+scrKMO8<@jPhzNo|(2l2)0_~J0W_#(deGWuI8F1<=# zOIP|3O~+ls$kZ+B%1DzrcsS4C(O7+0u$@zH2IIFMbR1WG?(GRay~7hU9jK`%(A>`D z4eI}J$zA%6L3IQAx_54gfNs&uLezP>_L-+%>W|eSAIH+mW8E{rfg?LmhjsNfGVvmY zdLR~kbY#mR@@eQd0}@n!$`@m5EQ|!Ad5Kq2`qKi04|M;e@9>doO6jjFf?@pCi@_ih zX#ivpT9XVl5g5Ir*00A0%m+4Rlc>p)V0L6v%c0Ms?>4w`ZQpWaipd+@HmdeT7{qNi zU!eRW7&8eLis%Rrflyghhq=&tb;58E0fjPZSe;#*zzOp`)U7`$7@GHi&$%-JcFSx> z^=(2|eSd`-Kd*P8VVm4V0BR^1Uk!u6lSg}d*9aH(3#J(D2@}_0(T9Ow7ycJ}+$pW6 zh5mn|eo||8YK7n~*xVZT_y3FK;z33C|110WgTDW)l*{F`|Nki{5d#e4M0gWl#i*>A zgL1K4P6ivpxZwWCmNM#~Q$WUWgh7g8d0s^t_2K4_`PV$JqM|#YZmy&ctNQfXK=pLN z#B~hS)ON17|lz9iXUM5 z!X>AlSxKQ*{GOYtflb5pSFt1-z3jBr8!-C-$W%M{ zJCld2diSh#*##WYt~R^xq4gH3&G+z^dh;|7wZC4pYn=|X+D85SqEWBmy?XPcad}#A zzJoV7uh~KeuA>_xXtxElU<6gKbqMUd);>AIpVc?@M!ow!Z@jH{n}qqc)rKluRNLM9 z$z`M3hKtMgMXOW8$-1)T|bwv<(`jxr3{-8r`Dx zs`&4uTW>Wm5?a+ji?bZ;(21MoWRc8;I4R$s}StI8FU2rIb=iDW#NBN-3q3Qc5YMlu}A5rIb=iDW#NB VN-3q3Qc8bj`VV=+WO@Kl008V6sLlWY diff --git a/files/hpc-intro-pi-code.tar.gz b/files/hpc-intro-pi-code.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..33b43cb9a99622bccd7e79ea5d34c87663460830 GIT binary patch literal 3492 zcmV;V4O{XbiwFP!000001MOSuZX3rH_HRDLltQ6hb-CK5C|eQA08Y{b0qiDlQsjpq z5X0ReIn~~>Gb@QP4CH6~ba{t9NzXYmdm%;YV#})2a1)zL&h5;(e&@`%kb|^P;;MRv)GrC#LOQBS5tUTT(AX|ZHnv8&e}4V_x4m90W=v*E#$po5GD?J_bIp@UlvkN{PkRiWYc_`2{E}yJ zp88cLXL*@AO8z3o-Oki^`GCG9kEJyI)z77Nt`Dx+U)b_sX_~1gG$$f+sOuSYyjqA- zn6G~whkoFm+qs0U%FK-E_2i6ACK*q~WWvT{wm+GqTxOHW{;8cgO<`hSfdA!puU}7o z`tHrEuS`o(DQLkC{x?ETkZ`A_HoNFH;dk4VO%zXyMBq@FX$KhY^KyQ97Fz0n9y5H* z0z(2m1uwPKGS6__8I5dbIwV?LcFqpytrk4XX55G&96=0iPR#k zx2htpwJ4o-CXbbQuE#+~O7)0UO)4gHwoNJJ$)6TIv_hz=G%SnN1Gx7u2xT;b! 
z9(Z*~-&#uDmJcQJiblG<|Fr>s;8T$Q|2TfXes1^wVK^MF`+o=t$2i2p-K}Vj|eZ+bXHidw~ zl+L3Chb|H0$viKmUZg(zGAENS8SfdZ$06vR!Hrb3rX&=Rvv2pL5M11-g{E`%-y zvBtES^|@S{d#3{ye#FVKmmb@LZ65y03%eDXhn1YVVbK8#2=)s zz_A(2IN%Jhu_aPiSaXxVaN6l$Knv2SeAY88HgYgw-6{vJ9*aoEhPeXCX26Xs2U_&L zs?DZlD{@I@u_%BRpcQ2RE7NK@jkd7CV=BjAm&CBCU@=B_B~#cH&qQ9SWa%?dn>>Rp zWC9cv_QB`#Qp{`1kGbZ^6~jB~U^(AI`89H#(LOea)JIp;y91EgS6EEIV6hdT-V33^ zdOrX!6EOh$0Z*cU4w3>A#M!*es{(muxdh_HQQwG6fX;y9v8GvD>NV|bi5kL}iUtA> zWSw0=X8-A}DRPPYo8H8t~J9gHY=2J2p4lMhf1ZX76gI;Er%g=;HT%WCzzy z1E6XI?5%R4p}}>#slqJDxqd!!?vV+sqzw-mcUKPauq0wb7I9Y^0g`wP-@)ICsM35& zE*Trhcg(8H*hyJgJ=kAS{ z5idJ&p;5h`D~Zu76={x+RV2K?3BdJ5RbreNS|jN&&BCJy{9UxfDVkoOIpDI92A-(K zJH%`HxyQZ|Q9`{kR8YVQX$ta$gaWJ*EfZ;~eJHc4E6DeQoG+jm>~_AmC1tbgyb(NJ zv$RdqO4un_LT+LxP6&V|%S3Wp>SveuT9U6bUJ*Ux>0#a(v?l&)4e&wvEWc_b8d!yC`HQRID4rS!b zei3EPrj<+}KI^Nz_Sa-i2n`@omP-SFIe1pwVt04grB=gMb82Q%6jh77N@6tO)+E}& z2vR_og;gl@G%}mz?P_%xXj$|L<@{E&UjmLO33JiFNERqoi=Fy5{qM89ATi~@2otBc zQgWUtI}3JBa)eMY0*v0|55|@kLP8WSz&JqWI629Q+y>B*Y0I9y^$LVs51*}Qpo#cv zJ5iDFh=f*HJ8oyToB^N-kk@~#a>&XEvqan5lKQtsSBQOp%8I-| zFDnRhb|<{#z`kjX10GO#xtF9(EoOtDRh6m+>+I1C`(3(J>r^Q~v%cACzeDGJDn7S* zqVEu|mW|P0LHcQI)P_Vb&jj*d2p0xHUx2g82-_w}Ykjwsjv6y;dfVt&J2YpDM`MGI z<8yrjQj~~tNE>R0Z#MCo!fqReK`+4#OUNnOjFONhXymoWmKI79#6f)AnQB$w06Od7 zOfinVmS4198D^a1vk8GK1C?UEEQHenF4rePLdTX>MC+uXb#o z(wU_mG-my`r|~;bdO(^A>UcJL1OfSRT}f{&rFnMUiMo#u1@XQzh<4k z+Ri4r+97o)#em_pGW6Dk?5(K0w*vmxpPIey@4lY;dwU8wPy!-BVvYd83gbsF24j`? z`Dp9;AGD3u>c6A#`HNls_bHzDI{*7)9`xOrNj2bPl{Ib7(Ypy7?3i`L@MK3!js1Vb zBcMCZ|KX=#&HjhOV6?OUr+98T|M`LRVt4wpJN?<6{(Q*k&wHF-><%in9#rfO1+Zav zC_pOsiB-NIRO7yPeftq%s(@L>vTcPRxMWzogFnzS* z(!C2y8;eTeqg0f3<)nK*AN%efK6l;!ga6m^zu-lXiq%1wgDvyKJ=OqR}HP;9DD;OD1ylk$&SH9^bWTsgtyh3>R~ za==%CiemD*lxIsSP$*_1en}ym=4uSnPDQtE?2nZ6nM*cga+JmFT-i(g08N( z=erA$|Dy|$pZtx;r@ayxL57Ir0M;bYwFSdEJJ@hbGSAuzj9r2s`)=gI=DilV8{NXx z&v`1?vcuDFLjQmppAS0G)iv2yj>fp_>Bk z&7qJgLCGicLi$rf~=Q>)Lv<*7gZ=35F&xglY z;lOjY;Ysh7i;9`syX9&kwDt%c!+QSpMHD;ksRcLi!%WVBNe+BaXXn^Ewq6}u`L$F| zAL0mvq4wA7M5fN!An=25cnCZU9`j9T-$28YEPw}mk z^M&r8ci=3{I*+*H`l^tbP-pQumc}NRQc1u<&exC)3AfjwkWWS8^siyIB3Fch#!ld$ zcr0+cgqm*4KEe)7PGuad41oduiQAnTAk&3%&Zq@%ZvMUZ~E6j z(HupqF;Eyd$@!4M86$L90~$%QA9k11=sq#CkCvdJJ$l@D|@VR=jYxp#GYN z&D(JN8N5b$l9%KD6yWj&Pzfd=%p6DU-i$toDF?zl-WVtVPyhgtZOPXF literal 0 HcmV?d00001 diff --git a/files/pi-mpi-minimal.py b/files/pi-mpi-minimal.py deleted file mode 100755 index 407d90f6..00000000 --- a/files/pi-mpi-minimal.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 -import numpy as np -import sys -from mpi4py import MPI - -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x*x + y*y) - count = len(radii[np.where(radii<=1.0)]) - return count - -if __name__ == '__main__': - comm = MPI.COMM_WORLD - cpus = comm.Get_size() - rank = comm.Get_rank() - n_samples = int(sys.argv[1]) - if rank == 0: - partitions = [ int(n_samples / cpus) ] * cpus - counts = [ int(0) ] * cpus - else: - partitions = None - counts = None - partition_item = comm.scatter(partitions, root=0) - count_item = inside_circle(partition_item) - counts = comm.gather(count_item, root=0) - if rank == 0: - my_pi = 4.0 * sum(counts) / sum(partitions) - print(my_pi) diff --git a/files/pi-mpi.py b/files/pi-mpi.py deleted file mode 100755 index e4b2e53f..00000000 --- a/files/pi-mpi.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env python3 - -"""Parallel example code for estimating the value of π. - -We can estimate the value of π by a stochastic algorithm. 
Consider a -circle of radius 1, inside a square that bounds it, with vertices at -(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π, -whereas the area of the square is 4. So, the fraction of the area of the -square which is covered by the circle is π/4. - -A point selected at random uniformly from the square thus has a -probability π/4 of being within the circle. - -We can estimate π by examining a large number of randomly-selected -points from the square, and seeing what fraction of them lie within the -circle. If this fraction is f, then our estimate for π is π ≈ 4f. - -Thanks to symmetry, we can compute points in one quadrant, rather -than within the entire unit square, and arrive at identical results. - -This task lends itself naturally to parallelization -- the task of -selecting a sample point and deciding whether or not it's inside the -circle is independent of all the other samples, so they can be done -simultaneously. We only need to aggregate the data at the end to compute -our fraction f and our estimate for π. -""" - -import numpy as np -import sys -import datetime -from mpi4py import MPI - - -def inside_circle(total_count): - """Single-processor task for a group of samples. - - Generates uniform random x and y arrays of size total_count, on the - interval [0,1), and returns the number of the resulting (x,y) pairs - which lie inside the unit circle. - """ - - host_name = MPI.Get_processor_name() - print("Rank {} generating {:n} samples on host {}.".format( - rank, total_count, host_name)) - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main executable. - - This function runs the 'inside_circle' function with a defined number - of samples. The results are then used to estimate π. - - An estimate of the required memory, elapsed calculation time, and - accuracy of calculating π are also computed. - """ - - # Declare an MPI Communicator for the parallel processes to talk through - comm = MPI.COMM_WORLD - - # Read the number of parallel processes tied into the comm channel - cpus = comm.Get_size() - - # Find out the index ("rank") of *this* process - rank = comm.Get_rank() - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - else: - n_samples = 8738128 # trust me, this number is not random :-) - - if rank == 0: - # Time how long it takes to estimate π. - start_time = datetime.datetime.now() - print("Generating {:n} samples.".format(n_samples)) - # Rank zero builds two arrays with one entry for each rank: - # one for the number of samples they should run, and - # one to store the count info each rank returns. - partitions = [ int(n_samples / cpus) ] * cpus - counts = [ int(0) ] * cpus - else: - partitions = None - counts = None - - # All ranks participate in the "scatter" operation, which assigns - # the local scalar values to their appropriate array components. - # partition_item is the number of samples this rank should generate, - # and count_item is the place to put the number of counts we see. - partition_item = comm.scatter(partitions, root=0) - count_item = comm.scatter(counts, root=0) - - # Each rank locally populates its count_item variable. - count_item = inside_circle(partition_item) - - # All ranks participate in the "gather" operation, which sums the - # rank's count_items into the total "counts". 
- counts = comm.gather(count_item, root=0) - - if rank == 0: - # Only rank zero writes the result, although it's known to all. - my_pi = 4.0 * sum(counts) / n_samples - elapsed_time = (datetime.datetime.now() - start_time).total_seconds() - - # Memory required is dominated by the size of x, y, and radii from - # inside_circle(), calculated in MiB - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**2) - - # accuracy is calculated as a percent difference from a known estimate - # of π. - pi_specific = np.pi - accuracy = 100*(1-my_pi/pi_specific) - - # Uncomment either summary format for verbose or terse output - # summary = "{:d} core(s), {:d} samples, {:f} MiB memory, {:f} seconds, {:f}% error" - summary = "{:d},{:d},{:f},{:f},{:f}" - print(summary.format(cpus, n_samples, memory_required, elapsed_time, - accuracy)) diff --git a/files/pi-serial-minimized.py b/files/pi-serial-minimized.py deleted file mode 100644 index acc99d31..00000000 --- a/files/pi-serial-minimized.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -import numpy as np -import sys - -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x*x + y*y) - count = len(radii[np.where(radii<=1.0)]) - return count - -if __name__ == '__main__': - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - print(my_pi) diff --git a/files/pi-serial.py b/files/pi-serial.py deleted file mode 100755 index c5289c84..00000000 --- a/files/pi-serial.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 - -"""Serial example code for estimating the value of π. - -We can estimate the value of π by a stochastic algorithm. Consider a -circle of radius 1, inside a square that bounds it, with vertices at -(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π, -whereas the area of the square is 4. So, the fraction of the area of the -square which is covered by the circle is π/4. - -A point selected at random uniformly from the square thus has a -probability π/4 of being within the circle. - -We can estimate π by examining a large number of randomly-selected -points from the square, and seeing what fraction of them lie within the -circle. If this fraction is f, then our estimate for π is π ≈ 4f. - -Thanks to symmetry, we can compute points in one quadrant, rather -than within the entire unit square, and arrive at identical results. -""" - -import numpy as np -import sys -import datetime - - -def inside_circle(total_count): - """Single-processor task for a group of samples. - - Generates uniform random x and y arrays of size total_count, on the - interval [0,1), and returns the number of the resulting (x,y) pairs - which lie inside the unit circle. - """ - - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main executable. - - This function runs the 'inside_circle' function with a defined number - of samples. The results are then used to estimate π. - - An estimate of the required memory, elapsed calculation time, and - accuracy of calculating π are also computed. - """ - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - else: - n_samples = 8738128 # trust me, this number is not random :-) - - # Time how long it takes to estimate π. 
- start_time = datetime.datetime.now() - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - elapsed_time = (datetime.datetime.now() - start_time).total_seconds() - - # Memory required is dominated by the size of x, y, and radii from - # inside_circle(), calculated in MiB - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**2) - - # accuracy is calculated as a percent difference from a known estimate - # of π. - pi_specific = np.pi - accuracy = 100*(1-my_pi/pi_specific) - - # Uncomment either summary format for verbose or terse output - # summary = "{:d} core(s), {:d} samples, {:f} MiB memory, {:f} seconds, {:f}% error" - summary = "{:d},{:d},{:f},{:f},{:f}" - print(summary.format(1, n_samples, memory_required, elapsed_time, - accuracy)) diff --git a/files/pi.py b/files/pi.py deleted file mode 100755 index bac13585..00000000 --- a/files/pi.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python3 - -"""Parallel example code for estimating the value of π. - -We can estimate the value of π by a stochastic algorithm. Consider a -circle of radius 1, inside a square that bounds it, with vertices at -(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π, -whereas the area of the square is 4. So, the fraction of the area of the -square which is covered by the circle is π/4. - -A point selected at random uniformly from the square thus has a -probability π/4 of being within the circle. - -We can estimate π by examining a large number of randomly-selected -points from the square, and seeing what fraction of them lie within the -circle. If this fraction is f, then our estimate for π is π ≈ 4f. - -This task lends itself naturally to parallelization -- the task of -selecting a sample point and deciding whether or not it's inside the -circle is independent of all the other samples, so they can be done -simultaneously. We only need to aggregate the data at the end to compute -our fraction f and our estimate for π. - -Thanks to symmetry, we can compute points in one quadrant, rather -than within the entire unit square, and arrive at identical results. -""" - -import locale as l10n -from mpi4py import MPI -import numpy as np -import sys - -l10n.setlocale(l10n.LC_ALL, "") - -# Declare an MPI Communicator for the parallel processes to talk through -comm = MPI.COMM_WORLD - -# Read the number of parallel processes tied into the comm channel -cpus = comm.Get_size() - - -# Find out the index ("rank") of *this* process -rank = comm.Get_rank() - -np.random.seed(14159265 + rank) - -def inside_circle(total_count): - """Single-processor task for a group of samples. - - Generates uniform random x and y arrays of size total_count, on the - interval [0,1), and returns the number of the resulting (x,y) pairs - which lie inside the unit circle. - """ - host_name = MPI.Get_processor_name() - print("Rank {} generating {:n} samples on host {}.".format( - rank, total_count, host_name)) - - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main MPI executable. - - This conditional is entered as many times as there are MPI processes. - - Each process knows its index, called 'rank', and the number of - ranks, called 'cpus', from the MPI calls at the top of the module. 
- - Rank 0 divides the data arrays among the ranks (including itself), - then each rank independently runs the 'inside_circle' function with - its share of the samples. The disparate results are then aggregated - via the 'gather' operation, and then the estimate for π is - computed. - - An estimate of the required memory is also computed. - """ - - n_samples = 8738128 # trust me, this number is not random :-) - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - - if rank == 0: - print("Generating {:n} samples.".format(n_samples)) - # Rank zero builds two arrays with one entry for each rank: - # one for the number of samples they should run, and - # one to store the count info each rank returns. - partitions = [ int(n_samples / cpus) for item in range(cpus)] - counts = [ int(0) ] * cpus - else: - partitions = None - counts = None - - # All ranks participate in the "scatter" operation, which assigns - # the local scalar values to their appropriate array components. - # partition_item is the number of samples this rank should generate, - # and count_item is the place to put the number of counts we see. - - partition_item = comm.scatter(partitions, root=0) - count_item = comm.scatter(counts, root=0) - - # Each rank locally populates its count_item variable. - - count_item = inside_circle(partition_item) - - # All ranks participate in the "gather" operation, which creates an array - # of all the rank's count_items on rank zero. - - counts = comm.gather(count_item, root=0) - - if rank == 0: - # Only rank zero has the entire array of results, so only it can - # compute and print the final answer. - my_pi = 4.0 * sum(counts) / n_samples - size_of_float = np.dtype(np.float64).itemsize - run_type = "serial" if cpus == 1 else "mpi" - print("[{:>8} version] required memory {:.1f} MB".format( - run_type, 3 * n_samples * size_of_float / (1024**2))) - print("[using {:>3} cores ] π is {:n} from {:n} samples".format( - cpus, my_pi, n_samples)) diff --git a/files/simple-pi-illustration.py b/files/simple-pi-illustration.py deleted file mode 100644 index e8ba7548..00000000 --- a/files/simple-pi-illustration.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- - -# This program generates a picture of the algorithm used to estimate the value -# of π by random sampling. 
- -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.patches as pltpatches - -np.random.seed(14159625) - -n = 128 -x = np.random.uniform(size=n) -y = np.random.uniform(size=n) - -with plt.xkcd(): - - plt.figure(figsize=(5,5)) - plt.axis("equal") - plt.xlim([-0.0125, 1.0125]) - plt.ylim([-0.0125, 1.0125]) - - for d in ["left", "top", "bottom", "right"]: - plt.gca().spines[d].set_visible(False) - - plt.xlabel("x", position=(0.8, 0)) - plt.ylabel("y", rotation=0, position=(0, 0.8)) - - plt.xticks([0, 0.5, 1], ["0", "1/2", "1"]) - plt.yticks([0, 0.5, 1], ["0", "1/2", "1"]) - - plt.scatter(x, y, s=8, c=np.random.uniform(size=(n,3))) - - circ = pltpatches.Arc((0, 0), width=2, height=2, angle=0, theta1=0, theta2=90, color="black", linewidth=3) - plt.gca().add_artist(circ) - squa = plt.Rectangle((0, 0), width=1, height=1, fill=None, linewidth=3) - plt.gca().add_artist(squa) - - plt.savefig("pi.png", bbox_inches="tight", dpi=400) From 2155be809ed9900de3b1b07237aa0de23bd5dc5f Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:53:31 -0500 Subject: [PATCH 11/25] Ask learners to think about why a transfer may not work Co-authored-by: ocaisa --- _episodes/15-transferring-files.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 055dc9b2..6a02994f 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -119,7 +119,7 @@ Upload the lesson material to your remote home directory like so: > > Did it work? If not, what does the terminal output tell you about what > happened? -{: .discussion} +{: .challenge} ## Transferring a Directory From b2baaa7e1eee83ea5f865ef8844376a9294853a0 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:55:14 -0500 Subject: [PATCH 12/25] Expand on the remote_destination for scp Co-authored-by: ocaisa --- _episodes/15-transferring-files.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 6a02994f..5fab2ad5 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -84,8 +84,14 @@ To _upload to_ another computer: {: .language-bash} Note that everything after the `:` is relative to our home directory on the -remote computer. We can leave it at that if we don't have a more specific -destination in mind. +remote computer. If we don't have a specific destination in mind we can +omit the `remote_destination` and the file will be copied to our home +directory on the remote computer (with its original name). If we include +a `remote_destination` we should note that `scp` interprets this the same +way `cp` does: if it exists and is a folder, the file is copied inside the +folder; if it exists and is a file, the file is overwritten with the +contents of `local_file`; if it does not exist, it is assumed to be a +destination filename for `local_file`. 
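+
+As a sketch of the three cases (the folder `backups/` and the filename
+`lesson.tar.gz` here are hypothetical -- substitute paths that exist on
+your own account):
+
+```
+{{ site.local.prompt }} scp amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
+{{ site.local.prompt }} scp amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:backups/
+{{ site.local.prompt }} scp amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:backups/lesson.tar.gz
+```
+{: .language-bash}
+
+The first command copies to your remote home directory under the original
+name, the second into the existing folder `backups`, and the third writes
+(or overwrites) `backups/lesson.tar.gz`.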
Upload the lesson material to your remote home directory like so:

From 44dee4fccd94f5887119af5dd238fee35069e789 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Tue, 31 Jan 2023 16:56:54 -0500
Subject: [PATCH 13/25] Clarify that remote_path may not exist

Co-authored-by: ocaisa
---
 _episodes/15-transferring-files.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index 5fab2ad5..225d8974 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -79,7 +79,7 @@ mechanism.
 To _upload to_ another computer:

 ```
-{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_path
+{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_destination
 ```
 {: .language-bash}

From 4e305bef8f1baa4056e47ff1fad3991e2f717cb5 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Tue, 31 Jan 2023 17:09:48 -0500
Subject: [PATCH 14/25] discuss the tarball before transferring it

---
 _episodes/15-transferring-files.md | 374 ++++++++++++++----------------
 1 file changed, 187 insertions(+), 187 deletions(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index 055dc9b2..795806c8 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -68,6 +68,189 @@ After downloading the file, use `ls` to see it in your working directory:
 ```
 {: .language-bash}

+## Archiving Files
+
+One of the biggest challenges we often face when transferring data between
+remote HPC systems is that of large numbers of files. There is an overhead to
+transferring each individual file and when we are transferring large numbers of
+files these overheads combine to slow down our transfers to a large degree.
+
+The solution to this problem is to _archive_ multiple files into smaller
+numbers of larger files before we transfer the data to improve our transfer
+efficiency. Sometimes we will combine archiving with _compression_ to reduce
+the amount of data we have to transfer and so speed up the transfer.
+
+The most common archiving command you will use on a (Linux) HPC cluster is
+`tar`. `tar` can be used to combine files into a single archive file and,
+optionally, compress it.
+
+Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`.
+The "gz" part stands for _gzip_, which is a compression library.
+This kind of file can usually be interpreted by reading its name:
+it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b,"
+wrapped up all its contents in a single file with `tar`,
+then compressed that archive with `gzip` to save space.
+Let's check using `tar` with the `-t` flag, which prints the "**t**able of
+contents" without unpacking the file, specified by `-f <filename>`,
+on the remote computer.
+Note that you can concatenate the two flags, instead of writing `-t -f` separately.
+ +``` +{{ site.local.prompt }} tar -tf amdahl.tar.gz +hpc-carpentry-amdahl-46c9b4b/ +hpc-carpentry-amdahl-46c9b4b/.github/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +hpc-carpentry-amdahl-46c9b4b/.gitignore +hpc-carpentry-amdahl-46c9b4b/LICENSE +hpc-carpentry-amdahl-46c9b4b/README.md +hpc-carpentry-amdahl-46c9b4b/amdahl/ +hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +hpc-carpentry-amdahl-46c9b4b/requirements.txt +hpc-carpentry-amdahl-46c9b4b/setup.py +``` +{: .language-bash} + +This shows a folder which contains a few files. Let's see about that +compression, using `du` for "**d**isk **u**sage". + +``` +{{ site.local.prompt }} du -sh amdahl.tar.gz +8.0K amdahl.tar.gz +``` +{: .language-bash} + +> ## Files Occupy at Least One "Block" +> +> If the filesystem block size is larger than 3.4 KB, you'll see a larger +> number: files cannot be smaller than one block. +> You can use the `--apparent-size` flag to see the exact size, although the +> unoccupied space in that filesystem block can't be used for anything else. +{: .callout} + +Now let's unpack the archive. We'll run `tar` with a few common flags: + +* `-x` to e**x**tract the archive +* `-v` for **v**erbose output +* `-z` for g**z**ip compression +* `-f «tarball»` for the file to be unpacked + +The folder inside has an unfortunate name, so we'll change that as well using + +* `-C «folder»` to **c**hange directories before extracting + (note that the new directory must exist!) +* `--strip-components=«n»` to remove $n$ levels of depth from the extracted + file hierarchy + +> ## Extract the Archive +> +> Using the flags above, unpack the source code tarball into a new +> directory named "amdahl" using `tar`. +> +> ``` +> {{ site.local.prompt }} mkdir amdahl +> {{ site.local.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 +> ``` +> {: .language-bash} +> +> ``` +> hpc-carpentry-amdahl-46c9b4b/ +> hpc-carpentry-amdahl-46c9b4b/.github/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +> hpc-carpentry-amdahl-46c9b4b/.gitignore +> hpc-carpentry-amdahl-46c9b4b/LICENSE +> hpc-carpentry-amdahl-46c9b4b/README.md +> hpc-carpentry-amdahl-46c9b4b/amdahl/ +> hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +> hpc-carpentry-amdahl-46c9b4b/requirements.txt +> hpc-carpentry-amdahl-46c9b4b/setup.py +> ``` +> {: .output} +> +> Note that we did not need to type out `-x -v -z -f`, thanks to flag +> concatenation, though the command works identically either way -- +> so long as the concatenated list ends with `f`, because the next string +> must specify the name of the file to extract. +> +> We couldn't concatenate `-C` because the next string must name the directory. +> +> Long options (`--strip-components`) also can't be concatenated. 
+> +> Since order doesn't generally matter, an equivalent command would be +> ``` +> {{ site.local.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 +> ``` +> {: .language-bash} +{: .discussion} + +Check the size of the extracted directory, and compare to the compressed +file size: + +``` +{{ site.local.prompt }} du -sh amdahl +48K amdahl +``` +{: .language-bash} + +Text files (including Python source code) compress nicely: +the "tarball" is one-sixth the total size of the raw data! + +If you want to reverse the process -- compressing raw data instead of +extracting it -- set a `c` flag instead of `x`, set the archive filename, +then provide a directory to compress: + +``` +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl +``` +{: .language-bash} +``` +amdahl/ +amdahl/.github/ +amdahl/.github/workflows/ +amdahl/.github/workflows/python-publish.yml +amdahl/.gitignore +amdahl/LICENSE +amdahl/README.md +amdahl/amdahl/ +amdahl/amdahl/__init__.py +amdahl/amdahl/__main__.py +amdahl/amdahl/amdahl.py +amdahl/requirements.txt +amdahl/setup.py +``` +{: .output} + +If you give `amdahl.tar.gz` as the filename in the above command, `tar` will +update the existing tarball with any changes you made to the files, instead of +recompressing everything. + +> ## Working with Windows +> +> When you transfer text files from a Windows system to a Unix system (Mac, +> Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files +> slightly different than Unix, and adds an extra character to every line. +> +> On a Unix system, every line in a file ends with a `\n` (newline). On +> Windows, every line in a file ends with a `\r\n` (carriage return + newline). +> This causes problems sometimes. +> +> Though most modern programming languages and software handles this correctly, +> in some rare instances, you may run into an issue. The solution is to convert +> a file from Windows to Unix encoding with the `dos2unix` command. +> +> You can identify if a file has Windows line endings with `cat -A filename`. A +> file with Windows line endings will have `^M$` at the end of every line. A +> file with Unix line endings will have `$` at the end of a line. +> +> To convert the file, just run `dos2unix filename`. (Conversely, to convert +> back to Windows format, you can run `unix2dos filename`.) +{: .callout} + ## Transferring Single Files and Folders With `scp` To copy a single file to or from the cluster, we can use `scp` ("secure copy"). @@ -123,16 +306,13 @@ Upload the lesson material to your remote home directory like so: ## Transferring a Directory -If you went ahead and extracted the tarball, don't worry! `scp` can handle -entire directories as well as individual files. - -To copy a whole directory, we add the `-r` flag for "**r**ecursive": copy the -item specified, and every item below it, and every item below those... until it -reaches the bottom of the directory tree rooted at the folder name you +To transfer an entire directory, we add the `-r` flag for "**r**ecursive": copy +the item specified, and every item below it, and every item below those... +until it reaches the bottom of the directory tree rooted at the folder name you provided. 
```
-{{ site.local.prompt }} scp -r hpc-carpentry-amdahl-46c9b4b {{ site.remote.user }}@{{ site.remote.login }}:~/
+{{ site.local.prompt }} scp -r amdahl {{ site.remote.user }}@{{ site.remote.login }}:~/
```
{: .language-bash}

@@ -270,186 +450,6 @@ will be more efficient than using FileZilla (or related applications) that
 would copy from the source to your local machine, then to the destination
 machine.

-## Archiving Files
-
-One of the biggest challenges we often face when transferring data between
-remote HPC systems is that of large numbers of files. There is an overhead to
-transferring each individual file and when we are transferring large numbers of
-files these overheads combine to slow down our transfers to a large degree.
-
-The solution to this problem is to _archive_ multiple files into smaller
-numbers of larger files before we transfer the data to improve our transfer
-efficiency. Sometimes we will combine archiving with _compression_ to reduce
-the amount of data we have to transfer and so speed up the transfer.
-
-The most common archiving command you will use on a (Linux) HPC cluster is
-`tar`. `tar` can be used to combine files into a single archive file and,
-optionally, compress it.
-
-Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`.
-The "gz" part stands for _gzip_, which is a compression library.
-This kind of file can usually be interpreted by reading its name:
-it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b,"
-wrapped up all its contents in a single file with `tar`,
-then compressed that archive with `gzip` to save space.
-Let's check using `tar` with the `-t` flag, which prints the "**t**able of
-contents" without unpacking the file, specified by `-f <filename>`,
-on the remote computer.
-Note that you can concatenate the two flags, instead of writing `-t -f` separately.
-
-```
-{{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }}
-{{ site.remote.prompt }} tar -tf amdahl.tar.gz
-hpc-carpentry-amdahl-46c9b4b/
-hpc-carpentry-amdahl-46c9b4b/.github/
-hpc-carpentry-amdahl-46c9b4b/.github/workflows/
-hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml
-hpc-carpentry-amdahl-46c9b4b/.gitignore
-hpc-carpentry-amdahl-46c9b4b/LICENSE
-hpc-carpentry-amdahl-46c9b4b/README.md
-hpc-carpentry-amdahl-46c9b4b/amdahl/
-hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py
-hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py
-hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py
-hpc-carpentry-amdahl-46c9b4b/requirements.txt
-hpc-carpentry-amdahl-46c9b4b/setup.py
-```
-{: .language-bash}
-
-This shows a folder which contains a few files. Let's see about that
-compression, using `du` for "**d**isk **u**sage".
-
-```
-{{ site.remote.prompt }} du -sh amdahl.tar.gz
-8.0K amdahl.tar.gz
-```
-{: .language-bash}
-
-> ## Files Occupy at Least One "Block"
->
-> If the filesystem block size is larger than 3.4 KB, you'll see a larger
-> number: files cannot be smaller than one block.
-> You can use the `--apparent-size` flag to see the exact size, although the
-> unoccupied space in that filesystem block can't be used for anything else.
-{: .callout}
-
-Now let's unpack the archive. 
We'll run `tar` with a few common flags: - -* `-x` to e**x**tract the archive -* `-v` for **v**erbose output -* `-z` for g**z**ip compression -* `-f «tarball»` for the file to be unpacked - -The folder inside has an unfortunate name, so we'll change that as well using - -* `-C «folder»` to **c**hange directories before extracting - (note that the new directory must exist!) -* `--strip-components=«n»` to remove $n$ levels of depth from the extracted - file hierarchy - -> ## Extract the Archive -> -> Using the flags above, unpack the source code tarball into a new -> directory named "amdahl" using `tar`. -> -> ``` -> {{ site.remote.prompt }} mkdir amdahl -> {{ site.remote.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 -> ``` -> {: .language-bash} -> -> ``` -> hpc-carpentry-amdahl-46c9b4b/ -> hpc-carpentry-amdahl-46c9b4b/.github/ -> hpc-carpentry-amdahl-46c9b4b/.github/workflows/ -> hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml -> hpc-carpentry-amdahl-46c9b4b/.gitignore -> hpc-carpentry-amdahl-46c9b4b/LICENSE -> hpc-carpentry-amdahl-46c9b4b/README.md -> hpc-carpentry-amdahl-46c9b4b/amdahl/ -> hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py -> hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py -> hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py -> hpc-carpentry-amdahl-46c9b4b/requirements.txt -> hpc-carpentry-amdahl-46c9b4b/setup.py -> ``` -> {: .output} -> -> Note that we did not need to type out `-x -v -z -f`, thanks to flag -> concatenation, though the command works identically either way -- -> so long as the concatenated list ends with `f`, because the next string -> must specify the name of the file to extract. -> -> We couldn't concatenate `-C` because the next string must name the directory. -> -> Long options (`--strip-components`) also can't be concatenated. -> -> Since order doesn't generally matter, an equivalent command would be -> ``` -> {{ site.remote.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 -> ``` -> {: .language-bash} -{: .discussion} - -Check the size of the extracted directory, and compare to the compressed -file size: - -``` -{{ site.remote.prompt }} du -sh amdahl -48K amdahl -``` -{: .language-bash} - -Text files (including Python source code) compress nicely: -the "tarball" is one-sixth the total size of the raw data! - -If you want to reverse the process -- compressing raw data instead of -extracting it -- set a `c` flag instead of `x`, set the archive filename, -then provide a directory to compress: - -``` -{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl -``` -{: .language-bash} -``` -amdahl/ -amdahl/.github/ -amdahl/.github/workflows/ -amdahl/.github/workflows/python-publish.yml -amdahl/.gitignore -amdahl/LICENSE -amdahl/README.md -amdahl/amdahl/ -amdahl/amdahl/__init__.py -amdahl/amdahl/__main__.py -amdahl/amdahl/amdahl.py -amdahl/requirements.txt -amdahl/setup.py -``` -{: .output} - -> ## Working with Windows -> -> When you transfer text files from a Windows system to a Unix system (Mac, -> Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files -> slightly different than Unix, and adds an extra character to every line. -> -> On a Unix system, every line in a file ends with a `\n` (newline). On -> Windows, every line in a file ends with a `\r\n` (carriage return + newline). -> This causes problems sometimes. -> -> Though most modern programming languages and software handles this correctly, -> in some rare instances, you may run into an issue. 
The solution is to convert -> a file from Windows to Unix encoding with the `dos2unix` command. -> -> You can identify if a file has Windows line endings with `cat -A filename`. A -> file with Windows line endings will have `^M$` at the end of every line. A -> file with Unix line endings will have `$` at the end of a line. -> -> To convert the file, just run `dos2unix filename`. (Conversely, to convert -> back to Windows format, you can run `unix2dos filename`.) -{: .callout} - {% include links.md %} [rsync]: https://rsync.samba.org/ From de82b5d4d407e0bcebd992065c6b7ff248d3c68e Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:53:31 -0500 Subject: [PATCH 15/25] Ask learners to think about why a transfer may not work Co-authored-by: ocaisa --- _episodes/15-transferring-files.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 055dc9b2..6a02994f 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -119,7 +119,7 @@ Upload the lesson material to your remote home directory like so: > > Did it work? If not, what does the terminal output tell you about what > happened? -{: .discussion} +{: .challenge} ## Transferring a Directory From 4181a65a269b0a99b87bb9c5d6fbc0ed3e0ed32e Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:55:14 -0500 Subject: [PATCH 16/25] Expand on the remote_destination for scp Co-authored-by: ocaisa --- _episodes/15-transferring-files.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 6a02994f..5fab2ad5 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -84,8 +84,14 @@ To _upload to_ another computer: {: .language-bash} Note that everything after the `:` is relative to our home directory on the -remote computer. We can leave it at that if we don't have a more specific -destination in mind. +remote computer. If we don't have a specific destination in mind we can +omit the `remote_destination` and the file will be copied to our home +directory on the remote computer (with its original name). If we include +a `remote_destination` we should note that `scp` interprets this the same +way `cp` does: if it exists and is a folder, the file is copied inside the +folder; if it exists and is a file, the file is overwritten with the +contents of `local_file`; if it does not exist, it is assumed to be a +destination filename for `local_file`. Upload the lesson material to your remote home directory like so: From 5eafc008f2b1f1f74cb53a99008377c8c1e9d5c5 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Tue, 31 Jan 2023 16:56:54 -0500 Subject: [PATCH 17/25] Clarify that remote_path may not exist Co-authored-by: ocaisa --- _episodes/15-transferring-files.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 5fab2ad5..225d8974 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -79,7 +79,7 @@ mechanism. 
To _upload to_ another computer:

```
-{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_path
+{{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_destination
```
{: .language-bash}

From 2851268ed6d1049e3219fbfbe5c64ddf46752634 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Tue, 31 Jan 2023 16:45:13 -0500
Subject: [PATCH 18/25] remove unused tarball, consolidate pi code in new tarball

---
 files/hpc-intro-code.tar.gz     | Bin 3391 -> 0 bytes
 files/hpc-intro-pi-code.tar.gz  | Bin 0 -> 3492 bytes
 files/pi-mpi-minimal.py         |  29 --------
 files/pi-mpi.py                 | 125 ------------------------------
 files/pi-serial-minimized.py    |  16 ----
 files/pi-serial.py              |  80 --------------------
 files/pi.py                     | 127 --------------------------------
 files/simple-pi-illustration.py |  39 ----------
 8 files changed, 416 deletions(-)
 delete mode 100644 files/hpc-intro-code.tar.gz
 create mode 100644 files/hpc-intro-pi-code.tar.gz
 delete mode 100755 files/pi-mpi-minimal.py
 delete mode 100755 files/pi-mpi.py
 delete mode 100644 files/pi-serial-minimized.py
 delete mode 100755 files/pi-serial.py
 delete mode 100755 files/pi.py
 delete mode 100644 files/simple-pi-illustration.py

diff --git a/files/hpc-intro-code.tar.gz b/files/hpc-intro-code.tar.gz
deleted file mode 100644
index 1b2e3d9e60c002bbe0492992be0c15fe0eb5fb03..0000000000000000000000000000000000000000
Binary files a/files/hpc-intro-code.tar.gz and /dev/null differ
diff --git a/files/hpc-intro-pi-code.tar.gz b/files/hpc-intro-pi-code.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..33b43cb9a99622bccd7e79ea5d34c87663460830
Binary files /dev/null and b/files/hpc-intro-pi-code.tar.gz differ
diff --git a/files/pi-mpi-minimal.py b/files/pi-mpi-minimal.py
deleted file mode 100755
index 407d90f6..00000000
--- a/files/pi-mpi-minimal.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-import numpy as np
-import sys
-from mpi4py import MPI
-
-def inside_circle(total_count):
-    x = np.random.uniform(size=total_count)
-    y = np.random.uniform(size=total_count)
-    radii = np.sqrt(x*x + y*y)
-    count = len(radii[np.where(radii<=1.0)])
-    return count
-
-if __name__ == '__main__':
-    comm = MPI.COMM_WORLD
-    cpus = comm.Get_size()
-    rank = comm.Get_rank()
-    n_samples = int(sys.argv[1])
-    if rank == 0:
-        partitions = [ int(n_samples / cpus) ] * cpus
-        counts = [ int(0) ] * cpus
-    else:
-        partitions = None
-        counts = None
-    partition_item = comm.scatter(partitions, root=0)
-    count_item = inside_circle(partition_item)
-    counts = comm.gather(count_item, root=0)
-    if rank == 0:
-        my_pi = 4.0 * sum(counts) / sum(partitions)
-        print(my_pi)
diff --git a/files/pi-mpi.py b/files/pi-mpi.py
deleted file mode 100755
index e4b2e53f..00000000
--- a/files/pi-mpi.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python3
-
-"""Parallel example code for estimating the value of π.
-
-We can estimate the value of π by a stochastic algorithm. Consider a
-circle of radius 1, inside a square that bounds it, with vertices at
-(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π,
-whereas the area of the square is 4. So, the fraction of the area of the
-square which is covered by the circle is π/4.
-
-A point selected at random uniformly from the square thus has a
-probability π/4 of being within the circle.
-
-We can estimate π by examining a large number of randomly-selected
-points from the square, and seeing what fraction of them lie within the
-circle. If this fraction is f, then our estimate for π is π ≈ 4f.
-
-Thanks to symmetry, we can compute points in one quadrant, rather
-than within the entire unit square, and arrive at identical results.
-
-This task lends itself naturally to parallelization -- the task of
-selecting a sample point and deciding whether or not it's inside the
-circle is independent of all the other samples, so they can be done
-simultaneously. We only need to aggregate the data at the end to compute
-our fraction f and our estimate for π.
-"""
-
-import numpy as np
-import sys
-import datetime
-from mpi4py import MPI
-
-
-def inside_circle(total_count):
-    """Single-processor task for a group of samples.
-
-    Generates uniform random x and y arrays of size total_count, on the
-    interval [0,1), and returns the number of the resulting (x,y) pairs
-    which lie inside the unit circle. 
- """ - - host_name = MPI.Get_processor_name() - print("Rank {} generating {:n} samples on host {}.".format( - rank, total_count, host_name)) - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main executable. - - This function runs the 'inside_circle' function with a defined number - of samples. The results are then used to estimate π. - - An estimate of the required memory, elapsed calculation time, and - accuracy of calculating π are also computed. - """ - - # Declare an MPI Communicator for the parallel processes to talk through - comm = MPI.COMM_WORLD - - # Read the number of parallel processes tied into the comm channel - cpus = comm.Get_size() - - # Find out the index ("rank") of *this* process - rank = comm.Get_rank() - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - else: - n_samples = 8738128 # trust me, this number is not random :-) - - if rank == 0: - # Time how long it takes to estimate π. - start_time = datetime.datetime.now() - print("Generating {:n} samples.".format(n_samples)) - # Rank zero builds two arrays with one entry for each rank: - # one for the number of samples they should run, and - # one to store the count info each rank returns. - partitions = [ int(n_samples / cpus) ] * cpus - counts = [ int(0) ] * cpus - else: - partitions = None - counts = None - - # All ranks participate in the "scatter" operation, which assigns - # the local scalar values to their appropriate array components. - # partition_item is the number of samples this rank should generate, - # and count_item is the place to put the number of counts we see. - partition_item = comm.scatter(partitions, root=0) - count_item = comm.scatter(counts, root=0) - - # Each rank locally populates its count_item variable. - count_item = inside_circle(partition_item) - - # All ranks participate in the "gather" operation, which sums the - # rank's count_items into the total "counts". - counts = comm.gather(count_item, root=0) - - if rank == 0: - # Only rank zero writes the result, although it's known to all. - my_pi = 4.0 * sum(counts) / n_samples - elapsed_time = (datetime.datetime.now() - start_time).total_seconds() - - # Memory required is dominated by the size of x, y, and radii from - # inside_circle(), calculated in MiB - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**2) - - # accuracy is calculated as a percent difference from a known estimate - # of π. 
- pi_specific = np.pi - accuracy = 100*(1-my_pi/pi_specific) - - # Uncomment either summary format for verbose or terse output - # summary = "{:d} core(s), {:d} samples, {:f} MiB memory, {:f} seconds, {:f}% error" - summary = "{:d},{:d},{:f},{:f},{:f}" - print(summary.format(cpus, n_samples, memory_required, elapsed_time, - accuracy)) diff --git a/files/pi-serial-minimized.py b/files/pi-serial-minimized.py deleted file mode 100644 index acc99d31..00000000 --- a/files/pi-serial-minimized.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -import numpy as np -import sys - -def inside_circle(total_count): - x = np.random.uniform(size=total_count) - y = np.random.uniform(size=total_count) - radii = np.sqrt(x*x + y*y) - count = len(radii[np.where(radii<=1.0)]) - return count - -if __name__ == '__main__': - n_samples = int(sys.argv[1]) - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - print(my_pi) diff --git a/files/pi-serial.py b/files/pi-serial.py deleted file mode 100755 index c5289c84..00000000 --- a/files/pi-serial.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 - -"""Serial example code for estimating the value of π. - -We can estimate the value of π by a stochastic algorithm. Consider a -circle of radius 1, inside a square that bounds it, with vertices at -(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π, -whereas the area of the square is 4. So, the fraction of the area of the -square which is covered by the circle is π/4. - -A point selected at random uniformly from the square thus has a -probability π/4 of being within the circle. - -We can estimate π by examining a large number of randomly-selected -points from the square, and seeing what fraction of them lie within the -circle. If this fraction is f, then our estimate for π is π ≈ 4f. - -Thanks to symmetry, we can compute points in one quadrant, rather -than within the entire unit square, and arrive at identical results. -""" - -import numpy as np -import sys -import datetime - - -def inside_circle(total_count): - """Single-processor task for a group of samples. - - Generates uniform random x and y arrays of size total_count, on the - interval [0,1), and returns the number of the resulting (x,y) pairs - which lie inside the unit circle. - """ - - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main executable. - - This function runs the 'inside_circle' function with a defined number - of samples. The results are then used to estimate π. - - An estimate of the required memory, elapsed calculation time, and - accuracy of calculating π are also computed. - """ - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - else: - n_samples = 8738128 # trust me, this number is not random :-) - - # Time how long it takes to estimate π. - start_time = datetime.datetime.now() - counts = inside_circle(n_samples) - my_pi = 4.0 * counts / n_samples - elapsed_time = (datetime.datetime.now() - start_time).total_seconds() - - # Memory required is dominated by the size of x, y, and radii from - # inside_circle(), calculated in MiB - size_of_float = np.dtype(np.float64).itemsize - memory_required = 3 * n_samples * size_of_float / (1024**2) - - # accuracy is calculated as a percent difference from a known estimate - # of π. 
- pi_specific = np.pi - accuracy = 100*(1-my_pi/pi_specific) - - # Uncomment either summary format for verbose or terse output - # summary = "{:d} core(s), {:d} samples, {:f} MiB memory, {:f} seconds, {:f}% error" - summary = "{:d},{:d},{:f},{:f},{:f}" - print(summary.format(1, n_samples, memory_required, elapsed_time, - accuracy)) diff --git a/files/pi.py b/files/pi.py deleted file mode 100755 index bac13585..00000000 --- a/files/pi.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python3 - -"""Parallel example code for estimating the value of π. - -We can estimate the value of π by a stochastic algorithm. Consider a -circle of radius 1, inside a square that bounds it, with vertices at -(1,1), (1,-1), (-1,-1), and (-1,1). The area of the circle is just π, -whereas the area of the square is 4. So, the fraction of the area of the -square which is covered by the circle is π/4. - -A point selected at random uniformly from the square thus has a -probability π/4 of being within the circle. - -We can estimate π by examining a large number of randomly-selected -points from the square, and seeing what fraction of them lie within the -circle. If this fraction is f, then our estimate for π is π ≈ 4f. - -This task lends itself naturally to parallelization -- the task of -selecting a sample point and deciding whether or not it's inside the -circle is independent of all the other samples, so they can be done -simultaneously. We only need to aggregate the data at the end to compute -our fraction f and our estimate for π. - -Thanks to symmetry, we can compute points in one quadrant, rather -than within the entire unit square, and arrive at identical results. -""" - -import locale as l10n -from mpi4py import MPI -import numpy as np -import sys - -l10n.setlocale(l10n.LC_ALL, "") - -# Declare an MPI Communicator for the parallel processes to talk through -comm = MPI.COMM_WORLD - -# Read the number of parallel processes tied into the comm channel -cpus = comm.Get_size() - - -# Find out the index ("rank") of *this* process -rank = comm.Get_rank() - -np.random.seed(14159265 + rank) - -def inside_circle(total_count): - """Single-processor task for a group of samples. - - Generates uniform random x and y arrays of size total_count, on the - interval [0,1), and returns the number of the resulting (x,y) pairs - which lie inside the unit circle. - """ - host_name = MPI.Get_processor_name() - print("Rank {} generating {:n} samples on host {}.".format( - rank, total_count, host_name)) - - x = np.float64(np.random.uniform(size=total_count)) - y = np.float64(np.random.uniform(size=total_count)) - - radii = np.sqrt(x*x + y*y) - - count = len(radii[np.where(radii<=1.0)]) - - return count - - -if __name__ == '__main__': - """Main MPI executable. - - This conditional is entered as many times as there are MPI processes. - - Each process knows its index, called 'rank', and the number of - ranks, called 'cpus', from the MPI calls at the top of the module. - - Rank 0 divides the data arrays among the ranks (including itself), - then each rank independently runs the 'inside_circle' function with - its share of the samples. The disparate results are then aggregated - via the 'gather' operation, and then the estimate for π is - computed. - - An estimate of the required memory is also computed. 
- """ - - n_samples = 8738128 # trust me, this number is not random :-) - - if len(sys.argv) > 1: - n_samples = int(sys.argv[1]) - - if rank == 0: - print("Generating {:n} samples.".format(n_samples)) - # Rank zero builds two arrays with one entry for each rank: - # one for the number of samples they should run, and - # one to store the count info each rank returns. - partitions = [ int(n_samples / cpus) for item in range(cpus)] - counts = [ int(0) ] * cpus - else: - partitions = None - counts = None - - # All ranks participate in the "scatter" operation, which assigns - # the local scalar values to their appropriate array components. - # partition_item is the number of samples this rank should generate, - # and count_item is the place to put the number of counts we see. - - partition_item = comm.scatter(partitions, root=0) - count_item = comm.scatter(counts, root=0) - - # Each rank locally populates its count_item variable. - - count_item = inside_circle(partition_item) - - # All ranks participate in the "gather" operation, which creates an array - # of all the rank's count_items on rank zero. - - counts = comm.gather(count_item, root=0) - - if rank == 0: - # Only rank zero has the entire array of results, so only it can - # compute and print the final answer. - my_pi = 4.0 * sum(counts) / n_samples - size_of_float = np.dtype(np.float64).itemsize - run_type = "serial" if cpus == 1 else "mpi" - print("[{:>8} version] required memory {:.1f} MB".format( - run_type, 3 * n_samples * size_of_float / (1024**2))) - print("[using {:>3} cores ] π is {:n} from {:n} samples".format( - cpus, my_pi, n_samples)) diff --git a/files/simple-pi-illustration.py b/files/simple-pi-illustration.py deleted file mode 100644 index e8ba7548..00000000 --- a/files/simple-pi-illustration.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- - -# This program generates a picture of the algorithm used to estimate the value -# of π by random sampling. 
-
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib.patches as pltpatches
-
-np.random.seed(14159625)
-
-n = 128
-x = np.random.uniform(size=n)
-y = np.random.uniform(size=n)
-
-with plt.xkcd():
-
-    plt.figure(figsize=(5,5))
-    plt.axis("equal")
-    plt.xlim([-0.0125, 1.0125])
-    plt.ylim([-0.0125, 1.0125])
-
-    for d in ["left", "top", "bottom", "right"]:
-        plt.gca().spines[d].set_visible(False)
-
-    plt.xlabel("x", position=(0.8, 0))
-    plt.ylabel("y", rotation=0, position=(0, 0.8))
-
-    plt.xticks([0, 0.5, 1], ["0", "1/2", "1"])
-    plt.yticks([0, 0.5, 1], ["0", "1/2", "1"])
-
-    plt.scatter(x, y, s=8, c=np.random.uniform(size=(n,3)))
-
-    circ = pltpatches.Arc((0, 0), width=2, height=2, angle=0, theta1=0, theta2=90, color="black", linewidth=3)
-    plt.gca().add_artist(circ)
-    squa = plt.Rectangle((0, 0), width=1, height=1, fill=None, linewidth=3)
-    plt.gca().add_artist(squa)
-
-    plt.savefig("pi.png", bbox_inches="tight", dpi=400)

From fac84f21047fe96cdb0b08ddeecfc5cb1e600694 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Tue, 31 Jan 2023 17:09:48 -0500
Subject: [PATCH 19/25] discuss the tarball before transferring it

---
 _episodes/15-transferring-files.md | 374 ++++++++++++++----------------
 1 file changed, 187 insertions(+), 187 deletions(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index 225d8974..b7bee6d2 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -68,6 +68,189 @@ After downloading the file, use `ls` to see it in your working directory:
 ```
 {: .language-bash}

+## Archiving Files
+
+One of the biggest challenges we often face when transferring data between
+remote HPC systems is that of large numbers of files. There is an overhead to
+transferring each individual file and when we are transferring large numbers of
+files these overheads combine to slow down our transfers to a large degree.
+
+The solution to this problem is to _archive_ multiple files into smaller
+numbers of larger files before we transfer the data to improve our transfer
+efficiency. Sometimes we will combine archiving with _compression_ to reduce
+the amount of data we have to transfer and so speed up the transfer.
+
+The most common archiving command you will use on a (Linux) HPC cluster is
+`tar`. `tar` can be used to combine files into a single archive file and,
+optionally, compress it.
+
+Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`.
+The "gz" part stands for _gzip_, which is a compression library.
+This kind of file can usually be interpreted by reading its name:
+it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b,"
+wrapped up all its contents in a single file with `tar`,
+then compressed that archive with `gzip` to save space.
+Let's check using `tar` with the `-t` flag, which prints the "**t**able of
+contents" without unpacking the file, specified by `-f <filename>`,
+on the remote computer.
+Note that you can concatenate the two flags, instead of writing `-t -f` separately.
+ +``` +{{ site.local.prompt }} tar -tf amdahl.tar.gz +hpc-carpentry-amdahl-46c9b4b/ +hpc-carpentry-amdahl-46c9b4b/.github/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +hpc-carpentry-amdahl-46c9b4b/.gitignore +hpc-carpentry-amdahl-46c9b4b/LICENSE +hpc-carpentry-amdahl-46c9b4b/README.md +hpc-carpentry-amdahl-46c9b4b/amdahl/ +hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +hpc-carpentry-amdahl-46c9b4b/requirements.txt +hpc-carpentry-amdahl-46c9b4b/setup.py +``` +{: .language-bash} + +This shows a folder which contains a few files. Let's see about that +compression, using `du` for "**d**isk **u**sage". + +``` +{{ site.local.prompt }} du -sh amdahl.tar.gz +8.0K amdahl.tar.gz +``` +{: .language-bash} + +> ## Files Occupy at Least One "Block" +> +> If the filesystem block size is larger than 3.4 KB, you'll see a larger +> number: files cannot be smaller than one block. +> You can use the `--apparent-size` flag to see the exact size, although the +> unoccupied space in that filesystem block can't be used for anything else. +{: .callout} + +Now let's unpack the archive. We'll run `tar` with a few common flags: + +* `-x` to e**x**tract the archive +* `-v` for **v**erbose output +* `-z` for g**z**ip compression +* `-f «tarball»` for the file to be unpacked + +The folder inside has an unfortunate name, so we'll change that as well using + +* `-C «folder»` to **c**hange directories before extracting + (note that the new directory must exist!) +* `--strip-components=«n»` to remove $n$ levels of depth from the extracted + file hierarchy + +> ## Extract the Archive +> +> Using the flags above, unpack the source code tarball into a new +> directory named "amdahl" using `tar`. +> +> ``` +> {{ site.local.prompt }} mkdir amdahl +> {{ site.local.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 +> ``` +> {: .language-bash} +> +> ``` +> hpc-carpentry-amdahl-46c9b4b/ +> hpc-carpentry-amdahl-46c9b4b/.github/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/ +> hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml +> hpc-carpentry-amdahl-46c9b4b/.gitignore +> hpc-carpentry-amdahl-46c9b4b/LICENSE +> hpc-carpentry-amdahl-46c9b4b/README.md +> hpc-carpentry-amdahl-46c9b4b/amdahl/ +> hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py +> hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py +> hpc-carpentry-amdahl-46c9b4b/requirements.txt +> hpc-carpentry-amdahl-46c9b4b/setup.py +> ``` +> {: .output} +> +> Note that we did not need to type out `-x -v -z -f`, thanks to flag +> concatenation, though the command works identically either way -- +> so long as the concatenated list ends with `f`, because the next string +> must specify the name of the file to extract. +> +> We couldn't concatenate `-C` because the next string must name the directory. +> +> Long options (`--strip-components`) also can't be concatenated. 
+> +> Since order doesn't generally matter, an equivalent command would be +> ``` +> {{ site.local.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 +> ``` +> {: .language-bash} +{: .discussion} + +Check the size of the extracted directory, and compare to the compressed +file size: + +``` +{{ site.local.prompt }} du -sh amdahl +48K amdahl +``` +{: .language-bash} + +Text files (including Python source code) compress nicely: +the "tarball" is one-sixth the total size of the raw data! + +If you want to reverse the process -- compressing raw data instead of +extracting it -- set a `c` flag instead of `x`, set the archive filename, +then provide a directory to compress: + +``` +{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl +``` +{: .language-bash} +``` +amdahl/ +amdahl/.github/ +amdahl/.github/workflows/ +amdahl/.github/workflows/python-publish.yml +amdahl/.gitignore +amdahl/LICENSE +amdahl/README.md +amdahl/amdahl/ +amdahl/amdahl/__init__.py +amdahl/amdahl/__main__.py +amdahl/amdahl/amdahl.py +amdahl/requirements.txt +amdahl/setup.py +``` +{: .output} + +If you give `amdahl.tar.gz` as the filename in the above command, `tar` will +update the existing tarball with any changes you made to the files, instead of +recompressing everything. + +> ## Working with Windows +> +> When you transfer text files from a Windows system to a Unix system (Mac, +> Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files +> slightly different than Unix, and adds an extra character to every line. +> +> On a Unix system, every line in a file ends with a `\n` (newline). On +> Windows, every line in a file ends with a `\r\n` (carriage return + newline). +> This causes problems sometimes. +> +> Though most modern programming languages and software handles this correctly, +> in some rare instances, you may run into an issue. The solution is to convert +> a file from Windows to Unix encoding with the `dos2unix` command. +> +> You can identify if a file has Windows line endings with `cat -A filename`. A +> file with Windows line endings will have `^M$` at the end of every line. A +> file with Unix line endings will have `$` at the end of a line. +> +> To convert the file, just run `dos2unix filename`. (Conversely, to convert +> back to Windows format, you can run `unix2dos filename`.) +{: .callout} + ## Transferring Single Files and Folders With `scp` To copy a single file to or from the cluster, we can use `scp` ("secure copy"). @@ -129,16 +312,13 @@ Upload the lesson material to your remote home directory like so: ## Transferring a Directory -If you went ahead and extracted the tarball, don't worry! `scp` can handle -entire directories as well as individual files. - -To copy a whole directory, we add the `-r` flag for "**r**ecursive": copy the -item specified, and every item below it, and every item below those... until it -reaches the bottom of the directory tree rooted at the folder name you +To transfer an entire directory, we add the `-r` flag for "**r**ecursive": copy +the item specified, and every item below it, and every item below those... +until it reaches the bottom of the directory tree rooted at the folder name you provided. 
```
-{{ site.local.prompt }} scp -r hpc-carpentry-amdahl-46c9b4b {{ site.remote.user }}@{{ site.remote.login }}:~/
+{{ site.local.prompt }} scp -r amdahl {{ site.remote.user }}@{{ site.remote.login }}:~/
```
{: .language-bash}

@@ -276,186 +456,6 @@ will be more efficient than using FileZilla (or related applications) that
 would copy from the source to your local machine, then to the destination
 machine.

-## Archiving Files
-
-One of the biggest challenges we often face when transferring data between
-remote HPC systems is that of large numbers of files. There is an overhead to
-transferring each individual file and when we are transferring large numbers of
-files these overheads combine to slow down our transfers to a large degree.
-
-The solution to this problem is to _archive_ multiple files into smaller
-numbers of larger files before we transfer the data to improve our transfer
-efficiency. Sometimes we will combine archiving with _compression_ to reduce
-the amount of data we have to transfer and so speed up the transfer.
-
-The most common archiving command you will use on a (Linux) HPC cluster is
-`tar`. `tar` can be used to combine files into a single archive file and,
-optionally, compress it.
-
-Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`.
-The "gz" part stands for _gzip_, which is a compression library.
-This kind of file can usually be interpreted by reading its name:
-it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b,"
-wrapped up all its contents in a single file with `tar`,
-then compressed that archive with `gzip` to save space.
-Let's check using `tar` with the `-t` flag, which prints the "**t**able of
-contents" without unpacking the file, specified by `-f <filename>`,
-on the remote computer.
-Note that you can concatenate the two flags, instead of writing `-t -f` separately.
-
-```
-{{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }}
-{{ site.remote.prompt }} tar -tf amdahl.tar.gz
-hpc-carpentry-amdahl-46c9b4b/
-hpc-carpentry-amdahl-46c9b4b/.github/
-hpc-carpentry-amdahl-46c9b4b/.github/workflows/
-hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml
-hpc-carpentry-amdahl-46c9b4b/.gitignore
-hpc-carpentry-amdahl-46c9b4b/LICENSE
-hpc-carpentry-amdahl-46c9b4b/README.md
-hpc-carpentry-amdahl-46c9b4b/amdahl/
-hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py
-hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py
-hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py
-hpc-carpentry-amdahl-46c9b4b/requirements.txt
-hpc-carpentry-amdahl-46c9b4b/setup.py
-```
-{: .language-bash}
-
-This shows a folder which contains a few files. Let's see about that
-compression, using `du` for "**d**isk **u**sage".
-
-```
-{{ site.remote.prompt }} du -sh amdahl.tar.gz
-8.0K amdahl.tar.gz
-```
-{: .language-bash}
-
-> ## Files Occupy at Least One "Block"
->
-> If the filesystem block size is larger than 3.4 KB, you'll see a larger
-> number: files cannot be smaller than one block.
-> You can use the `--apparent-size` flag to see the exact size, although the
-> unoccupied space in that filesystem block can't be used for anything else.
-{: .callout}
-
-Now let's unpack the archive. 
We'll run `tar` with a few common flags: - -* `-x` to e**x**tract the archive -* `-v` for **v**erbose output -* `-z` for g**z**ip compression -* `-f «tarball»` for the file to be unpacked - -The folder inside has an unfortunate name, so we'll change that as well using - -* `-C «folder»` to **c**hange directories before extracting - (note that the new directory must exist!) -* `--strip-components=«n»` to remove $n$ levels of depth from the extracted - file hierarchy - -> ## Extract the Archive -> -> Using the flags above, unpack the source code tarball into a new -> directory named "amdahl" using `tar`. -> -> ``` -> {{ site.remote.prompt }} mkdir amdahl -> {{ site.remote.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 -> ``` -> {: .language-bash} -> -> ``` -> hpc-carpentry-amdahl-46c9b4b/ -> hpc-carpentry-amdahl-46c9b4b/.github/ -> hpc-carpentry-amdahl-46c9b4b/.github/workflows/ -> hpc-carpentry-amdahl-46c9b4b/.github/workflows/python-publish.yml -> hpc-carpentry-amdahl-46c9b4b/.gitignore -> hpc-carpentry-amdahl-46c9b4b/LICENSE -> hpc-carpentry-amdahl-46c9b4b/README.md -> hpc-carpentry-amdahl-46c9b4b/amdahl/ -> hpc-carpentry-amdahl-46c9b4b/amdahl/__init__.py -> hpc-carpentry-amdahl-46c9b4b/amdahl/__main__.py -> hpc-carpentry-amdahl-46c9b4b/amdahl/amdahl.py -> hpc-carpentry-amdahl-46c9b4b/requirements.txt -> hpc-carpentry-amdahl-46c9b4b/setup.py -> ``` -> {: .output} -> -> Note that we did not need to type out `-x -v -z -f`, thanks to flag -> concatenation, though the command works identically either way -- -> so long as the concatenated list ends with `f`, because the next string -> must specify the name of the file to extract. -> -> We couldn't concatenate `-C` because the next string must name the directory. -> -> Long options (`--strip-components`) also can't be concatenated. -> -> Since order doesn't generally matter, an equivalent command would be -> ``` -> {{ site.remote.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 -> ``` -> {: .language-bash} -{: .discussion} - -Check the size of the extracted directory, and compare to the compressed -file size: - -``` -{{ site.remote.prompt }} du -sh amdahl -48K amdahl -``` -{: .language-bash} - -Text files (including Python source code) compress nicely: -the "tarball" is one-sixth the total size of the raw data! - -If you want to reverse the process -- compressing raw data instead of -extracting it -- set a `c` flag instead of `x`, set the archive filename, -then provide a directory to compress: - -``` -{{ site.local.prompt }} tar -cvzf compressed_code.tar.gz amdahl -``` -{: .language-bash} -``` -amdahl/ -amdahl/.github/ -amdahl/.github/workflows/ -amdahl/.github/workflows/python-publish.yml -amdahl/.gitignore -amdahl/LICENSE -amdahl/README.md -amdahl/amdahl/ -amdahl/amdahl/__init__.py -amdahl/amdahl/__main__.py -amdahl/amdahl/amdahl.py -amdahl/requirements.txt -amdahl/setup.py -``` -{: .output} - -> ## Working with Windows -> -> When you transfer text files from a Windows system to a Unix system (Mac, -> Linux, BSD, Solaris, etc.) this can cause problems. Windows encodes its files -> slightly different than Unix, and adds an extra character to every line. -> -> On a Unix system, every line in a file ends with a `\n` (newline). On -> Windows, every line in a file ends with a `\r\n` (carriage return + newline). -> This causes problems sometimes. -> -> Though most modern programming languages and software handles this correctly, -> in some rare instances, you may run into an issue. 
The solution is to convert
-> a file from Windows to Unix encoding with the `dos2unix` command.
->
-> You can identify if a file has Windows line endings with `cat -A filename`. A
-> file with Windows line endings will have `^M$` at the end of every line. A
-> file with Unix line endings will have `$` at the end of a line.
->
-> To convert the file, just run `dos2unix filename`. (Conversely, to convert
-> back to Windows format, you can run `unix2dos filename`.)
-{: .callout}
-
 {% include links.md %}

 [rsync]: https://rsync.samba.org/

From 7dfc411ad518e10d1db205ff39a0997a9054d009 Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Fri, 10 Feb 2023 18:18:40 -0500
Subject: [PATCH 20/25] clean up & clarify a thing or two

---
 _episodes/15-transferring-files.md | 103 ++++++++++++-----------------
 1 file changed, 44 insertions(+), 59 deletions(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index b7bee6d2..5de0f46c 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -24,11 +24,12 @@ terminal and in GitBash. Any file that can be downloaded in your web browser
 through a direct link can be downloaded using `curl` or `wget`. This is a
 quick way to download datasets or source code. The syntax for these commands is

-* `curl -O https://some/link/to/a/file [-o new_name]`
-* `wget https://some/link/to/a/file [-O new_name]`
+* `wget [-O new_name] https://some/link/to/a/file`
+* `curl [-o new_name] https://some/link/to/a/file`

 Try it out by downloading some material we'll use later on, from a terminal on
 your local machine, using the URL of the current codebase:
+<https://github.com/hpc-carpentry/amdahl/tarball/main>

 > ## Download the "Tarball"
@@ -50,12 +51,12 @@ your local machine, using the URL of the current codebase:
 > In this case, that would be "main," which is not very clear.
 > Use one of the above commands to save the tarball to "amdahl.tar.gz" instead.
 >
-> > ## Curl & Wget Commands
+> > ## `wget` and `curl` Commands
 > >
 > > ```
-> > {{ site.local.prompt }} curl -O https://github.com/hpc-carpentry/amdahl/tarball/main -o amdahl.tar.gz
+> > {{ site.local.prompt }} wget -O amdahl.tar.gz https://github.com/hpc-carpentry/amdahl/tarball/main
 > > # or
-> > {{ site.local.prompt }} wget https://github.com/hpc-carpentry/amdahl/tarball/main -O amdahl.tar.gz
+> > {{ site.local.prompt }} curl -o amdahl.tar.gz https://github.com/hpc-carpentry/amdahl/tarball/main
 > > ```
 > > {: .language-bash}
 > {: .solution}
@@ -79,21 +80,24 @@ The solution to this problem is to _archive_ multiple files into smaller
 numbers of larger files before we transfer the data to improve our transfer
 efficiency. Sometimes we will combine archiving with _compression_ to reduce
 the amount of data we have to transfer and so speed up the transfer.
-
 The most common archiving command you will use on a (Linux) HPC cluster is
 `tar`. `tar` can be used to combine files into a single archive file and,
 optionally, compress it.

-Let's start with the file we downloaded from the lesson site, `amdahl.tar.gz`.
-The "gz" part stands for _gzip_, which is a compression library.
-This kind of file can usually be interpreted by reading its name:
-it appears somebody took a folder named "hpc-carpentry-amdahl-46c9b4b,"
-wrapped up all its contents in a single file with `tar`,
+Let's look at the file we downloaded from the lesson site, `amdahl.tar.gz`.
+
+The `.gz` part stands for _gzip_, which is a compression library.
+It's common (but not necessary!) 
that this kind of file can be interpreted by +reading its name: +it appears somebody took files and folders relating to something called +"amdahl," wrapped them all up into a single file with `tar`, then compressed that archive with `gzip` to save space. -Let's check using `tar` with the `-t` flag, which prints the "**t**able of -contents" without unpacking the file, specified by `-f `, -on the remote computer. -Note that you can concatenate the two flags, instead of writing `-t -f` separately. + +Let's see if that's the case without unpacking the file. +`tar` prints the "**t**able of contents" with the `-t` flag, +for the file specified by the `-f` flag and a filename. +Note that you can concatenate the two flags: +writing `-t -f` separately, or `-tf` together, are interchangeable. ``` {{ site.local.prompt }} tar -tf amdahl.tar.gz @@ -113,22 +117,9 @@ hpc-carpentry-amdahl-46c9b4b/setup.py ``` {: .language-bash} -This shows a folder which contains a few files. Let's see about that -compression, using `du` for "**d**isk **u**sage". - -``` -{{ site.local.prompt }} du -sh amdahl.tar.gz -8.0K amdahl.tar.gz -``` -{: .language-bash} - -> ## Files Occupy at Least One "Block" -> -> If the filesystem block size is larger than 3.4 KB, you'll see a larger -> number: files cannot be smaller than one block. -> You can use the `--apparent-size` flag to see the exact size, although the -> unoccupied space in that filesystem block can't be used for anything else. -{: .callout} +This example output shows a folder which contains a few files, +where `46c9b4b` is an 8-character [git][git-swc] commit hash that will change +when the source material is updated. Now let's unpack the archive. We'll run `tar` with a few common flags: @@ -137,21 +128,13 @@ Now let's unpack the archive. We'll run `tar` with a few common flags: * `-z` for g**z**ip compression * `-f «tarball»` for the file to be unpacked -The folder inside has an unfortunate name, so we'll change that as well using - -* `-C «folder»` to **c**hange directories before extracting - (note that the new directory must exist!) -* `--strip-components=«n»` to remove $n$ levels of depth from the extracted - file hierarchy - > ## Extract the Archive > > Using the flags above, unpack the source code tarball into a new > directory named "amdahl" using `tar`. > > ``` -> {{ site.local.prompt }} mkdir amdahl -> {{ site.local.prompt }} tar -xvzf amdahl.tar.gz -C amdahl --strip-components=1 +> {{ site.local.prompt }} tar -xvzf amdahl.tar.gz > ``` > {: .language-bash} > @@ -176,22 +159,22 @@ The folder inside has an unfortunate name, so we'll change that as well using > concatenation, though the command works identically either way -- > so long as the concatenated list ends with `f`, because the next string > must specify the name of the file to extract. -> -> We couldn't concatenate `-C` because the next string must name the directory. -> -> Long options (`--strip-components`) also can't be concatenated. -> -> Since order doesn't generally matter, an equivalent command would be -> ``` -> {{ site.local.prompt }} tar -xvzC amdahl -f amdahl.tar.gz --strip-components=1 -> ``` -> {: .language-bash} {: .discussion} -Check the size of the extracted directory, and compare to the compressed -file size: +The folder has an unfortunate name, so let's change that to something more +convenient. 
``` +{{ site.local.prompt }} mv hpc-carpentry-amdahl-46c9b4b amdahl +``` +{: .language-bash} + +Check the size of the extracted directory and compare to the compressed +file size, using `du` for "**d**isk **u**sage". + +``` +{{ site.local.prompt }} du -sh amdahl.tar.gz +8.0K amdahl.tar.gz {{ site.local.prompt }} du -sh amdahl 48K amdahl ``` @@ -225,9 +208,10 @@ amdahl/setup.py ``` {: .output} -If you give `amdahl.tar.gz` as the filename in the above command, `tar` will -update the existing tarball with any changes you made to the files, instead of -recompressing everything. +If you give `amdahl.tar.gz` as the filename in the above command, +`tar` will update the existing tarball with any changes you made to the files. +That would mean adding the new `amdahl` folder to the _existing_ folder +(`hpc-carpentry-amdahl-46c9b4b`), doubling the size of the archive! > ## Working with Windows > @@ -259,7 +243,7 @@ The `scp` command is a relative of the `ssh` command we used to access the system, and can use the same public-key authentication mechanism. -To _upload to_ another computer: +To _upload to_ another computer, the template command is ``` {{ site.local.prompt }} scp local_file {{ site.remote.user }}@{{ site.remote.login }}:remote_destination @@ -299,9 +283,9 @@ Upload the lesson material to your remote home directory like so: > > > > ``` > > {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} -> > {{ site.remote.prompt }} curl -O https://github.com/hpc-carpentry/amdahl/tarball/main -o amdahl.tar.gz +> > {{ site.remote.prompt }} wget -O amdahl.tar.gz https://github.com/hpc-carpentry/amdahl/tarball/main > > # or -> > {{ site.remote.prompt }} wget https://github.com/hpc-carpentry/amdahl/tarball/main -O amdahl.tar.gz +> > {{ site.remote.prompt }} curl -o amdahl.tar.gz https://github.com/hpc-carpentry/amdahl/tarball/main > > ``` > > {: .language-bash} > {: .solution} @@ -374,7 +358,7 @@ A trailing slash on the target directory is optional, and has no effect for > To recursively copy a directory, we can use the same options: > > ``` -> {{ site.local.prompt }} rsync -avP hpc-carpentry-amdahl-46c9b4b {{ site.remote.user }}@{{ site.remote.login }}:~/ +> {{ site.local.prompt }} rsync -avP amdahl {{ site.remote.user }}@{{ site.remote.login }}:~/ > ``` > {: .language-bash} > @@ -458,4 +442,5 @@ machine. {% include links.md %} +[git-swc]: https://swcarpentry.github.io/git-novice/ [rsync]: https://rsync.samba.org/ From 7b9c78e190f97458e53e1bde0af765bcf9fd5db1 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Wed, 15 Feb 2023 18:36:12 -0500 Subject: [PATCH 21/25] revised discussion of tar and scp flags & fields --- _episodes/15-transferring-files.md | 113 ++++++++++++++++------------- 1 file changed, 64 insertions(+), 49 deletions(-) diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md index 5de0f46c..cbc315bc 100644 --- a/_episodes/15-transferring-files.md +++ b/_episodes/15-transferring-files.md @@ -47,9 +47,9 @@ your local machine, using the URL of the current codebase: > You may also see the extension `.tgz`, which is just an abbreviation of > `.tar.gz`. > -> By default, `curl` and `wget` download files to the same name as the URL. -> In this case, that would be "main," which is not very clear. -> Use one of the above commands to save the tarball to "amdahl.tar.gz" instead. +> By default, `curl` and `wget` download files to the same name as the URL: +> in this case, `main`. +> Use one of the above commands to save the tarball as `amdahl.tar.gz`. 
> > > ## `wget` and `curl` Commands > > @@ -78,26 +78,29 @@ files these overheads combine to slow down our transfers to a large degree. The solution to this problem is to _archive_ multiple files into smaller numbers of larger files before we transfer the data to improve our transfer -efficiency. Sometimes we will combine archiving with _compression_ to reduce -the amount of data we have to transfer and so speed up the transfer. +efficiency. +Sometimes we will combine archiving with _compression_ to reduce the amount of +data we have to transfer and so speed up the transfer. The most common archiving command you will use on a (Linux) HPC cluster is -`tar`. `tar` can be used to combine files into a single archive file and, -optionally, compress it. +`tar`. +`tar` can be used to combine files and folders into a single archive file and, +optionally, compress the result. Let's look at the file we downloaded from the lesson site, `amdahl.tar.gz`. The `.gz` part stands for _gzip_, which is a compression library. It's common (but not necessary!) that this kind of file can be interpreted by -reading its name: -it appears somebody took files and folders relating to something called -"amdahl," wrapped them all up into a single file with `tar`, +reading its name: it appears somebody took files and folders relating to +something called "amdahl," wrapped them all up into a single file with `tar`, then compressed that archive with `gzip` to save space. -Let's see if that's the case without unpacking the file. -`tar` prints the "**t**able of contents" with the `-t` flag, -for the file specified by the `-f` flag and a filename. -Note that you can concatenate the two flags: -writing `-t -f` separately, or `-tf` together, are interchangeable. +Let's see if that is the case, _without_ unpacking the file. +`tar` prints the "**t**able of contents" with the `-t` flag, for the file +specified with the `-f` flag followed by the filename. +Note that you can concatenate the two flags: writing `-t -f` is interchangeable +with writing `-tf` together. +However, the argument following `-f` must be a filename, so writing `-ft` will +_not_ work. ``` {{ site.local.prompt }} tar -tf amdahl.tar.gz @@ -117,9 +120,9 @@ hpc-carpentry-amdahl-46c9b4b/setup.py ``` {: .language-bash} -This example output shows a folder which contains a few files, -where `46c9b4b` is an 8-character [git][git-swc] commit hash that will change -when the source material is updated. +This example output shows a folder which contains a few files, where `46c9b4b` +is an 8-character [git][git-swc] commit hash that will change when the source +material is updated. Now let's unpack the archive. We'll run `tar` with a few common flags: @@ -208,10 +211,11 @@ amdahl/setup.py ``` {: .output} -If you give `amdahl.tar.gz` as the filename in the above command, -`tar` will update the existing tarball with any changes you made to the files. +If you give `amdahl.tar.gz` as the filename in the above command, `tar` will +update the existing tarball with any changes you made to the files. That would mean adding the new `amdahl` folder to the _existing_ folder -(`hpc-carpentry-amdahl-46c9b4b`), doubling the size of the archive! +(`hpc-carpentry-amdahl-46c9b4b`) inside the tarball, doubling the size of the +archive! > ## Working with Windows > @@ -250,15 +254,18 @@ To _upload to_ another computer, the template command is ``` {: .language-bash} -Note that everything after the `:` is relative to our home directory on the -remote computer. 
If we don't have a specific destination in mind we can -omit the `remote_destination` and the file will be copied to our home -directory on the remote computer (with its original name). If we include -a `remote_destination` we should note that `scp` interprets this the same -way `cp` does: if it exists and is a folder, the file is copied inside the -folder; if it exists and is a file, the file is overwritten with the -contents of `local_file`; if it does not exist, it is assumed to be a -destination filename for `local_file`. +in which `@` and `:` are field separators and `remote_destination` is a path +relative to your remote home directory, or a new filename if you wish to change +it, or both a relative path _and_ a new filename. +If you don't have a specific folder in mind you can omit the +`remote_destination` and the file will be copied to your home directory on the +remote computer (with its original name). +If you include a `remote_destination`, note that `scp` interprets this the same +way `cp` does when making local copies: +if it exists and is a folder, the file is copied inside the folder; if it +exists and is a file, the file is overwritten with the contents of +`local_file`; if it does not exist, it is assumed to be a destination filename +for `local_file`. Upload the lesson material to your remote home directory like so: @@ -296,13 +303,13 @@ Upload the lesson material to your remote home directory like so: ## Transferring a Directory -To transfer an entire directory, we add the `-r` flag for "**r**ecursive": copy -the item specified, and every item below it, and every item below those... +To transfer an entire directory, we add the `-r` flag for "**r**ecursive": +copy the item specified, and every item below it, and every item below those... until it reaches the bottom of the directory tree rooted at the folder name you provided. ``` -{{ site.local.prompt }} scp -r amdahl {{ site.remote.user }}@{{ site.remote.login }}:~/ +{{ site.local.prompt }} scp -r amdahl {{ site.remote.user }}@{{ site.remote.login }}: ``` {: .language-bash} @@ -312,24 +319,29 @@ provided. > copying with `-r` can take a long time to complete. {: .callout} -## What's in a `/`? - When using `scp`, you may have noticed that a `:` _always_ follows the remote -computer name; sometimes a `/` follows that, and sometimes not, and sometimes -there's a final `/`. On Linux computers, `/` is the ___root___ directory, the -location where the entire filesystem (and others attached to it) is anchored. A -path starting with a `/` is called _absolute_, since there can be nothing above -the root `/`. A path that does not start with `/` is called _relative_, since -it is not anchored to the root. +computer name. +A string _after_ the `:` specifies the remote directory you wish to transfer +the file or folder to, including a new name if you wish to rename the remote +material. +If you leave this field blank, `scp` defaults to your home directory and the +name of the local material to be transferred. + +On Linux computers, `/` is the separator in file or directory paths. +A path starting with a `/` is called _absolute_, since there can be nothing +above the root `/`. +A path that does not start with `/` is called _relative_, since it is not +anchored to the root. If you want to upload a file to a location inside your home directory -- -which is often the case -- then you don't need a leading `/`. 
After the -`:`, start writing the sequence of folders that lead to the final storage -location for the file or, as mentioned above, provide nothing if your home -directory _is_ the destination. +which is often the case -- then you don't need a _leading_ `/`. After the `:`, +you can type the destination path relative to your home directory. +If your home directory _is_ the destination, you can leave the destination +field blank, or type `~` -- the shorthand for your home directory -- for +completeness. -A trailing slash on the target directory is optional, and has no effect for -`scp -r`, but is important in other commands, like `rsync`. +With `scp`, a trailing slash on the target directory is optional, and has +no effect. It is important for other commands, like `rsync`. > ## A Note on `rsync` > @@ -337,7 +349,7 @@ A trailing slash on the target directory is optional, and has no effect for > command limiting. The [rsync] utility provides > advanced features for file transfer and is typically faster compared to both > `scp` and `sftp` (see below). It is especially useful for transferring large -> and/or many files and creating synced backup folders. +> and/or many files and for synchronizing folder contents between computers. > > The syntax is similar to `scp`. To transfer _to_ another computer with > commonly used options: @@ -371,7 +383,7 @@ A trailing slash on the target directory is optional, and has no effect for > To download a file, we simply change the source and destination: > > ``` -> {{ site.local.prompt }} rsync -avP {{ site.remote.user }}@{{ site.remote.login }}:hpc-carpentry-amdahl-46c9b4b ./ +> {{ site.local.prompt }} rsync -avP {{ site.remote.user }}@{{ site.remote.login }}:amdahl ./ > ``` > {: .language-bash} {: .callout} @@ -392,6 +404,8 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > ``` > {: .language-bash} > +> _Hint:_ check the `man` page or "help" for `rsync`. +> > > ## Solution > > > > ``` @@ -402,6 +416,7 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or > > {{ site.local.prompt }} rsync --port=768 amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > > ``` > > {: .language-bash} +> > (Note that this command will fail, as the correct port in this case is the > default: 22.) > {: .solution} From 9b656ad874b87e4e24f62c1a58d24d398a309cf4 Mon Sep 17 00:00:00 2001 From: Trevor Keller Date: Wed, 15 Feb 2023 20:40:12 -0500 Subject: [PATCH 22/25] use nano to edit bashrc --- _episodes/16-parallel.md | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md index 20984412..806c018e 100644 --- a/_episodes/16-parallel.md +++ b/_episodes/16-parallel.md @@ -59,22 +59,24 @@ collect mpi4py from the Internet and install it for you. If this fails due to a one-way firewall, you must retrieve mpi4py on your local machine and upload it, just as we did for Amdahl. 
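A quick way to check whether this one-way firewall affects you is to attempt a
small download from the cluster itself. The following is only a sketch -- the
exact failure mode (a hang, a timeout, or a "connection refused" message)
varies from site to site, and the `--timeout` flag simply keeps `wget` from
waiting indefinitely:

```
{{ site.remote.prompt }} wget --timeout=10 -O /dev/null https://github.com
```
{: .language-bash}

If the download fails, use the workaround below.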
-> ## Retrieve and Push mpi4py +> ## Retrieve and Upload `mpi4py` > > If installing Amdahl failed because mpi4py could not be installed, > retrieve the tarball from > then `rsync` it to the cluster, extract, and install: > > ``` -> {{ site.local.prompt }} wget https://github.com/mpi4py/mpi4py/tarball/master -O mpi4py.tar.gz -> {{ site.local.prompt }} scp amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} wget -O mpi4py.tar.gz https://github.com/mpi4py/mpi4py/tarball/master +> {{ site.local.prompt }} scp mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > # or -> {{ site.local.prompt }} rsync -avP amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: +> {{ site.local.prompt }} rsync -avP mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: > ``` > {: .language-bash} +> > ``` -> {{ site.remote.prompt }} mkdir mpi4py -> {{ site.remote.prompt }} tar -xvzf mpi4py.tar.gz -C amdahl --strip-components=1 +> {{ site.local.prompt }} ssh {{ site.remote.user }}@{{ site.remote.login }} +> {{ site.remote.prompt }} tar -xvzf mpi4py.tar.gz # extract the archive +> {{ site.remote.prompt }} mv mpi4py* mpi4py # rename the directory > {{ site.remote.prompt }} cd mpi4py > {{ site.remote.prompt }} python3 -m pip install --user . > {{ site.remote.prompt }} cd ../amdahl @@ -83,9 +85,9 @@ local machine and upload it, just as we did for Amdahl. > {: .language-bash} {: .discussion} -`pip` may warn that your user package binaries are not in your PATH. - -> ## If Pip Raises a Warning... +> ## If `pip` Raises a Warning... +> +> `pip` may warn that your user package binaries are not in your PATH. > > ``` > WARNING: The script amdahl is installed in "${HOME}/.local/bin" which is @@ -105,18 +107,22 @@ local machine and upload it, just as we did for Amdahl. > If the command returns no output, displaying a new prompt, it means the file > `amdahl` has not been found. You must update the environment variable named > `PATH` to include the missing folder. -> Run the following command to update your shell configuration file, then log -> off the cluster and back on again so it takes effect. +> Edit your shell configuration file as follows, then log off the cluster and +> back on again so it takes effect. > > ``` -> {{ site.remote.prompt }} echo "export PATH=${PATH}:${HOME}/.local/bin" >> ~/.bashrc -> {{ site.remote.prompt }} logout -> {{ site.local.prompt }} ... +> {{ site.remote.prompt }} nano ~/.bashrc +> {{ site.remote.prompt }} tail ~/.bashrc > ``` > {: .language-bash} +> ``` +> export PATH=${PATH}:${HOME}/.local/bin +> ``` +> {: .output} > -> `which` should now be able to find `amdahl` without difficulties. -> If you had to load a Python module, load it again! +> After logging back in to {{ site.remote.login }}, `which` should be able to +> find `amdahl` without difficulties. +> If you had to load a Python module, load it again. {: .discussion} ## Help! @@ -178,7 +184,7 @@ reverse-chronological order: newest first. What was the output? > ## Read the Job Output > > The cluster output should be written to a file in the folder you launched the -> job from. +> job from. 
For example,
>
> ```
> {{ site.remote.prompt }} ls -t
> ```
> {: .language-bash}

From 470c4c59bf45ddb3b30dc61d54239478e9d180fe Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Wed, 15 Feb 2023 20:41:26 -0500
Subject: [PATCH 23/25] substitute previously-seen example job ID

---
 _episodes/17-resources.md | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/_episodes/17-resources.md b/_episodes/17-resources.md
index cee174dd..99073cb7 100644
--- a/_episodes/17-resources.md
+++ b/_episodes/17-resources.md
@@ -58,11 +58,13 @@ use `{{ site.sched.hist }}` to get statistics about `parallel-job.sh`.

{% include {{ site.snippets }}/resources/account-history.snip %}

-This shows all the jobs we ran today (note that there are multiple entries
-per job). To get info about a specific job, we change command slightly.
+This shows all the jobs we ran today (note that there are multiple entries per
+job).
+To get info about a specific job (for example, 347087), we change the command
+slightly.

```
-{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} 1965
+{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} 347087
```
{: .language-bash}

@@ -72,7 +74,7 @@ information to `less` to make it easier to view (use the left and right arrow
keys to scroll through fields).

```
-{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} 1965 | less
+{{ site.remote.prompt }} {{ site.sched.hist }} {{ site.sched.flag.histdetail }} 347087 | less
```
{: .language-bash}

From 62fd20dfa22f1f75d89758489009543b3f685e3b Mon Sep 17 00:00:00 2001
From: Trevor Keller
Date: Wed, 15 Feb 2023 20:45:56 -0500
Subject: [PATCH 24/25] resolve build failure

---
 _episodes/15-transferring-files.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/_episodes/15-transferring-files.md b/_episodes/15-transferring-files.md
index cbc315bc..5548dd6d 100644
--- a/_episodes/15-transferring-files.md
+++ b/_episodes/15-transferring-files.md
@@ -416,9 +416,9 @@ you will have to specify it using the appropriate flag, often `-p`, `-P`, or
> > {{ site.local.prompt }} rsync --port=768 amdahl.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
> > ```
> > {: .language-bash}
->
-> (Note that this command will fail, as the correct port in this case is the
-> default: 22.)
+> >
+> > (Note that this command will fail, as the correct port in this case is the
+> > default: 22.)
> {: .solution}
{: .challenge}

From 160e626e63ed5bf717f76a434def7548205dd3e6 Mon Sep 17 00:00:00 2001
From: ocaisa
Date: Thu, 16 Feb 2023 16:03:41 +0100
Subject: [PATCH 25/25] Update _episodes/16-parallel.md

---
 _episodes/16-parallel.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_episodes/16-parallel.md b/_episodes/16-parallel.md
index 806c018e..dc5635e2 100644
--- a/_episodes/16-parallel.md
+++ b/_episodes/16-parallel.md
@@ -66,7 +66,7 @@ local machine and upload it, just as we did for Amdahl.
> then `rsync` it to the cluster, extract, and install:
>
> ```
-> {{ site.local.prompt }} wget -O mpi4py.tar.gz https://github.com/mpi4py/mpi4py/tarball/master
+> {{ site.local.prompt }} wget -O mpi4py.tar.gz https://github.com/mpi4py/mpi4py/releases/download/3.1.4/mpi4py-3.1.4.tar.gz
> {{ site.local.prompt }} scp mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}:
> # or
> {{ site.local.prompt }} rsync -avP mpi4py.tar.gz {{ site.remote.user }}@{{ site.remote.login }}: