From e2caa851a94dd64ddd7a1024a4b8351ccb7a4024 Mon Sep 17 00:00:00 2001
From: Christoph Schranz
Date: Tue, 18 Jun 2024 14:59:44 +0200
Subject: [PATCH] downgrade cudnn to 8, as TF 2.16 has issues with v9

---
 .build/Dockerfile         | 12 ++----------
 README.md                 | 19 +++++++++++--------
 custom/gpulibs.Dockerfile |  2 +-
 custom/header.Dockerfile  |  2 +-
 4 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/.build/Dockerfile b/.build/Dockerfile
index dd07a47..40e78dd 100755
--- a/.build/Dockerfile
+++ b/.build/Dockerfile
@@ -7,7 +7,7 @@

 # Use NVIDIA CUDA as base image and run the same installation as in the other packages.
 # The version of cuda must match those of the packages installed in src/Dockerfile.gpulibs
-FROM nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04
+FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04
 LABEL authors="Christoph Schranz , Mathematical Michael "
 # This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.
 RUN chmod 1777 /tmp && chmod 1777 /var/tmp
@@ -426,14 +426,6 @@ RUN apt-get update && \
     apt-get install -y --no-install-recommends cmake libncurses5-dev libncursesw5-dev git && \
     apt-get clean && rm -rf /var/lib/apt/lists/*

-USER $NB_UID
-# These need to be two separate pip install commands, otherwise it will throw an error
-# attempting to resolve the nvidia-cuda-nvcc package at the same time as nvidia-pyindex
-RUN pip install --no-cache-dir nvidia-pyindex && \
-    pip install --no-cache-dir nvidia-cuda-nvcc && \
-    fix-permissions "${CONDA_DIR}" && \
-    fix-permissions "/home/${NB_USER}"
-
 # reinstall nvcc with cuda-nvcc to install ptxas
 USER $NB_UID
 # These need to be two separate pip install commands, otherwise it will throw an error
@@ -444,7 +436,7 @@ RUN pip install --no-cache-dir nvidia-pyindex && \
     fix-permissions "/home/${NB_USER}"

 # Install cuda-nvcc with specific version, see here: https://anaconda.org/nvidia/cuda-nvcc/labels
-RUN mamba install -c nvidia cuda-nvcc=12.3.107 -y && \
+RUN mamba install -c nvidia cuda-nvcc=12.2.140 -y && \
     mamba clean --all -f -y && \
     fix-permissions $CONDA_DIR && \
     fix-permissions /home/$NB_USER
diff --git a/README.md b/README.md
index 3229869..a7949af 100644
--- a/README.md
+++ b/README.md
@@ -45,10 +45,10 @@ for creating and maintaining a robust Python, R, and Julia toolstack for Data Sc
 3. Get access to your GPU via CUDA drivers within Docker containers. For this, follow the installation steps in this [Medium article](https://medium.com/@christoph.schranz/set-up-your-own-gpu-based-jupyterlab-e0d45fcacf43).
    You can confirm that you can access your GPU within Docker if the command below returns a result similar to this one:
    ```bash
-   docker run --gpus all nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04 nvidia-smi
+   docker run --gpus all nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04 nvidia-smi
   ```
   ```bash
-  Mon Apr  8 16:19:10 2024
+  Tue Jun 18 12:39:31 2024
   +---------------------------------------------------------------------------------------+
   | NVIDIA-SMI 545.23.05              Driver Version: 545.84       CUDA Version: 12.3     |
   |-----------------------------------------+----------------------+----------------------+
@@ -57,7 +57,7 @@ for creating and maintaining a robust Python, R, and Julia toolstack for Data Sc
   |                                         |                      |               MIG M. |
   |=========================================+======================+======================|
   |   0  NVIDIA GeForce RTX 3060 ...    On  | 00000000:01:00.0 Off |                  N/A |
-  | N/A   46C    P8              10W /  60W |    105MiB /  6144MiB |      0%      Default |
+  | N/A   46C    P8              10W /  60W |    202MiB /  6144MiB |      0%      Default |
   |                                         |                      |                  N/A |
   +-----------------------------------------+----------------------+----------------------+
@@ -76,9 +76,9 @@ for creating and maintaining a robust Python, R, and Julia toolstack for Data Sc
   ```bash
   cd your-working-directory
   ll data # this path will be mounted by default
-  docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root cschranz/gpu-jupyter:v1.7_cuda-12.3_ubuntu-22.04
+  docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root cschranz/gpu-jupyter:v1.7_cuda-12.2_ubuntu-22.04
   ```
-  This starts an instance of *GPU-Jupyter* with the tag `v1.7_cuda-12.3_ubuntu-22.04` at [http://localhost:8848](http://localhost:8848) (port `8848`).
+  This starts an instance of *GPU-Jupyter* with the tag `v1.7_cuda-12.2_ubuntu-22.04` at [http://localhost:8848](http://localhost:8848) (port `8848`).
  To log into JupyterLab, you have to specify a token that you get from:
  ```bash
  docker exec -it [container-ID/name] jupyter server list
@@ -89,6 +89,9 @@ for creating and maintaining a robust Python, R, and Julia toolstack for Data Sc
 Additionally, data within the host's `data` directory is shared with the container.

 The following images of GPU-Jupyter are available on [Dockerhub](https://hub.docker.com/r/cschranz/gpu-jupyter):
+- `v1.7_cuda-12.2_ubuntu-22.04` (full image)
+- `v1.7_cuda-12.2_ubuntu-22.04_python-only` (only with a python interpreter and without Julia and R)
+- `v1.7_cuda-12.2_ubuntu-22.04_slim` (only with a python interpreter and without additional packages)
 - `v1.7_cuda-12.3_ubuntu-22.04` (full image)
 - `v1.7_cuda-12.3_ubuntu-22.04_python-only` (only with a python interpreter and without Julia and R)
 - `v1.7_cuda-12.3_ubuntu-22.04_slim` (only with a python interpreter and without additional packages)
@@ -129,7 +132,7 @@ Additionally, data within the host's `data` directory is shared with the contain

  The version, e.g. `v1.7`, declares the version of the generator setup.
- The Cuda version, e.g. `cuda-12.3`, must match the CUDA driver version and be supported by the GPU libraries.
+ The CUDA version, e.g. `cuda-12.2`, must match the CUDA driver version and be supported by the GPU libraries.

 These and older versions of GPU-Jupyter are listed on [Dockerhub](https://hub.docker.com/r/cschranz/gpu-jupyter/tags?page=1&ordering=last_updated).
 In case you are using another version or the GPU libraries don't work on your hardware, please try to build the image on your own as described below.
 Note that the images built for Ubuntu 20.04 LTS also work on Ubuntu 22.04 LTS; Ubuntu 20.04 LTS is currently not supported.
@@ -145,14 +148,14 @@ we recommend checking out this [tutorial](https://www.youtube.com/watch?v=7wfPqA
 Building a custom Docker image is the recommended option if you have a different GPU architecture or if you want to customize the pre-installed packages.
 The Dockerfiles in `custom/` can be modified to achieve this. To use a custom base image, modify `custom/header.Dockerfile`. To install specific GPU-related libraries, modify `custom/gpulibs.Dockerfile`, and to add specific libraries, append them to `custom/usefulpackages.Dockerfile`.

-After making the necessary modifications, regenerate the `Dockerfile` in `/.build`. Once you have confirmed that your GPU is accessible within Docker containers by running `docker run --gpus all nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04 nvidia-smi` and seeing the GPU statistics, you can generate, build, and run the Docker image.
+After making the necessary modifications, regenerate the `Dockerfile` in `/.build`. Once you have confirmed that your GPU is accessible within Docker containers by running `docker run --gpus all nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04 nvidia-smi` and seeing the GPU statistics, you can generate, build, and run the Docker image.
 The following commands will start *GPU-Jupyter* on [localhost:8848](http://localhost:8848) with the default password `gpu-jupyter`.

 ```bash
 git clone https://github.com/iot-salzburg/gpu-jupyter.git
 cd gpu-jupyter
 git branch # Check for existing branches
-git checkout v1.7_cuda-12.3_ubuntu-22.04 # select or create a new version
+git checkout v1.7_cuda-12.2_ubuntu-22.04 # select or create a new version
 # generate the Dockerfile with python and without Julia and R (see options: --help)
 ./generate-Dockerfile.sh --python-only
 docker build -t gpu-jupyter .build/ # will take a while
diff --git a/custom/gpulibs.Dockerfile b/custom/gpulibs.Dockerfile
index e789019..7b98b30 100644
--- a/custom/gpulibs.Dockerfile
+++ b/custom/gpulibs.Dockerfile
@@ -51,7 +51,7 @@ RUN pip install --no-cache-dir nvidia-pyindex && \
     fix-permissions "/home/${NB_USER}"

 # Install cuda-nvcc with specific version, see here: https://anaconda.org/nvidia/cuda-nvcc/labels
-RUN mamba install -c nvidia cuda-nvcc=12.3.107 -y && \
+RUN mamba install -c nvidia cuda-nvcc=12.2.140 -y && \
     mamba clean --all -f -y && \
     fix-permissions $CONDA_DIR && \
     fix-permissions /home/$NB_USER
diff --git a/custom/header.Dockerfile b/custom/header.Dockerfile
index 3d6ef22..c37e7fa 100644
--- a/custom/header.Dockerfile
+++ b/custom/header.Dockerfile
@@ -1,6 +1,6 @@
 # Use NVIDIA CUDA as base image and run the same installation as in the other packages.
 # The version of cuda must match those of the packages installed in src/Dockerfile.gpulibs
-FROM nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04
+FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04
 LABEL authors="Christoph Schranz , Mathematical Michael "
 # This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.
 RUN chmod 1777 /tmp && chmod 1777 /var/tmp
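After rebuilding with this patch, it is worth confirming inside the running container that TensorFlow 2.16 can actually load the downgraded CUDA/cuDNN stack and see the GPU, since that is the motivation for the change. A minimal sketch of such a check is below; it assumes the container is named `gpu-jupyter` (a hypothetical name, substitute the ID/name from `docker ps`) and that the image's default `python` provides TensorFlow, as the GPU libraries installed via `custom/gpulibs.Dockerfile` are expected to.

```bash
# Hypothetical container name; replace with the ID/name shown by `docker ps`.
docker exec -it gpu-jupyter python -c "
import tensorflow as tf
# The CUDA/cuDNN versions this TensorFlow build expects (informational only).
info = tf.sysconfig.get_build_info()
print('TF:', tf.__version__, '| CUDA:', info.get('cuda_version'), '| cuDNN:', info.get('cudnn_version'))
# The real check: an empty list means TensorFlow could not load the CUDA/cuDNN runtime.
print('GPUs:', tf.config.list_physical_devices('GPU'))
"
```

If the GPU list comes back empty even though `nvidia-smi` works inside the container, the base image tag and the `cuda-nvcc` pin changed above are the first places to look.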