Add option to install PyTorch from release channel (pytorch#2065)

Summary:

- For the FBGEMM_GPU release workflows, add the option to install PyTorch from the PyTorch PIP release channel in addition to the test channel
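In practical terms, the two channels differ only in the index URL (and the `--pre` flag) passed to `pip`; a rough sketch of the equivalent installs, assuming the CUDA 12.1 (`cu121`) wheel variant:

```sh
# Test channel: pre-release (RC) wheels
pip install --pre torch --index-url https://download.pytorch.org/whl/test/cu121/

# Release channel: stable wheels
pip install torch --index-url https://download.pytorch.org/whl/cu121/
```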

Pull Request resolved: pytorch#2065

Reviewed By: spcyppt

Differential Revision: D49960302

Pulled By: q10

fbshipit-source-id: 1576ace7b8c24834f7db4fe19ccb0d07faa2fb1d
q10 authored and facebook-github-bot committed Oct 5, 2023
1 parent 8f7d8c7 commit d307673
Showing 8 changed files with 51 additions and 39 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/test_torchrec.bash
@@ -32,7 +32,7 @@ usage () {
echo "CUDA_VERSION : PyTorch's CUDA version (e.g., 11.6, 11.7)"
echo "FBGEMM_WHEEL_PATH : path to FBGEMM_GPU's wheel file"
echo "MINICONDA_PREFIX : path to install Miniconda (default: \$HOME/miniconda)"
echo "Example: Python 3.10 + PyTorch nightly (CUDA 11.7), install miniconda at \$HOME/miniconda, using dist/fbgemm_gpu_nightly.whl"
echo "Example: Python 3.10 + PyTorch nightly (CUDA 12.1), install miniconda at \$HOME/miniconda, using dist/fbgemm_gpu_nightly.whl"
# shellcheck disable=SC2086
echo " bash $(basename ${BASH_SOURCE[0]}) -v -o torchrec_nightly -p 3.10 -P pytorch-nightly -c 11.7 -w dist/fbgemm_gpu_nightly.whl"
}
24 changes: 12 additions & 12 deletions .github/scripts/utils_pip.bash
@@ -23,8 +23,8 @@ __extract_pip_arguments () {
echo "Usage: ${FUNCNAME[0]} ENV_NAME PACKAGE_NAME PACKAGE_VERSION PACKAGE_VARIANT_TYPE [PACKAGE_VARIANT_VERSION]"
echo "Example(s):"
echo " ${FUNCNAME[0]} build_env torch 1.11.0 cpu # Install the CPU variant a specific version"
echo " ${FUNCNAME[0]} build_env torch latest cpu # Install the CPU variant of the latest stable version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 11.7.1 # Install the variant for CUDA 11.7"
echo " ${FUNCNAME[0]} build_env torch release cpu # Install the CPU variant of the latest stable version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 12.1.0 # Install the variant for CUDA 12.1"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu nightly rocm 5.3 # Install the variant for ROCM 5.3"
return 1
else
@@ -48,7 +48,7 @@ __extract_pip_arguments () {
local cuda_version="${package_variant_version:-11.8.0}"
# shellcheck disable=SC2206
local cuda_version_arr=(${cuda_version//./ })
- # Convert, i.e. cuda 11.7.1 => cu117
+ # Convert, i.e. cuda 12.1.0 => cu121
export package_variant="cu${cuda_version_arr[0]}${cuda_version_arr[1]}"
elif [ "$package_variant_type" == "rocm" ]; then
# Extract the ROCM version or default to 5.5.1
@@ -67,7 +67,7 @@ __extract_pip_arguments () {
if [ "$package_version" == "nightly" ] || [ "$package_version" == "test" ]; then
export pip_package="--pre ${package_name}"
export pip_channel="https://download.pytorch.org/whl/${package_version}/${package_variant}/"
elif [ "$package_version" == "latest" ]; then
elif [ "$package_version" == "release" ]; then
export pip_package="${package_name}"
export pip_channel="https://download.pytorch.org/whl/${package_variant}/"
else
@@ -85,10 +85,10 @@ install_from_pytorch_pip () {
if [ "$package_variant_type" == "" ]; then
echo "Usage: ${FUNCNAME[0]} ENV_NAME PACKAGE_NAME PACKAGE_VERSION PACKAGE_VARIANT_TYPE [PACKAGE_VARIANT_VERSION]"
echo "Example(s):"
echo " ${FUNCNAME[0]} build_env torch 1.11.0 cpu # Install the CPU variant a specific version"
echo " ${FUNCNAME[0]} build_env torch latest cpu # Install the CPU variant of the latest stable version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 11.7.1 # Install the variant for CUDA 11.7"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu nightly rocm 5.3 # Install the variant for ROCM 5.3"
echo " ${FUNCNAME[0]} build_env torch 1.11.0 cpu # Install the CPU variant for a specific version"
echo " ${FUNCNAME[0]} build_env torch release cpu # Install the CPU variant, latest release version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 12.1.0 # Install the CUDA 12.1 variant, latest test version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu nightly rocm 5.3 # Install the ROCM 5.3 variant, latest nightly version"
return 1
else
echo "################################################################################"
@@ -138,10 +138,10 @@ download_from_pytorch_pip () {
if [ "$package_variant_type" == "" ]; then
echo "Usage: ${FUNCNAME[0]} ENV_NAME PACKAGE_NAME PACKAGE_VERSION PACKAGE_VARIANT_TYPE [PACKAGE_VARIANT_VERSION]"
echo "Example(s):"
echo " ${FUNCNAME[0]} build_env torch 1.11.0 cpu # Download the CPU variant a specific version"
echo " ${FUNCNAME[0]} build_env torch latest cpu # Download the CPU variant of the latest stable version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 11.7.1 # Download the variant for CUDA 11.7"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu nightly rocm 5.3 # Download the variant for ROCM 5.3"
echo " ${FUNCNAME[0]} build_env torch 1.11.0 cpu # Download the CPU variant for a specific version"
echo " ${FUNCNAME[0]} build_env torch release cpu # Download the CPU variant, latest stable version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu test cuda 12.1.0 # Download the CUDA 12.1 variant, latest test version"
echo " ${FUNCNAME[0]} build_env fbgemm_gpu nightly rocm 5.3 # Download the ROCM 5.3 variant, latest nightly version"
return 1
else
echo "################################################################################"
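The net effect of these hunks: the version keyword (`nightly`, `test`, or now `release`) plus the variant string selects the PIP channel URL. A condensed sketch of that selection follows; variable names mirror the helper above, and the explicit-version branch is cut off in the hunk so it is omitted here:

```sh
#!/usr/bin/env bash
# Condensed sketch of the channel selection in __extract_pip_arguments (not the full helper).
package_name="torch"
package_version="release"        # nightly | test | release
cuda_version="12.1.0"

# Convert, i.e. cuda 12.1.0 => cu121 (as in the hunk above)
# shellcheck disable=SC2206
cuda_version_arr=(${cuda_version//./ })
package_variant="cu${cuda_version_arr[0]}${cuda_version_arr[1]}"

if [ "$package_version" == "nightly" ] || [ "$package_version" == "test" ]; then
  pip_package="--pre ${package_name}"
  pip_channel="https://download.pytorch.org/whl/${package_version}/${package_variant}/"
elif [ "$package_version" == "release" ]; then
  pip_package="${package_name}"
  pip_channel="https://download.pytorch.org/whl/${package_variant}/"
fi

echo "package: ${pip_package}"
echo "channel: ${pip_channel}"
```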
16 changes: 8 additions & 8 deletions .github/scripts/utils_pytorch.bash
@@ -25,9 +25,9 @@ install_pytorch_conda () {
echo "Usage: ${FUNCNAME[0]} ENV_NAME PYTORCH_VERSION [CPU]"
echo "Example(s):"
echo " ${FUNCNAME[0]} build_env 1.11.0 # Install a specific version"
echo " ${FUNCNAME[0]} build_env latest # Install the latest stable release"
echo " ${FUNCNAME[0]} build_env test # Install the pre-release"
echo " ${FUNCNAME[0]} build_env nightly cpu # Install the CPU variant of the nightly"
echo " ${FUNCNAME[0]} build_env release # Install the latest release"
echo " ${FUNCNAME[0]} build_env test # Install the latest pre-release"
echo " ${FUNCNAME[0]} build_env nightly # Install the latest nightly"
return 1
else
echo "################################################################################"
@@ -51,7 +51,7 @@ install_pytorch_conda () {
# Set package name and installation channel
if [ "$pytorch_version" == "nightly" ] || [ "$pytorch_version" == "test" ]; then
local pytorch_channel="pytorch-${pytorch_version}"
elif [ "$pytorch_version" == "latest" ]; then
elif [ "$pytorch_version" == "release" ]; then
local pytorch_channel="pytorch"
else
local pytorch_package="${pytorch_package}==${pytorch_version}"
@@ -111,10 +111,10 @@ install_pytorch_pip () {
if [ "$pytorch_variant_type" == "" ]; then
echo "Usage: ${FUNCNAME[0]} ENV_NAME PYTORCH_VERSION PYTORCH_VARIANT_TYPE [PYTORCH_VARIANT_VERSION]"
echo "Example(s):"
echo " ${FUNCNAME[0]} build_env 1.11.0 cpu # Install the CPU variant a specific version"
echo " ${FUNCNAME[0]} build_env latest cpu # Install the CPU variant of the latest stable version"
echo " ${FUNCNAME[0]} build_env test cuda 11.7.1 # Install the variant for CUDA 11.7"
echo " ${FUNCNAME[0]} build_env nightly rocm 5.3 # Install the variant for ROCM 5.3"
echo " ${FUNCNAME[0]} build_env 1.11.0 cpu # Install the CPU variant for a specific version"
echo " ${FUNCNAME[0]} build_env release cpu # Install the CPU variant, latest release version"
echo " ${FUNCNAME[0]} build_env test cuda 12.1.0 # Install the CUDA 12.1 variant, latest test version"
echo " ${FUNCNAME[0]} build_env nightly rocm 5.3 # Install the ROCM 5.3 variant, latest nightly version"
return 1
else
echo "################################################################################"
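For the conda path, the same keyword now maps to a conda channel: `nightly`/`test` select `pytorch-nightly`/`pytorch-test`, `release` selects the stable `pytorch` channel, and anything else is treated as a version pin. A minimal sketch of that mapping; the final `conda install` line is an assumption, since the actual install command sits outside the hunk:

```sh
env_name="build_env"
pytorch_version="release"        # nightly | test | release | <explicit version, e.g. 1.11.0>
pytorch_package="pytorch"
pytorch_channel="pytorch"        # assumed default for release / pinned versions

if [ "$pytorch_version" == "nightly" ] || [ "$pytorch_version" == "test" ]; then
  pytorch_channel="pytorch-${pytorch_version}"
elif [ "$pytorch_version" == "release" ]; then
  pytorch_channel="pytorch"
else
  pytorch_package="${pytorch_package}==${pytorch_version}"
fi

# Assumed shape of the eventual install step:
conda install -n "${env_name}" -y -c "${pytorch_channel}" "${pytorch_package}"
```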
10 changes: 8 additions & 2 deletions .github/workflows/fbgemm_gpu_cpu_release.yml
@@ -27,6 +27,12 @@ on:
type: boolean
required: false
default: false
+ pytorch_channel:
+ description: PyTorch Package Channel
+ type: choice
+ required: false
+ options: [ "test", "release" ]
+ default: "test"

concurrency:
# Cancel previous runs in the PR if a new commit is pushed
@@ -85,7 +91,7 @@ jobs:
run: . $PRELUDE; install_build_tools $BUILD_ENV

- name: Install PyTorch-CPU Test
- run: . $PRELUDE; install_pytorch_pip $BUILD_ENV test cpu
+ run: . $PRELUDE; install_pytorch_pip $BUILD_ENV ${{ github.event.inputs.pytorch_channel || 'test' }} cpu

- name: Prepare FBGEMM_GPU Build
run: . $PRELUDE; cd fbgemm_gpu; prepare_fbgemm_gpu_build $BUILD_ENV
@@ -149,7 +155,7 @@ jobs:
run: . $PRELUDE; create_conda_environment $BUILD_ENV ${{ matrix.python-version }}

- name: Install PyTorch Test
- run: . $PRELUDE; install_pytorch_pip $BUILD_ENV test cpu
+ run: . $PRELUDE; install_pytorch_pip $BUILD_ENV ${{ github.event.inputs.pytorch_channel || 'test' }} cpu

- name: Prepare FBGEMM_GPU Build
run: . $PRELUDE; cd fbgemm_gpu; prepare_fbgemm_gpu_build $BUILD_ENV
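The expression `${{ github.event.inputs.pytorch_channel || 'test' }}` keeps the previous behavior unless a manual `workflow_dispatch` run explicitly picks `release`: on other triggers the input is empty and the `|| 'test'` fallback applies. Roughly, the run step resolves to one of the following:

```sh
# Manual run with pytorch_channel=release:
. $PRELUDE; install_pytorch_pip $BUILD_ENV release cpu

# Any other trigger, or pytorch_channel left at its "test" default:
. $PRELUDE; install_pytorch_pip $BUILD_ENV test cpu
```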
10 changes: 8 additions & 2 deletions .github/workflows/fbgemm_gpu_cuda_release.yml
@@ -33,6 +33,12 @@ on:
required: false
options: [ "11.8.0", "12.1.1" ]
default: "12.1.1"
+ pytorch_channel:
+ description: PyTorch Package Channel
+ type: choice
+ required: false
+ options: [ "test", "release" ]
+ default: "test"

concurrency:
# Cancel previous runs in the PR if a new commit is pushed
@@ -94,7 +100,7 @@ jobs:
run: . $PRELUDE; install_cuda $BUILD_ENV ${{ matrix.cuda-version }}

- name: Install PyTorch Test
- run: . $PRELUDE; install_pytorch_pip $BUILD_ENV test cuda ${{ matrix.cuda-version }}
+ run: . $PRELUDE; install_pytorch_pip $BUILD_ENV ${{ github.event.inputs.pytorch_channel || 'test' }} cuda ${{ matrix.cuda-version }}

- name: Install cuDNN
run: . $PRELUDE; install_cudnn $BUILD_ENV "$(pwd)/build_only/cudnn" ${{ matrix.cuda-version }}
@@ -162,7 +168,7 @@ jobs:
run: . $PRELUDE; install_cuda $BUILD_ENV ${{ matrix.cuda-version }}

- name: Install PyTorch Test
- run: . $PRELUDE; install_pytorch_pip $BUILD_ENV test cuda ${{ matrix.cuda-version }}
+ run: . $PRELUDE; install_pytorch_pip $BUILD_ENV ${{ github.event.inputs.pytorch_channel || 'test' }} cuda ${{ matrix.cuda-version }}

- name: Prepare FBGEMM_GPU Build
run: . $PRELUDE; cd fbgemm_gpu; prepare_fbgemm_gpu_build $BUILD_ENV
4 changes: 2 additions & 2 deletions fbgemm_gpu/README.md
@@ -8,8 +8,8 @@ FBGEMM_GPU (FBGEMM GPU Kernels Library) is a collection of high-performance PyTo
GPU operator libraries for training and inference. The library provides efficient
table batched embedding bag, data layout transformation, and quantization supports.

- FBGEMM_GPU is currently tested with CUDA 11.7.1 and 11.8 in CI, and with PyTorch
- packages (1.13+) that are built against those CUDA versions.
+ FBGEMM_GPU is currently tested with cuda 12.1.0 and 11.8 in CI, and with PyTorch
+ packages (2.1+) that are built against those CUDA versions.

Only Intel/AMD CPUs with AVX2 extensions are currently supported.

8 changes: 4 additions & 4 deletions fbgemm_gpu/docs/BuildInstructions.md
@@ -253,13 +253,13 @@ PyTorch for ROCm builds.

```sh
# Install the latest nightly
conda run -n "${env_name}" pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cu117/
conda run -n "${env_name}" pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121/
# Install the latest test (RC)
conda run -n "${env_name}" pip install --pre torch --extra-index-url https://download.pytorch.org/whl/test/cu117/
conda run -n "${env_name}" pip install --pre torch --index-url https://download.pytorch.org/whl/test/cu121/
# Install a specific version
conda run -n "${env_name}" pip install torch==2.0.0+cu117 --extra-index-url https://download.pytorch.org/whl/cu117/
conda run -n "${env_name}" pip install torch==2.1.0+cu121 --index-url https://download.pytorch.org/whl/cu121/
# Install the latest nightly (ROCm 5.3)
conda run -n "${env_name}" pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/rocm5.3/
conda run -n "${env_name}" pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm5.3/
```

### Post-Install Checks
16 changes: 8 additions & 8 deletions fbgemm_gpu/docs/InstallationInstructions.md
@@ -14,20 +14,20 @@ The shortened summary of the installation steps:

```sh
# CUDA Nightly
- pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cu117/
- pip install fbgemm-gpu-nightly
+ pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121/
+ pip install --pre fbgemm-gpu --index-url https://download.pytorch.org/whl/nightly/cu121/

# CUDA Release
- pip install --pre torch --extra-index-url https://download.pytorch.org/whl/test/cu117/
- pip install fbgemm-gpu
+ pip install torch --index-url https://download.pytorch.org/whl/cu121/
+ pip install fbgemm-gpu --index-url https://download.pytorch.org/whl/cu121/

# CPU-only Nightly
- pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu/
- pip install fbgemm-gpu-nightly-cpu
+ pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu/
+ pip install --pre fbgemm-gpu --index-url https://download.pytorch.org/whl/nightly/cpu/

# CPU-only Release
- pip install --pre torch --extra-index-url https://download.pytorch.org/whl/test/cpu/
- pip install fbgemm-gpu-cpu
+ pip install torch --index-url https://download.pytorch.org/whl/cpu/
+ pip install fbgemm-gpu --index-url https://download.pytorch.org/whl/cpu/

# Test the installation
python -c "import torch; import fbgemm_gpu"
