diff --git a/fbgemm_gpu/codegen/training/python/split_embedding_codegen_lookup_invoker.template b/fbgemm_gpu/codegen/training/python/split_embedding_codegen_lookup_invoker.template
index fec923833..cedc0c41c 100644
--- a/fbgemm_gpu/codegen/training/python/split_embedding_codegen_lookup_invoker.template
+++ b/fbgemm_gpu/codegen/training/python/split_embedding_codegen_lookup_invoker.template
@@ -24,23 +24,13 @@
 try:
     torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_training")
     torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_training")
 except Exception:
-    if torch.version.hip:
-        torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_hip")
-    else:
-        torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
+    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
     torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")
 
-if torch.version.hip:
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils_hip")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_hip")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings_hip")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update_hip")
-else:
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings")
-    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update")
-
+torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update")
+torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings")
+torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
+torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
 torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
 torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update_cpu")
diff --git a/fbgemm_gpu/fbgemm_gpu/quantize_utils.py b/fbgemm_gpu/fbgemm_gpu/quantize_utils.py
index 3f0477eb0..0ce9e6a8b 100644
--- a/fbgemm_gpu/fbgemm_gpu/quantize_utils.py
+++ b/fbgemm_gpu/fbgemm_gpu/quantize_utils.py
@@ -17,11 +17,7 @@
     # pyre-ignore[21]
     from fbgemm_gpu import open_source  # noqa: F401
 except Exception:
-    if torch.version.hip:
-        torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_hip")
-    else:
-        torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
-
+    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
     torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
 
 TORCH_HALF_MIN: float = torch.finfo(torch.float16).min