From aa48aaa724912305dcbf3cf31da50f702fa1804f Mon Sep 17 00:00:00 2001 From: Sarunya Pumma Date: Fri, 15 Sep 2023 11:45:40 -0700 Subject: [PATCH] Improve all_to_one error message (#2019) Summary: Pull Request resolved: https://github.com/pytorch/FBGEMM/pull/2019 As titled Reviewed By: jianyuh Differential Revision: D49296564 fbshipit-source-id: 442c13567cb7aa8de8c208c2ee1fb2ae550a8969 --- fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp b/fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp index 4b90c2bda..0a3ad3b63 100644 --- a/fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp +++ b/fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp @@ -149,7 +149,12 @@ void all_to_one( }); auto target_device_index = target_device.index(); - TORCH_CHECK(target_device_index < num_gpus && target_device_index >= 0); + TORCH_CHECK( + target_device_index != -1, + "target_device.index() is -1. Please pass target_device with device " + "index, e.g., torch.device(\"cuda:0\")") + + TORCH_CHECK(target_device_index < num_gpus); std::vector<TwoHopTransferContainer> two_hop_transfers; two_hop_transfers.reserve(input_tensors.size());