Skip to content

Commit

Permalink
Merge pull request #16 from intelligent-machine-learning/hotfix/fix_XlaDeviceType
Browse files Browse the repository at this point in the history

fix torch_xla/csrc/aten_xla_type.cpp
  • Loading branch information
mars1248 authored Apr 2, 2024
2 parents 4b6b78d + 663a984 commit 123ad3b
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions torch_xla/csrc/aten_xla_type.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3237,7 +3237,7 @@ at::Tensor XLANativeFunctions::upsample_bicubic2d(
XlaDeviceType hw_type =
static_cast<XlaDeviceType>(self_tensor->GetDevice().type());
// NOT GPU
if (hw_type != XlaDeviceType::CUDA && hw_type != XlaDeviceType::GPU) {
if (hw_type != XlaDeviceType::CUDA) {
return at::native::call_fallback_fn<
&xla_cpu_fallback, ATEN_OP(upsample_bicubic2d)>::call(self, output_size,
align_corners,
Expand Down Expand Up @@ -3271,7 +3271,7 @@ at::Tensor XLANativeFunctions::upsample_bicubic2d_backward(
// NOT GPU
XlaDeviceType hw_type =
static_cast<XlaDeviceType>(grad_output_tensor->GetDevice().type());
if (hw_type != XlaDeviceType::CUDA && hw_type != XlaDeviceType::GPU) {
if (hw_type != XlaDeviceType::CUDA) {
return at::native::call_fallback_fn<
&xla_cpu_fallback,
ATEN_OP(upsample_nearest2d_backward)>::call(grad_output, output_size,
Expand Down

0 comments on commit 123ad3b

Please sign in to comment.