diff --git a/docs/reference/index.rst b/docs/reference/index.rst index 2ade4839b1..8d57816fa0 100644 --- a/docs/reference/index.rst +++ b/docs/reference/index.rst @@ -38,3 +38,4 @@ The MIOpen API library is structured as follows: * :doc:`RotaryPositionalEmbeddings <../doxygen/html/group__RotaryPositionalEmbeddings>` (experimental) * :doc:`ReLU <../doxygen/html/group___re_l_u>` (experimental) * :doc:`GLU <../doxygen/html/group__glu>` (experimental) + * :doc:`AvgPool <../doxygen/html/group__avgpool>` (experimental) diff --git a/driver/CMakeLists.txt b/driver/CMakeLists.txt index 256901aa94..f44f34586a 100644 --- a/driver/CMakeLists.txt +++ b/driver/CMakeLists.txt @@ -32,6 +32,7 @@ add_executable(MIOpenDriver dm_activ.cpp dm_adam.cpp dm_addlayernorm.cpp + dm_avgpool.cpp dm_bnorm.cpp dm_cat.cpp dm_conv.cpp diff --git a/driver/avgpool_driver.hpp b/driver/avgpool_driver.hpp new file mode 100644 index 0000000000..65d0f9d001 --- /dev/null +++ b/driver/avgpool_driver.hpp @@ -0,0 +1,596 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#pragma once + +#include "InputFlags.hpp" +#include "driver.hpp" +#include "mloAvgPoolHost.hpp" +#include "random.hpp" +#include "tensor_driver.hpp" +#include "timer.hpp" + +#include <../test/tensor_holder.hpp> +#include <../test/verify.hpp> + +#include +#include +#include +#include +#include + +template +class AvgPoolDriver : public Driver +{ +public: + AvgPoolDriver() : Driver() + { + miopenCreateTensorDescriptor(&inputDesc); + miopenCreateTensorDescriptor(&outputDesc); + miopenCreateTensorDescriptor(&inputGradDesc); + miopenCreateTensorDescriptor(&outputGradDesc); + + data_type = miopen_type{}; + } + + std::vector ComputeStrides(std::vector input); + int AddCmdLineArgs() override; + int ParseCmdLineArgs(int argc, char* argv[]) override; + InputFlags& GetInputFlags() override { return inflags; } + + int GetandSetData() override; + + int AllocateBuffersAndCopy() override; + + int RunForwardGPU() override; + int RunForwardCPU(); + + int RunBackwardGPU() override; + int RunBackwardCPU(); + + Tref GetTolerance(); + int VerifyBackward() override; + int VerifyForward() override; + ~AvgPoolDriver() override + { + miopenDestroyTensorDescriptor(inputDesc); + miopenDestroyTensorDescriptor(outputDesc); + miopenDestroyTensorDescriptor(inputGradDesc); + miopenDestroyTensorDescriptor(outputGradDesc); + } + +private: + InputFlags inflags; + + miopenTensorDescriptor_t inputDesc; + miopenTensorDescriptor_t outputDesc; + miopenTensorDescriptor_t inputGradDesc; + miopenTensorDescriptor_t outputGradDesc; + + std::unique_ptr input_dev; + std::unique_ptr output_dev; + std::unique_ptr input_grad_dev; + std::unique_ptr 
output_grad_dev; + + std::vector input; + std::vector output; + std::vector output_host; + std::vector input_grad; + std::vector input_grad_host; + std::vector output_grad; + std::vector ksize; + std::vector stride; + std::vector padding; + + bool ceil_mode; + bool count_include_pad; + int64_t divisor_override; + int64_t N, C, D, H, W, OD, OH, OW; + + std::vector in_dim; + bool isContiguous; +}; + +template +int AvgPoolDriver::ParseCmdLineArgs(int argc, char* argv[]) +{ + inflags.Parse(argc, argv); + isContiguous = inflags.GetValueInt("is-contiguous") == 1 ? true : false; + + if(inflags.GetValueInt("time") == 1) + { + miopenEnableProfiling(GetHandle(), true); + } + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::GetandSetData() +{ + in_dim = inflags.GetValueTensorUint64("input_dims").lengths; + std::vector in_stride = ComputeStrides(in_dim); + int ksp_dim = in_dim.size() - 2; + std::vector ksize_int = inflags.GetValueTensorUint64("kernel_size").lengths; + ksize = std::vector(ksize_int.begin(), ksize_int.end()); + std::vector stride_int = inflags.GetValueTensorUint64("stride").lengths; + stride = std::vector(stride_int.begin(), stride_int.end()); + std::vector padding_int = inflags.GetValueTensorUint64("padding").lengths; + padding = std::vector(padding_int.begin(), padding_int.end()); + + if(ksize.size() != ksp_dim) + { + int ref = ksp_dim - ksize.size(); + if(ref < 0) + MIOPEN_THROW("Invalid kernel size"); + while((ref--) != 0) + ksize.push_back(ksize[0]); + } + if(stride.size() != ksp_dim) + { + int ref = ksp_dim - stride.size(); + if(ref < 0) + MIOPEN_THROW("Invalid stride size"); + while((ref--) != 0) + stride.push_back(stride[0]); + } + if(padding.size() != ksp_dim) + { + int ref = ksp_dim - padding.size(); + if(ref < 0) + MIOPEN_THROW("Invalid padding size"); + while((ref--) != 0) + padding.push_back(padding[0]); + } + + ceil_mode = static_cast(inflags.GetValueInt("ceil_mode")); + count_include_pad = 
static_cast(inflags.GetValueInt("count_include_pad")); + divisor_override = inflags.GetValueInt("divisor_override"); + + N = in_dim[0]; + C = in_dim[1]; + D = in_dim.size() == 5 ? in_dim[2] : 1; + H = in_dim.size() == 5 ? in_dim[3] : in_dim[2]; + W = in_dim.size() == 5 ? in_dim[4] : in_dim[3]; + + std::vector out_dim; + if(in_dim.size() == 5) + { + if(ceil_mode) + { + OD = std::ceil(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::ceil(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::ceil(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + else + { + OD = std::floor(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::floor(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::floor(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + out_dim = {N, C, OD, OH, OW}; + } + else + { + if(ceil_mode) + { + OH = std::ceil(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::ceil(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + else + { + OH = std::floor(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::floor(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + out_dim = {N, C, OH, OW}; + } + std::vector out_grad_stride = ComputeStrides(out_dim); + if(SetTensorNd(inputDesc, in_dim, in_stride, data_type) != miopenStatusSuccess) + MIOPEN_THROW("Error parsing input tensor: " + inflags.GetValueStr("input_dims") + "."); + if(SetTensorNd(outputDesc, out_dim, data_type) != miopenStatusSuccess) + MIOPEN_THROW("Error parsing output tensor: " + inflags.GetValueStr("output_dims") + "."); + if(SetTensorNd(outputGradDesc, out_dim, out_grad_stride, data_type) != miopenStatusSuccess) + MIOPEN_THROW("Error parsing output grad tensor: " + inflags.GetValueStr("output_dims") + + "."); + if(SetTensorNd(inputGradDesc, in_dim, data_type) != miopenStatusSuccess) + MIOPEN_THROW("Error parsing input grad 
tensor: " + inflags.GetValueStr("input_dims") + "."); + + return miopenStatusSuccess; +} + +// Equivalent to: tensor.tranpose(0, -1).contiguous().tranpose(0, -1) incase contiguous = False +template +std::vector AvgPoolDriver::ComputeStrides(std::vector inputDim) +{ + if(!isContiguous) + std::swap(inputDim.front(), inputDim.back()); + std::vector strides(inputDim.size()); + strides.back() = 1; + for(int i = inputDim.size() - 2; i >= 0; --i) + strides[i] = strides[i + 1] * inputDim[i + 1]; + if(!isContiguous) + std::swap(strides.front(), strides.back()); + return strides; +} + +template +int AvgPoolDriver::AddCmdLineArgs() +{ + inflags.AddInputFlag("forw", 'F', "1", "Run only Forward AvgPool (Default=1)", "int"); + inflags.AddTensorFlag( + "input_dims", + 'D', + "2x3x7x9", + "The dimensional lengths of the input tensor: N,C,D1,D2,... Example: 2x3x7x9."); + inflags.AddTensorFlag( + "kernel_size", 'k', "1x1", "The size of the window D1,D2,... Example: 1x1."); + inflags.AddTensorFlag( + "stride", + 's', + "1x1", + "The stride of the window. Default value is kernel_size D1,D2,... Example: 1x1."); + inflags.AddTensorFlag( + "padding", + 'p', + "0x0", + "Implicit zero padding to be added on both sides D1,D2,... 
Example: 0x0."); + inflags.AddInputFlag( + "ceil_mode", + 'c', + "1", + "When 1, will use ceil instead of floor to compute the output shape (Default=1).", + "int"); + inflags.AddInputFlag( + "count_include_pad", + 'P', + "0", + "When 1, will include the zero-padding in the averaging calculation (Default=0).", + "int"); + inflags.AddInputFlag("divisor_override", + 'd', + "0", + "If specified, it will be used as divisor, otherwise size of the pooling " + "region will be used (Default=0).", + "int"); + + inflags.AddInputFlag("is-contiguous", 'C', "1", "is-contiguous (Default=1)", "int"); + inflags.AddInputFlag("iter", 'i', "10", "Number of Iterations (Default=10)", "int"); + inflags.AddInputFlag("verify", 'V', "1", "Verify (Default=1)", "int"); + inflags.AddInputFlag("time", 't', "1", "Time (Default=1)", "int"); + inflags.AddInputFlag( + "wall", 'w', "0", "Wall-clock Time, Requires time == 1 (Default=0)", "int"); + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::AllocateBuffersAndCopy() +{ + size_t input_sz = GetTensorSize(inputDesc); + size_t output_sz = GetTensorSize(outputDesc); + + uint32_t ctx = 0; + + input_dev = std::unique_ptr(new GPUMem(ctx, input_sz, sizeof(Tgpu))); + output_dev = std::unique_ptr(new GPUMem(ctx, output_sz, sizeof(Tgpu))); + input_grad_dev = std::unique_ptr(new GPUMem(ctx, input_sz, sizeof(Tgpu))); + output_grad_dev = std::unique_ptr(new GPUMem(ctx, output_sz, sizeof(Tgpu))); + + input = std::vector(input_sz, static_cast(0)); + output = std::vector(output_sz, static_cast(0)); + output_host = std::vector(output_sz, static_cast(0)); + + input_grad = std::vector(input_sz, static_cast(0)); + input_grad_host = std::vector(input_sz, static_cast(0)); + output_grad = std::vector(output_sz, static_cast(0)); + + int status; + + for(int i = 0; i < input_sz; i++) + { + input[i] = prng::gen_A_to_B(static_cast(-10.0f), static_cast(10.0f)); + } + status = input_dev->ToGPU(q, input.data()); + + status |= output_dev->ToGPU(q, output.data()); 
+ + status |= input_grad_dev->ToGPU(q, input_grad.data()); + + for(int i = 0; i < output_sz; i++) + { + output_grad[i] = prng::gen_A_to_B(static_cast(-1.0), static_cast(1.0)); + } + status |= output_grad_dev->ToGPU(q, output_grad.data()); + + if(status != 0) + { + std::cout << "Error copying data to GPU\n" << std::endl; + return miopenStatusInternalError; + } + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::RunForwardGPU() +{ + float kernel_total_time = 0.0; + float kernel_first_time = 0.0; + + Timer t; + START_TIME + + for(int i = 0; i < inflags.GetValueInt("iter"); i++) + { + auto status = miopenAvgPoolForward(GetHandle(), + inputDesc, + input_dev->GetMem(), + outputDesc, + output_dev->GetMem(), + ksize.size() == 3 ? ksize[0] : 0, + ksize.size() == 3 ? ksize[1] : ksize[0], + ksize.size() == 3 ? ksize[2] : ksize[1], + stride.size() == 3 ? stride[0] : 0, + stride.size() == 3 ? stride[1] : stride[0], + stride.size() == 3 ? stride[2] : stride[1], + padding.size() == 3 ? padding[0] : 0, + padding.size() == 3 ? padding[1] : padding[0], + padding.size() == 3 ? padding[2] : padding[1], + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in miopenAvgPoolForward"); + + float time = 0.0; + miopenGetKernelTime(GetHandle(), &time); + kernel_total_time += time; + if(i == 0) + kernel_first_time = time; + } + + if(inflags.GetValueInt("time") == 1) + { + STOP_TIME + int iter = inflags.GetValueInt("iter"); + if(WALL_CLOCK) + std::cout << "Wall-clock Time Forward AvgPool Elapsed: " << t.gettime_ms() / iter + << " ms" << std::endl; + + float kernel_average_time = + iter > 1 ? 
(kernel_total_time - kernel_first_time) / (iter - 1) : kernel_first_time; + std::cout << "GPU Kernel Time Forward AvgPool Elapsed: " << kernel_average_time << " ms" + << std::endl; + } + + if(output_dev->FromGPU(GetStream(), output.data()) != 0) + { + std::cerr << "Error copying (output_dev) from GPU, size: " << output_dev->GetSize() + << std::endl; + return miopenStatusInternalError; + } + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::RunForwardCPU() +{ + int status = miopenStatusSuccess; + + if(in_dim.size() == 4) + { + status = mloAvgPoolForward2dRunHost(inputDesc, + outputDesc, + input.data(), + output_host.data(), + N, + C, + H, + W, + OH, + OW, + ksize.data(), + stride.data(), + padding.data(), + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in mloAvgPoolForward2dRunHost"); + } + else if(in_dim.size() == 5) + { + status = mloAvgPoolForward3dRunHost(inputDesc, + outputDesc, + input.data(), + output_host.data(), + N, + C, + D, + H, + W, + OD, + OH, + OW, + ksize.data(), + stride.data(), + padding.data(), + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in mloAvgPoolForward3dRunHost"); + } + return status; +} + +template +int AvgPoolDriver::RunBackwardGPU() +{ + float kernel_total_time = 0.0; + float kernel_first_time = 0.0; + + Timer t; + START_TIME + + for(int i = 0; i < inflags.GetValueInt("iter"); i++) + { + auto status = miopenAvgPoolBackward(GetHandle(), + outputGradDesc, + output_grad_dev->GetMem(), + inputGradDesc, + input_grad_dev->GetMem(), + ksize.size() == 3 ? ksize[0] : 0, + ksize.size() == 3 ? ksize[1] : ksize[0], + ksize.size() == 3 ? ksize[2] : ksize[1], + stride.size() == 3 ? stride[0] : 0, + stride.size() == 3 ? stride[1] : stride[0], + stride.size() == 3 ? stride[2] : stride[1], + padding.size() == 3 ? padding[0] : 0, + padding.size() == 3 ? padding[1] : padding[0], + padding.size() == 3 ? 
padding[2] : padding[1], + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in miopenAvgPoolBackward"); + + float time = 0.0; + miopenGetKernelTime(GetHandle(), &time); + kernel_total_time += time; + if(i == 0) + kernel_first_time = time; + } + + if(inflags.GetValueInt("time") == 1) + { + STOP_TIME + int iter = inflags.GetValueInt("iter"); + if(WALL_CLOCK) + std::cout << "Wall-clock Time Backward AvgPool Elapsed: " << t.gettime_ms() / iter + << " ms" << std::endl; + + float kernel_average_time = + iter > 1 ? (kernel_total_time - kernel_first_time) / (iter - 1) : kernel_first_time; + std::cout << "GPU Kernel Time Backward AvgPool Elapsed: " << kernel_average_time << " ms" + << std::endl; + } + + if(input_grad_dev->FromGPU(GetStream(), input_grad.data()) != 0) + { + std::cerr << "Error copying (input_grad_dev) from GPU, size: " << input_grad_dev->GetSize() + << std::endl; + return miopenStatusInternalError; + } + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::RunBackwardCPU() +{ + int status = miopenStatusSuccess; + + if(in_dim.size() == 4) + { + status = mloAvgPoolBackward2dRunHost(outputGradDesc, + inputGradDesc, + output_grad.data(), + input_grad_host.data(), + N, + C, + H, + W, + OH, + OW, + ksize.data(), + stride.data(), + padding.data(), + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in mloAvgPoolBackward2dRunHost"); + } + else if(in_dim.size() == 5) + { + status = mloAvgPoolBackward3dRunHost(outputGradDesc, + inputGradDesc, + output_grad.data(), + input_grad_host.data(), + N, + C, + D, + H, + W, + OD, + OH, + OW, + ksize.data(), + stride.data(), + padding.data(), + count_include_pad, + divisor_override); + MIOPEN_THROW_IF(status != miopenStatusSuccess, "Error in mloAvgPoolBackward3dRunHost"); + } + return status; +} + +template +Tref AvgPoolDriver::GetTolerance() +{ + Tref tolerance = std::numeric_limits::epsilon() * 10; + return tolerance; +} + 
+template +int AvgPoolDriver::VerifyForward() +{ + RunForwardCPU(); + const Tref tolerance = GetTolerance(); + auto error = miopen::rms_range(output_host, output); + + if(!std::isfinite(error) || error > tolerance) + { + std::cout << "Forward AvgPool FAILED: " << error << std::endl; + return EC_VerifyFwd; + } + else + { + std::cout << "Forward AvgPool Verifies on CPU and GPU (err=" << error << ")" << std::endl; + } + + return miopenStatusSuccess; +} + +template +int AvgPoolDriver::VerifyBackward() +{ + RunBackwardCPU(); + const Tref tolerance = GetTolerance(); + auto error = miopen::rms_range(input_grad_host, input_grad); + + if(!std::isfinite(error) || error > tolerance) + { + std::cout << "Backward AvgPool FAILED: " << error << std::endl; + return EC_VerifyBwd; + } + else + { + std::cout << "Backward AvgPool Verifies on CPU and GPU (err=" << error << ")" << std::endl; + } + return miopenStatusSuccess; +} diff --git a/driver/dm_avgpool.cpp b/driver/dm_avgpool.cpp new file mode 100644 index 0000000000..ec0e457056 --- /dev/null +++ b/driver/dm_avgpool.cpp @@ -0,0 +1,40 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#include "registry_driver_maker.hpp" +#include "avgpool_driver.hpp" + +static Driver* makeDriver(const std::string& base_arg) +{ + if(base_arg == "avgpool") + return new AvgPoolDriver(); + if(base_arg == "avgpoolfp16") + return new AvgPoolDriver(); + if(base_arg == "avgpoolbfp16") + return new AvgPoolDriver(); + return nullptr; +} + +REGISTER_DRIVER_MAKER(makeDriver); diff --git a/driver/driver.hpp b/driver/driver.hpp index aa0b89f10a..5d2349523f 100644 --- a/driver/driver.hpp +++ b/driver/driver.hpp @@ -176,7 +176,7 @@ inline void PadBufferSize(size_t& sz, int datatype_sz) "t5layernorm[bfp16|fp16], adam[fp16], ampadam, reduceextreme[bfp16|fp16], " "adamw[fp16], ampadamw, transformersadamw[fp16], transformersampadamw, " "getitem[bfp16|fp16], reducecalculation[bfp16|fp16], rope[bfp16|fp16], " - "prelu[bfp16|fp16], glu[bfp16|fp16]\n"); + "prelu[bfp16|fp16], glu[bfp16|fp16]\n, avgpool[bfp16|fp16]\n"); exit(0); // NOLINT (concurrency-mt-unsafe) } @@ -210,7 +210,7 @@ inline std::string ParseBaseArg(int argc, char* argv[]) arg != "reducecalculationfp16" && arg != "reducecalculationbfp16" && arg != "rope" && arg != "ropefp16" && arg != "ropebfp16" && arg != "prelu" && arg != "prelufp16" && arg != "prelubfp16" && arg != "glu" && arg != "glufp16" && arg != "glubfp16" && - arg != "--version") + arg != "avgpool" && arg != "avgpoolfp16" && arg != "avgpoolbfp16" && arg != "--version") { printf("FAILED: 
Invalid Base Input Argument\n"); Usage(); diff --git a/driver/mloAvgPoolHost.hpp b/driver/mloAvgPoolHost.hpp new file mode 100644 index 0000000000..4453f06f06 --- /dev/null +++ b/driver/mloAvgPoolHost.hpp @@ -0,0 +1,416 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ +#pragma once + +#include +#include +#include <../test/ford.hpp> + +template +int32_t mloAvgPoolForward2dRunHost(const miopenTensorDescriptor_t inputDesc, + const miopenTensorDescriptor_t outputDesc, + const Tgpu* input, + Tcheck* output, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + const int64_t* ksize, + const int64_t* stride, + const int64_t* padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = miopen::deref(inputDesc).GetLengths(); + auto numel = miopen::deref(outputDesc).GetElementSize(); + + auto input_tv = miopen::get_inner_expanded_tv<4>(miopen::deref(inputDesc)); + auto output_tv = miopen::get_inner_expanded_tv<4>(miopen::deref(outputDesc)); + + par_ford(numel)([&](int64_t gid) { + int64_t ncoh = gid / OW, ow = gid % OW; + int64_t nc = ncoh / OH, oh = ncoh % OH; + int64_t n = nc / C, c = nc % C; + int64_t R = ksize[0]; + int64_t S = ksize[1]; + int64_t sh = stride[0]; + int64_t sw = stride[1]; + int64_t ph = padding[0]; + int64_t pw = padding[1]; + + float m = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, h, w) + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + m += static_cast(input[input_tv.get_tensor_view_idx({n, c, h, w})]); + } + } + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } 
+ else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + float val = m / divide_factor; + + output[output_tv.get_tensor_view_idx({n, c, oh, ow})] = static_cast(val); + }); + return miopenStatusSuccess; +} + +template +int32_t mloAvgPoolForward3dRunHost(const miopenTensorDescriptor_t inputDesc, + const miopenTensorDescriptor_t outputDesc, + const Tgpu* input, + Tcheck* output, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + const int64_t* ksize, + const int64_t* stride, + const int64_t* padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = miopen::deref(inputDesc).GetLengths(); + auto numel = miopen::deref(outputDesc).GetElementSize(); + + auto input_tv = miopen::get_inner_expanded_tv<5>(miopen::deref(inputDesc)); + auto output_tv = miopen::get_inner_expanded_tv<5>(miopen::deref(outputDesc)); + + par_ford(numel)([&](int64_t gid) { + int64_t ncodoh = gid / OW, ow = gid % OW; + int64_t ncod = ncodoh / OH, oh = ncodoh % OH; + int64_t nc = ncod / OD, od = ncod % OD; + int64_t n = nc / C, c = nc % C; + int64_t KD = ksize[0]; + int64_t R = ksize[1]; + int64_t S = ksize[2]; + int64_t sd = stride[0]; + int64_t sh = stride[1]; + int64_t sw = stride[2]; + int64_t pd = padding[0]; + int64_t ph = padding[1]; + int64_t pw = padding[2]; + + float sum = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, d, h, w) + int64_t d = od * sd - pd + kd; + if(d < 0 || d >= D) + continue; + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + sum += static_cast(input[input_tv.get_tensor_view_idx({n, c, d, h, w})]); + } + } + } + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = min(dstart + KD, D + pd); + 
int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + float val = sum / divide_factor; + output[output_tv.get_tensor_view_idx({n, c, od, oh, ow})] = static_cast(val); + }); + return miopenStatusSuccess; +} + +template +int32_t mloAvgPoolBackward2dRunHost(const miopenTensorDescriptor_t outputGradDesc, + const miopenTensorDescriptor_t inputGradDesc, + const Tgpu* output_grad, + Tcheck* input_grad, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + const int64_t* ksize, + const int64_t* stride, + const int64_t* padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = miopen::deref(inputGradDesc).GetLengths(); + auto numel = miopen::deref(inputGradDesc).GetElementSize(); + + auto output_grad_tv = miopen::get_inner_expanded_tv<4>(miopen::deref(outputGradDesc)); + auto input_grad_tv = miopen::get_inner_expanded_tv<4>(miopen::deref(inputGradDesc)); + + par_ford(numel)([&](int64_t gid) { + int64_t nch = gid / W, w = gid % W; + int64_t nc = nch / H, h = nch % H; + int64_t n = nc / C, c = nc % C; + int64_t R = ksize[0]; + int64_t S = ksize[1]; + int64_t sh = stride[0]; + int64_t sw = stride[1]; + int64_t ph = padding[0]; + int64_t pw = padding[1]; + + float grad = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + continue; + int64_t owsw = w + pw - s; + if(owsw % sw != 
0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + + grad += static_cast( + output_grad[output_grad_tv.get_tensor_view_idx({n, c, oh, ow})]) / + divide_factor; + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, h, w})] = static_cast(grad); + }); + return miopenStatusSuccess; +} + +template +int32_t mloAvgPoolBackward3dRunHost(const miopenTensorDescriptor_t outputGradDesc, + const miopenTensorDescriptor_t inputGradDesc, + const Tgpu* output_grad, + Tcheck* input_grad, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + const int64_t* ksize, + const int64_t* stride, + const int64_t* padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = miopen::deref(inputGradDesc).GetLengths(); + auto numel = miopen::deref(inputGradDesc).GetElementSize(); + + auto output_grad_tv = miopen::get_inner_expanded_tv<5>(miopen::deref(outputGradDesc)); + auto input_grad_tv = miopen::get_inner_expanded_tv<5>(miopen::deref(inputGradDesc)); + + par_ford(numel)([&](int64_t gid) { + int64_t ncdh = gid / W, w = gid % W; + int64_t ncd = ncdh / H, h = ncdh % H; + int64_t nc = ncd / D, d = ncd % D; + int64_t n = nc / C, c = nc % C; + int64_t KD = ksize[0]; + int64_t R = ksize[1]; + int64_t S = ksize[2]; + int64_t sd = stride[0]; + int64_t sh = stride[1]; + int64_t sw = stride[2]; + int64_t pd = padding[0]; + int64_t ph = 
padding[1]; + int64_t pw = padding[2]; + + float grad = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t odsd = d + pd - kd; + if(odsd % sd != 0) + continue; + int64_t od = odsd / sd; + if(od < 0 || od >= OD) + continue; + + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + continue; + + int64_t owsw = w + pw - s; + if(owsw % sw != 0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = min(dstart + KD, D + pd); + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + wend = min(wend, W); + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + grad += + static_cast( + output_grad[output_grad_tv.get_tensor_view_idx({n, c, od, oh, ow})]) / + divide_factor; + } + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, d, h, w})] = static_cast(grad); + }); + return miopenStatusSuccess; +} diff --git a/include/miopen/miopen.h b/include/miopen/miopen.h index 7ed36c72a4..18102e8dde 100644 --- a/include/miopen/miopen.h +++ b/include/miopen/miopen.h @@ -72,6 +72,7 @@ * @defgroup ReduceCalculation * @defgroup RotaryPositionalEmbeddings * @defgroup ReLU + * @defgroup avgpool * */ @@ -7791,6 +7792,94 @@ MIOPEN_EXPORT miopenStatus_t miopenPReLUBackward(miopenHandle_t handle, // CLOSEOUT RELU DOXYGEN GROUP #endif // MIOPEN_BETA_API +#ifdef MIOPEN_BETA_API +// 
avgpool APIs +/** @addtogroup avgpool + * + * @{ + */ + +/*! @brief Execute an avgpool forward layer + * + * @param handle MIOpen handle (input) + * @param inputDesc Tensor descriptor for input tensor (input) + * @param input Data tensor input (input) + * @param outputDesc Tensor descriptor for output tensor (input) + * @param output Data tensor output (output) + * @param KD Kernel size in dimension D (input) + * @param KH Kernel size in dimension H (input) + * @param KW Kernel size in dimension W (input) + * @param SD Stride size in dimension D (input) + * @param SH Stride size in dimension H (input) + * @param SW Stride size in dimension W (input) + * @param PD Padding size in dimension D (input) + * @param PH Padding size in dimension H (input) + * @param PW Padding size in dimension W (input) + * @param count_include_pad When True, will include the zero-padding in the averaging + * calculation (input) + * @param divisor_override If non-zero, will use this value as the divisor, otherwise will + * use the number of elements in the pooling window (input) + * @return miopenStatus_t + */ +MIOPEN_EXPORT miopenStatus_t miopenAvgPoolForward(miopenHandle_t handle, + const miopenTensorDescriptor_t inputDesc, + const void* input, + const miopenTensorDescriptor_t outputDesc, + void* output, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override); + +/*! 
@brief Execute an avgpool backward layer + * + * @param handle MIOpen handle (input) + * @param outputGradDesc Tensor descriptor for output grad tensor (input) + * @param output_grad Data tensor output grad (input) + * @param inputGradDesc Tensor descriptor for input grad tensor (input) + * @param input_grad Data tensor input grad (output) + * @param KD Kernel size in dimension D (input) + * @param KH Kernel size in dimension H (input) + * @param KW Kernel size in dimension W (input) + * @param SD Stride size in dimension D (input) + * @param SH Stride size in dimension H (input) + * @param SW Stride size in dimension W (input) + * @param PD Padding size in dimension D (input) + * @param PH Padding size in dimension H (input) + * @param PW Padding size in dimension W (input) + * @param count_include_pad When True, will include the zero-padding in the averaging + * calculation (input) + * @param divisor_override If non-zero, will use this value as the divisor, otherwise will + * use the number of elements in the pooling window (input) + * @return miopenStatus_t + */ +MIOPEN_EXPORT miopenStatus_t miopenAvgPoolBackward(miopenHandle_t handle, + const miopenTensorDescriptor_t outputGradDesc, + const void* output_grad, + const miopenTensorDescriptor_t inputGradDesc, + void* input_grad, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override); +/** @} */ +// CLOSEOUT avgpool DOXYGEN GROUP +#endif // MIOPEN_BETA_API + #ifdef __cplusplus } #endif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c4ffeede18..011eefae3a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -89,6 +89,8 @@ set( MIOpen_Source adam_api.cpp addlayernorm_api.cpp api/find2_0_commons.cpp + avgpool_api.cpp + avgpool/problem_description.cpp batch_norm.cpp batch_norm_api.cpp 
batchnorm/problem_description.cpp @@ -201,6 +203,10 @@ set( MIOpen_Source solver/activ/fwd_1.cpp solver/adam/adam.cpp solver/adam/transformers_adam_w.cpp + solver/avgpool/backward_avgpool_2d.cpp + solver/avgpool/backward_avgpool_3d.cpp + solver/avgpool/forward_avgpool_2d.cpp + solver/avgpool/forward_avgpool_3d.cpp solver/batchnorm/backward_ck.cpp solver/batchnorm/backward_per_activation.cpp solver/batchnorm/backward_per_activation_fused.cpp @@ -502,6 +508,7 @@ if( MIOPEN_BACKEND MATCHES "OpenCL" OR MIOPEN_BACKEND STREQUAL "HIPOC" OR MIOPEN ${GPU_BATCHED_TRANSPOSE_KERNEL_HIP} ${GPU_GENERAL_TENSOR_REORDER_KERNEL_HIP_SOURCE} kernels/MIOpenAdam.cpp + kernels/MIOpenAvgPool.cpp kernels/MIOpenCat.cpp kernels/MIOpenCheckNumerics.cpp kernels/MIOpenBatchNormActivBwdPerAct.cl @@ -651,6 +658,7 @@ if( MIOPEN_BACKEND MATCHES "OpenCL" OR MIOPEN_BACKEND STREQUAL "HIPOC" OR MIOPEN activ.cpp adam.cpp addlayernorm.cpp + avgpool.cpp cat.cpp groupnorm.cpp getitem.cpp diff --git a/src/avgpool.cpp b/src/avgpool.cpp new file mode 100644 index 0000000000..db6f2e72f6 --- /dev/null +++ b/src/avgpool.cpp @@ -0,0 +1,138 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace avgpool { + +miopenStatus_t AvgPoolForward(Handle& handle, + const TensorDescriptor& inputDesc, + ConstData_t input, + const TensorDescriptor& outputDesc, + Data_t output, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override) +{ + const auto problem = avgpool::FwdProblemDescription{inputDesc, outputDesc}; + + const auto invoke_params = [&]() { + auto tmp = avgpool::FwdInvokeParams{}; + tmp.inputDesc = &inputDesc; + tmp.outputDesc = &outputDesc; + + tmp.input = input; + tmp.output = output; + tmp.KD = KD; + tmp.KH = KH; + tmp.KW = KW; + tmp.SD = SD; + tmp.SH = SH; + tmp.SW = SW; + tmp.PD = PD; + tmp.PH = PH; + tmp.PW = PW; + tmp.count_include_pad = count_include_pad; + tmp.divisor_override = divisor_override; + + return tmp; + }(); + const auto algo = AlgorithmName{"AvgPoolForward"}; + const auto solvers = solver::SolverContainer{}; + + solvers.ExecutePrimitive(handle, problem, algo, invoke_params); + + return miopenStatusSuccess; +} + +miopenStatus_t AvgPoolBackward(Handle& handle, + const TensorDescriptor& outputGradDesc, + ConstData_t output_grad, + const TensorDescriptor& inputGradDesc, + Data_t 
input_grad, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override) +{ + const auto problem = avgpool::BwdProblemDescription{outputGradDesc, inputGradDesc}; + + const auto invoke_params = [&]() { + auto tmp = avgpool::BwdInvokeParams{}; + tmp.outputGradDesc = &outputGradDesc; + tmp.inputGradDesc = &inputGradDesc; + + tmp.output_grad = output_grad; + tmp.input_grad = input_grad; + tmp.KD = KD; + tmp.KH = KH; + tmp.KW = KW; + tmp.SD = SD; + tmp.SH = SH; + tmp.SW = SW; + tmp.PD = PD; + tmp.PH = PH; + tmp.PW = PW; + tmp.count_include_pad = count_include_pad; + tmp.divisor_override = divisor_override; + + return tmp; + }(); + const auto algo = AlgorithmName{"AvgPoolBackward"}; + const auto solvers = solver::SolverContainer{}; + + solvers.ExecutePrimitive(handle, problem, algo, invoke_params); + + return miopenStatusSuccess; +} + +} // namespace avgpool + +} // namespace miopen diff --git a/src/avgpool/problem_description.cpp b/src/avgpool/problem_description.cpp new file mode 100644 index 0000000000..489c63e6d4 --- /dev/null +++ b/src/avgpool/problem_description.cpp @@ -0,0 +1,85 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include + +namespace miopen { + +namespace avgpool { + +inline std::ostream& operator<<(std::ostream& os, const std::vector& v) +{ + os << '{'; + for(int i = 0; i < v.size(); ++i) + { + if(i != 0) + os << ','; + os << v[i]; + } + os << '}'; + return os; +} + +NetworkConfig FwdProblemDescription::MakeNetworkConfig() const +{ + auto input_size = inputDesc.GetLengths(); + auto output_size = outputDesc.GetLengths(); + + auto input_dtype = inputDesc.GetType(); + + std::ostringstream ss; + + ss << "avgpool_fwd"; + ss << "-input_dtype" << input_dtype; + ss << "-Is" << input_size; + ss << "-Os" << output_size; + ss << "-Ic" << IsAllContiguous(); + + return NetworkConfig{ss.str()}; +} + +NetworkConfig BwdProblemDescription::MakeNetworkConfig() const +{ + auto input_grad_size = inputGradDesc.GetLengths(); + auto output_grad_size = outputGradDesc.GetLengths(); + + auto input_dtype = inputGradDesc.GetType(); + + std::ostringstream ss; + + ss << "avgpool_bwd"; + ss << "-input_dtype" << input_dtype; + ss << "-dIs" << input_grad_size; + ss << "-dOs" << output_grad_size; + ss << "-Ic" << IsAllContiguous(); + + return NetworkConfig{ss.str()}; +} + +} // namespace avgpool + +} // namespace miopen diff --git a/src/avgpool_api.cpp b/src/avgpool_api.cpp new file mode 100644 index 0000000000..117aa7c433 --- /dev/null +++ b/src/avgpool_api.cpp @@ -0,0 +1,234 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include +#include +#include +#include + +inline std::ostream& operator<<(std::ostream& os, const std::vector& v) +{ + os << '{'; + for(int i = 0; i < v.size(); ++i) + { + if(i != 0) + os << ','; + os << v[i]; + } + os << '}'; + return os; +} + +static void LogCmdAvgPool(const miopenTensorDescriptor_t iDesc, + const miopenTensorDescriptor_t oDesc, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override, + const bool is_fwd) +{ + if(miopen::IsLoggingCmd()) + { + std::stringstream ss; + auto dtype = miopen::deref(iDesc).GetType(); + if(dtype == miopenHalf) + { + ss << "avgpoolfp16"; + } + else if(dtype == miopenFloat) + { + ss << "avgpoolfp32"; + } + else if(dtype == miopenBFloat16) + { + ss << "avgpoolbfp16"; + } + + MIOPEN_LOG_FUNCTION(iDesc, oDesc, count_include_pad, divisor_override); + ss << " -Is " << miopen::deref(iDesc).GetLengths(); + ss << " -Os " << miopen::deref(oDesc).GetLengths(); + ss << " -Si " << miopen::deref(iDesc).GetStrides(); + ss << " -So " << miopen::deref(oDesc).GetStrides(); + ss << " -KD " << KD; + ss << " -KH " << KH; + ss << " -KW " << KW; + ss << " -SD " << SD; + ss << " -SH " << SH; + ss << " -SW " << SW; + ss << " -PD " << PD; + ss << " -PH " << PH; + ss << " -PW " << PW; + ss << " -Cp " << count_include_pad; + ss << " -Do " << divisor_override; + ss << " -F " << ((is_fwd) ? 
"1" : "2"); + + MIOPEN_LOG_DRIVER_CMD(ss.str()); + } +} + +extern "C" miopenStatus_t miopenAvgPoolForward(miopenHandle_t handle, + const miopenTensorDescriptor_t inputDesc, + const void* input, + const miopenTensorDescriptor_t outputDesc, + void* output, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override) +{ + MIOPEN_LOG_FUNCTION(handle, + inputDesc, + input, + outputDesc, + output, + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override); + + LogCmdAvgPool(inputDesc, + outputDesc, + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override, + true); + return miopen::try_([&] { + miopen::avgpool::AvgPoolForward(miopen::deref(handle), + miopen::deref(inputDesc), + DataCast(input), + miopen::deref(outputDesc), + DataCast(output), + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override); + }); +} + +extern "C" miopenStatus_t miopenAvgPoolBackward(miopenHandle_t handle, + const miopenTensorDescriptor_t outputGradDesc, + const void* output_grad, + const miopenTensorDescriptor_t inputGradDesc, + void* input_grad, + const int64_t KD, + const int64_t KH, + const int64_t KW, + const int64_t SD, + const int64_t SH, + const int64_t SW, + const int64_t PD, + const int64_t PH, + const int64_t PW, + const bool count_include_pad, + const int64_t divisor_override) +{ + MIOPEN_LOG_FUNCTION(handle, + outputGradDesc, + output_grad, + inputGradDesc, + input_grad, + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override); + + LogCmdAvgPool(inputGradDesc, + outputGradDesc, + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override, + false); + return miopen::try_([&] { + 
miopen::avgpool::AvgPoolBackward(miopen::deref(handle), + miopen::deref(outputGradDesc), + DataCast(output_grad), + miopen::deref(inputGradDesc), + DataCast(input_grad), + KD, + KH, + KW, + SD, + SH, + SW, + PD, + PH, + PW, + count_include_pad, + divisor_override); + }); +} diff --git a/src/include/miopen/avgpool.hpp b/src/include/miopen/avgpool.hpp new file mode 100644 index 0000000000..23646eb787 --- /dev/null +++ b/src/include/miopen/avgpool.hpp @@ -0,0 +1,71 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ +#pragma once +#include + +namespace miopen { + +struct Handle; +struct TensorDescriptor; + +namespace avgpool { + +MIOPEN_INTERNALS_EXPORT miopenStatus_t AvgPoolForward(Handle& handle, + const TensorDescriptor& inputDesc, + ConstData_t input, + const TensorDescriptor& outputDesc, + Data_t output, + int64_t KD, + int64_t KH, + int64_t KW, + int64_t SD, + int64_t SH, + int64_t SW, + int64_t PD, + int64_t PH, + int64_t PW, + bool count_include_pad, + int64_t divisor_override); + +MIOPEN_INTERNALS_EXPORT miopenStatus_t AvgPoolBackward(Handle& handle, + const TensorDescriptor& outputGradDesc, + ConstData_t output_grad, + const TensorDescriptor& inputGradDesc, + Data_t input_grad, + int64_t KD, + int64_t KH, + int64_t KW, + int64_t SD, + int64_t SH, + int64_t SW, + int64_t PD, + int64_t PH, + int64_t PW, + bool count_include_pad, + int64_t divisor_override); +} // namespace avgpool + +} // namespace miopen diff --git a/src/include/miopen/avgpool/invoke_params.hpp b/src/include/miopen/avgpool/invoke_params.hpp new file mode 100644 index 0000000000..65d1f2beeb --- /dev/null +++ b/src/include/miopen/avgpool/invoke_params.hpp @@ -0,0 +1,93 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ + +#pragma once + +#include +#include +#include + +namespace miopen { + +namespace avgpool { + +struct FwdInvokeParams : public miopen::InvokeParams +{ + + FwdInvokeParams() = default; + + const TensorDescriptor* inputDesc = nullptr; + const TensorDescriptor* outputDesc = nullptr; + + ConstData_t input = nullptr; + Data_t output = nullptr; + + int64_t KD = 0; + int64_t KH = 0; + int64_t KW = 0; + int64_t SD = 0; + int64_t SH = 0; + int64_t SW = 0; + int64_t PD = 0; + int64_t PH = 0; + int64_t PW = 0; + bool count_include_pad = false; + int64_t divisor_override = 0; + + std::size_t GetWorkspaceSize() const { return 0; } + Data_t GetWorkspace() const { return nullptr; } +}; + +struct BwdInvokeParams : public miopen::InvokeParams +{ + + BwdInvokeParams() = default; + + const TensorDescriptor* outputGradDesc = nullptr; + const TensorDescriptor* inputGradDesc = nullptr; + + ConstData_t output_grad = nullptr; + Data_t input_grad = nullptr; + + int64_t KD = 0; + int64_t KH = 0; + int64_t KW = 0; + int64_t SD = 0; + int64_t SH = 0; + int64_t SW = 0; + int64_t PD = 0; + int64_t PH = 0; + int64_t PW = 0; + bool count_include_pad = false; + int64_t divisor_override = 0; + + std::size_t GetWorkspaceSize() const { return 0; } + Data_t GetWorkspace() const { return nullptr; } +}; + +} // namespace avgpool + +} // namespace miopen diff --git a/src/include/miopen/avgpool/problem_description.hpp 
b/src/include/miopen/avgpool/problem_description.hpp new file mode 100644 index 0000000000..e71ba5e617 --- /dev/null +++ b/src/include/miopen/avgpool/problem_description.hpp @@ -0,0 +1,161 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#pragma once + +#include +#include +#include + +namespace miopen { + +struct NetworkConfig; + +namespace avgpool { + +struct FwdProblemDescription : ProblemDescriptionBase +{ + FwdProblemDescription(const TensorDescriptor& inputDesc_, const TensorDescriptor& outputDesc_) + : inputDesc(inputDesc_), outputDesc(outputDesc_) + { + IsValidLength(); + IsSameType(); + IsValidDims(); + } + + const TensorDescriptor& GetInputDesc() const { return inputDesc; } + const TensorDescriptor& GetOutputDesc() const { return outputDesc; } + auto GetNtotal() const { return outputDesc.GetElementSize(); } + + bool IsValidLength() const + { + auto input_dims = inputDesc.GetLengths().size(); + if(outputDesc.GetLengths()[0] != inputDesc.GetLengths()[0] || + outputDesc.GetLengths()[1] != inputDesc.GetLengths()[1] || + outputDesc.GetLengths().size() != input_dims) + { + MIOPEN_THROW(miopenStatusBadParm, + "AvgPool: Input and output tensor sizes do not match."); + } + + return true; + } + + bool IsValidDims() const + { + if(inputDesc.GetLengths().size() > 5 || inputDesc.GetLengths().size() < 4) + { + MIOPEN_THROW(miopenStatusBadParm, "AvgPool: Only 4D and 5D tensors are supported."); + } + + return true; + } + + bool IsAllContiguous() const { return inputDesc.IsContiguous() && outputDesc.IsContiguous(); } + + bool IsSameType() const + { + if(inputDesc.GetType() != outputDesc.GetType()) + { + MIOPEN_THROW(miopenStatusBadParm, + "AvgPool: Input and output tensor types do not match."); + } + + return true; + } + + NetworkConfig MakeNetworkConfig() const override; + +protected: + TensorDescriptor inputDesc; + TensorDescriptor outputDesc; +}; + +struct BwdProblemDescription : ProblemDescriptionBase +{ + BwdProblemDescription(const TensorDescriptor& outputGradDesc_, + const TensorDescriptor& inputGradDesc_) + : outputGradDesc(outputGradDesc_), inputGradDesc(inputGradDesc_) + { + IsValidLength(); + IsSameType(); 
+ IsValidDims(); + } + + const TensorDescriptor& GetOutputGradDesc() const { return outputGradDesc; } + const TensorDescriptor& GetInputGradDesc() const { return inputGradDesc; } + auto GetNtotal() const { return inputGradDesc.GetElementSize(); } + + bool IsValidLength() const + { + auto input_dims = inputGradDesc.GetLengths().size(); + if(outputGradDesc.GetLengths()[0] != inputGradDesc.GetLengths()[0] || + outputGradDesc.GetLengths()[1] != inputGradDesc.GetLengths()[1] || + outputGradDesc.GetLengths().size() != input_dims) + { + MIOPEN_THROW(miopenStatusBadParm, + "AvgPool: Input grad and output grad tensor sizes do not match."); + } + + return true; + } + + bool IsValidDims() const + { + if(inputGradDesc.GetLengths().size() > 5 || inputGradDesc.GetLengths().size() < 4) + { + MIOPEN_THROW(miopenStatusBadParm, "AvgPool: Only 4D and 5D tensors are supported."); + } + + return true; + } + + bool IsAllContiguous() const + { + return inputGradDesc.IsContiguous() && outputGradDesc.IsContiguous(); + } + + bool IsSameType() const + { + if(inputGradDesc.GetType() != outputGradDesc.GetType()) + { + MIOPEN_THROW(miopenStatusBadParm, + "AvgPool: Input grad and output grad tensor types do not match."); + } + + return true; + } + + NetworkConfig MakeNetworkConfig() const override; + +protected: + TensorDescriptor outputGradDesc; + TensorDescriptor inputGradDesc; +}; + +} // namespace avgpool + +} // namespace miopen diff --git a/src/include/miopen/avgpool/solvers.hpp b/src/include/miopen/avgpool/solvers.hpp new file mode 100644 index 0000000000..854611dd07 --- /dev/null +++ b/src/include/miopen/avgpool/solvers.hpp @@ -0,0 +1,114 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace solver { + +namespace avgpool { + +const auto make_hip_kernel = [](std::vector localsize, + std::vector gridsize, + std::string kernel_file, + std::string kernel_name, + KernelBuildParameters build_params) { + while(localsize.size() < 3) + localsize.push_back(1); + while(gridsize.size() < 3) + gridsize.push_back(1); + for(int i = 0; i < localsize.size(); ++i) + gridsize[i] = AlignUp(gridsize[i], localsize[i]); + return KernelInfo{ + build_params.GenerateFor(kbp::HIP{}), localsize, gridsize, kernel_file, kernel_name}; +}; + +using AvgPoolForward = + NonTunableSolverBase; + +using AvgPoolBackward = + NonTunableSolverBase; + +// FORWARD +struct AvgPoolForward2d final : AvgPoolForward +{ + const std::string& SolverDbId() const override { return GetSolverDbId(); } + + bool IsApplicable(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const override; + + ConvSolution GetSolution(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const override; +}; + +struct AvgPoolForward3d final : AvgPoolForward +{ + const std::string& SolverDbId() const override { return GetSolverDbId(); } + + bool IsApplicable(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const override; + + ConvSolution GetSolution(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const override; +}; + +// BACKWARD +struct AvgPoolBackward2d final : AvgPoolBackward +{ + const std::string& SolverDbId() const override { return GetSolverDbId(); } + + bool IsApplicable(const ExecutionContext& context, + const miopen::avgpool::BwdProblemDescription& problem) const override; + + ConvSolution GetSolution(const ExecutionContext& context, + const 
miopen::avgpool::BwdProblemDescription& problem) const override; +}; + +struct AvgPoolBackward3d final : AvgPoolBackward +{ + const std::string& SolverDbId() const override { return GetSolverDbId(); } + + bool IsApplicable(const ExecutionContext& context, + const miopen::avgpool::BwdProblemDescription& problem) const override; + + ConvSolution GetSolution(const ExecutionContext& context, + const miopen::avgpool::BwdProblemDescription& problem) const override; +}; + +} // namespace avgpool + +} // namespace solver + +} // namespace miopen diff --git a/src/include/miopen/solver_id.hpp b/src/include/miopen/solver_id.hpp index ab824faa32..18c538a1db 100644 --- a/src/include/miopen/solver_id.hpp +++ b/src/include/miopen/solver_id.hpp @@ -61,7 +61,8 @@ enum class Primitive Adam, Item, RoPE, - ReLU + ReLU, + AvgPool }; struct MIOPEN_INTERNALS_EXPORT Id diff --git a/src/include/miopen/tensor_view_utils.hpp b/src/include/miopen/tensor_view_utils.hpp index 1b095affb7..a75d0a380f 100644 --- a/src/include/miopen/tensor_view_utils.hpp +++ b/src/include/miopen/tensor_view_utils.hpp @@ -27,9 +27,8 @@ #ifndef MIOPEN_TENSOR_VIEW_UTIL_HPP_ #define MIOPEN_TENSOR_VIEW_UTIL_HPP_ -#include -#include #include "../../kernels/tensor_view.hpp" +#include namespace miopen { @@ -42,7 +41,12 @@ inline tensor_view_t get_inner_expanded_tv(const TensorDescriptor Desc) tensor_view_t tensor_view{}; for(size_t i = 0; i < N; ++i) { - if(i < dims.size()) + if(dims.empty()) + { + tensor_view.stride[i] = 0; + tensor_view.size[i] = 0; + } + else if(i < dims.size()) { tensor_view.stride[i] = strides[i]; tensor_view.size[i] = dims[i]; @@ -76,6 +80,28 @@ inline void slice_tv(tensor_view_t& tensor_view, int32_t sliceCount, const in } } +template +inline tensor_view_t get_tv_without_dim(const tensor_view_t& origin_tv, int selected_dim) +{ + tensor_view_t res{}; + for(int i = 0; i < N; ++i) + { + if(i == selected_dim) + continue; + if(i < selected_dim) + { + res.size[i] = origin_tv.size[i]; + res.stride[i] = 
origin_tv.stride[i]; + } + else + { + res.size[i - 1] = origin_tv.size[i]; + res.stride[i - 1] = origin_tv.stride[i]; + } + } + return res; +} + } // namespace miopen #endif // MIOPEN_TENSOR_VIEW_UTIL_HPP_ diff --git a/src/kernels/MIOpenAvgPool.cpp b/src/kernels/MIOpenAvgPool.cpp new file mode 100644 index 0000000000..7c64b1076f --- /dev/null +++ b/src/kernels/MIOpenAvgPool.cpp @@ -0,0 +1,560 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ +#ifndef MIOPEN_DONT_USE_HIP_RUNTIME_HEADERS +#include +#include +#endif + +#include "float_types.h" +#include "tensor_view.hpp" + +template +__device__ void avgPoolForward2d(const TI* __restrict__ input, + TO* __restrict__ output, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + int64_t R, + int64_t S, + int64_t sh, + int64_t sw, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<4> input_tv, + tensor_view_t<4> output_tv) +{ + int64_t gid = threadIdx.x + blockIdx.x * blockDim.x; + int64_t ncoh = gid / OW, ow = gid % OW; + int64_t nc = ncoh / OH, oh = ncoh % OH; + int64_t n = nc / C, c = nc % C; + + if(n >= N) + return; + + FLOAT_ACCUM m = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, h, w) + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + m += CVT_FLOAT2ACCUM(input[input_tv.get_tensor_view_idx({n, c, h, w})]); + } + } + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + FLOAT_ACCUM val = m / divide_factor; + + output[output_tv.get_tensor_view_idx({n, c, oh, ow})] = CVT_ACCUM2FLOAT(val); +} + +extern "C" __global__ void AvgPoolForward2d(const INPUT_TYPE* __restrict__ input, + OUTPUT_TYPE* __restrict__ output, 
+ int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + int64_t R, + int64_t S, + int64_t sh, + int64_t sw, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<4> input_tv, + tensor_view_t<4> output_tv) +{ + avgPoolForward2d(input, + output, + N, + C, + H, + W, + OH, + OW, + R, + S, + sh, + sw, + ph, + pw, + count_include_pad, + divisor_override, + input_tv, + output_tv); +} + +template +__device__ void avgPoolForward3d(const TI* __restrict__ input, + TO* __restrict__ output, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + int64_t KD, + int64_t R, + int64_t S, + int64_t sd, + int64_t sh, + int64_t sw, + int64_t pd, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<5> input_tv, + tensor_view_t<5> output_tv) +{ + int64_t gid = threadIdx.x + blockIdx.x * blockDim.x; + int64_t ncodoh = gid / OW, ow = gid % OW; + int64_t ncod = ncodoh / OH, oh = ncodoh % OH; + int64_t nc = ncod / OD, od = ncod % OD; + int64_t n = nc / C, c = nc % C; + + if(n >= N) + return; + FLOAT_ACCUM sum = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, d, h, w) + int64_t d = od * sd - pd + kd; + if(d < 0 || d >= D) + continue; + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + sum += CVT_FLOAT2ACCUM(input[input_tv.get_tensor_view_idx({n, c, d, h, w})]); + } + } + } + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = min(dstart + KD, D + pd); + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 
0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + FLOAT_ACCUM val = sum / divide_factor; + output[output_tv.get_tensor_view_idx({n, c, od, oh, ow})] = CVT_ACCUM2FLOAT(val); +} + +extern "C" __global__ void AvgPoolForward3d(const INPUT_TYPE* __restrict__ input, + OUTPUT_TYPE* __restrict__ output, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + int64_t KD, + int64_t R, + int64_t S, + int64_t sd, + int64_t sh, + int64_t sw, + int64_t pd, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<5> input_tv, + tensor_view_t<5> output_tv) +{ + avgPoolForward3d(input, + output, + N, + C, + D, + H, + W, + OD, + OH, + OW, + KD, + R, + S, + sd, + sh, + sw, + pd, + ph, + pw, + count_include_pad, + divisor_override, + input_tv, + output_tv); +} + +template +__device__ void avgPoolBackward2d(const TI* __restrict__ output_grad, + TO* __restrict__ input_grad, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + int64_t R, + int64_t S, + int64_t sh, + int64_t sw, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<4> output_grad_tv, + tensor_view_t<4> input_grad_tv) +{ + int64_t gid = threadIdx.x + blockIdx.x * blockDim.x; + int64_t nch = gid / W, w = gid % W; + int64_t nc = nch / H, h = nch % H; + int64_t n = nc / C, c = nc % C; + + if(n >= N) + return; + + FLOAT_ACCUM grad = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + 
continue; + int64_t owsw = w + pw - s; + if(owsw % sw != 0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + + grad += + CVT_FLOAT2ACCUM(output_grad[output_grad_tv.get_tensor_view_idx({n, c, oh, ow})]) / + divide_factor; + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, h, w})] = CVT_ACCUM2FLOAT(grad); +} + +extern "C" __global__ void AvgPoolBackward2d(const INPUT_TYPE* __restrict__ output_grad, + OUTPUT_TYPE* __restrict__ input_grad, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + int64_t R, + int64_t S, + int64_t sh, + int64_t sw, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<4> output_grad_tv, + tensor_view_t<4> input_grad_tv) +{ + avgPoolBackward2d(output_grad, + input_grad, + N, + C, + H, + W, + OH, + OW, + R, + S, + sh, + sw, + ph, + pw, + count_include_pad, + divisor_override, + output_grad_tv, + input_grad_tv); +} + +template +__device__ void avgPoolBackward3d(const TI* __restrict__ output_grad, + TO* __restrict__ input_grad, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + int64_t KD, + int64_t R, + int64_t S, + int64_t sd, + int64_t sh, + int64_t sw, + int64_t pd, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<5> output_grad_tv, + tensor_view_t<5> input_grad_tv) +{ + 
int64_t gid = threadIdx.x + blockIdx.x * blockDim.x; + int64_t ncdh = gid / W, w = gid % W; + int64_t ncd = ncdh / H, h = ncdh % H; + int64_t nc = ncd / D, d = ncd % D; + int64_t n = nc / C, c = nc % C; + + if(n >= N) + return; + + FLOAT_ACCUM grad = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t odsd = d + pd - kd; + if(odsd % sd != 0) + continue; + int64_t od = odsd / sd; + if(od < 0 || od >= OD) + continue; + + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + continue; + + int64_t owsw = w + pw - s; + if(owsw % sw != 0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = min(dstart + KD, D + pd); + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + wend = min(wend, W); + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + grad += CVT_FLOAT2ACCUM( + output_grad[output_grad_tv.get_tensor_view_idx({n, c, od, oh, ow})]) / + divide_factor; + } + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, d, h, w})] = CVT_ACCUM2FLOAT(grad); +} + +extern "C" __global__ void AvgPoolBackward3d(const INPUT_TYPE* __restrict__ output_grad, + OUTPUT_TYPE* __restrict__ input_grad, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + int64_t KD, + int64_t R, + int64_t S, + int64_t sd, + int64_t sh, + 
int64_t sw, + int64_t pd, + int64_t ph, + int64_t pw, + bool count_include_pad, + int64_t divisor_override, + tensor_view_t<5> output_grad_tv, + tensor_view_t<5> input_grad_tv) +{ + avgPoolBackward3d(output_grad, + input_grad, + N, + C, + D, + H, + W, + OD, + OH, + OW, + KD, + R, + S, + sd, + sh, + sw, + pd, + ph, + pw, + count_include_pad, + divisor_override, + output_grad_tv, + input_grad_tv); +} diff --git a/src/solver/avgpool/backward_avgpool_2d.cpp b/src/solver/avgpool/backward_avgpool_2d.cpp new file mode 100644 index 0000000000..0f8e4e1f42 --- /dev/null +++ b/src/solver/avgpool/backward_avgpool_2d.cpp @@ -0,0 +1,150 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LOCAL_SIZE_BWD_2D 256 + +namespace miopen { + +namespace solver { + +namespace avgpool { + +bool IsOverRocmBwd2d(const miopen::avgpool::BwdProblemDescription& problem) +{ + if(!problem.IsAllContiguous()) + { + auto in_nelems = problem.GetInputGradDesc().GetElementSize(); + auto out_nelems = problem.GetOutputGradDesc().GetElementSize(); + auto in_over_out = static_cast(in_nelems) / out_nelems; + + if(in_over_out == 4) + { + return true; + } + } + return false; +} + +bool AvgPoolBackward2d::IsApplicable(const ExecutionContext&, + const miopen::avgpool::BwdProblemDescription& problem) const +{ + if(problem.GetInputGradDesc().GetNumDims() != 4 || + problem.GetOutputGradDesc().GetNumDims() != 4) + { + return false; + } + if(!(problem.GetOutputGradDesc().GetType() == miopenHalf || + problem.GetOutputGradDesc().GetType() == miopenFloat || + problem.GetOutputGradDesc().GetType() == miopenBFloat16)) + { + return false; + } + if(!IsOverRocmBwd2d(problem)) + { + return false; + } + return true; +} + +ConvSolution +AvgPoolBackward2d::GetSolution(const ExecutionContext& context, + const miopen::avgpool::BwdProblemDescription& problem) const +{ + std::ignore = context; + + auto result = ConvSolution{miopenStatusSuccess}; + auto input_dtype = miopen::GetDataType(problem.GetOutputGradDesc().GetType()); + auto output_dtype = miopen::GetDataType(problem.GetInputGradDesc().GetType()); + auto dtype = problem.GetInputGradDesc().GetType(); + uint64_t N_total = problem.GetNtotal(); + + auto build_params = KernelBuildParameters{ + {"MIOPEN_USE_FP16", static_cast(dtype == miopenHalf)}, + {"MIOPEN_USE_FP32", static_cast(dtype == miopenFloat)}, + {"MIOPEN_USE_FP64", static_cast(dtype == miopenDouble)}, + {"MIOPEN_USE_BFP16", static_cast(dtype == miopenBFloat16)}, + {"INPUT_TYPE", input_dtype == 
"bfloat16" ? "ushort" : input_dtype}, + {"OUTPUT_TYPE", output_dtype == "bfloat16" ? "ushort" : output_dtype}}; + + result.construction_params.push_back(make_hip_kernel( + {LOCAL_SIZE_BWD_2D}, {N_total}, "MIOpenAvgPool.cpp", "AvgPoolBackward2d", build_params)); + + result.invoker_factory = [](const std::vector& kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) params = raw_params.CastTo(); + + decltype(auto) kernel = handle_.Run(kernels.front()); + + auto input_grad_tv = get_inner_expanded_tv<4>(deref(params.inputGradDesc)); + auto output_grad_tv = get_inner_expanded_tv<4>(deref(params.outputGradDesc)); + + int64_t N = deref(params.inputGradDesc).GetLengths()[0]; + int64_t C = deref(params.inputGradDesc).GetLengths()[1]; + int64_t H = deref(params.inputGradDesc).GetLengths()[2]; + int64_t W = deref(params.inputGradDesc).GetLengths()[3]; + int64_t OH = deref(params.outputGradDesc).GetLengths()[2]; + int64_t OW = deref(params.outputGradDesc).GetLengths()[3]; + + kernel(params.output_grad, + params.input_grad, + N, + C, + H, + W, + OH, + OW, + params.KH, + params.KW, + params.SH, + params.SW, + params.PH, + params.PW, + params.count_include_pad, + params.divisor_override, + output_grad_tv, + input_grad_tv); + }; + }; + + return result; +} + +} // namespace avgpool + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/avgpool/backward_avgpool_3d.cpp b/src/solver/avgpool/backward_avgpool_3d.cpp new file mode 100644 index 0000000000..662d53be4a --- /dev/null +++ b/src/solver/avgpool/backward_avgpool_3d.cpp @@ -0,0 +1,160 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LOCAL_SIZE_BWD_3D 256 + +namespace miopen { + +namespace solver { + +namespace avgpool { + +bool IsOverRocmBwd3d(const miopen::avgpool::BwdProblemDescription& problem) +{ + + auto dtype = problem.GetInputGradDesc().GetType(); + auto in_nelems = problem.GetInputGradDesc().GetElementSize(); + auto out_nelems = problem.GetOutputGradDesc().GetElementSize(); + auto in_over_out = static_cast(in_nelems) / out_nelems; + + if(dtype == miopenBFloat16 || dtype == miopenHalf) + { + if(in_over_out < 2) + { + return true; + } + } + + return false; +} + +bool AvgPoolBackward3d::IsApplicable(const ExecutionContext&, + const miopen::avgpool::BwdProblemDescription& problem) const +{ + if(problem.GetInputGradDesc().GetNumDims() != 5 || + problem.GetOutputGradDesc().GetNumDims() != 5) + { + return false; + } + if(!(problem.GetOutputGradDesc().GetType() == miopenHalf || + problem.GetOutputGradDesc().GetType() == miopenFloat || + problem.GetOutputGradDesc().GetType() == miopenBFloat16)) + { + return false; + } + if(!IsOverRocmBwd3d(problem)) + { + return false; + } + return true; +} + +ConvSolution +AvgPoolBackward3d::GetSolution(const ExecutionContext& context, + const miopen::avgpool::BwdProblemDescription& problem) const +{ + std::ignore = context; + + auto result = ConvSolution{miopenStatusSuccess}; + auto input_dtype = miopen::GetDataType(problem.GetOutputGradDesc().GetType()); + auto output_dtype = miopen::GetDataType(problem.GetInputGradDesc().GetType()); + auto dtype = problem.GetInputGradDesc().GetType(); + uint64_t N_total = problem.GetNtotal(); + + auto build_params = KernelBuildParameters{ + {"MIOPEN_USE_FP16", static_cast(dtype == miopenHalf)}, + {"MIOPEN_USE_FP32", static_cast(dtype == miopenFloat)}, + {"MIOPEN_USE_FP64", static_cast(dtype == miopenDouble)}, + {"MIOPEN_USE_BFP16", 
static_cast(dtype == miopenBFloat16)}, + {"INPUT_TYPE", input_dtype == "bfloat16" ? "ushort" : input_dtype}, + {"OUTPUT_TYPE", output_dtype == "bfloat16" ? "ushort" : output_dtype}}; + + result.construction_params.push_back(make_hip_kernel( + {LOCAL_SIZE_BWD_3D}, {N_total}, "MIOpenAvgPool.cpp", "AvgPoolBackward3d", build_params)); + + result.invoker_factory = [](const std::vector& kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) params = raw_params.CastTo(); + + decltype(auto) kernel = handle_.Run(kernels.front()); + + auto input_grad_tv = get_inner_expanded_tv<5>(deref(params.inputGradDesc)); + auto output_grad_tv = get_inner_expanded_tv<5>(deref(params.outputGradDesc)); + + int64_t N = deref(params.inputGradDesc).GetLengths()[0]; + int64_t C = deref(params.inputGradDesc).GetLengths()[1]; + int64_t D = deref(params.inputGradDesc).GetLengths()[2]; + int64_t H = deref(params.inputGradDesc).GetLengths()[3]; + int64_t W = deref(params.inputGradDesc).GetLengths()[4]; + int64_t OD = deref(params.outputGradDesc).GetLengths()[2]; + int64_t OH = deref(params.outputGradDesc).GetLengths()[3]; + int64_t OW = deref(params.outputGradDesc).GetLengths()[4]; + + kernel(params.output_grad, + params.input_grad, + N, + C, + D, + H, + W, + OD, + OH, + OW, + params.KD, + params.KH, + params.KW, + params.SD, + params.SH, + params.SW, + params.PD, + params.PH, + params.PW, + params.count_include_pad, + params.divisor_override, + output_grad_tv, + input_grad_tv); + }; + }; + + return result; +} + +} // namespace avgpool + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/avgpool/forward_avgpool_2d.cpp b/src/solver/avgpool/forward_avgpool_2d.cpp new file mode 100644 index 0000000000..25be3af3d4 --- /dev/null +++ b/src/solver/avgpool/forward_avgpool_2d.cpp @@ -0,0 +1,161 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro 
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LOCAL_SIZE_FWD_2D 256 + +namespace miopen { + +namespace solver { + +namespace avgpool { + +bool IsOverRocmFwd2d(const miopen::avgpool::FwdProblemDescription& problem) +{ + if(problem.IsAllContiguous()) + return true; + else + { + auto dtype = problem.GetInputDesc().GetType(); + auto in_nelems = problem.GetInputDesc().GetElementSize(); + auto out_nelems = problem.GetOutputDesc().GetElementSize(); + auto in_over_out = static_cast(in_nelems) / out_nelems; + if(dtype == miopenFloat) + { + if(out_nelems <= 9633792 && in_over_out >= 4) + { + return true; + } + } + else if(dtype == miopenHalf || dtype == miopenBFloat16) + { + if(out_nelems <= 3311616 && in_over_out >= 4) + { + return true; + } + } + } + return false; +} + +bool AvgPoolForward2d::IsApplicable(const ExecutionContext&, + const miopen::avgpool::FwdProblemDescription& problem) const +{ + if(problem.GetInputDesc().GetNumDims() != 4 || problem.GetOutputDesc().GetNumDims() != 4) + { + return false; + } + if(!(problem.GetInputDesc().GetType() == miopenHalf || + problem.GetInputDesc().GetType() == miopenFloat || + problem.GetInputDesc().GetType() == miopenBFloat16)) + { + return false; + } + if(!IsOverRocmFwd2d(problem)) + { + return false; + } + return true; +} + +ConvSolution +AvgPoolForward2d::GetSolution(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const +{ + std::ignore = context; + + auto result = ConvSolution{miopenStatusSuccess}; + auto input_dtype = miopen::GetDataType(problem.GetInputDesc().GetType()); + auto output_dtype = miopen::GetDataType(problem.GetOutputDesc().GetType()); + auto dtype = problem.GetOutputDesc().GetType(); + uint64_t N_total = problem.GetNtotal(); + + auto build_params = KernelBuildParameters{ + {"MIOPEN_USE_FP16", static_cast(dtype == miopenHalf)}, 
+ {"MIOPEN_USE_FP32", static_cast(dtype == miopenFloat)}, + {"MIOPEN_USE_FP64", static_cast(dtype == miopenDouble)}, + {"MIOPEN_USE_BFP16", static_cast(dtype == miopenBFloat16)}, + {"INPUT_TYPE", input_dtype == "bfloat16" ? "ushort" : input_dtype}, + {"OUTPUT_TYPE", output_dtype == "bfloat16" ? "ushort" : output_dtype}}; + + result.construction_params.push_back(make_hip_kernel( + {LOCAL_SIZE_FWD_2D}, {N_total}, "MIOpenAvgPool.cpp", "AvgPoolForward2d", build_params)); + + result.invoker_factory = [](const std::vector& kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) params = raw_params.CastTo(); + + decltype(auto) kernel = handle_.Run(kernels.front()); + + auto input_tv = get_inner_expanded_tv<4>(deref(params.inputDesc)); + auto output_tv = get_inner_expanded_tv<4>(deref(params.outputDesc)); + + int64_t N = deref(params.inputDesc).GetLengths()[0]; + int64_t C = deref(params.inputDesc).GetLengths()[1]; + int64_t H = deref(params.inputDesc).GetLengths()[2]; + int64_t W = deref(params.inputDesc).GetLengths()[3]; + int64_t OH = deref(params.outputDesc).GetLengths()[2]; + int64_t OW = deref(params.outputDesc).GetLengths()[3]; + + kernel(params.input, + params.output, + N, + C, + H, + W, + OH, + OW, + params.KH, + params.KW, + params.SH, + params.SW, + params.PH, + params.PW, + params.count_include_pad, + params.divisor_override, + input_tv, + output_tv); + }; + }; + + return result; +} + +} // namespace avgpool + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/avgpool/forward_avgpool_3d.cpp b/src/solver/avgpool/forward_avgpool_3d.cpp new file mode 100644 index 0000000000..b89134f403 --- /dev/null +++ b/src/solver/avgpool/forward_avgpool_3d.cpp @@ -0,0 +1,156 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LOCAL_SIZE_FWD_3D 256 + +namespace miopen { + +namespace solver { + +namespace avgpool { + +bool IsOverRocmFwd3d(const miopen::avgpool::FwdProblemDescription& problem) +{ + auto out_nelems = problem.GetOutputDesc().GetElementSize(); + if(problem.IsAllContiguous()) + { + if(out_nelems > 1536) + return true; + } + else + { + if(out_nelems > 6144 && out_nelems <= 17915904) + return true; + } + return false; +} + +bool AvgPoolForward3d::IsApplicable(const ExecutionContext&, + const miopen::avgpool::FwdProblemDescription& problem) const +{ + if(problem.GetInputDesc().GetNumDims() != 5 || problem.GetOutputDesc().GetNumDims() != 5) + { + return false; + } + if(!(problem.GetInputDesc().GetType() == miopenHalf || + problem.GetInputDesc().GetType() == miopenFloat || + problem.GetInputDesc().GetType() == miopenBFloat16)) + { + return false; + } + if(!IsOverRocmFwd3d(problem)) + { + return false; + } + return true; +} + +ConvSolution +AvgPoolForward3d::GetSolution(const ExecutionContext& context, + const miopen::avgpool::FwdProblemDescription& problem) const +{ + std::ignore = context; + + auto result = ConvSolution{miopenStatusSuccess}; + auto input_dtype = miopen::GetDataType(problem.GetInputDesc().GetType()); + auto output_dtype = miopen::GetDataType(problem.GetOutputDesc().GetType()); + auto dtype = problem.GetOutputDesc().GetType(); + uint64_t N_total = problem.GetNtotal(); + + auto build_params = KernelBuildParameters{ + {"MIOPEN_USE_FP16", static_cast(dtype == miopenHalf)}, + {"MIOPEN_USE_FP32", static_cast(dtype == miopenFloat)}, + {"MIOPEN_USE_FP64", static_cast(dtype == miopenDouble)}, + {"MIOPEN_USE_BFP16", static_cast(dtype == miopenBFloat16)}, + {"INPUT_TYPE", input_dtype == "bfloat16" ? "ushort" : input_dtype}, + {"OUTPUT_TYPE", output_dtype == "bfloat16" ? 
"ushort" : output_dtype}}; + + result.construction_params.push_back(make_hip_kernel( + {LOCAL_SIZE_FWD_3D}, {N_total}, "MIOpenAvgPool.cpp", "AvgPoolForward3d", build_params)); + + result.invoker_factory = [](const std::vector& kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) params = raw_params.CastTo(); + + decltype(auto) kernel = handle_.Run(kernels.front()); + + auto input_tv = get_inner_expanded_tv<5>(deref(params.inputDesc)); + auto output_tv = get_inner_expanded_tv<5>(deref(params.outputDesc)); + + int64_t N = deref(params.inputDesc).GetLengths()[0]; + int64_t C = deref(params.inputDesc).GetLengths()[1]; + int64_t D = deref(params.inputDesc).GetLengths()[2]; + int64_t H = deref(params.inputDesc).GetLengths()[3]; + int64_t W = deref(params.inputDesc).GetLengths()[4]; + int64_t OD = deref(params.outputDesc).GetLengths()[2]; + int64_t OH = deref(params.outputDesc).GetLengths()[3]; + int64_t OW = deref(params.outputDesc).GetLengths()[4]; + + kernel(params.input, + params.output, + N, + C, + D, + H, + W, + OD, + OH, + OW, + params.KD, + params.KH, + params.KW, + params.SD, + params.SH, + params.SW, + params.PD, + params.PH, + params.PW, + params.count_include_pad, + params.divisor_override, + input_tv, + output_tv); + }; + }; + + return result; +} + +} // namespace avgpool + +} // namespace solver + +} // namespace miopen diff --git a/test/cpu_avgpool.hpp b/test/cpu_avgpool.hpp new file mode 100644 index 0000000000..8e5adb1da8 --- /dev/null +++ b/test/cpu_avgpool.hpp @@ -0,0 +1,415 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ +#pragma once + +#include "tensor_holder.hpp" +#include +#include "ford.hpp" + +template +void cpu_avgpool_forward_2d(tensor input, + tensor& output, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + tensor ksize, + tensor stride, + tensor padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = input.desc.GetLengths(); + auto numel = output.desc.GetElementSize(); + + auto input_tv = miopen::get_inner_expanded_tv<4>(input.desc); + auto output_tv = miopen::get_inner_expanded_tv<4>(output.desc); + + par_ford(numel)([&](int64_t gid) { + int64_t ncoh = gid / OW, ow = gid % OW; + int64_t nc = ncoh / OH, oh = ncoh % OH; + int64_t n = nc / C, c = nc % C; + int64_t R = ksize[0]; + int64_t S = ksize[1]; + int64_t sh = stride[0]; + int64_t sw = stride[1]; + int64_t ph = padding[0]; + int64_t pw = padding[1]; + + if(n >= N) + return; + + float m = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, h, w) + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + m += static_cast(input[input_tv.get_tensor_view_idx({n, c, h, w})]); + } + } + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + float val = m / divide_factor; + + 
output[output_tv.get_tensor_view_idx({n, c, oh, ow})] = static_cast(val); + }); +} + +template +void cpu_avgpool_forward_3d(tensor input, + tensor& output, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + tensor ksize, + tensor stride, + tensor padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = input.desc.GetLengths(); + auto numel = output.desc.GetElementSize(); + + auto input_tv = miopen::get_inner_expanded_tv<5>(input.desc); + auto output_tv = miopen::get_inner_expanded_tv<5>(output.desc); + + par_ford(numel)([&](int64_t gid) { + int64_t ncodoh = gid / OW, ow = gid % OW; + int64_t ncod = ncodoh / OH, oh = ncodoh % OH; + int64_t nc = ncod / OD, od = ncod % OD; + int64_t n = nc / C, c = nc % C; + int64_t KD = ksize[0]; + int64_t R = ksize[1]; + int64_t S = ksize[2]; + int64_t sd = stride[0]; + int64_t sh = stride[1]; + int64_t sw = stride[2]; + int64_t pd = padding[0]; + int64_t ph = padding[1]; + int64_t pw = padding[2]; + + if(n >= N) + return; + float sum = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + // input idx : (n, c, d, h, w) + int64_t d = od * sd - pd + kd; + if(d < 0 || d >= D) + continue; + int64_t h = oh * sh - ph + r; + if(h < 0 || h >= H) + continue; + int64_t w = ow * sw - pw + s; + if(w < 0 || w >= W) + continue; + // int64_t input_idx = ((n * C + c) * H + h) * W + w; + sum += static_cast(input[input_tv.get_tensor_view_idx({n, c, d, h, w})]); + } + } + } + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = min(dstart + KD, D + pd); + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + 
wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + float val = sum / divide_factor; + output[output_tv.get_tensor_view_idx({n, c, od, oh, ow})] = static_cast(val); + }); +} + +template +void cpu_avgpool_backward_2d(tensor output_grad, + tensor& input_grad, + int64_t N, + int64_t C, + int64_t H, + int64_t W, + int64_t OH, + int64_t OW, + tensor ksize, + tensor stride, + tensor padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = input_grad.desc.GetLengths(); + auto numel = input_grad.desc.GetElementSize(); + + auto output_grad_tv = miopen::get_inner_expanded_tv<4>(output_grad.desc); + auto input_grad_tv = miopen::get_inner_expanded_tv<4>(input_grad.desc); + + par_ford(numel)([&](int64_t gid) { + int64_t nch = gid / W, w = gid % W; + int64_t nc = nch / H, h = nch % H; + int64_t n = nc / C, c = nc % C; + int64_t R = ksize[0]; + int64_t S = ksize[1]; + int64_t sh = stride[0]; + int64_t sw = stride[1]; + int64_t ph = padding[0]; + int64_t pw = padding[1]; + + if(n >= N) + return; + + float grad = 0; + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + continue; + int64_t owsw = w + pw - s; + if(owsw % sw != 0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (hend - hstart) * (wend - wstart); + + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, H); + wend = min(wend, W); + + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = 
divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (hend - hstart) * (wend - wstart); + } + } + + grad += static_cast( + output_grad[output_grad_tv.get_tensor_view_idx({n, c, oh, ow})]) / + divide_factor; + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, h, w})] = static_cast(grad); + }); +} + +template +void cpu_avgpool_backward_3d(tensor output_grad, + tensor& input_grad, + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + int64_t OD, + int64_t OH, + int64_t OW, + tensor ksize, + tensor stride, + tensor padding, + bool count_include_pad, + int64_t divisor_override) +{ + auto dims = input_grad.desc.GetLengths(); + auto numel = input_grad.desc.GetElementSize(); + + auto output_grad_tv = miopen::get_inner_expanded_tv<5>(output_grad.desc); + auto input_grad_tv = miopen::get_inner_expanded_tv<5>(input_grad.desc); + + par_ford(numel)([&](int64_t gid) { + int64_t ncdh = gid / W, w = gid % W; + int64_t ncd = ncdh / H, h = ncdh % H; + int64_t nc = ncd / D, d = ncd % D; + int64_t n = nc / C, c = nc % C; + int64_t KD = ksize[0]; + int64_t R = ksize[1]; + int64_t S = ksize[2]; + int64_t sd = stride[0]; + int64_t sh = stride[1]; + int64_t sw = stride[2]; + int64_t pd = padding[0]; + int64_t ph = padding[1]; + int64_t pw = padding[2]; + + if(n >= N) + return; + + float grad = 0; + for(int64_t kd = 0; kd < KD; ++kd) + { + for(int64_t r = 0; r < R; ++r) + { + for(int64_t s = 0; s < S; ++s) + { + int64_t odsd = d + pd - kd; + if(odsd % sd != 0) + continue; + int64_t od = odsd / sd; + if(od < 0 || od >= OD) + continue; + + int64_t ohsh = h + ph - r; + if(ohsh % sh != 0) + continue; + int64_t oh = ohsh / sh; + if(oh < 0 || oh >= OH) + continue; + + int64_t owsw = w + pw - s; + if(owsw % sw != 0) + continue; + int64_t ow = owsw / sw; + if(ow < 0 || ow >= OW) + continue; + + int64_t dstart = od * sd - pd; + int64_t hstart = oh * sh - ph; + int64_t wstart = ow * sw - pw; + int64_t dend = 
min(dstart + KD, D + pd); + int64_t hend = min(hstart + R, H + ph); + int64_t wend = min(wstart + S, W + pw); + + const int64_t pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + dend = min(dend, D); + hend = min(hend, H); + wend = min(wend, W); + int64_t divide_factor; + if(divisor_override != 0) + { + divide_factor = divisor_override; + } + else + { + if(count_include_pad) + { + divide_factor = pool_size; + } + else + { + divide_factor = (dend - dstart) * (hend - hstart) * (wend - wstart); + } + } + grad += + static_cast( + output_grad[output_grad_tv.get_tensor_view_idx({n, c, od, oh, ow})]) / + divide_factor; + } + } + } + input_grad[input_grad_tv.get_tensor_view_idx({n, c, d, h, w})] = static_cast(grad); + }); +} diff --git a/test/gtest/avgpool.cpp b/test/gtest/avgpool.cpp new file mode 100644 index 0000000000..28c9def5a4 --- /dev/null +++ b/test/gtest/avgpool.cpp @@ -0,0 +1,88 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#include "avgpool.hpp" +#include "gtest/gtest.h" +using float16 = half_float::half; + +// FORWARD TEST +using GPU_Avgpool_fwd_FP32 = AvgPoolTestFwd; +using GPU_Avgpool_fwd_FP16 = AvgPoolTestFwd; +using GPU_Avgpool_fwd_BFP16 = AvgPoolTestFwd; + +TEST_P(GPU_Avgpool_fwd_FP32, AvgPoolTestFwd) +{ + RunTest(); + Verify(); +}; + +TEST_P(GPU_Avgpool_fwd_FP16, AvgPoolTestFwd) +{ + RunTest(); + Verify(); +}; + +TEST_P(GPU_Avgpool_fwd_BFP16, AvgPoolTestFwd) +{ + RunTest(); + Verify(); +}; + +INSTANTIATE_TEST_SUITE_P(Smoke, GPU_Avgpool_fwd_FP32, testing::ValuesIn(AvgPoolTestConfigsFwd())); +INSTANTIATE_TEST_SUITE_P(Smoke, GPU_Avgpool_fwd_FP16, testing::ValuesIn(AvgPoolTestConfigsFwd())); +INSTANTIATE_TEST_SUITE_P(Smoke, GPU_Avgpool_fwd_BFP16, testing::ValuesIn(AvgPoolTestConfigsFwd())); + +// BACKWARD TEST +using GPU_Avgpool_bwd_FP32 = AvgPoolTestBwd; +using GPU_Avgpool_bwd_FP16 = AvgPoolTestBwd; +using GPU_Avgpool_bwd_BFP16 = AvgPoolTestBwd; + +TEST_P(GPU_Avgpool_bwd_FP32, AvgPoolTestBwd) +{ + RunTest(); + Verify(); +}; + +TEST_P(GPU_Avgpool_bwd_FP16, AvgPoolTestBwd) +{ + RunTest(); + Verify(); +}; + +TEST_P(GPU_Avgpool_bwd_BFP16, AvgPoolTestBwd) +{ + RunTest(); + Verify(); +}; + +INSTANTIATE_TEST_SUITE_P(Smoke, + GPU_Avgpool_bwd_FP32, + testing::ValuesIn(AvgPoolTestConfigsBwdFp32())); +INSTANTIATE_TEST_SUITE_P(Smoke, + GPU_Avgpool_bwd_FP16, + testing::ValuesIn(AvgPoolTestConfigsBwdFp16BFp16())); +INSTANTIATE_TEST_SUITE_P(Smoke, + GPU_Avgpool_bwd_BFP16, + testing::ValuesIn(AvgPoolTestConfigsBwdFp16BFp16())); diff --git a/test/gtest/avgpool.hpp b/test/gtest/avgpool.hpp new file mode 100644 index 0000000000..01b87f1023 
--- /dev/null
+++ b/test/gtest/avgpool.hpp
@@ -0,0 +1,435 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + *******************************************************************************/ +#include "cpu_avgpool.hpp" +#include "get_handle.hpp" +#include "tensor_holder.hpp" +#include "verify.hpp" +#include +#include +#include +#include + +template +inline std::ostream& operator<<(std::ostream& os, const std::vector& v) +{ + os << '{'; + for(int i = 0; i < v.size(); ++i) + { + if(i != 0) + os << ','; + os << v[i]; + } + os << '}'; + return os; +} + +struct AvgPoolTestCase +{ + std::vector input_dims; + std::vector kernel_size; + std::vector stride; + std::vector padding; + bool ceil_mode; + bool count_include_pad; + int64_t divisor_override; + bool is_contiguous = true; + + friend std::ostream& operator<<(std::ostream& os, const AvgPoolTestCase& tc) + { + return os << " input_dims:" << tc.input_dims << " kernel_size:" << tc.kernel_size + << " stride:" << tc.stride << " padding:" << tc.padding + << " ceil_mode:" << tc.ceil_mode << " count_include_pad:" << tc.count_include_pad + << " divisor_override:" << tc.divisor_override + << " is_contiguous:" << tc.is_contiguous; + } + + std::vector GetInput() const { return input_dims; } + std::vector ComputeStrides(std::vector inputDim) const + { + if(!is_contiguous) + std::swap(inputDim.front(), inputDim.back()); + std::vector strides(inputDim.size()); + strides.back() = 1; + for(int i = inputDim.size() - 2; i >= 0; --i) + strides[i] = strides[i + 1] * inputDim[i + 1]; + if(!is_contiguous) + std::swap(strides.front(), strides.back()); + return strides; + } +}; + +inline std::vector AvgPoolTestConfigsFwd() +{ + return { + {{64, 512, 14, 14}, {2, 2}, {2, 2}, {0, 0}, false, true, 0, false}, + {{64, 512, 14, 14}, {2, 2}, {2, 2}, {0, 0}, false, true, 0, true}, + {{4, 512, 14, 14, 14}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, false, true, 0, false}, + {{4, 512, 14, 14, 14}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, false, true, 0, true}, + + }; +} + +inline std::vector AvgPoolTestConfigsBwdFp32() +{ + return { + {{16, 112, 112, 112}, {3, 3}, {2, 2}, 
{1, 1}, false, true, 0, false}, + }; +} + +inline std::vector AvgPoolTestConfigsBwdFp16BFp16() +{ + return { + {{16, 112, 112, 112}, {3, 3}, {2, 2}, {1, 1}, false, true, 0, false}, + {{4, 912, 8, 8, 8}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, false, true, 0, false}, + {{4, 912, 8, 8, 8}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, false, true, 0, true}, + + }; +} + +// FORWARD TEST +template +struct AvgPoolTestFwd : public ::testing::TestWithParam +{ +protected: + void SetUp() override + { + auto&& handle = get_handle(); + avgpool_config = GetParam(); + std::vector in_dim = avgpool_config.GetInput(); + std::vector in_strides = avgpool_config.ComputeStrides(in_dim); + + N = in_dim[0]; + C = in_dim[1]; + D = in_dim.size() == 5 ? in_dim[2] : 1; + H = in_dim.size() == 5 ? in_dim[3] : in_dim[2]; + W = in_dim.size() == 5 ? in_dim[4] : in_dim[3]; + ksize = tensor{in_dim.size() - 2}; + ksize.data = avgpool_config.kernel_size; + stride = tensor{in_dim.size() - 2}; + stride.data = avgpool_config.stride; + padding = tensor{in_dim.size() - 2}; + padding.data = avgpool_config.padding; + + ceil_mode = avgpool_config.ceil_mode; + count_include_pad = avgpool_config.count_include_pad; + divisor_override = avgpool_config.divisor_override; + + auto gen_input_value = [](auto...) 
{ + return prng::gen_A_to_B(static_cast(-10.0f), static_cast(10.0f)); + }; + input = tensor{in_dim, in_strides}.generate(gen_input_value); + + std::vector out_dim; + if(in_dim.size() == 5) + { + if(ceil_mode) + { + OD = std::ceil(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::ceil(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::ceil(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + else + { + OD = std::floor(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::floor(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::floor(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + out_dim = {N, C, OD, OH, OW}; + } + else + { + if(ceil_mode) + { + OH = std::ceil(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::ceil(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + else + { + OH = std::floor(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::floor(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + out_dim = {N, C, OH, OW}; + } + + output = tensor{out_dim}; + std::fill(output.begin(), output.end(), std::numeric_limits::quiet_NaN()); + + ref_output = tensor{out_dim}; + std::fill(ref_output.begin(), ref_output.end(), std::numeric_limits::quiet_NaN()); + + input_dev = handle.Write(input.data); + output_dev = handle.Write(output.data); + } + + void RunTest() + { + auto&& handle = get_handle(); + miopenStatus_t status; + + auto dims = input.desc.GetNumDims(); + if(dims == 4) + { + cpu_avgpool_forward_2d(input, + ref_output, + N, + C, + H, + W, + OH, + OW, + ksize, + stride, + padding, + count_include_pad, + divisor_override); + } + else if(dims == 5) + { + cpu_avgpool_forward_3d(input, + ref_output, + N, + C, + D, + H, + W, + OD, + OH, + OW, + ksize, + stride, + padding, + count_include_pad, + divisor_override); + } + status = miopen::avgpool::AvgPoolForward(handle, + input.desc, 
+ input_dev.get(), + output.desc, + output_dev.get(), + ksize.GetSize() == 3 ? ksize[0] : 1, + ksize.GetSize() == 3 ? ksize[1] : ksize[0], + ksize.GetSize() == 3 ? ksize[2] : ksize[1], + stride.GetSize() == 3 ? stride[0] : 1, + stride.GetSize() == 3 ? stride[1] : stride[0], + stride.GetSize() == 3 ? stride[2] : stride[1], + padding.GetSize() == 3 ? padding[0] : 1, + padding.GetSize() == 3 ? padding[1] : padding[0], + padding.GetSize() == 3 ? padding[2] : padding[1], + count_include_pad, + divisor_override); + ASSERT_EQ(status, miopenStatusSuccess); + + output.data = handle.Read(output_dev, output.data.size()); + } + + void Verify() + { + double threshold = std::numeric_limits::epsilon(); + + auto error = miopen::rms_range(ref_output, output); + + ASSERT_EQ(miopen::range_distance(ref_output), miopen::range_distance(output)); + EXPECT_LT(error, threshold * 10); + } + AvgPoolTestCase avgpool_config; + + tensor input; + tensor output; + tensor ref_output; + tensor ksize; + tensor stride; + tensor padding; + + bool ceil_mode; + bool count_include_pad; + int64_t divisor_override; + int64_t N = 1, C = 1, D = 1, H = 1, W = 1, OD = 1, OH = 1, OW = 1; + + miopen::Allocator::ManageDataPtr input_dev; + miopen::Allocator::ManageDataPtr output_dev; +}; + +// BACKWARD TEST +template +struct AvgPoolTestBwd : public ::testing::TestWithParam +{ +protected: + void SetUp() override + { + auto&& handle = get_handle(); + avgpool_config = GetParam(); + auto in_grad_dim = avgpool_config.GetInput(); + N = in_grad_dim[0]; + C = in_grad_dim[1]; + D = in_grad_dim.size() == 5 ? in_grad_dim[2] : 1; + H = in_grad_dim.size() == 5 ? in_grad_dim[3] : in_grad_dim[2]; + W = in_grad_dim.size() == 5 ? 
in_grad_dim[4] : in_grad_dim[3]; + ksize = tensor{in_grad_dim.size() - 2}; + ksize.data = avgpool_config.kernel_size; + stride = tensor{in_grad_dim.size() - 2}; + stride.data = avgpool_config.stride; + padding = tensor{in_grad_dim.size() - 2}; + padding.data = avgpool_config.padding; + ceil_mode = avgpool_config.ceil_mode; + count_include_pad = avgpool_config.count_include_pad; + divisor_override = avgpool_config.divisor_override; + + std::vector out_grad_dim; + if(in_grad_dim.size() == 5) + { + if(ceil_mode) + { + OD = std::ceil(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::ceil(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::ceil(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + else + { + OD = std::floor(static_cast(D - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OH = std::floor(static_cast(H - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + OW = std::floor(static_cast(W - ksize[2] + 2 * padding[2]) / stride[2]) + 1; + } + out_grad_dim = {N, C, OD, OH, OW}; + } + else + { + if(ceil_mode) + { + OH = std::ceil(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::ceil(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + else + { + OH = std::floor(static_cast(H - ksize[0] + 2 * padding[0]) / stride[0]) + 1; + OW = std::floor(static_cast(W - ksize[1] + 2 * padding[1]) / stride[1]) + 1; + } + out_grad_dim = {N, C, OH, OW}; + } + + auto gen_output_grad_value = [](auto...) 
{ + return prng::gen_A_to_B(static_cast(-10.0f), static_cast(10.0f)); + }; + auto out_grad_strides = avgpool_config.ComputeStrides(out_grad_dim); + output_grad = tensor{out_grad_dim, out_grad_strides}.generate(gen_output_grad_value); + + input_grad = tensor{in_grad_dim}; + std::fill(input_grad.begin(), input_grad.end(), std::numeric_limits::quiet_NaN()); + + ref_input_grad = tensor{in_grad_dim}; + std::fill( + ref_input_grad.begin(), ref_input_grad.end(), std::numeric_limits::quiet_NaN()); + + output_grad_dev = handle.Write(output_grad.data); + input_grad_dev = handle.Write(input_grad.data); + } + + void RunTest() + { + auto&& handle = get_handle(); + + miopenStatus_t status; + + auto dims = input_grad.desc.GetNumDims(); + if(dims == 4) + { + cpu_avgpool_backward_2d(output_grad, + ref_input_grad, + N, + C, + H, + W, + OH, + OW, + ksize, + stride, + padding, + count_include_pad, + divisor_override); + } + else if(dims == 5) + { + cpu_avgpool_backward_3d(output_grad, + ref_input_grad, + N, + C, + D, + H, + W, + OD, + OH, + OW, + ksize, + stride, + padding, + count_include_pad, + divisor_override); + } + status = miopen::avgpool::AvgPoolBackward(handle, + output_grad.desc, + output_grad_dev.get(), + input_grad.desc, + input_grad_dev.get(), + ksize.GetSize() == 3 ? ksize[0] : 1, + ksize.GetSize() == 3 ? ksize[1] : ksize[0], + ksize.GetSize() == 3 ? ksize[2] : ksize[1], + stride.GetSize() == 3 ? stride[0] : 1, + stride.GetSize() == 3 ? stride[1] : stride[0], + stride.GetSize() == 3 ? stride[2] : stride[1], + padding.GetSize() == 3 ? padding[0] : 1, + padding.GetSize() == 3 ? padding[1] : padding[0], + padding.GetSize() == 3 ? 
padding[2] : padding[1], + count_include_pad, + divisor_override); + + ASSERT_EQ(status, miopenStatusSuccess); + + input_grad.data = handle.Read(input_grad_dev, input_grad.data.size()); + } + + void Verify() + { + double threshold = std::numeric_limits::epsilon(); + auto error = miopen::rms_range(ref_input_grad, input_grad); + ASSERT_EQ(miopen::range_distance(ref_input_grad), miopen::range_distance(input_grad)); + EXPECT_LT(error, threshold * 10); + } + AvgPoolTestCase avgpool_config; + + tensor output_grad; + tensor input_grad; + tensor ref_input_grad; + tensor ksize; + tensor stride; + tensor padding; + + bool ceil_mode; + bool count_include_pad; + int64_t divisor_override; + int64_t N, C, D, H, W, OD, OH, OW; + + miopen::Allocator::ManageDataPtr output_grad_dev; + miopen::Allocator::ManageDataPtr input_grad_dev; +};