diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ccb8cbab28..2d2e0ef69b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -336,11 +336,22 @@ set( MIOpen_Source solver/softmarginloss/forward_softmarginloss.cpp solver/softmax/attn_softmax.cpp solver/softmax/softmax.cpp + solver/tensorOp/Op1dTensorGeneric.cpp + solver/tensorOp/Op2dTensorGeneric.cpp + solver/tensorOp/Op2dTensorLite.cpp + solver/tensorOp/Op2dTensorSquash.cpp + solver/tensorOp/Op3dTensorGeneric.cpp + solver/tensorOp/Op4dTensorGeneric.cpp + solver/tensorOp/Op4dTensorLite.cpp + solver/tensorOp/Op5dTensorGeneric.cpp + solver/tensorOp/OpTensorFwdBias.cpp + solver/tensorOp/OpTensorLeadingOnes.cpp subbuffers.cpp t5layernorm_api.cpp target_properties.cpp temp_file.cpp tensor.cpp + tensorOp/problem_description.cpp tensor_api.cpp transformers_adam_w_api.cpp seq_tensor.cpp @@ -684,7 +695,6 @@ if( MIOPEN_BACKEND MATCHES "OpenCL" OR MIOPEN_BACKEND STREQUAL "HIPOC" OR MIOPEN ocl/lrn_ocl.cpp ocl/mloNorm.cpp ocl/pooling_ocl.cpp - ocl/tensorocl.cpp ocl/rnnocl.cpp ocl/utilocl.cpp ocl/ctcocl.cpp diff --git a/src/include/miopen/names.hpp b/src/include/miopen/names.hpp index 17b96b8732..bdf59c361c 100644 --- a/src/include/miopen/names.hpp +++ b/src/include/miopen/names.hpp @@ -34,6 +34,7 @@ struct NetworkConfig { NetworkConfig() = default; explicit NetworkConfig(const std::string& value_) : value(value_) {} + explicit NetworkConfig(std::string&& value_) noexcept : value(std::move(value_)) {} operator std::string() const { return value; } const std::string& ToString() const { return value; } diff --git a/src/include/miopen/tensorOp/invoke_params.hpp b/src/include/miopen/tensorOp/invoke_params.hpp new file mode 100644 index 0000000000..6b8f2ca88c --- /dev/null +++ b/src/include/miopen/tensorOp/invoke_params.hpp @@ -0,0 +1,78 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ *******************************************************************************/
+
+#pragma once
+
+#include <miopen/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+
+namespace miopen {
+
+namespace tensorOp {
+
+struct InvokeParams : public miopen::InvokeParams
+{
+    InvokeParams(const void* alpha0_,
+                 ConstData_t ATensor_,
+                 const void* alpha1_,
+                 ConstData_t BTensor_,
+                 const void* beta_,
+                 Data_t CTensor_,
+                 const size_t Aoffset_,
+                 const size_t Boffset_,
+                 const size_t Coffset_)
+        : alpha0(alpha0_),
+          alpha1(alpha1_),
+          beta(beta_),
+          ATensor(ATensor_),
+          BTensor(BTensor_),
+          CTensor(CTensor_),
+          Aoffset(Aoffset_),
+          Boffset(Boffset_),
+          Coffset(Coffset_)
+    {
+    }
+
+    size_t GetWorkspaceSize() const { return 0; }
+    Data_t GetWorkspace() const { return nullptr; }
+
+public:
+    const void* alpha0;
+    const void* alpha1;
+    const void* beta;
+
+    ConstData_t ATensor;
+    ConstData_t BTensor;
+    Data_t CTensor;
+
+    size_t Aoffset;
+    size_t Boffset;
+    size_t Coffset;
+};
+
+} // namespace tensorOp
+
+} // namespace miopen
diff --git a/src/include/miopen/tensorOp/problem_description.hpp b/src/include/miopen/tensorOp/problem_description.hpp
new file mode 100644
index 0000000000..ecbf189b3f
--- /dev/null
+++ b/src/include/miopen/tensorOp/problem_description.hpp
@@ -0,0 +1,130 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+
+#pragma once
+
+#include <miopen/problem_description_base.hpp>
+#include <miopen/tensor.hpp>
+
+namespace miopen {
+
+struct NetworkConfig;
+
+namespace tensorOp {
+
+struct MIOPEN_INTERNALS_EXPORT ProblemDescription : ProblemDescriptionBase
+{
+    ProblemDescription(const miopenTensorOp_t tensorOp_,
+                       const void* beta_,
+                       const TensorDescriptor& aTensorDesc_,
+                       const TensorDescriptor& bTensorDesc_,
+                       const TensorDescriptor& cTensorDesc_,
+                       const bool nonStandardSquash_)
+        : tensorOp(tensorOp_),
+          aTensorDesc(aTensorDesc_),
+          bTensorDesc(bTensorDesc_),
+          cTensorDesc(cTensorDesc_),
+          nonStandardSquash(nonStandardSquash_)
+    {
+        if(beta_ == nullptr)
+        {
+            MIOPEN_THROW(miopenStatusBadParm, "Beta value is nullptr");
+        }
+
+        beta = *(static_cast<const float*>(beta_));
+
+        if(aTensorDesc.GetElementSize() != cTensorDesc.GetElementSize())
+        {
+            MIOPEN_THROW("A and C Tensors do not match");
+        }
+
+        if(bTensorDesc.GetType() != cTensorDesc.GetType())
+        {
+            MIOPEN_THROW("Datatypes for B and C tensors do not match!");
+        }
+
+        const auto& blens = bTensorDesc.GetLengths();
+        const auto& clens = cTensorDesc.GetLengths();
+
+        if(clens.size() > 5)
+        {
+            MIOPEN_THROW("Tensor dimension larger than 5: " + std::to_string(clens.size()));
+        }
+
+        if(blens.size() != clens.size())
+        {
+            MIOPEN_THROW("Number of dims in B and C Tensors do not match: " +
+                         std::to_string(blens.size()) + ", " + std::to_string(clens.size()));
+        }
+
+        if(!nonStandardSquash)
+        {
+            constexpr auto comparator = [](size_t c, size_t b) { return b == 1 || b == c; };
+            const auto [c_diff, b_diff] =
+                std::mismatch(clens.begin(), clens.end(), blens.begin(), comparator);
+            if(c_diff != clens.end())
+                MIOPEN_THROW("BTensor dim != 1 && BTensor dim != CTensor dim: " +
+                             std::to_string(std::distance(clens.begin(), c_diff)));
+        }
+        else
+        {
+            // non-standard behavior because blens[1] may not be equal to clens[1]
+            if(!(clens.size() == 3 && blens[0] == 1 && clens[0] == 1 && blens[2] == clens[2]))
+            {
+                MIOPEN_THROW(
+                    "Non standard squashed operation supported only for 3d tensors and for "
+                    "the specific configuration");
+            }
+        }
+    }
+
+    miopenTensorOp_t GetTensorOp() const { return tensorOp; }
+
+    float GetBeta() const { return beta; }
+
+    const TensorDescriptor& GetATensorDesc() const { return aTensorDesc; }
+    const TensorDescriptor& GetBTensorDesc() const { return bTensorDesc; }
+    const TensorDescriptor& GetCTensorDesc() const { return cTensorDesc; }
+
+    bool GetNonStandardSquash() const { return nonStandardSquash; }
+
+    NetworkConfig MakeNetworkConfig() const override;
+
+private:
+    const miopenTensorOp_t tensorOp;
+
+    float beta;
+
+    TensorDescriptor aTensorDesc;
+    TensorDescriptor bTensorDesc;
+    TensorDescriptor cTensorDesc;
+
+    const bool nonStandardSquash;
+};
+
+} // namespace tensorOp
+
+} // namespace miopen
diff --git a/src/include/miopen/tensorOp/solvers.hpp b/src/include/miopen/tensorOp/solvers.hpp
new file mode 100644
index 0000000000..635d0ab777
--- /dev/null
+++ b/src/include/miopen/tensorOp/solvers.hpp
@@ -0,0 +1,216 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+
+#pragma once
+
+#include <miopen/solver.hpp>
+#include <miopen/tensorOp/problem_description.hpp>
+
+#include <utility>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+using TensorOpSolver = NonTunableSolverBase<ExecutionContext, miopen::tensorOp::ProblemDescription>;
+
+struct Op1dTensorGeneric final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op1dTensorGeneric>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op2dTensorGeneric final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op2dTensorGeneric>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op2dTensorLite final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op2dTensorLite>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op2dTensorSquash final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op2dTensorSquash>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op3dTensorGeneric final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op3dTensorGeneric>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct OpTensorFwdBias final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<OpTensorFwdBias>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op4dTensorLite final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op4dTensorLite>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct OpTensorLeadingOnes final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<OpTensorLeadingOnes>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op4dTensorGeneric final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op4dTensorGeneric>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+struct Op5dTensorGeneric final : TensorOpSolver
+{
+    const std::string& SolverDbId() const override { return GetSolverDbId<Op5dTensorGeneric>(); }
+
+    bool IsApplicable(const ExecutionContext& context,
+                      const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    ConvSolution GetSolution(const ExecutionContext& context,
+                             const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    std::size_t
+    GetWorkspaceSize(const ExecutionContext& context,
+                     const miopen::tensorOp::ProblemDescription& problem) const override;
+
+    bool MayNeedWorkspace() const override { return false; }
+};
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
diff --git a/src/kernels/MIOpenTensorKernels.cl b/src/kernels/MIOpenTensorKernels.cl
index cc47d8e6ce..8203dad1f0 100644
--- a/src/kernels/MIOpenTensorKernels.cl
+++ b/src/kernels/MIOpenTensorKernels.cl
@@ -23,24 +23,7 @@
  * SOFTWARE.
  *
  *******************************************************************************/
-
-#if MIOPEN_USE_FP16 == 1
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
-#define _FLOAT half
-#ifndef HALF_MAX
-#define MAX_VAL 65504 /* max value */
-#else
-#define MAX_VAL HALF_MAX
-#endif
-#endif
-#if MIOPEN_USE_FP32 == 1
-#define _FLOAT float
-#ifndef FLT_MAX
-#define MAX_VAL 3.402823466e+38F /* max value */
-#else
-#define MAX_VAL FLT_MAX
-#endif
-#endif
 
 /* Only works for NCHW
  * bitmap tracks which dims are the same between 'a' and 'c'.
diff --git a/src/ocl/tensorocl.cpp b/src/ocl/tensorocl.cpp
deleted file mode 100644
index 81d735a2bc..0000000000
--- a/src/ocl/tensorocl.cpp
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*******************************************************************************
- *
- * MIT License
- *
- * Copyright (c) 2023 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- * - *******************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MIO_TENSOROCL_DEBUG 0 - -namespace miopen { - -TensorDescriptor GetFlattenedTensorDescriptor(const TensorDescriptor& desc) -{ - // is packed - if(desc.IsPacked()) - return {desc.GetType(), {desc.GetElementSize()}, {static_cast(1)}}; - - // start flattening tensor - std::vector flat_lengths; - std::vector flat_strides; - - auto non1_length_strides = boost::combine(desc.GetLengths(), desc.GetStrides()) | - boost::adaptors::filtered(f_length_is_not_1_t()); - - auto i = non1_length_strides.begin(); - std::size_t flat_len = boost::get<0>(*i); - auto i_previous = i++; - - // the 0-th dimension full-length doesn't matter - for(; i != non1_length_strides.end(); ++i) - { - std::size_t len = boost::get<0>(*i); - std::size_t stride = boost::get<1>(*i); - std::size_t previous_stride = boost::get<1>(*i_previous); - std::size_t full_len = previous_stride / stride; - - if(len == full_len) - { - flat_len *= len; - } - else - { - flat_lengths.push_back(flat_len); - flat_strides.push_back(previous_stride); - flat_len = len; - } - i_previous = i; - } - flat_lengths.push_back(flat_len); - flat_strides.push_back(boost::get<1>(*i_previous)); - - return {desc.GetType(), flat_lengths, flat_strides}; -} - -// Free Tensor Functions -static void CreateBitmapAndGrid(unsigned int& bitmap, - const std::vector& a_lens, - const std::vector& c_lens, - int& num_wg, - int& work, - int d) -{ - for(int i = d; i >= 0; i--) - { - if(a_lens[i] != 1) - { - bitmap |= (1 << (a_lens.size() - (i + 1))); - num_wg *= a_lens[i]; - } - else - { - work *= c_lens[i]; - } - } -} - -static bool IsBitmapLeadingOnes(unsigned int bitmap, int n_size, int first_not_one) -{ - bool leading_ones = true; - - for(int i = first_not_one; i >= 0; i--) - { - bool is_one = (bitmap & (1 << (n_size - 1 - i))) != 0u; - leading_ones &= is_one; - } - return leading_ones; -} - -void OpTensor3d(const Handle& handle, - miopenTensorOp_t tensorOp, - const void* alpha0, - const TensorDescriptor& aTensorDesc, - ConstData_t ATensor, - const void* alpha1, - const TensorDescriptor& bTensorDesc, - ConstData_t BTensor, - const void* beta, - const TensorDescriptor& cTensorDesc, - Data_t CTensor, - const size_t Aoffset, - const size_t Boffset, - const size_t Coffset, - const bool nonStandardSquash) -{ - auto alens = aTensorDesc.GetLengths(); - auto blens = bTensorDesc.GetLengths(); - auto clens = cTensorDesc.GetLengths(); - - auto astrides = aTensorDesc.GetStrides(); - auto bstrides = bTensorDesc.GetStrides(); - auto cstrides = cTensorDesc.GetStrides(); - - auto bsize = blens.size(); - - // first_not_one is incorrect if btensor size equal to 1 - auto first_not_one = std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; }); - auto d = std::distance(blens.begin(), first_not_one.base()); - - // quick fix - int num_wg = first_not_one != blens.rend() - ? static_cast(*first_not_one == 0 ? 
1 : *first_not_one) - : 1; - int work_per_wg = std::accumulate(clens.begin() + d, clens.end(), 1, std::multiplies()); - - unsigned int bitmap = 0; - // update bitmap for first_not_one - bitmap |= (1 << (bsize - d)); - - // (d-2) is because distance starts from 1 and 0 - // also, we need to go past the "first_not_one" as that is already - // accounted for in the bitmap - CreateBitmapAndGrid(bitmap, blens, clens, num_wg, work_per_wg, static_cast(d - 2)); - -#if(MIO_TENSOROCL_DEBUG == 1) - printf("bitmap: %u\n", bitmap); - printf("work_per_wg: %d, num_wg: %d\n", work_per_wg, num_wg); -#endif - - int max_num_wg = 4096; - num_wg = num_wg > max_num_wg ? max_num_wg : num_wg; - - size_t local_threads = 256; - - std::string network_config{}; - - network_config = std::to_string(bTensorDesc.GetType()) + "-" + - std::to_string(aTensorDesc.GetType()) + "-" + std::to_string(tensorOp) + "-"; - - // for naive tensor ops - size_t RD_BLCK = (clens[2] % 4 == 0) ? 4 : (clens[2] % 2 == 0) ? 2 : 1; - const std::string data_type = GetDataType(bTensorDesc.GetType()); - const std::string READ_TYPE = (RD_BLCK == 1) ? data_type : data_type + std::to_string(RD_BLCK); - - size_t total_work = std::max(clens[2] / RD_BLCK, size_t(1)); - size_t grp_sz = (total_work + local_threads - 1) / local_threads; - - // opencl kernels are no longer supported, fallback to generic case - bool lite_applicable = grp_sz <= size_t(max_num_wg); - - bool is_lite = clens[0] == 1 && blens[0] == 1 && alens[0] == 1 && - (blens[1] == clens[1] || blens[1] == 1) && blens[2] == clens[2]; - - bool is_squashed = nonStandardSquash && !is_lite && - (blens[0] == 1 && clens[0] == 1 && clens[1] == 1 && blens[2] == clens[2]); - - grp_sz = std::min(size_t(max_num_wg), grp_sz); - size_t glb_sz = local_threads * grp_sz; - - size_t local_threads2 = 64; - size_t total_work2 = clens[1]; - size_t grp_sz2 = (total_work2 + local_threads2 - 1) / local_threads2; - grp_sz2 = std::min(size_t(max_num_wg / grp_sz), grp_sz2); - size_t glb_sz2 = local_threads2 * grp_sz2; - - visit_float(bTensorDesc.GetType(), [&](auto as_float) { - auto miopen_alpha0 = as_float(*(static_cast(alpha0))); - auto miopen_alpha1 = as_float(*(static_cast(alpha1))); - auto miopen_beta = as_float(*(static_cast(beta))); - - if(lite_applicable && is_lite) - { - - network_config += std::to_string(RD_BLCK) + "x" + std::to_string(local_threads) + "x" + - std::to_string(grp_sz) + std::to_string(local_threads2) + - std::to_string(grp_sz2); - - auto&& kernels = handle.GetKernels("Op2dTensorLite", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - - kernel(ATensor, - static_cast(astrides[1]), // a_cstride, - BTensor, - static_cast(bstrides[1]), // b_cstride, - CTensor, - static_cast(cstrides[1]), // c_cstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(total_work), - static_cast(total_work2), - static_cast(!float_equal(miopen_beta, 0.0)), - static_cast(blens[1] == 1)); - - return; - } - } - else if(is_squashed) - { - network_config += std::to_string(RD_BLCK) + "x" + std::to_string(local_threads) + "x" + - std::to_string(grp_sz); - - auto&& kernels = handle.GetKernels("Op2dTensorSquash", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - - kernel(ATensor, - BTensor, - static_cast(blens[1]), // b_c, - static_cast(bstrides[1]), // b_cstride, - CTensor, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - 
static_cast(Coffset), - static_cast(total_work), - static_cast(!float_equal(miopen_alpha0, 0.0)), - static_cast(!float_equal(miopen_alpha1, 0.0)), - static_cast(!float_equal(miopen_beta, 0.0))); - - return; - } - } - else - { - - network_config += std::to_string(max_num_wg) + "-" + std::to_string(local_threads) + - "x" + std::to_string(num_wg); - - auto&& kernels = handle.GetKernels("Op3dTensorGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - - kernel(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), // b_c, - static_cast(blens[2] == 1 ? clens[2] : blens[2]), // b_h, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_h, - static_cast(astrides[0]), // a_nstride, - static_cast(astrides[1]), // a_cstride, - static_cast(astrides[2]), // a_hstride, - static_cast(blens[0] == 1 ? 0 : bstrides[0]), // b_nstride, - static_cast(blens[1] == 1 ? 0 : bstrides[1]), // b_cstride, - static_cast(blens[2] == 1 ? 0 : bstrides[2]), // b_hstride, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - - return; - } - } - - std::string parms = " -DMIOPEN_TYPE=" + GetDataType(bTensorDesc.GetType()); - - parms += GetDataTypeKernelParams(aTensorDesc.GetType()); - - parms += " -DMIOPEN_TENSOR_OP="; - switch(tensorOp) - { - case 0: parms += "miopenAdd"; break; - case 1: parms += "miopenMul"; break; - case 2: parms += "miopenMin"; break; - case 3: parms += "miopenMax"; break; - } - std::string program_name = "MIOpenTensorKernels.cl"; - - if(lite_applicable && is_lite) - { - parms += " -DUSE_2D_TENSOR_LITE"; - parms += " -DRD_BLCK=" + std::to_string(RD_BLCK) + " -DREAD_TYPE=" + READ_TYPE; - - const std::vector vld{local_threads, 1, 1}; - const std::vector vgd1{glb_sz, glb_sz2, 1}; - - handle.AddKernel( - "Op2dTensorLite", network_config, program_name, "Op2dTensorLite", vld, vgd1, parms)( - ATensor, - static_cast(astrides[1]), // a_cstride, - BTensor, - static_cast(bstrides[1]), // b_cstride, - CTensor, - static_cast(cstrides[1]), // c_cstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(total_work), - static_cast(total_work2), - static_cast(!float_equal(miopen_beta, 0.0)), - static_cast(blens[1] == 1)); - } - else if(is_squashed) - { - parms += " -DUSE_2D_TENSOR_SQUASH"; - parms += " -DRD_BLCK=" + std::to_string(RD_BLCK) + " -DREAD_TYPE=" + READ_TYPE; - - const std::vector vld{local_threads, 1, 1}; - const std::vector vgd1{glb_sz, 1, 1}; - - handle.AddKernel("Op2dTensorSquash", - network_config, - program_name, - "Op2dTensorSquash", - vld, - vgd1, - parms)(ATensor, - BTensor, - static_cast(blens[1]), // b_c, - static_cast(bstrides[1]), // b_cstride, - CTensor, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(total_work), - static_cast(!float_equal(miopen_alpha0, 0.0)), - static_cast(!float_equal(miopen_alpha1, 0.0)), - static_cast(!float_equal(miopen_beta, 0.0))); - } - else - { - // Special case for adding tensors in place - program_name = "MIOpenTensorKernelsHip.cpp"; - local_threads = 32; - num_wg = std::clamp( - (clens[0] * clens[1] * clens[2]) / local_threads, size_t(1), size_t(max_num_wg)); - num_wg = 
num_wg > max_num_wg ? max_num_wg : num_wg; - - size_t global_threads; - global_threads = num_wg * local_threads; - const std::vector vld{local_threads, 1, 1}; - const std::vector vgd{global_threads, 1, 1}; - - parms += " -DUSE_3D_TENSOR_GENERIC"; - - handle.AddKernel("Op3dTensorGeneric", - network_config, - program_name, - "Op3dTensorGeneric", - vld, - vgd, - parms)( - ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), // b_c, - static_cast(blens[2] == 1 ? clens[2] : blens[2]), // b_h, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_h, - static_cast(astrides[0]), // a_nstride, - static_cast(astrides[1]), // a_cstride, - static_cast(astrides[2]), // a_hstride, - static_cast(blens[0] == 1 ? 0 : bstrides[0]), // b_nstride, - static_cast(blens[1] == 1 ? 0 : bstrides[1]), // b_cstride, - static_cast(blens[2] == 1 ? 0 : bstrides[2]), // b_hstride, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - }); -} - -void OpTensor4d(const Handle& handle, - miopenTensorOp_t tensorOp, - const void* alpha0, - const TensorDescriptor& aTensorDesc, - ConstData_t ATensor, - const void* alpha1, - const TensorDescriptor& bTensorDesc, - ConstData_t BTensor, - const void* beta, - const TensorDescriptor& cTensorDesc, - Data_t CTensor, - const size_t Aoffset, - const size_t Boffset, - const size_t Coffset) -{ - auto blens = bTensorDesc.GetLengths(); - auto clens = cTensorDesc.GetLengths(); - auto dims = clens.size(); - - auto astrides = aTensorDesc.GetStrides(); - auto bstrides = bTensorDesc.GetStrides(); - auto bsize = blens.size(); - auto cstrides = cTensorDesc.GetStrides(); - - // first_not_one is incorrect if btensor size equal to 1 - auto first_not_one = std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; }); - auto d = std::distance(blens.begin(), first_not_one.base()); - - // quick fix - int num_wg = first_not_one != blens.rend() - ? static_cast(*first_not_one == 0 ? 1 : *first_not_one) - : 1; - int work_per_wg = std::accumulate(clens.begin() + d, clens.end(), 1, std::multiplies()); - - unsigned int bitmap = 0; - // update bitmap for first_not_one - bitmap |= (1 << (bsize - d)); - - // (d-2) is because distance starts from 1 and 0 - // also, we need to go past the "first_not_one" as that is already - // accounted for in the bitmap - CreateBitmapAndGrid(bitmap, blens, clens, num_wg, work_per_wg, static_cast(d - 2)); - - // quick fix for btensor = <1, 1, 1, 1> - if(bTensorDesc.GetElementSize() == 1) - bitmap = 4; - -#if(MIO_TENSOROCL_DEBUG == 1) - printf("bitmap: %u\n", bitmap); - printf("work_per_wg: %d, num_wg: %d\n", work_per_wg, num_wg); -#endif - - // Forward Convolution Bias specialization - // for fwd-bias, bitmap looks like <0, 1, 0, 0> - // Is the no. of work-groups and the work for each wg balanced? - auto fwd_conv_bias = bitmap == (1 << 2) ? 1 : 0; - auto incr_wg = 0; - // This block gives off indexing for 5d tensors, skipping - if(fwd_conv_bias == 1 && dims < 5 && num_wg < 640 && work_per_wg > 256 && clens[0] > 0) - { // 640 workgroups of size 256 needed to completely fill the GPU - - work_per_wg /= clens[0]; // c_n; - num_wg *= clens[0]; // c_n; - incr_wg = 1; - } - - int num_wg_orig = num_wg; - int max_num_wg = 4096; - num_wg = num_wg > max_num_wg ? 
max_num_wg : num_wg; - - size_t local_threads = 256; - - // Does the bitmap contain leading ones, i.e. 1,1,1,0 or 1,1,0,0 - // or 1,1,1,1 or 1,0,0,0 - bool leading_ones = IsBitmapLeadingOnes(bitmap, dims, static_cast(d - 2)); - if(leading_ones && work_per_wg < 64) - { - local_threads = 64; - } - - std::string program_name = "MIOpenTensorKernels.cl"; - - const std::vector vld{local_threads, 1, 1}; - - // Special case for adding tensors in place - size_t global_threads; - global_threads = - (static_cast(leading_ones) == 1 && (d - 1) == 3) ? num_wg : num_wg * local_threads; - global_threads = (global_threads < local_threads) ? local_threads : global_threads; - - const std::vector vgd{global_threads, 1, 1}; - - bool packed_tensor = true; - - // auto alens = aTensorDesc.GetLengths(); - packed_tensor &= aTensorDesc.IsPacked(); - packed_tensor &= bTensorDesc.IsPacked(); - packed_tensor &= cTensorDesc.IsPacked(); - - bool packed_equal_tensor = - packed_tensor && (bTensorDesc.GetElementSize() == cTensorDesc.GetElementSize()); - -#if(MIO_TENSOROCL_DEBUG == 1) - printf("packed_tensor: %d\n", packed_tensor); - printf("equal_tensor: %d\n", bTensorDesc.GetElementSize() == cTensorDesc.GetElementSize()); -#endif - - // for naive tensor ops - const std::string data_type = GetDataType(bTensorDesc.GetType()); - - size_t TENS_LEN = cTensorDesc.GetElementSize(); - size_t RD_BLCK = (TENS_LEN % 4 == 0) ? 4 : (TENS_LEN % 2 == 0) ? 2 : 1; - const std::string READ_TYPE = (RD_BLCK == 1) ? data_type : data_type + std::to_string(RD_BLCK); - - size_t total_work = std::max(TENS_LEN / RD_BLCK, size_t(1)); - size_t grp_sz = (total_work + local_threads - 1) / local_threads; - grp_sz = std::min(size_t(max_num_wg), grp_sz); - size_t glb_sz = local_threads * grp_sz; - - std::string network_config{}; - network_config += - std::to_string(bTensorDesc.GetType()) + "-" + std::to_string(aTensorDesc.GetType()) + "-" + - std::to_string(tensorOp) + "-" + std::to_string(max_num_wg) + "-" + - ((fwd_conv_bias == 0 && packed_equal_tensor) ? 
"" : std::to_string(global_threads)) + "-" + - std::to_string(local_threads); - - visit_float(bTensorDesc.GetType(), [&](auto as_float) { - auto miopen_alpha0 = as_float(*(static_cast(alpha0))); - auto miopen_alpha1 = as_float(*(static_cast(alpha1))); - auto miopen_beta = as_float(*(static_cast(beta))); - - if(fwd_conv_bias != 0) - { - if(packed_tensor) - { - auto&& kernels = handle.GetKernels("OpTensorFwdBias", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - BTensor, - static_cast(blens[1]), - CTensor, - static_cast(clens[0]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - work_per_wg, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - static_cast(incr_wg)); - - return; - } - } - else - { - - auto&& kernels = handle.GetKernels("OpTensorFwdBiasGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - BTensor, - static_cast(blens[1]), - static_cast(bstrides[1]), - CTensor, - static_cast(clens[0]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - static_cast(cstrides[2]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - static_cast(incr_wg)); - return; - } - } - } - // precede leading_ones for bitmap = 1,1,1,1 - else if(packed_equal_tensor) - { - network_config += "x" + std::to_string(grp_sz) + "x" + std::to_string(RD_BLCK); - auto&& kernels = handle.GetKernels("Op4dTensorLite", network_config); - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - BTensor, - CTensor, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(total_work), - static_cast(!float_equal(miopen_beta, 0.0))); - return; - } - } - else if(leading_ones) - { - if(packed_tensor) - { - - auto&& kernels = handle.GetKernels("OpTensorLeadingOnes", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - BTensor, - CTensor, - static_cast(clens[1]), - static_cast(clens[2]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - work_per_wg, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - bitmap); - - return; - } - } - else - { - auto&& kernels = handle.GetKernels("OpTensorLeadingOnesGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - BTensor, - static_cast(bstrides[0]), - static_cast(bstrides[1]), - static_cast(bstrides[2]), - CTensor, - static_cast(clens[1]), - static_cast(clens[2]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - static_cast(cstrides[2]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - bitmap); - return; - } - } - } - else - { - auto&& kernels = handle.GetKernels("Op4dTensorGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - static_cast(astrides[0]), // 
a_nstride, - static_cast(astrides[1]), // a_cstride, - static_cast(astrides[2]), // a_hstride, - BTensor, - static_cast(blens[1]), // b_c, - static_cast(blens[2]), // b_h, - static_cast(blens[3]), // b_w, - static_cast(bstrides[0]), // b_nstride, - static_cast(bstrides[1]), // b_cstride, - static_cast(bstrides[2]), // b_hstride, - CTensor, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_h, - static_cast(clens[3]), // c_w, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - bitmap, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig)); - return; - } - } - - std::string parms = " -DMIOPEN_TYPE=" + GetDataType(bTensorDesc.GetType()) + - " -DMAX_NUM_WG=" + std::to_string(max_num_wg); - - parms += GetDataTypeKernelParams(aTensorDesc.GetType()); - - parms += " -DMIOPEN_TENSOR_OP="; - switch(tensorOp) - { - case 0: parms += "miopenAdd"; break; - case 1: parms += "miopenMul"; break; - case 2: parms += "miopenMin"; break; - case 3: parms += "miopenMax"; break; - } - - if(fwd_conv_bias != 0) - { - if(packed_tensor) - { - parms += " -DUSE_FWD_BIAS"; - - handle.AddKernel("OpTensorFwdBias", - network_config, - program_name, - "OpTensorFwdBias", - vld, - vgd, - parms)(ATensor, - BTensor, - static_cast(blens[1]), - CTensor, - static_cast(clens[0]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - work_per_wg, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - static_cast(incr_wg)); - } - else - { - parms += " -DUSE_FWD_BIAS_GENERIC"; - handle.AddKernel("OpTensorFwdBiasGeneric", - network_config, - program_name, - "OpTensorFwdBiasGeneric", - vld, - vgd, - parms)(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - BTensor, - static_cast(blens[1]), - static_cast(bstrides[1]), - CTensor, - static_cast(clens[0]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - static_cast(cstrides[2]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - static_cast(incr_wg)); - } - } - // precede leading_ones for bitmap = 1,1,1,1 - else if(packed_equal_tensor) - { - parms += " -DUSE_4D_TENSOR_LITE"; - parms += " -DRD_BLCK=" + std::to_string(RD_BLCK) + " -DREAD_TYPE=" + READ_TYPE; - - const std::vector vgd1{glb_sz, 1, 1}; - - handle.AddKernel( - "Op4dTensorLite", network_config, program_name, "Op4dTensorLite", vld, vgd1, parms)( - ATensor, - BTensor, - CTensor, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(total_work), - static_cast(!float_equal(miopen_beta, 0.0))); - } - else if(leading_ones) - { - if(packed_tensor) - { - parms += " -DUSE_LEADING_ONES"; - handle.AddKernel("OpTensorLeadingOnes", - network_config, - program_name, - "OpTensorLeadingOnes", - vld, - vgd, - parms)(ATensor, - BTensor, - CTensor, - static_cast(clens[1]), - static_cast(clens[2]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - work_per_wg, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - bitmap); - } - else - { - - parms += " 
-DUSE_LEADING_ONES_GENERIC"; - - handle.AddKernel("OpTensorLeadingOnesGeneric", - network_config, - program_name, - "OpTensorLeadingOnesGeneric", - vld, - vgd, - parms)(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - BTensor, - static_cast(bstrides[0]), - static_cast(bstrides[1]), - static_cast(bstrides[2]), - CTensor, - static_cast(clens[1]), - static_cast(clens[2]), - static_cast(clens[3]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - static_cast(cstrides[2]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig), - bitmap); - } - } - else - { - parms += " -DUSE_4D_TENSOR_GENERIC"; - - handle.AddKernel("Op4dTensorGeneric", - network_config, - program_name, - "Op4dTensorGeneric", - vld, - vgd, - parms)(ATensor, - static_cast(astrides[0]), // a_nstride, - static_cast(astrides[1]), // a_cstride, - static_cast(astrides[2]), // a_hstride, - BTensor, - static_cast(blens[1]), // b_c, - static_cast(blens[2]), // b_h, - static_cast(blens[3]), // b_w, - static_cast(bstrides[0]), // b_nstride, - static_cast(bstrides[1]), // b_cstride, - static_cast(bstrides[2]), // b_hstride, - CTensor, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_h, - static_cast(clens[3]), // c_w, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - bitmap, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig)); - } - }); -} - -void OpTensorOther(const Handle& handle, - miopenTensorOp_t tensorOp, - const void* alpha0, - const TensorDescriptor& aTensorDesc, - ConstData_t ATensor, - const void* alpha1, - const TensorDescriptor& bTensorDesc, - ConstData_t BTensor, - const void* beta, - const TensorDescriptor& cTensorDesc, - Data_t CTensor, - const size_t Aoffset, - const size_t Boffset, - const size_t Coffset) -{ - auto blens = bTensorDesc.GetLengths(); - auto clens = cTensorDesc.GetLengths(); - - auto astrides = aTensorDesc.GetStrides(); - auto bstrides = bTensorDesc.GetStrides(); - auto bsize = blens.size(); - auto cstrides = cTensorDesc.GetStrides(); - - const bool case_1d = bsize == 1; - const bool case_2d = bsize == 2; - const bool case_5d = bsize == 5; - - const bool use_hip = case_1d || case_2d; - - // first_not_one is incorrect if btensor size equal to 1 - auto first_not_one = std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; }); - auto d = std::distance(blens.begin(), first_not_one.base()); - - // quick fix - int num_wg = first_not_one != blens.rend() - ? static_cast(*first_not_one == 0 ? 
1 : *first_not_one) - : 1; - int work_per_wg = std::accumulate(clens.begin() + d, clens.end(), 1, std::multiplies()); - - unsigned int bitmap = 0; - // update bitmap for first_not_one - bitmap |= (1 << (bsize - d)); - - // (d-2) is because distance starts from 1 and 0 - // also, we need to go past the "first_not_one" as that is already - // accounted for in the bitmap - CreateBitmapAndGrid(bitmap, blens, clens, num_wg, work_per_wg, static_cast(d - 2)); - -#if(MIO_TENSOROCL_DEBUG == 1) - printf("bitmap: %u\n", bitmap); - printf("work_per_wg: %d, num_wg: %d\n", work_per_wg, num_wg); -#endif - - int num_wg_orig = num_wg; - int max_num_wg = 4096; - - size_t local_threads = 256; - - if(case_2d) - local_threads = 32; - - if(case_1d) - num_wg = std::clamp(clens[0] / local_threads, size_t(1), size_t(max_num_wg)); - if(case_2d) - num_wg = std::clamp((clens[0] * clens[1]) / local_threads, size_t(1), size_t(max_num_wg)); - num_wg = num_wg > max_num_wg ? max_num_wg : num_wg; - - const std::vector vld{local_threads, 1, 1}; - - // Special case for adding tensors in place - size_t global_threads; - global_threads = num_wg * local_threads; - - const std::vector vgd{global_threads, 1, 1}; - - std::string program_name = use_hip ? "MIOpenTensorKernelsHip.cpp" : "MIOpenTensorKernels.cl"; - - std::string network_config{}; - network_config += std::to_string(bTensorDesc.GetType()) + "-" + - std::to_string(aTensorDesc.GetType()) + "-" + std::to_string(tensorOp) + "-" + - std::to_string(global_threads) + "-" + std::to_string(local_threads); - - if(case_1d || case_2d) - { - if(aTensorDesc.AllDimsFitIntoInt()) - { - network_config += "-32bit"; - } - else - { - network_config += "-64bit"; - } - } - - visit_float(bTensorDesc.GetType(), [&](auto as_float) { - auto miopen_alpha0 = as_float(*(static_cast(alpha0))); - auto miopen_alpha1 = as_float(*(static_cast(alpha1))); - auto miopen_beta = as_float(*(static_cast(beta))); - - if(case_5d) - { - auto&& kernels = handle.GetKernels("Op5dTensorGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - kernel(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - static_cast(astrides[3]), - BTensor, - static_cast(blens[1]), // b_c, - static_cast(blens[2]), // b_d, - static_cast(blens[3]), // b_h, - static_cast(blens[4]), // b_w, - static_cast(bstrides[0]), // b_nstride, - static_cast(bstrides[1]), // b_cstride, - static_cast(bstrides[2]), // b_dstride, - static_cast(bstrides[3]), // b_hstride, - CTensor, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_d, - static_cast(clens[3]), // c_h, - static_cast(clens[4]), // c_w, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_dstride, - static_cast(cstrides[3]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - bitmap, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig)); - return; - } - } - else if(case_2d) - { - auto&& kernels = handle.GetKernels("Op2dTensorGeneric", network_config); - - if(!kernels.empty()) - { - auto kernel = kernels.front(); - - if(aTensorDesc.AllDimsFitIntoInt()) - { - kernel(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), - static_cast(clens[1]), - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(blens[1] == 1 ? 
0 : bstrides[1]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - else - { - kernel(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), - static_cast(clens[1]), - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(blens[1] == 1 ? 0 : bstrides[1]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - - return; - } - } - else if(case_1d) - { - auto&& kernels = handle.GetKernels("Op1dTensorGeneric", network_config); - - if(!kernels.empty()) - { - - auto kernel = kernels.front(); - - if(aTensorDesc.AllDimsFitIntoInt()) - { - kernel(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(astrides[0]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(cstrides[0]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - else - { - kernel(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(astrides[0]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(cstrides[0]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - - return; - } - } - - std::string parms = " -DMIOPEN_TYPE=" + GetDataType(bTensorDesc.GetType()) + - " -DMAX_NUM_WG=" + std::to_string(max_num_wg); - - parms += GetDataTypeKernelParams(aTensorDesc.GetType()); - - parms += " -DMIOPEN_TENSOR_OP="; - switch(tensorOp) - { - case 0: parms += "miopenAdd"; break; - case 1: parms += "miopenMul"; break; - case 2: parms += "miopenMin"; break; - case 3: parms += "miopenMax"; break; - } - - if(aTensorDesc.AllDimsFitIntoInt()) - { - parms += " -DDIM_TYPE=uint32_t"; - } - else - { - parms += " -DDIM_TYPE=uint64_t"; - } - - if(case_5d) - { - parms += " -DUSE_5D_TENSOR_GENERIC"; - - handle.AddKernel("Op5dTensorGeneric", - network_config, - program_name, - "Op5dTensorGeneric", - vld, - vgd, - parms)(ATensor, - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(astrides[2]), - static_cast(astrides[3]), - BTensor, - static_cast(blens[1]), // b_c, - static_cast(blens[2]), // b_d, - static_cast(blens[3]), // b_h, - static_cast(blens[4]), // b_w, - static_cast(bstrides[0]), // b_nstride, - static_cast(bstrides[1]), // b_cstride, - static_cast(bstrides[2]), // b_dstride, - static_cast(bstrides[3]), // b_hstride, - CTensor, - static_cast(clens[1]), // c_c, - static_cast(clens[2]), // c_d, - static_cast(clens[3]), // c_h, - static_cast(clens[4]), // c_w, - static_cast(cstrides[0]), // c_nstride, - static_cast(cstrides[1]), // c_cstride, - static_cast(cstrides[2]), // c_dstride, - static_cast(cstrides[3]), // c_hstride, - miopen_alpha0, - miopen_alpha1, - miopen_beta, - bitmap, - work_per_wg, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(num_wg_orig)); - } - else if(case_2d) - { - parms += " -DUSE_2D_TENSOR_GENERIC"; - - if(aTensorDesc.AllDimsFitIntoInt()) - { - handle.AddKernel("Op2dTensorGeneric", - network_config, - program_name, - "Op2dTensorGeneric", - vld, - vgd, - parms)(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - 
static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), - static_cast(clens[1]), - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(blens[1] == 1 ? 0 : bstrides[1]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - else - { - handle.AddKernel("Op2dTensorGeneric", - network_config, - program_name, - "Op2dTensorGeneric", - vld, - vgd, - parms)(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(blens[1] == 1 ? clens[1] : blens[1]), - static_cast(clens[1]), - static_cast(astrides[0]), - static_cast(astrides[1]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(blens[1] == 1 ? 0 : bstrides[1]), - static_cast(cstrides[0]), - static_cast(cstrides[1]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - } - else if(case_1d) - { - parms += " -DUSE_1D_TENSOR_GENERIC"; - - if(aTensorDesc.AllDimsFitIntoInt()) - { - handle.AddKernel("Op1dTensorGeneric", - network_config, - program_name, - "Op1dTensorGeneric", - vld, - vgd, - parms)(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(astrides[0]), - static_cast(blens[0] == 1 ? 0 : bstrides[0]), - static_cast(cstrides[0]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - else - { - handle.AddKernel("Op1dTensorGeneric", - network_config, - program_name, - "Op1dTensorGeneric", - vld, - vgd, - parms)(ATensor, - BTensor, - CTensor, - static_cast(Aoffset), - static_cast(Boffset), - static_cast(Coffset), - static_cast(astrides[0]), - static_cast(blens[0] == 1 ? 
0 : bstrides[0]), - static_cast(cstrides[0]), - miopen_alpha0, - miopen_alpha1, - miopen_beta, - static_cast(clens[0]), - !float_equal(miopen_beta, 0.0)); - } - } - }); -} - -void OpTensor(const Handle& handle, - miopenTensorOp_t tensorOp, - const void* alpha0, - const TensorDescriptor& aTensorDesc, - ConstData_t ATensor, - const void* alpha1, - const TensorDescriptor& bTensorDesc, - ConstData_t BTensor, - const void* beta, - const TensorDescriptor& cTensorDesc, - Data_t CTensor, - const size_t Aoffset, - const size_t Boffset, - const size_t Coffset, - bool nonStandardSquash) -{ - if(ATensor == nullptr || BTensor == nullptr || CTensor == nullptr) - { - MIOPEN_THROW(miopenStatusBadParm); - } - - // if(aTensorDesc != cTensorDesc) - if(aTensorDesc.GetElementSize() != cTensorDesc.GetElementSize()) - { - MIOPEN_THROW("A and C Tensors do not match"); - } - - if(bTensorDesc.GetType() != cTensorDesc.GetType()) - { - MIOPEN_THROW("Datatypes for B and C tensors do not match !"); - } - - auto blens = bTensorDesc.GetLengths(); -#if(MIO_TENSOROCL_DEBUG == 1) - printf("blen:["); - for(auto len : blens) - { - printf(" %lu", len); - } - printf("]\n"); -#endif - auto clens = cTensorDesc.GetLengths(); - - if(clens.size() > 5) - { - MIOPEN_THROW("Tensor dimension larger than 5: " + std::to_string(clens.size())); - } - - if(blens.size() != clens.size()) - { - MIOPEN_THROW("Number of dims in B and C Tensors do not match: " + - std::to_string(blens.size()) + ", " + std::to_string(clens.size())); - } - - if(!nonStandardSquash) - { - for(std::size_t i = 0; i < clens.size(); i++) - { - if(blens[i] != 1 && blens[i] != clens[i]) - { - MIOPEN_THROW("BTensor dim != 1 && BTensor dim != CTensor dim: " + - std::to_string(i)); - } - } - } - else - { - // non standard behavior because blens[1] can be not equalt to clens[1] - if(!(clens.size() == 3 && blens[0] == 1 && clens[0] == 1 && blens[2] == clens[2])) - { - MIOPEN_THROW("Non standard squashed operation supported only for 3d tensors and for " - "the specific configuration"); - } - } - - auto bsize = blens.size(); - if(bsize == 3) - { - OpTensor3d(handle, - tensorOp, - alpha0, - aTensorDesc, - ATensor, - alpha1, - bTensorDesc, - BTensor, - beta, - cTensorDesc, - CTensor, - Aoffset, - Boffset, - Coffset, - nonStandardSquash); - } - else if(bsize == 4) - { - OpTensor4d(handle, - tensorOp, - alpha0, - aTensorDesc, - ATensor, - alpha1, - bTensorDesc, - BTensor, - beta, - cTensorDesc, - CTensor, - Aoffset, - Boffset, - Coffset); - } - else - { - OpTensorOther(handle, - tensorOp, - alpha0, - aTensorDesc, - ATensor, - alpha1, - bTensorDesc, - BTensor, - beta, - cTensorDesc, - CTensor, - Aoffset, - Boffset, - Coffset); - } -} - -struct two_exp_ceiling_t -{ - std::size_t operator()(std::size_t n) const - { - assert(n > 0); - - std::size_t i = 1; - - n--; - while(n != 0) - { - i *= 2; - n /= 2; - } - - return i; - } -}; - -static std::vector get_worker_sizes(const std::vector& data_sizes) -{ - const std::size_t dim = data_sizes.size(); - - std::vector worker_sizes(dim); - - std::transform(data_sizes.begin(), data_sizes.end(), worker_sizes.begin(), two_exp_ceiling_t{}); - - std::size_t wgd = std::accumulate( - worker_sizes.begin(), worker_sizes.end(), std::size_t{1}, std::multiplies()); - - if(wgd > 65536) - { - std::size_t n = wgd / 65536; - - int i = 0; - while(n > 1 && i < dim) - { - std::size_t size_old = worker_sizes[i]; - worker_sizes[i] = (size_old - 1) / n + 1; - n /= size_old / worker_sizes[i]; - ++i; - } - } - - return worker_sizes; -} - -void SetTensor(const Handle& 
handle,
-               const TensorDescriptor& yDesc,
-               Data_t y,
-               const void* alpha,
-               const int offset)
-{
-    if(y == nullptr || alpha == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm);
-    }
-
-    const TensorDescriptor yDesc_flat = GetFlattenedTensorDescriptor(yDesc);
-
-#ifndef NDEBUG
-    if(yDesc.GetNumDims() != yDesc_flat.GetNumDims())
-    {
-        MIOPEN_LOG_I2("real descriptor: " << yDesc);
-        MIOPEN_LOG_I2("flat descriptor: " << yDesc_flat);
-    }
-#endif
-
-    const std::size_t yDim_flat = yDesc_flat.GetNumDims();
-
-    assert(yDim_flat > 0 && yDim_flat <= 5);
-
-    std::string kernel_name = "SubTensorOpWithScalar" + std::to_string(yDim_flat) + "d";
-
-    const miopenDataType_t dataType = yDesc_flat.GetType();
-
-    std::string network_config = "set " + std::to_string(dataType);
-    for(auto& len : yDesc_flat.GetLengths())
-    {
-        network_config += " " + std::to_string(len);
-    }
-
-    auto&& kernels = handle.GetKernels(kernel_name, network_config);
-
-    KernelInvoke kernel;
-
-    if(!kernels.empty())
-    {
-        kernel = kernels.front();
-    }
-    else
-    {
-        std::string program_name = "MIOpenSubTensorOpWithScalarKernel.cl";
-
-        std::vector<std::size_t> worker_sizes = get_worker_sizes(yDesc_flat.GetLengths());
-
-        std::size_t wgd = std::accumulate(worker_sizes.begin(),
-                                          worker_sizes.end(),
-                                          std::size_t{1},
-                                          std::multiplies<std::size_t>());
-
-        std::size_t wld = 256 < wgd ? 256 : wgd;
-        std::stringstream ss;
-        ss << "-DSUBTENSOR_OP_WITH_SCALAR=SUBTENSOR_OP_WITH_SCALAR_SET"
-           << GetDataTypeKernelParams(dataType);
-        for(int i = 0; i < yDim_flat; ++i)
-        {
-            ss << " -DWORK_LENGTH_" << std::to_string(i) << "=" << std::to_string(worker_sizes[i]);
-        }
-
-        kernel = handle.AddKernel(kernel_name,
-                                  network_config,
-                                  program_name,
-                                  kernel_name,
-                                  {wld, 1, 1},
-                                  {wgd, 1, 1},
-                                  ss.str());
-    }
-
-    switch(yDim_flat)
-    {
-    case 1: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]));
-        });
-
-        break;
-    }
-    case 2: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]));
-        });
-
-        break;
-    }
-    case 3: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]));
-        });
-
-        break;
-    }
-    case 4: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetStrides()[3]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[3]));
-        });
-
-        break;
-    }
-    case 5: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetStrides()[3]),
-                   static_cast<int>(yDesc_flat.GetStrides()[4]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[3]),
-                   static_cast<int>(yDesc_flat.GetLengths()[4]));
-        });
-
-        break;
-    }
-    default: assert(false);
-    }
-}
-
-void ScaleTensor(const Handle& handle,
-                 const TensorDescriptor& yDesc,
-                 Data_t y,
-                 const void* alpha,
-                 const int offset)
-{
-    if(y == nullptr || alpha == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm);
-    }
-
-    const TensorDescriptor yDesc_flat = GetFlattenedTensorDescriptor(yDesc);
-
-#ifndef NDEBUG
-    if(yDesc.GetNumDims() != yDesc_flat.GetNumDims())
-    {
-        MIOPEN_LOG_I2("real descriptor: " << yDesc);
-        MIOPEN_LOG_I2("flat descriptor: " << yDesc_flat);
-    }
-#endif
-
-    const std::size_t yDim_flat = yDesc_flat.GetNumDims();
-
-    assert(yDim_flat > 0 && yDim_flat <= 5);
-
-    const miopenDataType_t dataType = yDesc_flat.GetType();
-
-    if(!(dataType == miopenHalf     //
-         || dataType == miopenFloat //
-         || dataType == miopenInt32 //
-         || dataType == miopenDouble))
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "ScaleTensor: unsupported data type.");
-    }
-
-    std::string kernel_name = "SubTensorOpWithScalar" + std::to_string(yDim_flat) + "d";
-
-    const std::vector<std::size_t>& lens = yDesc_flat.GetLengths();
-
-    std::string network_config = "scale " + std::to_string(yDesc_flat.GetType());
-    for(auto& len : lens)
-    {
-        network_config += " " + std::to_string(len);
-    }
-
-    auto&& kernels = handle.GetKernels(kernel_name, network_config);
-
-    KernelInvoke kernel;
-
-    if(!kernels.empty())
-    {
-        kernel = kernels.front();
-    }
-    else
-    {
-        std::string program_name = "MIOpenSubTensorOpWithScalarKernel.cl";
-
-        std::vector<std::size_t> worker_sizes = get_worker_sizes(lens);
-
-        std::size_t wgd = std::accumulate(worker_sizes.begin(),
-                                          worker_sizes.end(),
-                                          std::size_t{1},
-                                          std::multiplies<std::size_t>());
-
-        std::size_t wld = 256 < wgd ? 256 : wgd;
-
-        std::string parms = "-DSUBTENSOR_OP_WITH_SCALAR=SUBTENSOR_OP_WITH_SCALAR_MULTIPLY" +
-                            GetDataTypeKernelParams(dataType);
-        for(int i = 0; i < yDim_flat; ++i)
-        {
-            parms += " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]);
-        }
-
-        kernel = handle.AddKernel(kernel_name,
-                                  network_config,
-                                  program_name,
-                                  kernel_name,
-                                  {wld, 1, 1},
-                                  {wgd, 1, 1},
-                                  parms);
-    }
-
-    switch(yDim_flat)
-    {
-    case 1: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]));
-        });
-
-        break;
-    }
-    case 2: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]));
-        });
-
-        break;
-    }
-    case 3: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]));
-        });
-
-        break;
-    }
-    case 4: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetStrides()[3]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[3]));
-        });
-
-        break;
-    }
-    case 5: {
-        visit_float(dataType, [&](auto as_float) {
-            kernel(y,
-                   *as_float(alpha),
-                   offset,
-                   static_cast<int>(yDesc_flat.GetStrides()[0]),
-                   static_cast<int>(yDesc_flat.GetStrides()[1]),
-                   static_cast<int>(yDesc_flat.GetStrides()[2]),
-                   static_cast<int>(yDesc_flat.GetStrides()[3]),
-                   static_cast<int>(yDesc_flat.GetStrides()[4]),
-                   static_cast<int>(yDesc_flat.GetLengths()[0]),
-                   static_cast<int>(yDesc_flat.GetLengths()[1]),
-                   static_cast<int>(yDesc_flat.GetLengths()[2]),
-                   static_cast<int>(yDesc_flat.GetLengths()[3]),
-                   static_cast<int>(yDesc_flat.GetLengths()[4]));
-        });
-
-        break;
-    }
-    default: assert(false);
-    }
-}
-
-void CopyTensor(const Handle& handle,
-                const TensorDescriptor& srcDesc,
-                ConstData_t src,
-                const TensorDescriptor& dstDesc,
-                Data_t dst,
-                int srcOffset,
-                int dstOffset,
-                bool forseAsync)
-{
-    if(src == nullptr || dst == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Null pointer for tensor.");
-    }
-
-    if(srcDesc.GetType() != dstDesc.GetType())
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Tensor types do not match.");
-    }
-
-    if(srcDesc.GetLengths() != dstDesc.GetLengths())
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match.");
-    }
-
-    auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(srcDesc, dstDesc);
-    const TensorDescriptor& srcDesc_flat = std::get<0>(flat_descriptors);
-    const TensorDescriptor& dstDesc_flat = std::get<1>(flat_descriptors);
-
-#ifndef NDEBUG
-    if(srcDesc.GetNumDims() != srcDesc_flat.GetNumDims())
-    {
-        MIOPEN_LOG_I2("src real descriptor: " << srcDesc);
-        MIOPEN_LOG_I2("src flat descriptor: " << srcDesc_flat);
-        MIOPEN_LOG_I2("dst real descriptor: " << dstDesc);
-        MIOPEN_LOG_I2("dst flat descriptor: " << dstDesc_flat);
-    }
-#endif
-
-    std::size_t srcDim_flat = srcDesc_flat.GetNumDims();
-
-    if(srcDim_flat < 1 || srcDim_flat > 5)
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension sizes unsupported.");
-    }
-
-    if(forseAsync || srcOffset > 0 || dstOffset > 0 ||
-       (!(srcDesc_flat.IsPacked() && dstDesc_flat.IsPacked())))
-    {
-        std::string kernel_name = "SubTensorOpWithSubTensor" + std::to_string(srcDim_flat) + "d";
-
-        const std::vector<std::size_t>& lens = srcDesc_flat.GetLengths();
-
-        std::string network_config = "copy " + std::to_string(srcDesc_flat.GetType());
-        for(auto& len : lens)
-        {
-            network_config += " " + std::to_string(len);
-        }
-
-        auto&& kernels = handle.GetKernels(kernel_name, network_config);
-
-        KernelInvoke kernel;
-
-        if(!kernels.empty())
-        {
-            kernel = kernels.front();
-        }
-        else
-        {
-            std::string program_name = "MIOpenSubTensorOpWithSubTensorKernel.cl";
-
-            std::vector<std::size_t> worker_sizes = get_worker_sizes(lens);
-
-            std::size_t wgd = std::accumulate(worker_sizes.begin(),
-                                              worker_sizes.end(),
-                                              std::size_t{1},
-                                              std::multiplies<std::size_t>());
-
-            std::size_t wld = 256 < wgd ? 256 : wgd;
-
-            std::string parms = "-DSUBTENSOR_OP_WITH_SUBTENSOR=SUBTENSOR_OP_WITH_SUBTENSOR_COPY" +
-                                GetDataTypeKernelParams(srcDesc_flat.GetType());
-            for(std::size_t i = 0; i < srcDim_flat; ++i)
-            {
-                parms +=
-                    " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]);
-            }
-
-            kernel = handle.AddKernel(kernel_name,
-                                      network_config,
-                                      program_name,
-                                      kernel_name,
-                                      {wld, 1, 1},
-                                      {wgd, 1, 1},
-                                      parms);
-        }
-
-        switch(srcDim_flat)
-        {
-        case 1: {
-            kernel(src,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]));
-
-            break;
-        }
-        case 2: {
-            kernel(src,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]));
-
-            break;
-        }
-        case 3: {
-            kernel(src,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]));
-
-            break;
-        }
-        case 4: {
-            kernel(src,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[3]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[3]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[3]));
-
-            break;
-        }
-        case 5: {
-            kernel(src,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[3]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[4]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[3]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[4]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[3]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[4]));
-
-            break;
-        }
-        default: assert(false);
-        }
-    }
-    else
-    {
-        handle.Copy(src, dst, srcDesc_flat.GetElementSize() * GetTypeSize(srcDesc_flat.GetType()));
-    }
-}
-
-std::string GetCastTensorBuildOptionFromType(const std::string& buildOption, miopenDataType_t type)
-{
-    std::string option(buildOption);
-    switch(type)
-    {
-    case miopenInt8: return option += "0";
-    case miopenInt32: return option += "1";
-    case miopenHalf: return option += "2";
-    case miopenFloat: return option += "3";
-    case miopenBFloat16: return option += "4";
-    case miopenFloat8:
-        MIOPEN_THROW(miopenStatusBadParm, "miopenFloat8 data type not supported in cast tensor.");
-    case miopenBFloat8:
-        MIOPEN_THROW(miopenStatusBadParm, "miopenBFloat8 data type not supported in cast tensor.");
-    case miopenDouble:
-        // TODO
-        MIOPEN_THROW(miopenStatusBadParm, "miopenDouble data type not supported in cast tensor.");
-    case miopenInt64:
-        MIOPEN_THROW(miopenStatusBadParm, "miopenInt64 data type not supported in cast tensor.");
-    default: MIOPEN_THROW(miopenStatusBadParm, "Invalid data type in cast tensor desc.");
-    }
-}
-
-void CastTensor(const Handle& handle,
-                const void* alpha,
-                const bool clamping,
-                const TensorDescriptor& srcDesc,
-                ConstData_t src,
-                const TensorDescriptor& dstDesc,
-                Data_t dst,
-                int srcOffset,
-                int dstOffset)
-{
-    if(src == nullptr || dst == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Null pointer for tensor.");
-    }
-
-    if(srcDesc.GetLengths() != dstDesc.GetLengths())
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match.");
-    }
-
-    auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(srcDesc, dstDesc);
-    const TensorDescriptor& srcDesc_flat = std::get<0>(flat_descriptors);
-    const TensorDescriptor& dstDesc_flat = std::get<1>(flat_descriptors);
-
-#ifndef NDEBUG
-    if(srcDesc.GetNumDims() != srcDesc_flat.GetNumDims())
-    {
-        MIOPEN_LOG_I2("src real descriptor: " << srcDesc);
-        MIOPEN_LOG_I2("src flat descriptor: " << srcDesc_flat);
-        MIOPEN_LOG_I2("dst real descriptor: " << dstDesc);
-        MIOPEN_LOG_I2("dst flat descriptor: " << dstDesc_flat);
-    }
-#endif
-
-    std::size_t srcDim_flat = srcDesc_flat.GetNumDims();
-
-    if(srcDim_flat < 1 || srcDim_flat > 5)
-    {
-        MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension sizes unsupported.");
-    }
-
-    auto miopen_alpha = *(static_cast<const float*>(alpha));
-
-    if(srcDesc.GetType() == dstDesc.GetType() && srcOffset == 0 && dstOffset == 0 &&
-       srcDesc_flat.IsPacked() && dstDesc_flat.IsPacked() && float_equal(miopen_alpha, 1.0))
-    {
-        handle.Copy(src, dst, srcDesc_flat.GetElementSize() * GetTypeSize(srcDesc_flat.GetType()));
-    }
-    else
-    {
-        std::string kernel_name = "SubTensorOpWithCastTensor" + std::to_string(srcDim_flat) + "d";
-
-        const std::vector<std::size_t>& lens = srcDesc_flat.GetLengths();
-
-        // TODO: make proper network config
-        std::string network_config = "cast " + std::to_string(srcDesc_flat.GetType()) +
-                                     std::to_string(dstDesc_flat.GetType());
-        for(auto& len : lens)
-        {
-            network_config += " " + std::to_string(len);
-        }
-
-        auto&& kernels = handle.GetKernels(kernel_name, network_config);
-        KernelInvoke kernel;
-
-        if(!kernels.empty())
-        {
-            kernel = kernels.front();
-        }
-        else
-        {
-            std::string program_name = "MIOpenSubTensorOpWithCastTensorKernel.cl";
-
-            std::vector<std::size_t> worker_sizes = get_worker_sizes(lens);
-
-            std::size_t wgd = std::accumulate(worker_sizes.begin(),
-                                              worker_sizes.end(),
-                                              std::size_t{1},
-                                              std::multiplies<std::size_t>());
-
-            std::size_t wld = 256 < wgd ? 256 : wgd;
-
-            std::string parms =
-                GetCastTensorBuildOptionFromType(" -DMIOPEN_SRC_TYPE=", srcDesc_flat.GetType()) +
-                GetCastTensorBuildOptionFromType(" -DMIOPEN_DST_TYPE=", dstDesc_flat.GetType());
-
-            for(std::size_t i = 0; i < srcDim_flat; ++i)
-            {
-                parms +=
-                    " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]);
-            }
-
-            if(dstDesc_flat.GetType() == miopenBFloat16)
-            {
-                parms += " -DMIOPEN_USE_RNE_BFLOAT16=1";
-            }
-
-            kernel = handle.AddKernel(kernel_name,
-                                      network_config,
-                                      program_name,
-                                      kernel_name,
-                                      {wld, 1, 1},
-                                      {wgd, 1, 1},
-                                      parms);
-        }
-
-        const int clamping_arg = clamping ? 1 : 0;
-        switch(srcDim_flat)
-        {
-        case 1: {
-            kernel(src,
-                   miopen_alpha,
-                   clamping_arg,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]));
-
-            break;
-        }
-        case 2: {
-            kernel(src,
-                   miopen_alpha,
-                   clamping_arg,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]));
-
-            break;
-        }
-        case 3: {
-            kernel(src,
-                   miopen_alpha,
-                   clamping_arg,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]));
-
-            break;
-        }
-        case 4: {
-            kernel(src,
-                   miopen_alpha,
-                   clamping_arg,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[3]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[3]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[3]));
-
-            break;
-        }
-        case 5: {
-            kernel(src,
-                   miopen_alpha,
-                   clamping_arg,
-                   srcOffset,
-                   static_cast<int>(srcDesc_flat.GetStrides()[0]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[1]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[2]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[3]),
-                   static_cast<int>(srcDesc_flat.GetStrides()[4]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[0]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[1]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[2]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[3]),
-                   static_cast<int>(srcDesc_flat.GetLengths()[4]),
-                   dst,
-                   dstOffset,
-                   static_cast<int>(dstDesc_flat.GetStrides()[0]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[1]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[2]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[3]),
-                   static_cast<int>(dstDesc_flat.GetStrides()[4]));
-
-            break;
-        }
-        default: assert(false);
-        }
-    }
-}
-
-void TransformTensor(const Handle& handle,
-                     const void* alpha,
-                     const TensorDescriptor& xDesc,
-                     ConstData_t x,
-                     const void* beta,
-                     const TensorDescriptor& yDesc,
-                     Data_t y,
-                     size_t Xoffset,
-                     size_t Yoffset)
-{
-    if(x == nullptr || y == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm);
-    }
-
-    if(alpha == nullptr || beta == nullptr)
-    {
-        MIOPEN_THROW(miopenStatusBadParm);
-    }
-
-    auto x_len = xDesc.GetLengths();
-    auto y_len = yDesc.GetLengths();
-
-    if(x_len.size() != y_len.size())
-    {
-        MIOPEN_THROW("Tensor dimension must be the same");
-    }
-
-    if(x_len[0] != y_len[0])
-    {
-        MIOPEN_THROW("Tensor x and y batch sizes do not match");
-    }
-
-    const auto is_alpha_one = float_equal(*(static_cast<const float*>(alpha)), 1);
-    const auto is_beta_zero = float_equal(*(static_cast<const float*>(beta)), 0);
-
-    if(xDesc.GetType() == miopenInt8 && yDesc.GetType() == miopenInt8 && x_len.size() >= 3)
-    {
-        if(x_len[1] <= y_len[1])
-        {
-            if(x_len[1] <= (y_len[1] - 4) || y_len[1] % 4 != 0)
-            {
-                MIOPEN_THROW("Invalid y channel size");
-            }
-
-            int8_t zero = 0;
-            SetTensor(handle, yDesc, y, &zero);
-        }
-        else if(x_len[1] % 4 != 0)
-        {
-            MIOPEN_THROW("Invalid x channel size");
-        }
-
-        size_t batch_n = x_len[0];
-
-        x_len[0] = 1;
-        y_len[0] = 1;
-
-        miopen::TensorDescriptor x_batch_desc, y_batch_desc;
-        x_batch_desc = miopen::TensorDescriptor(miopenInt8, x_len);
-        y_batch_desc = miopen::TensorDescriptor(miopenInt8, y_len);
-
-        size_t x_batch_sz = x_batch_desc.GetElementSize();
-        size_t y_batch_sz = y_batch_desc.GetElementSize();
-
-        for(size_t i = 0; i < batch_n; i++)
-        {
-            size_t x_offset = i * x_batch_sz;
-            size_t y_offset = i * y_batch_sz;
-
-            if(is_alpha_one && is_beta_zero)
-            {
-                CopyTensor(handle,
-                           ((x_len[1] <= y_len[1]) ? x_batch_desc : y_batch_desc),
-                           x,
-                           ((x_len[1] <= y_len[1]) ? x_batch_desc : y_batch_desc),
-                           y,
-                           x_offset,
-                           y_offset);
-            }
-            else
-            {
-                MIOPEN_THROW(miopenStatusNotImplemented,
-                             "y=alpha*x+beta*y is not supported for int8 yet");
-            }
-        }
-    }
-    else
-    {
-        auto x_y_len          = boost::combine(x_len, y_len);
-        bool same_spatial_len = std::all_of(x_y_len.begin(), x_y_len.end(), [](auto v) {
-            return boost::get<0>(v) == boost::get<1>(v);
-        });
-
-        if(!same_spatial_len)
-        {
-            MIOPEN_THROW("Tensor x and y spatial sizes do not match");
-        }
-
-        auto flat_descriptors              = GetConsistentFlattenedTensorDescriptors(xDesc, yDesc);
-        const TensorDescriptor& xDesc_flat = std::get<0>(flat_descriptors);
-        const TensorDescriptor& yDesc_flat = std::get<1>(flat_descriptors);
-
-        if(xDesc.GetNumDims() != xDesc_flat.GetNumDims())
-        {
-            MIOPEN_LOG_I2("x real descriptor: " << xDesc);
-            MIOPEN_LOG_I2("x flat descriptor: " << xDesc_flat);
-        }
-
-        if(yDesc.GetNumDims() != yDesc_flat.GetNumDims())
-        {
-            MIOPEN_LOG_I2("y real descriptor: " << yDesc);
-            MIOPEN_LOG_I2("y flat descriptor: " << yDesc_flat);
-        }
-
-        const std::size_t yDim_flat = yDesc_flat.GetNumDims();
-
-        assert(yDim_flat > 0 && yDim_flat <= 5);
-
-        const miopenDataType_t dataTypex = xDesc_flat.GetType();
-        const miopenDataType_t dataTypey = yDesc_flat.GetType();
-
-        if(!(dataTypex == miopenHalf        //
-             || dataTypex == miopenFloat    //
-             || dataTypex == miopenInt32    //
-             || dataTypex == miopenBFloat16 //
-             || dataTypex == miopenDouble))
-        {
-            MIOPEN_THROW("Tensor x is a unsupported data type");
-        }
-
-        if(!(dataTypey == miopenHalf        //
-             || dataTypey == miopenFloat    //
-             || dataTypey == miopenInt32    //
-             || dataTypey == miopenBFloat16 //
-             || dataTypey == miopenDouble))
-        {
-            MIOPEN_THROW("Tensor y is a unsupported data type");
-        }
-
-        if(dataTypex != dataTypey)
-        {
-            MIOPEN_THROW("Tensor x and y have different data types");
-        }
-
-        std::string kernel_name = "SubTensorOpWithTransform" + std::to_string(yDim_flat) + "d";
-
-        const std::vector<std::size_t>& lens = yDesc_flat.GetLengths();
-
-        std::string network_config = "transform " + std::to_string(yDesc_flat.GetType());
-        for(auto& len : lens)
-        {
-            network_config += "x" + std::to_string(len);
-        }
-
-        if(is_beta_zero)
-            network_config += "xBETA_IS_ZERO";
-        if(is_alpha_one)
-            network_config += "xALPHA_IS_ONE";
-
-        auto&& kernels = handle.GetKernels(kernel_name, network_config);
-
-        KernelInvoke kernel;
-
-        if(!kernels.empty())
-        {
-            kernel = kernels.front();
-        }
-        else
-        {
-            std::string program_name = "MIOpenSubTensorOpWithTransformKernel.cl";
-
-            std::vector<std::size_t> worker_sizes = get_worker_sizes(lens);
-
-            std::size_t wgd = std::accumulate(worker_sizes.begin(),
                                              worker_sizes.end(),
-                                              std::size_t{1},
-                                              std::multiplies<std::size_t>());
-
-            std::size_t wld = 256 < wgd ? 256 : wgd;
-
-            std::string parms =
-                GetDataTypeKernelParams(dataTypey)                                           //
-                + " -DMIOPEN_BETA_IS_ZERO=" + std::to_string(static_cast<int>(is_beta_zero)) //
-                + " -DMIOPEN_ALPHA_IS_ONE=" + std::to_string(static_cast<int>(is_alpha_one));
-
-            for(int i = 0; i < yDim_flat; ++i)
-            {
-                parms +=
-                    " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]);
-            }
-
-            kernel = handle.AddKernel(kernel_name,
-                                      network_config,
-                                      program_name,
-                                      kernel_name,
-                                      {wld, 1, 1},
-                                      {wgd, 1, 1},
-                                      parms);
-        }
-
-        switch(yDim_flat)
-        {
-        case 1: {
-            visit_float(dataTypey, [&](auto as_float) {
-                kernel(x,
-                       *as_float(alpha),
-                       y,
-                       *as_float(beta),
-                       static_cast<int>(Xoffset),
-                       static_cast<int>(Yoffset),
-                       static_cast<int>(xDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetLengths()[0]));
-            });
-
-            break;
-        }
-        case 2: {
-            visit_float(dataTypey, [&](auto as_float) {
-                kernel(x,
-                       *as_float(alpha),
-                       y,
-                       *as_float(beta),
-                       static_cast<int>(Xoffset),
-                       static_cast<int>(Yoffset),
-                       static_cast<int>(xDesc_flat.GetStrides()[0]),
-                       static_cast<int>(xDesc_flat.GetStrides()[1]),
-                       static_cast<int>(yDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetStrides()[1]),
-                       static_cast<int>(yDesc_flat.GetLengths()[0]),
-                       static_cast<int>(yDesc_flat.GetLengths()[1]));
-            });
-
-            break;
-        }
-        case 3: {
-            visit_float(dataTypey, [&](auto as_float) {
-                kernel(x,
-                       *as_float(alpha),
-                       y,
-                       *as_float(beta),
-                       static_cast<int>(Xoffset),
-                       static_cast<int>(Yoffset),
-                       static_cast<int>(xDesc_flat.GetStrides()[0]),
-                       static_cast<int>(xDesc_flat.GetStrides()[1]),
-                       static_cast<int>(xDesc_flat.GetStrides()[2]),
-                       static_cast<int>(yDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetStrides()[1]),
-                       static_cast<int>(yDesc_flat.GetStrides()[2]),
-                       static_cast<int>(yDesc_flat.GetLengths()[0]),
-                       static_cast<int>(yDesc_flat.GetLengths()[1]),
-                       static_cast<int>(yDesc_flat.GetLengths()[2]));
-            });
-
-            break;
-        }
-        case 4: {
-            visit_float(dataTypey, [&](auto as_float) {
-                kernel(x,
-                       *as_float(alpha),
-                       y,
-                       *as_float(beta),
-                       static_cast<int>(Xoffset),
-                       static_cast<int>(Yoffset),
-                       static_cast<int>(xDesc_flat.GetStrides()[0]),
-                       static_cast<int>(xDesc_flat.GetStrides()[1]),
-                       static_cast<int>(xDesc_flat.GetStrides()[2]),
-                       static_cast<int>(xDesc_flat.GetStrides()[3]),
-                       static_cast<int>(yDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetStrides()[1]),
-                       static_cast<int>(yDesc_flat.GetStrides()[2]),
-                       static_cast<int>(yDesc_flat.GetStrides()[3]),
-                       static_cast<int>(yDesc_flat.GetLengths()[0]),
-                       static_cast<int>(yDesc_flat.GetLengths()[1]),
-                       static_cast<int>(yDesc_flat.GetLengths()[2]),
-                       static_cast<int>(yDesc_flat.GetLengths()[3]));
-            });
-
-            break;
-        }
-        case 5: {
-            visit_float(dataTypey, [&](auto as_float) {
-                kernel(x,
-                       *as_float(alpha),
-                       y,
-                       *as_float(beta),
-                       static_cast<int>(Xoffset),
-                       static_cast<int>(Yoffset),
-                       static_cast<int>(xDesc_flat.GetStrides()[0]),
-                       static_cast<int>(xDesc_flat.GetStrides()[1]),
-                       static_cast<int>(xDesc_flat.GetStrides()[2]),
-                       static_cast<int>(xDesc_flat.GetStrides()[3]),
-                       static_cast<int>(xDesc_flat.GetStrides()[4]),
-                       static_cast<int>(yDesc_flat.GetStrides()[0]),
-                       static_cast<int>(yDesc_flat.GetStrides()[1]),
-                       static_cast<int>(yDesc_flat.GetStrides()[2]),
-                       static_cast<int>(yDesc_flat.GetStrides()[3]),
-                       static_cast<int>(yDesc_flat.GetStrides()[4]),
-                       static_cast<int>(yDesc_flat.GetLengths()[0]),
-                       static_cast<int>(yDesc_flat.GetLengths()[1]),
-                       static_cast<int>(yDesc_flat.GetLengths()[2]),
-                       static_cast<int>(yDesc_flat.GetLengths()[3]),
-                       static_cast<int>(yDesc_flat.GetLengths()[4]));
-            });
-
-            break;
-        }
-        default: assert(false);
-        }
-    }
-}
-
-} // namespace miopen
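
Side note on the deletions above: each of the removed functions built its kernel name, network config, and launch geometry inline, then dispatched through a hand-written per-rank switch. In the solver scheme that replaces them (the new files below), a call site only packs operand pointers and scaling factors; the per-rank logic lives in the solvers. A minimal sketch of that packing, with placeholder buffers and scalar values that are not taken from this patch:

    // Hypothetical call site. ATensor/BTensor/CTensor are device buffers
    // obtained elsewhere; the chosen solver's invoker later unpacks this
    // exact struct via raw_params.CastTo<miopen::tensorOp::InvokeParams>().
    float alpha0 = 1.0f, alpha1 = 1.0f, beta = 0.0f;
    const miopen::tensorOp::InvokeParams params{&alpha0, ATensor,
                                                &alpha1, BTensor,
                                                &beta,   CTensor,
                                                0, 0, 0}; // A/B/C element offsets
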
diff --git a/src/solver/tensorOp/Op1dTensorGeneric.cpp b/src/solver/tensorOp/Op1dTensorGeneric.cpp
new file mode 100644
index 0000000000..1b8ea569b8
--- /dev/null
+++ b/src/solver/tensorOp/Op1dTensorGeneric.cpp
@@ -0,0 +1,167 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+
+#include "tensor_op_helpers.hpp"
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op1dTensorGeneric::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                     const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& alens       = aTensorDesc.GetLengths();
+    auto asize              = alens.size();
+
+    if(asize == 1)
+    {
+        return true;
+    }
+
+    return false;
+}
+
+std::size_t Op1dTensorGeneric::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution
+Op1dTensorGeneric::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                               const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const size_t b_n = bTensorDesc.GetLengths()[0];
+    const size_t c_n = cTensorDesc.GetLengths()[0];
+
+    const size_t a_nstrides = aTensorDesc.GetStrides()[0];
+    const size_t b_nstrides = bTensorDesc.GetStrides()[0];
+    const size_t c_nstrides = cTensorDesc.GetStrides()[0];
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+    bool fit_into_int          = aTensorDesc.AllDimsFitIntoInt();
+
+    size_t local_threads = 256;
+    size_t max_num_wg    = 4096;
+
+    auto num_wg           = std::clamp(c_n / local_threads, size_t(1), size_t(max_num_wg));
+    size_t global_threads = num_wg * local_threads;
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{global_threads, 1, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, true);
+
+    build_params.Define("USE_1D_TENSOR_GENERIC");
+
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::HIP{});
+    kernel.kernel_file  = "MIOpenTensorKernelsHip.cpp";
+    kernel.kernel_name  = "Op1dTensorGeneric";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory =
+        [data_type, fit_into_int, b_n, c_n, a_nstrides, b_nstrides, c_nstrides](
+            const std::vector<Kernel> kernels) {
+            return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+                decltype(auto) kernel = handle_.Run(kernels.front());
+                decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+                visit_float(data_type, [&](auto as_float) {
+                    auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                    auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                    auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                    if(fit_into_int)
+                    {
+                        kernel(params.ATensor,
+                               params.BTensor,
+                               params.CTensor,
+                               static_cast<uint32_t>(params.Aoffset),
+                               static_cast<uint32_t>(params.Boffset),
+                               static_cast<uint32_t>(params.Coffset),
+                               static_cast<uint32_t>(a_nstrides),
+                               static_cast<uint32_t>(b_n == 1 ? 0 : b_nstrides),
+                               static_cast<uint32_t>(c_nstrides),
+                               miopen_alpha0,
+                               miopen_alpha1,
+                               miopen_beta,
+                               static_cast<uint32_t>(c_n),
+                               !float_equal(miopen_beta, 0.0));
+                    }
+                    else
+                    {
+                        kernel(params.ATensor,
+                               params.BTensor,
+                               params.CTensor,
+                               static_cast<uint64_t>(params.Aoffset),
+                               static_cast<uint64_t>(params.Boffset),
+                               static_cast<uint64_t>(params.Coffset),
+                               static_cast<uint64_t>(a_nstrides),
+                               static_cast<uint64_t>(b_n == 1 ? 0 : b_nstrides),
+                               static_cast<uint64_t>(c_nstrides),
+                               miopen_alpha0,
+                               miopen_alpha1,
+                               miopen_beta,
+                               static_cast<uint64_t>(c_n),
+                               !float_equal(miopen_beta, 0.0));
+                    }
+                });
+            };
+        };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
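
The two kernel(...) branches above differ only in the width of the index arguments: AllDimsFitIntoInt() lets the solver use 32-bit indexing when every extent and stride is small enough, sparing 64-bit address arithmetic on the device. Semantically, the generic op is a strided, broadcasting combine of A and B into C. A host-side reference sketch of that shape of computation, assuming addition as the element op (the actual op is selected by the build parameters):

    #include <cstddef>

    // Reference sketch only, not the kernel. A b_stride of 0 (set above when
    // b_n == 1) makes every iteration reread B[0], which is how a length-1 B
    // is broadcast across C.
    void Ref1dTensorOp(const float* A, const float* B, float* C, std::size_t n,
                       std::size_t a_stride, std::size_t b_stride, std::size_t c_stride,
                       float alpha0, float alpha1, float beta, bool use_beta)
    {
        for(std::size_t i = 0; i < n; ++i)
        {
            const float acc = alpha0 * A[i * a_stride] + alpha1 * B[i * b_stride];
            C[i * c_stride] = use_beta ? acc + beta * C[i * c_stride] : acc;
        }
    }
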
diff --git a/src/solver/tensorOp/Op2dTensorGeneric.cpp b/src/solver/tensorOp/Op2dTensorGeneric.cpp
new file mode 100644
index 0000000000..37358be46d
--- /dev/null
+++ b/src/solver/tensorOp/Op2dTensorGeneric.cpp
@@ -0,0 +1,180 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#include "tensor_op_helpers.hpp"
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op2dTensorGeneric::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                     const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& alens       = aTensorDesc.GetLengths();
+    auto asize              = alens.size();
+
+    if(asize == 2)
+    {
+        return true;
+    }
+
+    return false;
+}
+
+std::size_t Op2dTensorGeneric::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution
+Op2dTensorGeneric::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                               const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    std::array<size_t, 2> blens;
+    std::array<size_t, 2> clens;
+    std::tie(blens[0], blens[1]) = miopen::tien<2>(bTensorDesc.GetLengths());
+    std::tie(clens[0], clens[1]) = miopen::tien<2>(cTensorDesc.GetLengths());
+
+    std::array<size_t, 2> astrides;
+    std::array<size_t, 2> bstrides;
+    std::array<size_t, 2> cstrides;
+    std::tie(astrides[0], astrides[1]) = miopen::tien<2>(aTensorDesc.GetStrides());
+    std::tie(bstrides[0], bstrides[1]) = miopen::tien<2>(bTensorDesc.GetStrides());
+    std::tie(cstrides[0], cstrides[1]) = miopen::tien<2>(cTensorDesc.GetStrides());
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+    bool fit_into_int          = aTensorDesc.AllDimsFitIntoInt();
+
+    size_t local_threads = 32;
+    size_t max_num_wg    = 4096;
+
+    auto num_wg = std::clamp((clens[0] * clens[1]) / local_threads, size_t(1), size_t(max_num_wg));
+    size_t global_threads = num_wg * local_threads;
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{global_threads, 1, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, true);
+
+    build_params.Define("USE_2D_TENSOR_GENERIC");
+
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::HIP{});
+    kernel.kernel_file  = "MIOpenTensorKernelsHip.cpp";
+    kernel.kernel_name  = "Op2dTensorGeneric";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory = [data_type, fit_into_int, blens, clens, astrides, bstrides, cstrides](
+                                 const std::vector<Kernel> kernels) {
+        return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+            decltype(auto) kernel = handle_.Run(kernels.front());
+            decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+            visit_float(data_type, [&](auto as_float) {
+                auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                if(fit_into_int)
+                {
+                    kernel(params.ATensor,
+                           params.BTensor,
+                           params.CTensor,
+                           static_cast<uint32_t>(params.Aoffset),
+                           static_cast<uint32_t>(params.Boffset),
+                           static_cast<uint32_t>(params.Coffset),
+                           static_cast<uint32_t>(blens[1] == 1 ? clens[1] : blens[1]),
+                           static_cast<uint32_t>(clens[1]),
+                           static_cast<uint32_t>(astrides[0]),
+                           static_cast<uint32_t>(astrides[1]),
+                           static_cast<uint32_t>(blens[0] == 1 ? 0 : bstrides[0]),
+                           static_cast<uint32_t>(blens[1] == 1 ? 0 : bstrides[1]),
+                           static_cast<uint32_t>(cstrides[0]),
+                           static_cast<uint32_t>(cstrides[1]),
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           static_cast<uint32_t>(clens[0]),
+                           !float_equal(miopen_beta, 0.0));
+                }
+                else
+                {
+                    kernel(params.ATensor,
+                           params.BTensor,
+                           params.CTensor,
+                           static_cast<uint64_t>(params.Aoffset),
+                           static_cast<uint64_t>(params.Boffset),
+                           static_cast<uint64_t>(params.Coffset),
+                           static_cast<uint64_t>(blens[1] == 1 ? clens[1] : blens[1]),
+                           static_cast<uint64_t>(clens[1]),
+                           static_cast<uint64_t>(astrides[0]),
+                           static_cast<uint64_t>(astrides[1]),
+                           static_cast<uint64_t>(blens[0] == 1 ? 0 : bstrides[0]),
+                           static_cast<uint64_t>(blens[1] == 1 ? 0 : bstrides[1]),
+                           static_cast<uint64_t>(cstrides[0]),
+                           static_cast<uint64_t>(cstrides[1]),
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           static_cast<uint64_t>(clens[0]),
+                           !float_equal(miopen_beta, 0.0));
+                }
+            });
+        };
+    };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
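
A pattern shared by all of these solvers is the launch-size arithmetic: the work-group count is clamped to a fixed ceiling (4096 here), so oversized tensors are expected to be covered by a grid-stride loop inside the kernel rather than an ever-growing grid. The computation as a standalone sketch, with names mirroring the constants above:

    #include <algorithm>
    #include <cstddef>

    // Returns the flat global size handed to the runtime as vgd[0]; the
    // kernel then strides by this value until total_work is consumed.
    std::size_t GlobalThreads(std::size_t total_work,
                              std::size_t local_threads,
                              std::size_t max_num_wg = 4096)
    {
        const auto num_wg =
            std::clamp(total_work / local_threads, std::size_t(1), max_num_wg);
        return num_wg * local_threads;
    }
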
diff --git a/src/solver/tensorOp/Op2dTensorLite.cpp b/src/solver/tensorOp/Op2dTensorLite.cpp
new file mode 100644
index 0000000000..884dc3ecc4
--- /dev/null
+++ b/src/solver/tensorOp/Op2dTensorLite.cpp
@@ -0,0 +1,188 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#include "tensor_op_helpers.hpp"
+
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op2dTensorLite::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                  const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& alens = aTensorDesc.GetLengths();
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    auto asize = alens.size();
+
+    if(asize == 3)
+    {
+        size_t local_threads = 256;
+        int max_num_wg       = 4096;
+
+        // for naive tensor ops
+        size_t RD_BLCK    = (clens[2] % 4 == 0) ? 4 : (clens[2] % 2 == 0) ? 2 : 1;
+        size_t total_work = std::max(clens[2] / RD_BLCK, size_t(1));
+        size_t grp_sz     = (total_work + local_threads - 1) / local_threads;
+
+        // opencl kernels are no longer supported, fallback to generic case
+        bool lite_applicable = grp_sz <= size_t(max_num_wg);
+
+        bool is_lite = clens[0] == 1 && blens[0] == 1 && alens[0] == 1 &&
+                       (blens[1] == clens[1] || blens[1] == 1) && blens[2] == clens[2];
+
+        if(lite_applicable && is_lite)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+std::size_t Op2dTensorLite::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution Op2dTensorLite::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                                         const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    const size_t a_cstride = aTensorDesc.GetStrides()[1];
+    const size_t b_cstride = bTensorDesc.GetStrides()[1];
+    const size_t c_cstride = cTensorDesc.GetStrides()[1];
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+
+    auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens);
+
+    int max_num_wg = 4096;
+    num_wg         = num_wg > max_num_wg ? max_num_wg : num_wg;
+
+    size_t local_threads = 256;
+
+    // for naive tensor ops
+    auto&& [RD_BLCK, READ_TYPE] = GetRDBLCKandREADTYPE(clens[2], data_type);
+
+    size_t total_work = std::max(clens[2] / RD_BLCK, size_t(1));
+    size_t grp_sz     = (total_work + local_threads - 1) / local_threads;
+
+    grp_sz        = std::min(size_t(max_num_wg), grp_sz);
+    size_t glb_sz = local_threads * grp_sz;
+
+    size_t local_threads2 = 64;
+    size_t total_work2    = clens[1];
+    size_t grp_sz2        = (total_work2 + local_threads2 - 1) / local_threads2;
+    grp_sz2               = std::min(size_t(max_num_wg / grp_sz), grp_sz2);
+    size_t glb_sz2        = local_threads2 * grp_sz2;
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{glb_sz, glb_sz2, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, false);
+
+    build_params.Define("USE_2D_TENSOR_LITE");
+    build_params.Define("RD_BLCK", std::to_string(RD_BLCK));
+    build_params.Define("READ_TYPE", READ_TYPE);
+
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{});
+    kernel.kernel_file  = "MIOpenTensorKernels.cl";
+    kernel.kernel_name  = "Op2dTensorLite";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory =
+        [data_type, b_c = blens[1], a_cstride, b_cstride, c_cstride, total_work, total_work2](
+            const std::vector<Kernel> kernels) {
+            return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+                decltype(auto) kernel = handle_.Run(kernels.front());
+                decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+                visit_float(data_type, [&](auto as_float) {
+                    auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                    auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                    auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                    kernel(params.ATensor,
+                           static_cast<int>(a_cstride),
+                           params.BTensor,
+                           static_cast<int>(b_cstride),
+                           params.CTensor,
+                           static_cast<int>(c_cstride),
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           static_cast<int>(params.Aoffset),
+                           static_cast<int>(params.Boffset),
+                           static_cast<int>(params.Coffset),
+                           static_cast<int>(total_work),
+                           static_cast<int>(total_work2),
+                           static_cast<int>(!float_equal(miopen_beta, 0.0)),
+                           static_cast<int>(b_c == 1));
+                });
+            };
+        };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
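
Op2dTensorLite's applicability check recomputes the same RD_BLCK that GetSolution later gets from GetRDBLCKandREADTYPE: the widest of 4/2/1 that divides the innermost length, so the kernel can read vectorized. The helper's body is not part of this patch; a plausible sketch of the selection, assuming OpenCL-style floatN type names:

    #include <cstddef>
    #include <string>
    #include <utility>

    // e.g. {4, "float4"}, {2, "float2"}, {1, "float"} -- assumed naming.
    std::pair<std::size_t, std::string> PickReadBlock(std::size_t innermost_len)
    {
        const std::size_t rd_blck =
            (innermost_len % 4 == 0) ? 4 : (innermost_len % 2 == 0) ? 2 : 1;
        return {rd_blck,
                rd_blck == 1 ? std::string("float") : "float" + std::to_string(rd_blck)};
    }
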
diff --git a/src/solver/tensorOp/Op2dTensorSquash.cpp b/src/solver/tensorOp/Op2dTensorSquash.cpp
new file mode 100644
index 0000000000..36021a25a5
--- /dev/null
+++ b/src/solver/tensorOp/Op2dTensorSquash.cpp
@@ -0,0 +1,170 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#include "tensor_op_helpers.hpp"
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op2dTensorSquash::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                    const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& alens = aTensorDesc.GetLengths();
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    auto asize = alens.size();
+
+    if(asize == 3)
+    {
+        bool is_lite = clens[0] == 1 && blens[0] == 1 && alens[0] == 1 &&
+                       (blens[1] == clens[1] || blens[1] == 1) && blens[2] == clens[2];
+
+        bool is_squashed =
+            problem.GetNonStandardSquash() && !is_lite &&
+            (blens[0] == 1 && clens[0] == 1 && clens[1] == 1 && blens[2] == clens[2]);
+
+        if(is_squashed)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+std::size_t Op2dTensorSquash::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution
+Op2dTensorSquash::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                              const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    const size_t b_nstride = bTensorDesc.GetStrides()[1];
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+
+    auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens);
+
+    int max_num_wg = 4096;
+    num_wg         = num_wg > max_num_wg ? max_num_wg : num_wg;
+
+    size_t local_threads = 256;
+
+    // for naive tensor ops
+    auto&& [RD_BLCK, READ_TYPE] = GetRDBLCKandREADTYPE(clens[2], data_type);
+
+    size_t total_work = std::max(clens[2] / RD_BLCK, size_t(1));
+    size_t grp_sz     = (total_work + local_threads - 1) / local_threads;
+
+    grp_sz        = std::min(size_t(max_num_wg), grp_sz);
+    size_t glb_sz = local_threads * grp_sz;
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{glb_sz, 1, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, false);
+
+    build_params.Define("USE_2D_TENSOR_SQUASH");
+    build_params.Define("RD_BLCK", std::to_string(RD_BLCK));
+    build_params.Define("READ_TYPE", READ_TYPE);
+
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{});
+    kernel.kernel_file  = "MIOpenTensorKernels.cl";
+    kernel.kernel_name  = "Op2dTensorSquash";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory =
+        [data_type, b_c = blens[1], b_nstride, total_work](const std::vector<Kernel> kernels) {
+            return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+                decltype(auto) kernel = handle_.Run(kernels.front());
+                decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+                visit_float(data_type, [&](auto as_float) {
+                    auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                    auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                    auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                    kernel(params.ATensor,
+                           params.BTensor,
+                           static_cast<int>(b_c),
+                           static_cast<int>(b_nstride),
+                           params.CTensor,
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           static_cast<int>(params.Aoffset),
+                           static_cast<int>(params.Boffset),
+                           static_cast<int>(params.Coffset),
+                           static_cast<int>(total_work),
+                           static_cast<int>(!float_equal(miopen_alpha0, 0.0)),
+                           static_cast<int>(!float_equal(miopen_alpha1, 0.0)),
+                           static_cast<int>(!float_equal(miopen_beta, 0.0)));
+                });
+            };
+        };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
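
Worth spelling out once, since every generic solver repeats it: passing blens[k] == 1 ? 0 : bstrides[k] zeroes B's stride along broadcast dimensions, so the kernel can index B with the same loop variables as C and still land on the one shared element wherever B has extent 1. Host-side illustration for a single dimension:

    #include <cstddef>

    // With the stride zeroed, the returned element offset stays 0 while the
    // loop variable i walks C's full extent, rereading the same B value.
    std::size_t BroadcastIndex(std::size_t i, std::size_t b_len, std::size_t b_stride)
    {
        return i * (b_len == 1 ? 0 : b_stride);
    }
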
diff --git a/src/solver/tensorOp/Op3dTensorGeneric.cpp b/src/solver/tensorOp/Op3dTensorGeneric.cpp
new file mode 100644
index 0000000000..12e18f933c
--- /dev/null
+++ b/src/solver/tensorOp/Op3dTensorGeneric.cpp
@@ -0,0 +1,158 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#include "tensor_op_helpers.hpp"
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op3dTensorGeneric::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                     const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& alens       = aTensorDesc.GetLengths();
+    auto asize              = alens.size();
+
+    if(asize == 3)
+    {
+        return true;
+    }
+
+    return false;
+}
+
+std::size_t Op3dTensorGeneric::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution
+Op3dTensorGeneric::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                               const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    std::array<size_t, 3> astrides;
+    std::array<size_t, 3> bstrides;
+    std::array<size_t, 3> cstrides;
+    std::tie(astrides[0], astrides[1], astrides[2]) = miopen::tien<3>(aTensorDesc.GetStrides());
+    std::tie(bstrides[0], bstrides[1], bstrides[2]) = miopen::tien<3>(bTensorDesc.GetStrides());
+    std::tie(cstrides[0], cstrides[1], cstrides[2]) = miopen::tien<3>(cTensorDesc.GetStrides());
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+
+    size_t local_threads = 32;
+    size_t max_num_wg    = 4096;
+
+    auto num_wg =
+        std::clamp((clens[0] * clens[1] * clens[2]) / local_threads, size_t(1), size_t(max_num_wg));
+    size_t global_threads = num_wg * local_threads;
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{global_threads, 1, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, false);
+
+    build_params.Define("USE_3D_TENSOR_GENERIC");
+
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::HIP{});
+    kernel.kernel_file  = "MIOpenTensorKernelsHip.cpp";
+    kernel.kernel_name  = "Op3dTensorGeneric";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory =
+        [data_type, blens, clens, astrides, bstrides, cstrides](const std::vector<Kernel> kernels) {
+            return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+                decltype(auto) kernel = handle_.Run(kernels.front());
+                decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+                visit_float(data_type, [&](auto as_float) {
+                    auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                    auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                    auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                    kernel(params.ATensor,
+                           params.BTensor,
+                           params.CTensor,
+                           static_cast<uint64_t>(params.Aoffset),
+                           static_cast<uint64_t>(params.Boffset),
+                           static_cast<uint64_t>(params.Coffset),
+                           static_cast<uint32_t>(blens[1] == 1 ? clens[1] : blens[1]), // b_c,
+                           static_cast<uint32_t>(blens[2] == 1 ? clens[2] : blens[2]), // b_h,
+                           static_cast<uint32_t>(clens[1]),                            // c_c,
+                           static_cast<uint32_t>(clens[2]),                            // c_h,
+                           static_cast<uint32_t>(astrides[0]),                         // a_nstride,
+                           static_cast<uint32_t>(astrides[1]),                         // a_cstride,
+                           static_cast<uint32_t>(astrides[2]),                         // a_hstride,
+                           static_cast<uint32_t>(blens[0] == 1 ? 0 : bstrides[0]),     // b_nstride,
+                           static_cast<uint32_t>(blens[1] == 1 ? 0 : bstrides[1]),     // b_cstride,
+                           static_cast<uint32_t>(blens[2] == 1 ? 0 : bstrides[2]),     // b_hstride,
+                           static_cast<uint32_t>(cstrides[0]),                         // c_nstride,
+                           static_cast<uint32_t>(cstrides[1]),                         // c_cstride,
+                           static_cast<uint32_t>(cstrides[2]),                         // c_hstride,
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           static_cast<uint32_t>(clens[0]),
+                           !float_equal(miopen_beta, 0.0));
+                });
+            };
+        };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
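
The bitmap/work_per_wg pair returned by GetBitmapAndWgInfo and Get4dParams (helpers whose bodies are not part of this patch) steers the 4-D OpenCL kernels. From the code that consumes it, bit k appears to mark that B genuinely spans C's k-th dimension counted from the innermost, which is why Op4dTensorLite below treats bitmap == (1 << 2) as the forward-conv-bias shape (B varying along channels only). A sketch under that assumption:

    #include <array>
    #include <cstddef>

    // Assumed semantics, for orientation only: bit k set when B spans C's
    // k-th dimension counted from the innermost (w = bit 0 ... n = bit 3).
    unsigned BroadcastBitmap(const std::array<std::size_t, 4>& blens,
                             const std::array<std::size_t, 4>& clens)
    {
        unsigned bitmap = 0;
        for(unsigned k = 0; k < 4; ++k)
            if(blens[3 - k] == clens[3 - k])
                bitmap |= (1u << k);
        return bitmap;
    }
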
diff --git a/src/solver/tensorOp/Op4dTensorGeneric.cpp b/src/solver/tensorOp/Op4dTensorGeneric.cpp
new file mode 100644
index 0000000000..146c408491
--- /dev/null
+++ b/src/solver/tensorOp/Op4dTensorGeneric.cpp
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * MIT License
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#include "tensor_op_helpers.hpp"
+#include <miopen/tensorOp/solvers.hpp>
+#include <miopen/tensorOp/invoke_params.hpp>
+#include <miopen/tensor.hpp>
+#include <miopen/kernel_build_params.hpp>
+#include <miopen/float_equal.hpp>
+#include <miopen/datatype.hpp>
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+bool Op4dTensorGeneric::IsApplicable([[maybe_unused]] const ExecutionContext& context,
+                                     const miopen::tensorOp::ProblemDescription& problem) const
+{
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& alens       = aTensorDesc.GetLengths();
+    auto asize              = alens.size();
+
+    if(asize == 4)
+    {
+        return true;
+    }
+
+    return false;
+}
+
+std::size_t Op4dTensorGeneric::GetWorkspaceSize(
+    [[maybe_unused]] const ExecutionContext& context,
+    [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const
+{
+    return 0;
+}
+
+ConvSolution
+Op4dTensorGeneric::GetSolution([[maybe_unused]] const ExecutionContext& context,
+                               const miopen::tensorOp::ProblemDescription& problem) const
+{
+    auto result = ConvSolution{miopenStatusSuccess};
+
+    const auto& aTensorDesc = problem.GetATensorDesc();
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    std::array<size_t, 4> blens;
+    std::array<size_t, 4> clens;
+    std::tie(blens[0], blens[1], blens[2], blens[3]) = miopen::tien<4>(bTensorDesc.GetLengths());
+    std::tie(clens[0], clens[1], clens[2], clens[3]) = miopen::tien<4>(cTensorDesc.GetLengths());
+
+    std::array<size_t, 4> astrides;
+    std::array<size_t, 4> bstrides;
+    std::array<size_t, 4> cstrides;
+    std::tie(astrides[0], astrides[1], astrides[2], astrides[3]) =
+        miopen::tien<4>(aTensorDesc.GetStrides());
+    std::tie(bstrides[0], bstrides[1], bstrides[2], bstrides[3]) =
+        miopen::tien<4>(bTensorDesc.GetStrides());
+    std::tie(cstrides[0], cstrides[1], cstrides[2], cstrides[3]) =
+        miopen::tien<4>(cTensorDesc.GetStrides());
+
+    miopenDataType_t data_type = bTensorDesc.GetType();
+
+    int max_num_wg = 4096;
+
+    auto&& [num_wg_orig, work_per_wg, incr_wg, bitmap, local_threads, global_threads] =
+        Get4dParams(problem, false);
+
+    const std::array<size_t, 3> vld{local_threads, 1, 1};
+    const std::array<size_t, 3> vgd{global_threads, 1, 1};
+
+    KernelBuildParameters build_params = KernelBuildParameters{};
+
+    GetCommonParams(build_params, problem, false);
+
+    build_params.Define("USE_4D_TENSOR_GENERIC");
+    build_params.Define("MAX_NUM_WG", std::to_string(max_num_wg));
+    auto kernel = KernelInfo{};
+
+    kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{});
+    kernel.kernel_file  = "MIOpenTensorKernels.cl";
+    kernel.kernel_name  = "Op4dTensorGeneric";
+
+    using std::begin, std::end;
+
+    kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld));
+    kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd));
+
+    result.invoker_factory =
+        [data_type, blens, clens, astrides, bstrides, cstrides, work_per_wg, num_wg_orig, bitmap](
+            const std::vector<Kernel> kernels) {
+            return [=](const Handle& handle_, const AnyInvokeParams& raw_params) {
+                decltype(auto) kernel = handle_.Run(kernels.front());
+                decltype(auto) params = raw_params.CastTo<miopen::tensorOp::InvokeParams>();
+
+                visit_float(data_type, [&](auto as_float) {
+                    auto miopen_alpha0 = as_float(*(static_cast<const float*>(params.alpha0)));
+                    auto miopen_alpha1 = as_float(*(static_cast<const float*>(params.alpha1)));
+                    auto miopen_beta   = as_float(*(static_cast<const float*>(params.beta)));
+
+                    kernel(params.ATensor,
+                           static_cast<int>(astrides[0]), // a_nstride,
+                           static_cast<int>(astrides[1]), // a_cstride,
+                           static_cast<int>(astrides[2]), // a_hstride,
+                           params.BTensor,
+                           static_cast<int>(blens[1]),    // b_c,
+                           static_cast<int>(blens[2]),    // b_h,
+                           static_cast<int>(blens[3]),    // b_w,
+                           static_cast<int>(bstrides[0]), // b_nstride,
+                           static_cast<int>(bstrides[1]), // b_cstride,
+                           static_cast<int>(bstrides[2]), // b_hstride,
+                           params.CTensor,
+                           static_cast<int>(clens[1]),    // c_c,
+                           static_cast<int>(clens[2]),    // c_h,
+                           static_cast<int>(clens[3]),    // c_w,
+                           static_cast<int>(cstrides[0]), // c_nstride,
+                           static_cast<int>(cstrides[1]), // c_cstride,
+                           static_cast<int>(cstrides[2]), // c_hstride,
+                           miopen_alpha0,
+                           miopen_alpha1,
+                           miopen_beta,
+                           bitmap,
+                           work_per_wg,
+                           static_cast<int>(params.Aoffset),
+                           static_cast<int>(params.Boffset),
+                           static_cast<int>(params.Coffset),
+                           static_cast<int>(num_wg_orig));
+                });
+            };
+        };
+    result.construction_params.push_back(kernel);
+
+    return result;
+}
+
+} // namespace tensorOp
+
+} // namespace solver
+
+} // namespace miopen
+ * + *******************************************************************************/ +#include "tensor_op_helpers.hpp" +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace solver { + +namespace tensorOp { + +bool Op4dTensorLite::IsApplicable([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + const auto& alens = aTensorDesc.GetLengths(); + const auto& blens = bTensorDesc.GetLengths(); + const auto& clens = cTensorDesc.GetLengths(); + + auto asize = alens.size(); + + if(asize == 4) + { + auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens); + + // quick fix for btensor = <1, 1, 1, 1> + if(bTensorDesc.GetElementSize() == 1) + bitmap = 4; + + bool fwd_conv_bias = (bitmap == (1 << 2)); + + bool packed_tensor = true; + packed_tensor &= aTensorDesc.IsPacked(); + packed_tensor &= bTensorDesc.IsPacked(); + packed_tensor &= cTensorDesc.IsPacked(); + + bool packed_equal_tensor = + packed_tensor && (bTensorDesc.GetElementSize() == cTensorDesc.GetElementSize()); + + if(!fwd_conv_bias && packed_equal_tensor) + { + return true; + } + } + + return false; +} + +std::size_t Op4dTensorLite::GetWorkspaceSize( + [[maybe_unused]] const ExecutionContext& context, + [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const +{ + return 0; +} + +ConvSolution Op4dTensorLite::GetSolution([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + auto result = ConvSolution{miopenStatusSuccess}; + + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + miopenDataType_t data_type = bTensorDesc.GetType(); + + auto&& [num_wg_orig, work_per_wg, incr_wg, bitmap, local_threads, global_threads] = + Get4dParams(problem, true); + + auto&& [RD_BLCK, READ_TYPE] = + GetRDBLCKandREADTYPE(cTensorDesc.GetElementSize(), bTensorDesc.GetType()); + + size_t total_work = std::max(cTensorDesc.GetElementSize() / RD_BLCK, size_t(1)); + + const std::array vld{local_threads, 1, 1}; + const std::array vgd{global_threads, 1, 1}; + + KernelBuildParameters build_params = KernelBuildParameters{}; + + GetCommonParams(build_params, problem, false); + + build_params.Define("USE_4D_TENSOR_LITE"); + build_params.Define("RD_BLCK", std::to_string(RD_BLCK)); + build_params.Define("READ_TYPE", READ_TYPE); + + auto kernel = KernelInfo{}; + + kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{}); + kernel.kernel_file = "MIOpenTensorKernels.cl"; + kernel.kernel_name = "Op4dTensorLite"; + + using std::begin, std::end; + + kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld)); + kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd)); + + result.invoker_factory = [data_type, total_work](const std::vector kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) kernel = handle_.Run(kernels.front()); + decltype(auto) params = raw_params.CastTo(); + + visit_float(data_type, [&](auto as_float) { + auto miopen_alpha0 = as_float(*(static_cast(params.alpha0))); + auto miopen_alpha1 = as_float(*(static_cast(params.alpha1))); + auto miopen_beta = as_float(*(static_cast(params.beta))); + + kernel(params.ATensor, + params.BTensor, + params.CTensor, + miopen_alpha0, + miopen_alpha1, + miopen_beta, + 
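+                       // The scalars above are read through visit_float from
+                       // tensorOp::InvokeParams at run time; total_work below
+                       // was fixed when the solution was constructed, so only
+                       // the offsets vary per invocation.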
static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(total_work), + static_cast(!float_equal(miopen_beta, 0.0))); + }); + }; + }; + result.construction_params.push_back(kernel); + + return result; +} + +} // namespace tensorOp + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/tensorOp/Op5dTensorGeneric.cpp b/src/solver/tensorOp/Op5dTensorGeneric.cpp new file mode 100644 index 0000000000..77126dcaca --- /dev/null +++ b/src/solver/tensorOp/Op5dTensorGeneric.cpp @@ -0,0 +1,174 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + *******************************************************************************/ + +#include "tensor_op_helpers.hpp" +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace solver { + +namespace tensorOp { + +bool Op5dTensorGeneric::IsApplicable([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& alens = aTensorDesc.GetLengths(); + auto asize = alens.size(); + + if(asize == 5) + { + return true; + } + + return false; +} + +std::size_t Op5dTensorGeneric::GetWorkspaceSize( + [[maybe_unused]] const ExecutionContext& context, + [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const +{ + return 0; +} + +ConvSolution +Op5dTensorGeneric::GetSolution([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + auto result = ConvSolution{miopenStatusSuccess}; + + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + const auto& blens = bTensorDesc.GetLengths(); + const auto& clens = cTensorDesc.GetLengths(); + + std::array astrides; + std::array bstrides; + std::array cstrides; + std::tie(astrides[0], astrides[1], astrides[2], astrides[3], astrides[4]) = + miopen::tien<5>(aTensorDesc.GetStrides()); + std::tie(bstrides[0], bstrides[1], bstrides[2], bstrides[3], bstrides[4]) = + miopen::tien<5>(bTensorDesc.GetStrides()); + std::tie(cstrides[0], cstrides[1], cstrides[2], cstrides[3], cstrides[4]) = + miopen::tien<5>(cTensorDesc.GetStrides()); + + miopenDataType_t data_type = bTensorDesc.GetType(); + + auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens); + + int num_wg_orig = num_wg; + int max_num_wg = 4096; + num_wg = num_wg > max_num_wg ? 
max_num_wg : num_wg; + + size_t local_threads = 256; + size_t global_threads = num_wg * local_threads; + + const std::array vld{local_threads, 1, 1}; + const std::array vgd{global_threads, 1, 1}; + + KernelBuildParameters build_params = KernelBuildParameters{}; + + GetCommonParams(build_params, problem, false); + + build_params.Define("USE_5D_TENSOR_GENERIC"); + build_params.Define("MAX_NUM_WG", std::to_string(max_num_wg)); + + auto kernel = KernelInfo{}; + + kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{}); + kernel.kernel_file = "MIOpenTensorKernels.cl"; + kernel.kernel_name = "Op5dTensorGeneric"; + + using std::begin, std::end; + + kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld)); + kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd)); + + result.invoker_factory = + [data_type, blens, clens, astrides, bstrides, cstrides, bitmap, work_per_wg, num_wg_orig]( + const std::vector kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) kernel = handle_.Run(kernels.front()); + decltype(auto) params = raw_params.CastTo(); + + visit_float(data_type, [&](auto as_float) { + auto miopen_alpha0 = as_float(*(static_cast(params.alpha0))); + auto miopen_alpha1 = as_float(*(static_cast(params.alpha1))); + auto miopen_beta = as_float(*(static_cast(params.beta))); + + kernel(params.ATensor, + static_cast(astrides[0]), + static_cast(astrides[1]), + static_cast(astrides[2]), + static_cast(astrides[3]), + params.BTensor, + static_cast(blens[1]), // b_c, + static_cast(blens[2]), // b_d, + static_cast(blens[3]), // b_h, + static_cast(blens[4]), // b_w, + static_cast(bstrides[0]), // b_nstride, + static_cast(bstrides[1]), // b_cstride, + static_cast(bstrides[2]), // b_dstride, + static_cast(bstrides[3]), // b_hstride, + params.CTensor, + static_cast(clens[1]), // c_c, + static_cast(clens[2]), // c_d, + static_cast(clens[3]), // c_h, + static_cast(clens[4]), // c_w, + static_cast(cstrides[0]), // c_nstride, + static_cast(cstrides[1]), // c_cstride, + static_cast(cstrides[2]), // c_dstride, + static_cast(cstrides[3]), // c_hstride, + miopen_alpha0, + miopen_alpha1, + miopen_beta, + bitmap, + work_per_wg, + static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(num_wg_orig)); + }); + }; + }; + result.construction_params.push_back(kernel); + + return result; +} + +} // namespace tensorOp + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/tensorOp/OpTensorFwdBias.cpp b/src/solver/tensorOp/OpTensorFwdBias.cpp new file mode 100644 index 0000000000..2b87b6fbfc --- /dev/null +++ b/src/solver/tensorOp/OpTensorFwdBias.cpp @@ -0,0 +1,219 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#include "tensor_op_helpers.hpp" +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace solver { + +namespace tensorOp { + +bool OpTensorFwdBias::IsApplicable([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + const auto& alens = aTensorDesc.GetLengths(); + const auto& blens = bTensorDesc.GetLengths(); + const auto& clens = cTensorDesc.GetLengths(); + + auto asize = alens.size(); + + if(asize == 4) + { + auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens); + + // quick fix for btensor = <1, 1, 1, 1> + if(bTensorDesc.GetElementSize() == 1) + bitmap = 4; + + bool fwd_conv_bias = (bitmap == (1 << 2)); + + if(fwd_conv_bias) + { + return true; + } + } + return false; +} + +std::size_t OpTensorFwdBias::GetWorkspaceSize( + [[maybe_unused]] const ExecutionContext& context, + [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const +{ + return 0; +} + +ConvSolution OpTensorFwdBias::GetSolution([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + auto result = ConvSolution{miopenStatusSuccess}; + + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + std::array blens; + std::array clens; + std::tie(blens[0], blens[1], blens[2], blens[3]) = miopen::tien<4>(bTensorDesc.GetLengths()); + std::tie(clens[0], clens[1], clens[2], clens[3]) = miopen::tien<4>(cTensorDesc.GetLengths()); + + std::array astrides; + std::array bstrides; + std::array cstrides; + std::tie(astrides[0], astrides[1], astrides[2], astrides[3]) = + miopen::tien<4>(aTensorDesc.GetStrides()); + std::tie(bstrides[0], bstrides[1], bstrides[2], bstrides[3]) = + miopen::tien<4>(bTensorDesc.GetStrides()); + std::tie(cstrides[0], cstrides[1], cstrides[2], cstrides[3]) = + miopen::tien<4>(cTensorDesc.GetStrides()); + + miopenDataType_t data_type = bTensorDesc.GetType(); + + int max_num_wg = 4096; + + auto&& [num_wg_orig, work_per_wg, incr_wg, bitmap, local_threads, global_threads] = + Get4dParams(problem, false); + + const std::array vld{local_threads, 1, 1}; + const std::array vgd{global_threads, 1, 1}; + + bool packed_tensor = true; + packed_tensor &= aTensorDesc.IsPacked(); + packed_tensor &= bTensorDesc.IsPacked(); + packed_tensor &= cTensorDesc.IsPacked(); + + KernelBuildParameters build_params = KernelBuildParameters{}; + + GetCommonParams(build_params, problem, false); + + build_params.Define("MAX_NUM_WG", std::to_string(max_num_wg)); + + auto kernel = KernelInfo{}; + + if(packed_tensor) + { + build_params.Define("USE_FWD_BIAS"); + kernel.kernel_name = "OpTensorFwdBias"; + } + else + { + 
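+        // A/B/C are not all packed here, so the generic variant is selected:
+        // it takes explicit per-dimension strides in its argument list below
+        // instead of assuming contiguous layouts.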
build_params.Define("USE_FWD_BIAS_GENERIC"); + kernel.kernel_name = "OpTensorFwdBiasGeneric"; + } + + kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{}); + kernel.kernel_file = "MIOpenTensorKernels.cl"; + + using std::begin, std::end; + + kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld)); + kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd)); + + result.invoker_factory = [data_type, + blens, + clens, + astrides, + bstrides, + cstrides, + work_per_wg, + num_wg_orig, + incr_wg, + packed_tensor](const std::vector kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) kernel = handle_.Run(kernels.front()); + decltype(auto) params = raw_params.CastTo(); + + visit_float(data_type, [&](auto as_float) { + auto miopen_alpha0 = as_float(*(static_cast(params.alpha0))); + auto miopen_alpha1 = as_float(*(static_cast(params.alpha1))); + auto miopen_beta = as_float(*(static_cast(params.beta))); + + if(packed_tensor) + { // OpTensorFwdBias + kernel(params.ATensor, + params.BTensor, + static_cast(blens[1]), + params.CTensor, + static_cast(clens[0]), + static_cast(cstrides[0]), + static_cast(cstrides[1]), + work_per_wg, + miopen_alpha0, + miopen_alpha1, + miopen_beta, + static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(num_wg_orig), + static_cast(incr_wg)); + } + else + { // OpTensorFwdBiasGeneric + kernel(params.ATensor, + static_cast(astrides[0]), + static_cast(astrides[1]), + static_cast(astrides[2]), + params.BTensor, + static_cast(blens[1]), + static_cast(bstrides[1]), + params.CTensor, + static_cast(clens[0]), + static_cast(clens[3]), + static_cast(cstrides[0]), + static_cast(cstrides[1]), + static_cast(cstrides[2]), + miopen_alpha0, + miopen_alpha1, + miopen_beta, + work_per_wg, + static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(num_wg_orig), + static_cast(incr_wg)); + } + }); + }; + }; + result.construction_params.push_back(kernel); + + return result; +} + +} // namespace tensorOp + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/tensorOp/OpTensorLeadingOnes.cpp b/src/solver/tensorOp/OpTensorLeadingOnes.cpp new file mode 100644 index 0000000000..b105bde69b --- /dev/null +++ b/src/solver/tensorOp/OpTensorLeadingOnes.cpp @@ -0,0 +1,236 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + *******************************************************************************/ +#include "tensor_op_helpers.hpp" +#include +#include +#include +#include +#include +#include + +namespace miopen { + +namespace solver { + +namespace tensorOp { + +bool OpTensorLeadingOnes::IsApplicable([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + const auto& alens = aTensorDesc.GetLengths(); + const auto& blens = bTensorDesc.GetLengths(); + const auto& clens = cTensorDesc.GetLengths(); + + auto asize = alens.size(); + + if(asize == 4) + { + + auto&& [num_wg, work_per_wg, bitmap] = GetBitmapAndWgInfo(blens, clens); + + // quick fix for btensor = <1, 1, 1, 1> + if(bTensorDesc.GetElementSize() == 1) + bitmap = 4; + + bool fwd_conv_bias = (bitmap == (1 << 2)); + + bool packed_tensor = true; + packed_tensor &= aTensorDesc.IsPacked(); + packed_tensor &= bTensorDesc.IsPacked(); + packed_tensor &= cTensorDesc.IsPacked(); + + bool packed_equal_tensor = + packed_tensor && (bTensorDesc.GetElementSize() == cTensorDesc.GetElementSize()); + + // first_not_one is incorrect if btensor size equal to 1 + auto first_not_one = + std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; }); + auto d = std::distance(blens.begin(), first_not_one.base()); + + bool leading_ones = IsBitmapLeadingOnes(bitmap, clens.size(), static_cast(d - 2)); + + if(!fwd_conv_bias && !packed_equal_tensor && leading_ones) + { + return true; + } + } + + return false; +} + +std::size_t OpTensorLeadingOnes::GetWorkspaceSize( + [[maybe_unused]] const ExecutionContext& context, + [[maybe_unused]] const miopen::tensorOp::ProblemDescription& problem) const +{ + return 0; +} + +ConvSolution +OpTensorLeadingOnes::GetSolution([[maybe_unused]] const ExecutionContext& context, + const miopen::tensorOp::ProblemDescription& problem) const +{ + auto result = ConvSolution{miopenStatusSuccess}; + + const auto& aTensorDesc = problem.GetATensorDesc(); + const auto& bTensorDesc = problem.GetBTensorDesc(); + const auto& cTensorDesc = problem.GetCTensorDesc(); + + std::array clens; + std::tie(clens[0], clens[1], clens[2], clens[3]) = miopen::tien<4>(cTensorDesc.GetLengths()); + + std::array astrides; + std::array bstrides; + std::array cstrides; + std::tie(astrides[0], astrides[1], astrides[2], astrides[3]) = + miopen::tien<4>(aTensorDesc.GetStrides()); + std::tie(bstrides[0], bstrides[1], bstrides[2], bstrides[3]) = + miopen::tien<4>(bTensorDesc.GetStrides()); + std::tie(cstrides[0], cstrides[1], cstrides[2], cstrides[3]) = + miopen::tien<4>(cTensorDesc.GetStrides()); + + miopenDataType_t data_type = bTensorDesc.GetType(); + + int max_num_wg = 4096; + + auto&& [num_wg_orig, work_per_wg, incr_wg, bitmap, local_threads, global_threads] = + Get4dParams(problem, false); + + const std::array vld{local_threads, 1, 1}; + const std::array vgd{global_threads, 1, 1}; + + bool packed_tensor = true; + packed_tensor &= aTensorDesc.IsPacked(); + packed_tensor &= bTensorDesc.IsPacked(); + packed_tensor &= cTensorDesc.IsPacked(); + + KernelBuildParameters build_params = 
KernelBuildParameters{}; + + GetCommonParams(build_params, problem, false); + + build_params.Define("MAX_NUM_WG", std::to_string(max_num_wg)); + auto kernel = KernelInfo{}; + + if(packed_tensor) + { + build_params.Define("USE_LEADING_ONES"); + kernel.kernel_name = "OpTensorLeadingOnes"; + } + else + { + build_params.Define("USE_LEADING_ONES_GENERIC"); + kernel.kernel_name = "OpTensorLeadingOnesGeneric"; + } + + kernel.comp_options = build_params.GenerateFor(kbp::OpenCL{}); + kernel.kernel_file = "MIOpenTensorKernels.cl"; + + using std::begin, std::end; + + kernel.l_wk.insert(end(kernel.l_wk), begin(vld), end(vld)); + kernel.g_wk.insert(end(kernel.g_wk), begin(vgd), end(vgd)); + + result.invoker_factory = [data_type, + clens, + astrides, + bstrides, + cstrides, + work_per_wg, + num_wg_orig, + bitmap, + packed_tensor](const std::vector kernels) { + return [=](const Handle& handle_, const AnyInvokeParams& raw_params) { + decltype(auto) kernel = handle_.Run(kernels.front()); + decltype(auto) params = raw_params.CastTo(); + + visit_float(data_type, [&](auto as_float) { + auto miopen_alpha0 = as_float(*(static_cast(params.alpha0))); + auto miopen_alpha1 = as_float(*(static_cast(params.alpha1))); + auto miopen_beta = as_float(*(static_cast(params.beta))); + + if(packed_tensor) + { // OpTensorLeadingOnes + kernel(params.ATensor, + params.BTensor, + params.CTensor, + static_cast(clens[1]), + static_cast(clens[2]), + static_cast(clens[3]), + static_cast(cstrides[0]), + static_cast(cstrides[1]), + work_per_wg, + miopen_alpha0, + miopen_alpha1, + miopen_beta, + static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(num_wg_orig), + bitmap); + } + else + { // OpTensorLeadingOnesGeneric + kernel(params.ATensor, + static_cast(astrides[0]), + static_cast(astrides[1]), + static_cast(astrides[2]), + params.BTensor, + static_cast(bstrides[0]), + static_cast(bstrides[1]), + static_cast(bstrides[2]), + params.CTensor, + static_cast(clens[1]), + static_cast(clens[2]), + static_cast(clens[3]), + static_cast(cstrides[0]), + static_cast(cstrides[1]), + static_cast(cstrides[2]), + miopen_alpha0, + miopen_alpha1, + miopen_beta, + work_per_wg, + static_cast(params.Aoffset), + static_cast(params.Boffset), + static_cast(params.Coffset), + static_cast(num_wg_orig), + bitmap); + } + }); + }; + }; + result.construction_params.push_back(kernel); + + return result; +} + +} // namespace tensorOp + +} // namespace solver + +} // namespace miopen diff --git a/src/solver/tensorOp/tensor_op_helpers.hpp b/src/solver/tensorOp/tensor_op_helpers.hpp new file mode 100644 index 0000000000..cf46c6efe8 --- /dev/null +++ b/src/solver/tensorOp/tensor_op_helpers.hpp @@ -0,0 +1,215 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *******************************************************************************/
+#pragma once
+
+#include
+#include
+#include
+
+#include
+
+namespace miopen {
+
+namespace solver {
+
+namespace tensorOp {
+
+inline void GetCommonParams(KernelBuildParameters& build_params,
+                            const miopen::tensorOp::ProblemDescription& problem,
+                            bool is64bSupported)
+{
+    miopenDataType_t data_type = problem.GetBTensorDesc().GetType();
+
+    build_params.Define("MIOPEN_TYPE", miopen::GetDataType(data_type));
+
+    switch(problem.GetTensorOp())
+    {
+    case 0: build_params.Define("MIOPEN_TENSOR_OP", "miopenAdd"); break;
+    case 1: build_params.Define("MIOPEN_TENSOR_OP", "miopenMul"); break;
+    case 2: build_params.Define("MIOPEN_TENSOR_OP", "miopenMin"); break;
+    case 3: build_params.Define("MIOPEN_TENSOR_OP", "miopenMax"); break;
+    }
+
+    if(is64bSupported && problem.GetATensorDesc().AllDimsFitIntoInt())
+    {
+        build_params.Define("DIM_TYPE", "uint32_t");
+    }
+    else
+    {
+        build_params.Define("DIM_TYPE", "uint64_t");
+    }
+}
+
+inline std::tuple<size_t, std::string> GetRDBLCKandREADTYPE(size_t len, miopenDataType_t type)
+{
+    const std::string data_type = GetDataType(type);
+    size_t RD_BLCK              = (len % 4 == 0) ? 4 : (len % 2 == 0) ? 2 : 1;
+    return std::make_tuple(RD_BLCK,
+                           (RD_BLCK == 1) ? data_type : data_type + std::to_string(RD_BLCK));
+}
+
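+// Worked example of the bitmap encoding produced below: for blens = {1, C, 1, 1}
+// run against clens = {N, C, H, W}, first_not_one finds C, so d == 2 and bit
+// (4 - d) == 2 is set, i.e. bitmap == 0b0100 == (1 << 2), the
+// forward-convolution-bias pattern. The loop then folds B's remaining 1-dims
+// into the per-workgroup work, giving num_wg == C and work_per_wg == N * H * W.
+// In general, bit k of the bitmap being set means dimension (rank - 1 - k) of B
+// is not broadcast.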
+inline std::tuple<int, int, unsigned int> GetBitmapAndWgInfo(const std::vector<size_t>& blens,
+                                                             const std::vector<size_t>& clens)
+{
+    // first_not_one is incorrect if btensor size equal to 1
+    auto first_not_one = std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; });
+    auto d             = std::distance(blens.begin(), first_not_one.base());
+
+    // quick fix
+    int num_wg = first_not_one != blens.rend()
+                     ? static_cast<int>(*first_not_one == 0 ? 1 : *first_not_one)
+                     : 1;
+
+    int work_per_wg = std::accumulate(clens.begin() + d, clens.end(), 1, std::multiplies<int>());
+
+    unsigned int bitmap = 0;
+    // update bitmap for first_not_one
+    bitmap |= (1 << (blens.size() - d));
+
+    for(int i = (d - 2); i >= 0; i--)
+    {
+        if(blens[i] != 1)
+        {
+            bitmap |= (1 << (blens.size() - (i + 1)));
+            num_wg *= blens[i];
+        }
+        else
+        {
+            work_per_wg *= clens[i];
+        }
+    }
+
+    return std::make_tuple(num_wg, work_per_wg, bitmap);
+}
+
+inline bool IsBitmapLeadingOnes(unsigned int bitmap, int n_size, int first_not_one)
+{
+    bool leading_ones = true;
+    for(int i = first_not_one; i >= 0; i--)
+    {
+        bool is_one = (bitmap & (1 << (n_size - 1 - i))) != 0u;
+        leading_ones &= is_one;
+    }
+    return leading_ones;
+}
+
+inline std::tuple<int, int, int, unsigned int, size_t, size_t>
+Get4dParams(const miopen::tensorOp::ProblemDescription& problem, bool is4dLite)
+{
+    const auto& bTensorDesc = problem.GetBTensorDesc();
+    const auto& cTensorDesc = problem.GetCTensorDesc();
+
+    const auto& blens = bTensorDesc.GetLengths();
+    const auto& clens = cTensorDesc.GetLengths();
+
+    auto dims = clens.size();
+
+    // first_not_one is incorrect if btensor size equal to 1
+    auto first_not_one = std::find_if(blens.rbegin(), blens.rend(), [](int i) { return i != 1; });
+    auto d             = std::distance(blens.begin(), first_not_one.base());
+
+    // quick fix
+    int num_wg = first_not_one != blens.rend()
+                     ? static_cast<int>(*first_not_one == 0 ? 1 : *first_not_one)
+                     : 1;
+
+    int work_per_wg = std::accumulate(clens.begin() + d, clens.end(), 1, std::multiplies<int>());
+
+    unsigned int bitmap = 0;
+    // update bitmap for first_not_one
+    bitmap |= (1 << (blens.size() - d));
+
+    for(int i = (d - 2); i >= 0; i--)
+    {
+        if(blens[i] != 1)
+        {
+            bitmap |= (1 << (blens.size() - (i + 1)));
+            num_wg *= blens[i];
+        }
+        else
+        {
+            work_per_wg *= clens[i];
+        }
+    }
+
+    // quick fix for btensor = <1, 1, 1, 1>
+    if(bTensorDesc.GetElementSize() == 1)
+        bitmap = 4;
+
+    int incr_wg = 0;
+    // Forward Convolution Bias specialization
+    // for fwd-bias, bitmap looks like <0, 1, 0, 0>
+    // Is the no. of work-groups and the work for each wg balanced?
+    auto fwd_conv_bias = bitmap == (1 << 2) ? 1 : 0;
+    // This block gives off indexing for 5d tensors, skipping
+    if(fwd_conv_bias == 1 && dims < 5 && num_wg < 640 && work_per_wg > 256 && clens[0] > 0)
+    { // 640 workgroups of size 256 needed to completely fill the GPU
+
+        work_per_wg /= clens[0]; // c_n;
+        num_wg *= clens[0];      // c_n;
+        incr_wg = 1;
+    }
+
+    int num_wg_orig = num_wg;
+    int max_num_wg  = 4096;
+    num_wg          = num_wg > max_num_wg ? max_num_wg : num_wg;
+
+    size_t local_threads = 256;
+
+    bool leading_ones = IsBitmapLeadingOnes(bitmap, clens.size(), static_cast<int>(d - 2));
+
+    if(leading_ones && work_per_wg < 64)
+    {
+        local_threads = 64;
+    }
+
+    // Special case for adding tensors in place
+    size_t global_threads =
+        (static_cast<int>(leading_ones) == 1 && (d - 1) == 3) ? num_wg : num_wg * local_threads;
+    global_threads = (global_threads < local_threads) ? local_threads : global_threads;
+
+    if(is4dLite)
+    {
+        size_t TENS_LEN = cTensorDesc.GetElementSize();
+        size_t RD_BLCK  = (TENS_LEN % 4 == 0) ? 4 : (TENS_LEN % 2 == 0) ?
2 : 1; + + size_t total_work = std::max(TENS_LEN / RD_BLCK, size_t(1)); + size_t grp_sz = (total_work + local_threads - 1) / local_threads; + grp_sz = std::min(size_t(max_num_wg), grp_sz); + size_t glb_sz = local_threads * grp_sz; + + global_threads = glb_sz; + } + + return std::make_tuple( + num_wg_orig, work_per_wg, incr_wg, bitmap, local_threads, global_threads); +} + +} // namespace tensorOp + +} // namespace solver + +} // namespace miopen diff --git a/src/tensor.cpp b/src/tensor.cpp index 2d1b48faf9..a56cffc7b5 100644 --- a/src/tensor.cpp +++ b/src/tensor.cpp @@ -26,8 +26,19 @@ #include #include +#include #include #include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include @@ -865,6 +876,1200 @@ void from_json(const nlohmann::json& j, TensorDescriptor& descriptor) j.at("type").get_to(descriptor.type); } +TensorDescriptor GetFlattenedTensorDescriptor(const TensorDescriptor& desc) +{ + // is packed + if(desc.IsPacked()) + return {desc.GetType(), {desc.GetElementSize()}, {static_cast(1)}}; + + // start flattening tensor + std::vector flat_lengths; + std::vector flat_strides; + + auto non1_length_strides = boost::combine(desc.GetLengths(), desc.GetStrides()) | + boost::adaptors::filtered(f_length_is_not_1_t()); + + auto i = non1_length_strides.begin(); + std::size_t flat_len = boost::get<0>(*i); + auto i_previous = i++; + + // the 0-th dimension full-length doesn't matter + for(; i != non1_length_strides.end(); ++i) + { + std::size_t len = boost::get<0>(*i); + std::size_t stride = boost::get<1>(*i); + std::size_t previous_stride = boost::get<1>(*i_previous); + std::size_t full_len = previous_stride / stride; + + if(len == full_len) + { + flat_len *= len; + } + else + { + flat_lengths.push_back(flat_len); + flat_strides.push_back(previous_stride); + flat_len = len; + } + i_previous = i; + } + flat_lengths.push_back(flat_len); + flat_strides.push_back(boost::get<1>(*i_previous)); + + return {desc.GetType(), flat_lengths, flat_strides}; +} + +struct two_exp_ceiling_t +{ + std::size_t operator()(std::size_t n) const + { + assert(n > 0); + + std::size_t i = 1; + + n--; + while(n != 0) + { + i *= 2; + n /= 2; + } + + return i; + } +}; + +static std::vector get_worker_sizes(const std::vector& data_sizes) +{ + const std::size_t dim = data_sizes.size(); + + std::vector worker_sizes(dim); + + std::transform(data_sizes.begin(), data_sizes.end(), worker_sizes.begin(), two_exp_ceiling_t{}); + + std::size_t wgd = std::accumulate( + worker_sizes.begin(), worker_sizes.end(), std::size_t{1}, std::multiplies()); + + if(wgd > 65536) + { + std::size_t n = wgd / 65536; + + int i = 0; + while(n > 1 && i < dim) + { + std::size_t size_old = worker_sizes[i]; + worker_sizes[i] = (size_old - 1) / n + 1; + n /= size_old / worker_sizes[i]; + ++i; + } + } + + return worker_sizes; +} + +void SetTensor(const Handle& handle, + const TensorDescriptor& yDesc, + Data_t y, + const void* alpha, + const int offset) +{ + if(y == nullptr || alpha == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm); + } + + const TensorDescriptor yDesc_flat = GetFlattenedTensorDescriptor(yDesc); + +#ifndef NDEBUG + if(yDesc.GetNumDims() != yDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("real descriptor: " << yDesc); + MIOPEN_LOG_I2("flat descriptor: " << yDesc_flat); + } +#endif + + const std::size_t yDim_flat = yDesc_flat.GetNumDims(); + + assert(yDim_flat > 0 && yDim_flat <= 5); + + std::string kernel_name = "SubTensorOpWithScalar" + std::to_string(yDim_flat) + "d"; + + const miopenDataType_t 
dataType = yDesc_flat.GetType(); + + std::string network_config = "set " + std::to_string(dataType); + for(auto& len : yDesc_flat.GetLengths()) + { + network_config += " " + std::to_string(len); + } + + auto&& kernels = handle.GetKernels(kernel_name, network_config); + + KernelInvoke kernel; + + if(!kernels.empty()) + { + kernel = kernels.front(); + } + else + { + std::string program_name = "MIOpenSubTensorOpWithScalarKernel.cl"; + + std::vector worker_sizes = get_worker_sizes(yDesc_flat.GetLengths()); + + std::size_t wgd = std::accumulate(worker_sizes.begin(), + worker_sizes.end(), + std::size_t{1}, + std::multiplies()); + + std::size_t wld = 256 < wgd ? 256 : wgd; + std::stringstream ss; + ss << "-DSUBTENSOR_OP_WITH_SCALAR=SUBTENSOR_OP_WITH_SCALAR_SET" + << GetDataTypeKernelParams(dataType); + for(int i = 0; i < yDim_flat; ++i) + { + ss << " -DWORK_LENGTH_" << std::to_string(i) << "=" << std::to_string(worker_sizes[i]); + } + + kernel = handle.AddKernel(kernel_name, + network_config, + program_name, + kernel_name, + {wld, 1, 1}, + {wgd, 1, 1}, + ss.str()); + } + + switch(yDim_flat) + { + case 1: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetLengths()[0])); + }); + + break; + } + case 2: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1])); + }); + + break; + } + case 3: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2])); + }); + + break; + } + case 4: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetStrides()[3]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2]), + static_cast(yDesc_flat.GetLengths()[3])); + }); + + break; + } + case 5: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetStrides()[3]), + static_cast(yDesc_flat.GetStrides()[4]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2]), + static_cast(yDesc_flat.GetLengths()[3]), + static_cast(yDesc_flat.GetLengths()[4])); + }); + + break; + } + default: assert(false); + } +} + +void ScaleTensor(const Handle& handle, + const TensorDescriptor& yDesc, + Data_t y, + const void* alpha, + const int offset) +{ + if(y == nullptr || alpha == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm); + } + + const TensorDescriptor yDesc_flat = GetFlattenedTensorDescriptor(yDesc); + +#ifndef NDEBUG + if(yDesc.GetNumDims() != yDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("real descriptor: " << yDesc); + MIOPEN_LOG_I2("flat descriptor: " << yDesc_flat); + } +#endif + + const std::size_t yDim_flat = 
yDesc_flat.GetNumDims(); + + assert(yDim_flat > 0 && yDim_flat <= 5); + + const miopenDataType_t dataType = yDesc_flat.GetType(); + + if(!(dataType == miopenHalf // + || dataType == miopenFloat // + || dataType == miopenInt32 // + || dataType == miopenDouble)) + { + MIOPEN_THROW(miopenStatusBadParm, "ScaleTensor: unsupported data type."); + } + + std::string kernel_name = "SubTensorOpWithScalar" + std::to_string(yDim_flat) + "d"; + + const std::vector& lens = yDesc_flat.GetLengths(); + + std::string network_config = "scale " + std::to_string(yDesc_flat.GetType()); + for(auto& len : lens) + { + network_config += " " + std::to_string(len); + } + + auto&& kernels = handle.GetKernels(kernel_name, network_config); + + KernelInvoke kernel; + + if(!kernels.empty()) + { + kernel = kernels.front(); + } + else + { + std::string program_name = "MIOpenSubTensorOpWithScalarKernel.cl"; + + std::vector worker_sizes = get_worker_sizes(lens); + + std::size_t wgd = std::accumulate(worker_sizes.begin(), + worker_sizes.end(), + std::size_t{1}, + std::multiplies()); + + std::size_t wld = 256 < wgd ? 256 : wgd; + + std::string parms = "-DSUBTENSOR_OP_WITH_SCALAR=SUBTENSOR_OP_WITH_SCALAR_MULTIPLY" + + GetDataTypeKernelParams(dataType); + for(int i = 0; i < yDim_flat; ++i) + { + parms += " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]); + } + + kernel = handle.AddKernel(kernel_name, + network_config, + program_name, + kernel_name, + {wld, 1, 1}, + {wgd, 1, 1}, + parms); + } + + switch(yDim_flat) + { + case 1: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetLengths()[0])); + }); + + break; + } + case 2: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1])); + }); + + break; + } + case 3: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2])); + }); + + break; + } + case 4: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetStrides()[3]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2]), + static_cast(yDesc_flat.GetLengths()[3])); + }); + + break; + } + case 5: { + visit_float(dataType, [&](auto as_float) { + kernel(y, + *as_float(alpha), + offset, + static_cast(yDesc_flat.GetStrides()[0]), + static_cast(yDesc_flat.GetStrides()[1]), + static_cast(yDesc_flat.GetStrides()[2]), + static_cast(yDesc_flat.GetStrides()[3]), + static_cast(yDesc_flat.GetStrides()[4]), + static_cast(yDesc_flat.GetLengths()[0]), + static_cast(yDesc_flat.GetLengths()[1]), + static_cast(yDesc_flat.GetLengths()[2]), + static_cast(yDesc_flat.GetLengths()[3]), + static_cast(yDesc_flat.GetLengths()[4])); + }); + + break; + } + default: assert(false); + } +} + +void CopyTensor(const Handle& handle, + const TensorDescriptor& srcDesc, + ConstData_t src, + 
const TensorDescriptor& dstDesc, + Data_t dst, + int srcOffset, + int dstOffset, + bool forseAsync) +{ + if(src == nullptr || dst == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm, "Null pointer for tensor."); + } + + if(srcDesc.GetType() != dstDesc.GetType()) + { + MIOPEN_THROW(miopenStatusBadParm, "Tensor types do not match."); + } + + if(srcDesc.GetLengths() != dstDesc.GetLengths()) + { + MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match."); + } + + auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(srcDesc, dstDesc); + const TensorDescriptor& srcDesc_flat = std::get<0>(flat_descriptors); + const TensorDescriptor& dstDesc_flat = std::get<1>(flat_descriptors); + +#ifndef NDEBUG + if(srcDesc.GetNumDims() != srcDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("src real descriptor: " << srcDesc); + MIOPEN_LOG_I2("src flat descriptor: " << srcDesc_flat); + MIOPEN_LOG_I2("dst real descriptor: " << dstDesc); + MIOPEN_LOG_I2("dst flat descriptor: " << dstDesc_flat); + } +#endif + + std::size_t srcDim_flat = srcDesc_flat.GetNumDims(); + + if(srcDim_flat < 1 || srcDim_flat > 5) + { + MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension sizes unsupported."); + } + + if(forseAsync || srcOffset > 0 || dstOffset > 0 || + (!(srcDesc_flat.IsPacked() && dstDesc_flat.IsPacked()))) + { + std::string kernel_name = "SubTensorOpWithSubTensor" + std::to_string(srcDim_flat) + "d"; + + const std::vector& lens = srcDesc_flat.GetLengths(); + + std::string network_config = "copy " + std::to_string(srcDesc_flat.GetType()); + for(auto& len : lens) + { + network_config += " " + std::to_string(len); + } + + auto&& kernels = handle.GetKernels(kernel_name, network_config); + + KernelInvoke kernel; + + if(!kernels.empty()) + { + kernel = kernels.front(); + } + else + { + std::string program_name = "MIOpenSubTensorOpWithSubTensorKernel.cl"; + + std::vector worker_sizes = get_worker_sizes(lens); + + std::size_t wgd = std::accumulate(worker_sizes.begin(), + worker_sizes.end(), + std::size_t{1}, + std::multiplies()); + + std::size_t wld = 256 < wgd ? 
256 : wgd; + + std::string parms = "-DSUBTENSOR_OP_WITH_SUBTENSOR=SUBTENSOR_OP_WITH_SUBTENSOR_COPY" + + GetDataTypeKernelParams(srcDesc_flat.GetType()); + for(std::size_t i = 0; i < srcDim_flat; ++i) + { + parms += + " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]); + } + + kernel = handle.AddKernel(kernel_name, + network_config, + program_name, + kernel_name, + {wld, 1, 1}, + {wgd, 1, 1}, + parms); + } + + switch(srcDim_flat) + { + case 1: { + kernel(src, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetLengths()[0]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0])); + + break; + } + case 2: { + kernel(src, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1])); + + break; + } + case 3: { + kernel(src, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2])); + + break; + } + case 4: { + kernel(src, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetStrides()[3]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + static_cast(srcDesc_flat.GetLengths()[3]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2]), + static_cast(dstDesc_flat.GetStrides()[3])); + + break; + } + case 5: { + kernel(src, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetStrides()[3]), + static_cast(srcDesc_flat.GetStrides()[4]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + static_cast(srcDesc_flat.GetLengths()[3]), + static_cast(srcDesc_flat.GetLengths()[4]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2]), + static_cast(dstDesc_flat.GetStrides()[3]), + static_cast(dstDesc_flat.GetStrides()[4])); + + break; + } + default: assert(false); + } + } + else + { + handle.Copy(src, dst, srcDesc_flat.GetElementSize() * GetTypeSize(srcDesc_flat.GetType())); + } +} + +std::string GetCastTensorBuildOptionFromType(const std::string& buildOption, miopenDataType_t type) +{ + std::string option(buildOption); + switch(type) + { + case miopenInt8: return option += "0"; + case miopenInt32: return option += "1"; + case miopenHalf: return option += "2"; + case miopenFloat: return option += "3"; + case miopenBFloat16: return option += "4"; + case miopenFloat8: + MIOPEN_THROW(miopenStatusBadParm, "miopenFloat8 data type not supported in cast tensor."); + case miopenBFloat8: + MIOPEN_THROW(miopenStatusBadParm, 
"miopenBFloat8 data type not supported in cast tensor."); + case miopenDouble: + // TODO + MIOPEN_THROW(miopenStatusBadParm, "miopenDouble data type not supported in cast tensor."); + case miopenInt64: + MIOPEN_THROW(miopenStatusBadParm, "miopenInt64 data type not supported in cast tensor."); + default: MIOPEN_THROW(miopenStatusBadParm, "Invalid data type in cast tensor desc."); + } +} + +void CastTensor(const Handle& handle, + const void* alpha, + const bool clamping, + const TensorDescriptor& srcDesc, + ConstData_t src, + const TensorDescriptor& dstDesc, + Data_t dst, + int srcOffset, + int dstOffset) +{ + if(src == nullptr || dst == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm, "Null pointer for tensor."); + } + + if(srcDesc.GetLengths() != dstDesc.GetLengths()) + { + MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match."); + } + + auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(srcDesc, dstDesc); + const TensorDescriptor& srcDesc_flat = std::get<0>(flat_descriptors); + const TensorDescriptor& dstDesc_flat = std::get<1>(flat_descriptors); + +#ifndef NDEBUG + if(srcDesc.GetNumDims() != srcDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("src real descriptor: " << srcDesc); + MIOPEN_LOG_I2("src flat descriptor: " << srcDesc_flat); + MIOPEN_LOG_I2("dst real descriptor: " << dstDesc); + MIOPEN_LOG_I2("dst flat descriptor: " << dstDesc_flat); + } +#endif + + std::size_t srcDim_flat = srcDesc_flat.GetNumDims(); + + if(srcDim_flat < 1 || srcDim_flat > 5) + { + MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension sizes unsupported."); + } + + auto miopen_alpha = *(static_cast(alpha)); + + if(srcDesc.GetType() == dstDesc.GetType() && srcOffset == 0 && dstOffset == 0 && + srcDesc_flat.IsPacked() && dstDesc_flat.IsPacked() && float_equal(miopen_alpha, 1.0)) + { + handle.Copy(src, dst, srcDesc_flat.GetElementSize() * GetTypeSize(srcDesc_flat.GetType())); + } + else + { + std::string kernel_name = "SubTensorOpWithCastTensor" + std::to_string(srcDim_flat) + "d"; + + const std::vector& lens = srcDesc_flat.GetLengths(); + + // TODO: make proper network config + std::string network_config = "cast " + std::to_string(srcDesc_flat.GetType()) + + std::to_string(dstDesc_flat.GetType()); + for(auto& len : lens) + { + network_config += " " + std::to_string(len); + } + + auto&& kernels = handle.GetKernels(kernel_name, network_config); + KernelInvoke kernel; + + if(!kernels.empty()) + { + kernel = kernels.front(); + } + else + { + std::string program_name = "MIOpenSubTensorOpWithCastTensorKernel.cl"; + + std::vector worker_sizes = get_worker_sizes(lens); + + std::size_t wgd = std::accumulate(worker_sizes.begin(), + worker_sizes.end(), + std::size_t{1}, + std::multiplies()); + + std::size_t wld = 256 < wgd ? 256 : wgd; + + std::string parms = + GetCastTensorBuildOptionFromType(" -DMIOPEN_SRC_TYPE=", srcDesc_flat.GetType()) + + GetCastTensorBuildOptionFromType(" -DMIOPEN_DST_TYPE=", dstDesc_flat.GetType()); + + for(std::size_t i = 0; i < srcDim_flat; ++i) + { + parms += + " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]); + } + + if(dstDesc_flat.GetType() == miopenBFloat16) + { + parms += " -DMIOPEN_USE_RNE_BFLOAT16=1"; + } + + kernel = handle.AddKernel(kernel_name, + network_config, + program_name, + kernel_name, + {wld, 1, 1}, + {wgd, 1, 1}, + parms); + } + + const int clamping_arg = clamping ? 
1 : 0; + switch(srcDim_flat) + { + case 1: { + kernel(src, + miopen_alpha, + clamping_arg, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetLengths()[0]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0])); + + break; + } + case 2: { + kernel(src, + miopen_alpha, + clamping_arg, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1])); + + break; + } + case 3: { + kernel(src, + miopen_alpha, + clamping_arg, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2])); + + break; + } + case 4: { + kernel(src, + miopen_alpha, + clamping_arg, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetStrides()[3]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + static_cast(srcDesc_flat.GetLengths()[3]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2]), + static_cast(dstDesc_flat.GetStrides()[3])); + + break; + } + case 5: { + kernel(src, + miopen_alpha, + clamping_arg, + srcOffset, + static_cast(srcDesc_flat.GetStrides()[0]), + static_cast(srcDesc_flat.GetStrides()[1]), + static_cast(srcDesc_flat.GetStrides()[2]), + static_cast(srcDesc_flat.GetStrides()[3]), + static_cast(srcDesc_flat.GetStrides()[4]), + static_cast(srcDesc_flat.GetLengths()[0]), + static_cast(srcDesc_flat.GetLengths()[1]), + static_cast(srcDesc_flat.GetLengths()[2]), + static_cast(srcDesc_flat.GetLengths()[3]), + static_cast(srcDesc_flat.GetLengths()[4]), + dst, + dstOffset, + static_cast(dstDesc_flat.GetStrides()[0]), + static_cast(dstDesc_flat.GetStrides()[1]), + static_cast(dstDesc_flat.GetStrides()[2]), + static_cast(dstDesc_flat.GetStrides()[3]), + static_cast(dstDesc_flat.GetStrides()[4])); + + break; + } + default: assert(false); + } + } +} + +void TransformTensor(const Handle& handle, + const void* alpha, + const TensorDescriptor& xDesc, + ConstData_t x, + const void* beta, + const TensorDescriptor& yDesc, + Data_t y, + size_t Xoffset, + size_t Yoffset) +{ + if(x == nullptr || y == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm); + } + + if(alpha == nullptr || beta == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm); + } + + auto x_len = xDesc.GetLengths(); + auto y_len = yDesc.GetLengths(); + + if(x_len.size() != y_len.size()) + { + MIOPEN_THROW("Tensor dimension must be the same"); + } + + if(x_len[0] != y_len[0]) + { + MIOPEN_THROW("Tensor x and y batch sizes do not match"); + } + + const auto is_alpha_one = float_equal(*(static_cast(alpha)), 1); + const auto is_beta_zero = float_equal(*(static_cast(beta)), 0); + + if(xDesc.GetType() == miopenInt8 && yDesc.GetType() == miopenInt8 && x_len.size() >= 3) + { + if(x_len[1] <= y_len[1]) + { + 
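+            // The int8 path only widens the channel dim up to the next multiple
+            // of 4 (vectorized int8x4 layout): y must be 4-aligned and exceed x
+            // by fewer than 4 channels, and y is zero-filled first so the padded
+            // lanes read back as 0.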
if(x_len[1] <= (y_len[1] - 4) || y_len[1] % 4 != 0) + { + MIOPEN_THROW("Invalid y channel size"); + } + + int8_t zero = 0; + SetTensor(handle, yDesc, y, &zero); + } + else if(x_len[1] % 4 != 0) + { + MIOPEN_THROW("Invalid x channel size"); + } + + size_t batch_n = x_len[0]; + + x_len[0] = 1; + y_len[0] = 1; + + miopen::TensorDescriptor x_batch_desc, y_batch_desc; + x_batch_desc = miopen::TensorDescriptor(miopenInt8, x_len); + y_batch_desc = miopen::TensorDescriptor(miopenInt8, y_len); + + size_t x_batch_sz = x_batch_desc.GetElementSize(); + size_t y_batch_sz = y_batch_desc.GetElementSize(); + + for(size_t i = 0; i < batch_n; i++) + { + size_t x_offset = i * x_batch_sz; + size_t y_offset = i * y_batch_sz; + + if(is_alpha_one && is_beta_zero) + { + CopyTensor(handle, + ((x_len[1] <= y_len[1]) ? x_batch_desc : y_batch_desc), + x, + ((x_len[1] <= y_len[1]) ? x_batch_desc : y_batch_desc), + y, + x_offset, + y_offset); + } + else + { + MIOPEN_THROW(miopenStatusNotImplemented, + "y=alpha*x+beta*y is not supported for int8 yet"); + } + } + } + else + { + auto x_y_len = boost::combine(x_len, y_len); + bool same_spatial_len = std::all_of(x_y_len.begin(), x_y_len.end(), [](auto v) { + return boost::get<0>(v) == boost::get<1>(v); + }); + + if(!same_spatial_len) + { + MIOPEN_THROW("Tensor x and y spatial sizes do not match"); + } + + auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(xDesc, yDesc); + const TensorDescriptor& xDesc_flat = std::get<0>(flat_descriptors); + const TensorDescriptor& yDesc_flat = std::get<1>(flat_descriptors); + + if(xDesc.GetNumDims() != xDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("x real descriptor: " << xDesc); + MIOPEN_LOG_I2("x flat descriptor: " << xDesc_flat); + } + + if(yDesc.GetNumDims() != yDesc_flat.GetNumDims()) + { + MIOPEN_LOG_I2("y real descriptor: " << yDesc); + MIOPEN_LOG_I2("y flat descriptor: " << yDesc_flat); + } + + const std::size_t yDim_flat = yDesc_flat.GetNumDims(); + + assert(yDim_flat > 0 && yDim_flat <= 5); + + const miopenDataType_t dataTypex = xDesc_flat.GetType(); + const miopenDataType_t dataTypey = yDesc_flat.GetType(); + + if(!(dataTypex == miopenHalf // + || dataTypex == miopenFloat // + || dataTypex == miopenInt32 // + || dataTypex == miopenBFloat16 // + || dataTypex == miopenDouble)) + { + MIOPEN_THROW("Tensor x is a unsupported data type"); + } + + if(!(dataTypey == miopenHalf // + || dataTypey == miopenFloat // + || dataTypey == miopenInt32 // + || dataTypey == miopenBFloat16 // + || dataTypey == miopenDouble)) + { + MIOPEN_THROW("Tensor y is a unsupported data type"); + } + + if(dataTypex != dataTypey) + { + MIOPEN_THROW("Tensor x and y have different data types"); + } + + std::string kernel_name = "SubTensorOpWithTransform" + std::to_string(yDim_flat) + "d"; + + const std::vector& lens = yDesc_flat.GetLengths(); + + std::string network_config = "transform " + std::to_string(yDesc_flat.GetType()); + for(auto& len : lens) + { + network_config += "x" + std::to_string(len); + } + + if(is_beta_zero) + network_config += "xBETA_IS_ZERO"; + if(is_alpha_one) + network_config += "xALPHA_IS_ONE"; + + auto&& kernels = handle.GetKernels(kernel_name, network_config); + + KernelInvoke kernel; + + if(!kernels.empty()) + { + kernel = kernels.front(); + } + else + { + std::string program_name = "MIOpenSubTensorOpWithTransformKernel.cl"; + + std::vector worker_sizes = get_worker_sizes(lens); + + std::size_t wgd = std::accumulate(worker_sizes.begin(), + worker_sizes.end(), + std::size_t{1}, + std::multiplies()); + + std::size_t wld = 256 < 
wgd ? 256 : wgd; + + std::string parms = + GetDataTypeKernelParams(dataTypey) // + + " -DMIOPEN_BETA_IS_ZERO=" + std::to_string(static_cast<int>(is_beta_zero)) // + + " -DMIOPEN_ALPHA_IS_ONE=" + std::to_string(static_cast<int>(is_alpha_one)); + + for(int i = 0; i < yDim_flat; ++i) + { + parms += + " -DWORK_LENGTH_" + std::to_string(i) + "=" + std::to_string(worker_sizes[i]); + } + + kernel = handle.AddKernel(kernel_name, + network_config, + program_name, + kernel_name, + {wld, 1, 1}, + {wgd, 1, 1}, + parms); + } + + switch(yDim_flat) + { + case 1: { + visit_float(dataTypey, [&](auto as_float) { + kernel(x, + *as_float(alpha), + y, + *as_float(beta), + static_cast<const unsigned>(Xoffset), + static_cast<const unsigned>(Yoffset), + static_cast<const unsigned>(xDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[0])); + }); + + break; + } + case 2: { + visit_float(dataTypey, [&](auto as_float) { + kernel(x, + *as_float(alpha), + y, + *as_float(beta), + static_cast<const unsigned>(Xoffset), + static_cast<const unsigned>(Yoffset), + static_cast<const unsigned>(xDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[0]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[1])); + }); + + break; + } + case 3: { + visit_float(dataTypey, [&](auto as_float) { + kernel(x, + *as_float(alpha), + y, + *as_float(beta), + static_cast<const unsigned>(Xoffset), + static_cast<const unsigned>(Yoffset), + static_cast<const unsigned>(xDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[0]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[1]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[2])); + }); + + break; + } + case 4: { + visit_float(dataTypey, [&](auto as_float) { + kernel(x, + *as_float(alpha), + y, + *as_float(beta), + static_cast<const unsigned>(Xoffset), + static_cast<const unsigned>(Yoffset), + static_cast<const unsigned>(xDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[3]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[3]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[0]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[1]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[2]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[3])); + }); + + break; + } + case 5: { + visit_float(dataTypey, [&](auto as_float) { + kernel(x, + *as_float(alpha), + y, + *as_float(beta), + static_cast<const unsigned>(Xoffset), + static_cast<const unsigned>(Yoffset), + static_cast<const unsigned>(xDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[3]), + static_cast<const unsigned>(xDesc_flat.GetStrides()[4]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[0]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[1]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[2]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[3]), + static_cast<const unsigned>(yDesc_flat.GetStrides()[4]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[0]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[1]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[2]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[3]), + static_cast<const unsigned>(yDesc_flat.GetLengths()[4])); + }); + + break; + } + default: assert(false); + } + } +} + +void OpTensor(const Handle& handle, + miopenTensorOp_t tensorOp, + const void* alpha0, + const TensorDescriptor& aTensorDesc, 
+ ConstData_t ATensor, + const void* alpha1, + const TensorDescriptor& bTensorDesc, + ConstData_t BTensor, + const void* beta, + const TensorDescriptor& cTensorDesc, + Data_t CTensor, + const size_t Aoffset, + const size_t Boffset, + const size_t Coffset, + bool nonStandardSquash) +{ + if(ATensor == nullptr || BTensor == nullptr || CTensor == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm); + } + + if(alpha0 == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm, "Alpha0 value is nullptr"); + } + + if(alpha1 == nullptr) + { + MIOPEN_THROW(miopenStatusBadParm, "Alpha1 value is nullptr"); + } + + const auto problem = tensorOp::ProblemDescription{ + tensorOp, beta, aTensorDesc, bTensorDesc, cTensorDesc, nonStandardSquash}; + + const auto invoke_params = tensorOp::InvokeParams{ + alpha0, ATensor, alpha1, BTensor, beta, CTensor, Aoffset, Boffset, Coffset}; + + const auto algo = AlgorithmName{"TensorOpSolver"}; + const auto solvers = solver::SolverContainer<solver::tensorOp::Op1dTensorGeneric>{} + + solver::SolverContainer<solver::tensorOp::Op2dTensorLite>{} + + solver::SolverContainer<solver::tensorOp::Op2dTensorSquash>{} + + solver::SolverContainer<solver::tensorOp::Op2dTensorGeneric>{} + + solver::SolverContainer<solver::tensorOp::Op3dTensorGeneric>{} + + solver::SolverContainer<solver::tensorOp::OpTensorFwdBias>{} + + solver::SolverContainer<solver::tensorOp::Op4dTensorLite>{} + + solver::SolverContainer<solver::tensorOp::OpTensorLeadingOnes>{} + + solver::SolverContainer<solver::tensorOp::Op4dTensorGeneric>{} + + solver::SolverContainer<solver::tensorOp::Op5dTensorGeneric>{}; + solvers.ExecutePrimitive(handle, problem, algo, invoke_params); +} + } // namespace miopen int miopenGetTensorIndex(miopenTensorDescriptor_t tensorDesc, std::initializer_list<int> indices) diff --git a/src/tensorOp/problem_description.cpp b/src/tensorOp/problem_description.cpp new file mode 100644 index 0000000000..6053e7f1a0 --- /dev/null +++ b/src/tensorOp/problem_description.cpp @@ -0,0 +1,76 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + *******************************************************************************/ + +#include <miopen/tensorOp/problem_description.hpp> +#include <miopen/names.hpp> +#include <miopen/float_equal.hpp> + +namespace miopen { + +namespace tensorOp { + +NetworkConfig ProblemDescription::MakeNetworkConfig() const +{ + std::string ss; + + const auto& alens = aTensorDesc.GetLengths(); + const auto& blens = bTensorDesc.GetLengths(); + + const auto& astrides = aTensorDesc.GetStrides(); + const auto& bstrides = bTensorDesc.GetStrides(); + const auto& cstrides = cTensorDesc.GetStrides(); + + auto printDims = [&ss, dims = alens.size() - 1](const auto& dim) { + for(uint32_t i = 0; i < dims; i++) + { + ss.append(std::to_string(dim[i])); + ss += 'x'; + } + ss += std::to_string(dim.back()); + ss += '-'; + }; + + ss.reserve(1024); + ss.append(std::string_view("TensorOp-")); + ss += std::to_string(aTensorDesc.GetType()); + ss += '-'; + ss += std::to_string(tensorOp); + ss += '-'; + + printDims(alens); + printDims(blens); + printDims(astrides); + printDims(bstrides); + printDims(cstrides); + + ss += (float_equal(beta, 0.0f) ? '1' : '0'); + + return NetworkConfig(std::move(ss)); +} + +} // namespace tensorOp + +} // namespace miopen diff --git a/test/gtest/unit_tensorOp_ProblemDescription.cpp b/test/gtest/unit_tensorOp_ProblemDescription.cpp new file mode 100644 index 0000000000..1b02382881 --- /dev/null +++ b/test/gtest/unit_tensorOp_ProblemDescription.cpp @@ -0,0 +1,200 @@ +/******************************************************************************* + * + * MIT License + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + *******************************************************************************/ + +#include <gtest/gtest.h> +#include <miopen/float_equal.hpp> + +#include "unit_TensorDescriptor.hpp" +#include <miopen/tensorOp/problem_description.hpp> + +namespace { + +struct TensorOpProblemDescriptionTestCase +{ + miopenTensorOp_t tensorOp; + float beta; + miopen::unit_tests::TensorDescriptorParams aTensorDesc; + miopen::unit_tests::TensorDescriptorParams bTensorDesc; + miopen::unit_tests::TensorDescriptorParams cTensorDesc; + bool nonStandardSquash; + bool isOk; + + friend std::ostream& operator<<(std::ostream& os, const TensorOpProblemDescriptionTestCase& tc) + { + std::string op; + switch(tc.tensorOp) + { + case miopenTensorOpAdd: op.append("miopenTensorOpAdd"); break; + case miopenTensorOpMul: op.append("miopenTensorOpMul"); break; + case miopenTensorOpMin: op.append("miopenTensorOpMin"); break; + case miopenTensorOpMax: op.append("miopenTensorOpMax"); break; + + default: break; + } + + os << "(" << tc.aTensorDesc << "), "; + os << "(" << tc.bTensorDesc << "), "; + os << "(" << tc.cTensorDesc << "), \n"; + os << "(" << op << ") - beta("; + os << std::to_string(tc.beta) << ")\n"; + return os; + } +}; + +class TestTensorOpPD : public ::testing::TestWithParam<TensorOpProblemDescriptionTestCase> +{ +public: + static auto GetTestCases() + { + using TestCase = TensorOpProblemDescriptionTestCase; + + return std::vector<TestCase>{ + // clang-format off + // 4D + TestCase{ + miopenTensorOpAdd, // tensorOp + 0.0f, // beta + {miopenHalf, {1, 4, 4, 4}}, // A + {miopenHalf, {1, 4, 4, 4}}, // B + {miopenHalf, {1, 4, 4, 4}}, // C + false, // nonStandardSquash + true // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 0.0f, // beta + {miopenHalf, {4, 4, 4}}, // A + {miopenHalf, {1, 1, 4}}, // B + {miopenHalf, {4, 4, 4}}, // C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 1, 4}}, // A + {miopenHalf, {1, 1, 4}}, // B + {miopenHalf, {4, 4, 4}}, // C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 4, 4}}, // A + {miopenHalf, {1, 1, 4}}, // B + {miopenFloat, {4, 4, 4}}, // C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 4, 4, 4, 4, 4}},// A + {miopenHalf, {1, 1, 4}}, // B + {miopenHalf, {4, 4, 4, 4, 4, 4}},// C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 4, 4}}, // A + {miopenHalf, {1, 4}}, // B + {miopenHalf, {4, 4, 4}}, // C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 4, 4}}, // A + {miopenHalf, {1, 1, 5}}, // B + {miopenHalf, {4, 4, 4}}, // C + false, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {4, 4, 4, 4}}, // A + {miopenHalf, {1, 1, 4, 4}}, // B + {miopenHalf, {4, 4, 4, 4}}, // C + true, // nonStandardSquash + false // isOk + }, + TestCase{ + miopenTensorOpAdd, // tensorOp + 1.0f, // beta + {miopenHalf, {1, 4, 2}}, // A + {miopenHalf, {1, 1, 4}}, // B + {miopenHalf, {1, 4, 2}}, // C + true, // nonStandardSquash + false // isOk + } + // clang-format on + }; + } + + void RunTest() + { + const auto p = GetParam(); + + if(p.isOk) + { + const auto pd = + miopen::tensorOp::ProblemDescription{p.tensorOp, + static_cast<const void*>(&p.beta), + p.aTensorDesc.GetTensorDescriptor(), + p.bTensorDesc.GetTensorDescriptor(), + p.cTensorDesc.GetTensorDescriptor(), 
+ p.nonStandardSquash}; + ASSERT_EQ(pd.GetBeta(), p.beta); + } + else + { + ASSERT_ANY_THROW({ + const auto pd = miopen::tensorOp::ProblemDescription( + p.tensorOp, + miopen::float_equal(p.beta, 0.0) ? nullptr : static_cast<const void*>(&p.beta), + p.aTensorDesc.GetTensorDescriptor(), + p.bTensorDesc.GetTensorDescriptor(), + p.cTensorDesc.GetTensorDescriptor(), + p.nonStandardSquash); + }); + } + } +}; + +} // namespace + +using CPU_TensorOpProblemDescription_NONE = TestTensorOpPD; + +TEST_P(CPU_TensorOpProblemDescription_NONE, TensorOpProblemDescription) { this->RunTest(); }; + +INSTANTIATE_TEST_SUITE_P(Full, + CPU_TensorOpProblemDescription_NONE, + testing::ValuesIn(TestTensorOpPD::GetTestCases()));
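
Reviewer note: below is a minimal sketch (not part of the diff) of how the new tensorOp::ProblemDescription path can be exercised in isolation. It assumes only what is visible in this change: the (type, lengths) TensorDescriptor construction used by the tests, the ProblemDescription constructor arguments from OpTensor(), and MakeNetworkConfig()/ToString(); the helper name sketch_network_config is hypothetical.

#include <iostream>
#include <miopen/tensorOp/problem_description.hpp>

// Hypothetical illustration: build the same ProblemDescription that OpTensor()
// builds internally and print the NetworkConfig used as the kernel-cache key.
void sketch_network_config()
{
    const float beta = 0.0f;
    const miopen::TensorDescriptor a{miopenHalf, {1, 4, 4, 4}};
    const miopen::TensorDescriptor b{miopenHalf, {1, 4, 4, 4}};
    const miopen::TensorDescriptor c{miopenHalf, {1, 4, 4, 4}};

    // Matches the first (isOk == true) test case above; mismatched ranks,
    // lengths, or data types would make this constructor throw.
    const auto problem = miopen::tensorOp::ProblemDescription{
        miopenTensorOpAdd, &beta, a, b, c, /*nonStandardSquash=*/false};

    // Problems differing in op, data type, lengths, strides, or beta == 0
    // must yield distinct strings, since this keys the kernel cache.
    std::cout << problem.MakeNetworkConfig().ToString() << std::endl;
}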